refresh patches

Author: VIKING
Date: 2025-05-29 12:50:54 +08:00
Parent: c8772bddae
Commit: 2eed7fed78
10 changed files with 1261 additions and 418 deletions


@ -0,0 +1,170 @@
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -2428,13 +2428,13 @@ err_sc_free:
}
EXPORT_SYMBOL(ath11k_core_alloc);
-int ath11k_init(void)
+static int ath11k_init(void)
{
return ath11k_debugfs_create();
}
module_init(ath11k_init);
-void ath11k_exit(void)
+static void ath11k_exit(void)
{
ath11k_debugfs_destroy();
}
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1823,7 +1823,7 @@ struct htt_ppdu_stats_info *ath11k_dp_ht
return ppdu_info;
}
-void ath11k_copy_to_delay_stats(struct ath11k_peer *peer,
+static void ath11k_copy_to_delay_stats(struct ath11k_peer *peer,
struct htt_ppdu_user_stats* usr_stats)
{
peer->ppdu_stats_delayba.reserved0 = usr_stats->rate.reserved0;
@@ -1838,7 +1838,7 @@ void ath11k_copy_to_delay_stats(struct a
peer->delayba_flag = true;
}
-void ath11k_copy_to_bar(struct ath11k_peer *peer,
+static void ath11k_copy_to_bar(struct ath11k_peer *peer,
struct htt_ppdu_user_stats* usr_stats)
{
usr_stats->rate.reserved0 = peer->ppdu_stats_delayba.reserved0;
@@ -6305,7 +6305,7 @@ static void ath11k_dp_rx_mon_dest_proces
}
}
-void ath11k_dp_rx_mon_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info)
+static void ath11k_dp_rx_mon_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct hal_rx_user_status *rx_user_status;
u32 num_users;
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -719,7 +719,7 @@ u8 ath11k_mac_get_target_pdev_id(struct
return ar->ab->target_pdev_ids[0].pdev_id;
}
-struct ath11k_vif *ath11k_mac_get_ap_arvif_by_addr(struct ath11k_base *ab,
+static struct ath11k_vif *ath11k_mac_get_ap_arvif_by_addr(struct ath11k_base *ab,
const u8 *addr)
{
int i;
--- a/drivers/net/wireless/ath/ath11k/nss.c
+++ b/drivers/net/wireless/ath/ath11k/nss.c
@@ -313,7 +313,7 @@ static void ath11k_nss_peer_mem_free(str
/*-----------------------------Events/Callbacks------------------------------*/
-void ath11k_nss_wifili_event_receive(void *context, struct nss_wifili_msg *msg)
+static void ath11k_nss_wifili_event_receive(void *context, struct nss_wifili_msg *msg)
{
u32 msg_type = msg->cm.type;
enum nss_cmn_response response = msg->cm.response;
@@ -424,7 +424,7 @@ void ath11k_nss_wifili_event_receive(voi
}
}
-void ath11k_nss_process_mic_error(struct ath11k_base *ab, struct sk_buff *skb)
+static void ath11k_nss_process_mic_error(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k_vif *arvif;
struct ath11k *ar;
@@ -531,7 +531,7 @@ static inline void ath11k_nss_wifili_ext
ath11k_nss_wifili_ext_callback_fn(ab, skb, napi);
}
-void ath11k_nss_vdev_cfg_cb(void *app_data, struct nss_cmn_msg *msg)
+static void ath11k_nss_vdev_cfg_cb(void *app_data, struct nss_cmn_msg *msg)
{
struct ath11k_vif *arvif = (struct ath11k_vif *)app_data;
@@ -1075,7 +1075,7 @@ ath11k_nss_ext_vdev_data_receive(struct
#ifdef CPTCFG_ATH11K_NSS_MESH_SUPPORT
/*------Mesh offload------*/
-void ath11k_nss_mesh_wifili_event_receive(void *app_data,
+static void ath11k_nss_mesh_wifili_event_receive(void *app_data,
struct nss_cmn_msg *cmn_msg)
{
struct nss_wifi_mesh_msg *msg = (struct nss_wifi_mesh_msg *)cmn_msg;
@@ -1936,7 +1936,7 @@ int ath11k_nss_dump_mpp_request(struct a
return 0;
}
-void ath11k_nss_mpp_timer_cb(struct timer_list *timer)
+static void ath11k_nss_mpp_timer_cb(struct timer_list *timer)
{
nss_wifi_mesh_msg_callback_t msg_cb;
struct arvif_nss *nss = from_timer(nss, timer,mpp_expiry_timer);
@@ -2607,7 +2607,7 @@ static void ath11k_nss_mesh_vdev_free(st
}
#endif
-void ath11k_nss_vdev_free(struct ath11k_vif *arvif)
+static void ath11k_nss_vdev_free(struct ath11k_vif *arvif)
{
struct ath11k_base *ab = arvif->ar->ab;
nss_tx_status_t status;
@@ -2639,7 +2639,7 @@ void ath11k_nss_vdev_free(struct ath11k_
}
#ifdef CPTCFG_ATH11K_NSS_MESH_SUPPORT
-struct arvif_nss *ath11k_nss_find_arvif_by_if_num(int if_num)
+static struct arvif_nss *ath11k_nss_find_arvif_by_if_num(int if_num)
{
struct arvif_nss *nss;
@@ -4624,7 +4624,7 @@ void ath11k_nss_peer_stats_enable(struct
ATH11K_NSS_STATS_ENABLE);
}
-int ath11k_nss_pdev_init(struct ath11k_base *ab, int radio_id)
+static int ath11k_nss_pdev_init(struct ath11k_base *ab, int radio_id)
{
struct ath11k *ar = ab->pdevs[radio_id].ar;
struct nss_wifili_pdev_init_msg *pdevmsg;
@@ -4757,7 +4757,7 @@ dealloc:
/* TODO : Check if start, reset and stop messages can be done using single function as
* body is similar, having it now for clarity */
-int ath11k_nss_start(struct ath11k_base *ab)
+static int ath11k_nss_start(struct ath11k_base *ab)
{
struct nss_wifili_msg *wlmsg = NULL;
nss_wifili_msg_callback_t msg_cb;
@@ -4922,7 +4922,7 @@ free:
return ret;
}
-int ath11k_nss_pdev_deinit(struct ath11k_base *ab, int radio_id)
+static int ath11k_nss_pdev_deinit(struct ath11k_base *ab, int radio_id)
{
struct ath11k *ar = ab->pdevs[radio_id].ar;
struct nss_wifili_pdev_deinit_msg *deinit;
--- a/drivers/net/wireless/ath/ath11k/nss.h
+++ b/drivers/net/wireless/ath/ath11k/nss.h
@@ -179,7 +179,7 @@ struct ath11k_nss_mpath_entry {
struct list_head list;
u32 num_entries;
#ifdef CPTCFG_ATH11K_NSS_MESH_SUPPORT
- struct nss_wifi_mesh_path_dump_entry mpath[0];
+ struct nss_wifi_mesh_path_dump_entry mpath[];
#endif
};
@@ -187,7 +187,7 @@ struct ath11k_nss_mpp_entry {
struct list_head list;
u32 num_entries;
#ifdef CPTCFG_ATH11K_NSS_MESH_SUPPORT
- struct nss_wifi_mesh_proxy_path_dump_entry mpp[0];
+ struct nss_wifi_mesh_proxy_path_dump_entry mpp[];
#endif
};


@ -0,0 +1,44 @@
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1250,7 +1250,7 @@ void ieee80211_del_virtual_monitor(struc
/* This callback is registered for nss redirect to receive packet exceptioned from nss in Rx path.
* When packet does not match any of the ecm rules is redirected back here.
*/
-void receive_from_nss(struct net_device *dev, struct sk_buff *sk_buff, struct napi_struct *napi)
+static void receive_from_nss(struct net_device *dev, struct sk_buff *sk_buff, struct napi_struct *napi)
{
struct net_device *netdev;
struct sk_buff *skb;
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -4562,7 +4562,7 @@ static void ieee80211_mlo_multicast_tx(s
}
#ifdef CPTCFG_MAC80211_NSS_SUPPORT
-void ieee80211_xmit_nss_fixup(struct sk_buff *skb,
+static void ieee80211_xmit_nss_fixup(struct sk_buff *skb,
struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -4807,7 +4807,7 @@ out_free:
kfree_skb(skb);
}
-void ieee80211_8023_xmit_ap(struct ieee80211_sub_if_data *sdata,
+static void ieee80211_8023_xmit_ap(struct ieee80211_sub_if_data *sdata,
struct net_device *dev, struct sta_info *sta,
struct ieee80211_key *key, struct sk_buff *skb,
u32 info_flags, u32 ctrl_flags, u64 *cookie)
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2522,8 +2522,8 @@ static int copy_mesh_setup(struct ieee80
ifmsh->security |= IEEE80211_MESH_SEC_SECURED;
/* mcast rate setting in Mesh Node */
- memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate,
- sizeof(setup->mcast_rate));
+ for (i = 0; i < NUM_NL80211_BANDS; i++)
+ sdata->vif.bss_conf.mcast_rate[i] = setup->mcast_rate[i];
sdata->vif.bss_conf.basic_rates = setup->basic_rates;
sdata->vif.bss_conf.beacon_int = setup->beacon_interval;


@ -1,346 +1,346 @@
--- a/nat46/modules/nat46-core.c
+++ b/nat46/modules/nat46-core.c
@@ -209,7 +209,7 @@ static inline void nat46_swap(nat46_xlat
/*
* Sort rule pairs based on prefix length.
*/
-void nat46_sort_rule_array(nat46_instance_t *nat46) {
+static void nat46_sort_rule_array(nat46_instance_t *nat46) {
int i, j;
int nelem = nat46->npairs;
nat46_xlate_rulepair_t *array = NULL;
@@ -256,7 +256,7 @@ void nat46_sort_rule_array(nat46_instanc
}
}
-bool nat46_validate_RFC6052_style(nat46_instance_t *nat46, nat46_xlate_rule_t rule)
+static bool nat46_validate_RFC6052_style(nat46_instance_t *nat46, nat46_xlate_rule_t rule)
{
if (rule.style == NAT46_XLATE_RFC6052) {
if (!((rule.v6_pref_len == 32) || (rule.v6_pref_len == 40) ||
@@ -269,7 +269,7 @@ bool nat46_validate_RFC6052_style(nat46_
return true;
}
-bool nat46_validate_MAP_style(nat46_instance_t *nat46, nat46_xlate_rule_t rule)
+static bool nat46_validate_MAP_style(nat46_instance_t *nat46, nat46_xlate_rule_t rule)
{
int psid_len;
if (rule.style == NAT46_XLATE_MAP) {
@@ -296,7 +296,7 @@ bool nat46_validate_MAP_style(nat46_inst
return true;
}
-int nat46_validate_ipair_config(nat46_instance_t *nat46, nat46_xlate_rulepair_t *apair)
+static int nat46_validate_ipair_config(nat46_instance_t *nat46, nat46_xlate_rulepair_t *apair)
{
if (!nat46_validate_RFC6052_style(nat46, apair->local)) {
return -1;
@@ -999,7 +999,7 @@ static __sum16 csum_ipv6_unmagic(nat46_i
}
/* Update UDP with incremental checksum */
-__sum16 csum_ipv6_udp_remagic(struct ipv6hdr *ip6hdr, u32 csum) {
+static __sum16 csum_ipv6_udp_remagic(struct ipv6hdr *ip6hdr, u32 csum) {
uint32_t sum;
sum = csum_partial(ip6hdr->saddr.s6_addr16, 2 * sizeof(ip6hdr->saddr), ~csum);
sum = ((sum >> 16) & 0xffff) + (sum & 0xffff);
@@ -1008,7 +1008,7 @@ __sum16 csum_ipv6_udp_remagic(struct ipv
}
/* Undo the IPv4 pseudoheader inclusion into the checksum */
-__sum16 csum_ipv4_unmagic(__be32 saddr, __be32 daddr,
+static __sum16 csum_ipv4_unmagic(__be32 saddr, __be32 daddr,
u32 csum) {
u32 s;
uint32_t addr_csum;
@@ -1049,7 +1049,7 @@ static void *add_offset(void *ptr, u16 o
return (((char *)ptr)+offset);
}
-void fill_v6hdr_from_v4hdr(struct iphdr *iph, struct ipv6hdr *ip6h) {
+static void fill_v6hdr_from_v4hdr(struct iphdr *iph, struct ipv6hdr *ip6h) {
*((__be16 *)ip6h) = htons((6 << 12) | (iph->tos << 4)); /* Version, Traffic Class */
memset(&(ip6h->flow_lbl), 0, sizeof(ip6h->flow_lbl)); /* Flowlabel */
ip6h->payload_len = htons(ntohs(iph->tot_len) - IPV4HDRSIZE);
@@ -1104,7 +1104,7 @@ static u16 rechecksum16(void *p, int cou
return csum;
}
-nat46_xlate_rulepair_t *nat46_lpm(nat46_instance_t *nat46, nat46_rule_type_t type, void *paddr) {
+static nat46_xlate_rulepair_t *nat46_lpm(nat46_instance_t *nat46, nat46_rule_type_t type, void *paddr) {
int ipair = 0;
nat46_xlate_rulepair_t *apair = NULL;
uint32_t mask = 0;
@@ -1567,6 +1567,135 @@ static int ip6_input_not_interested(nat4
return 0;
}
+/*
+ * The sport & dport in inner header will be dport & sport of the outer header, respectively.
+ * Hence, dest. and source ips of inner header will be found in local & remote rules, respectively.
+ */
+static int pairs_xlate_v4_to_v6_inner(nat46_instance_t *nat46, struct iphdr *iph,
+ uint16_t sport, uint16_t dport, void *v6saddr, void *v6daddr) {
+ int ipair = 0;
+ nat46_xlate_rulepair_t *apair = NULL;
+ int xlate_src = -1;
+ int xlate_dst = -1;
+
+ apair = nat46_lpm(nat46, NAT46_IPV4_REMOTE, &iph->saddr);
+ if (!apair) {
+ return 0;
+ }
+
+ if (xlate_v4_to_v6(nat46, &apair->local, &iph->daddr, v6daddr, &dport)) {
+ nat46debug(3, "Dst addr %pI4 to %pI6 \n", &iph->daddr, v6daddr);
+ xlate_dst = ipair;
+ }
+ if (xlate_v4_to_v6(nat46, &apair->remote, &iph->saddr, v6saddr, &sport)) {
+ nat46debug(3, "Src addr %pI4 to %pI6 \n", &iph->saddr, v6saddr);
+ xlate_src = ipair;
+ }
+ if ((xlate_src >= 0) && (xlate_dst >= 0)) {
+ /* we did manage to translate it */
+ nat46debug(5, "[nat46] Inner header xlate results: src %d dst %d", xlate_src, xlate_dst);
+ return 1;
+ } else {
+ nat46debug(1, "[nat46] Could not find a translation pair v4->v6");
+ }
+
+ return 0;
+}
+
+static uint16_t xlate_pkt_in_err_v4_to_v6(nat46_instance_t *nat46, struct iphdr *iph,
+ struct sk_buff *old_skb, uint16_t *sport, uint16_t *dport) {
+ struct ipv6hdr ip6h;
+ char v6saddr[16], v6daddr[16];
+ uint16_t temp_port = 0;
+ int ret = 0;
+ struct icmphdr *icmph = (struct icmphdr *)(iph + 1);
+ struct iphdr *iiph = (struct iphdr *)(icmph + 1);
+
+ switch (iiph->protocol) {
+ case IPPROTO_TCP: {
+ struct tcphdr *th = (struct tcphdr *)(iiph + 1);
+ *sport = th->source;
+ *dport = th->dest;
+ iiph->protocol = NEXTHDR_TCP;
+ break;
+ }
+ case IPPROTO_UDP: {
+ struct udphdr *udp = (struct udphdr *)(iiph + 1);
+ *sport = udp->source;
+ *dport = udp->dest;
+ iiph->protocol = NEXTHDR_UDP;
+ break;
+ }
+ case IPPROTO_ICMP: {
+ struct icmphdr *icmph = (struct icmphdr *)(iiph + 1);
+ iiph->protocol = NEXTHDR_ICMP;
+ switch (icmph->type) {
+ case ICMP_ECHO:
+ icmph->type = ICMPV6_ECHO_REQUEST;
+ *sport = *dport = icmph->un.echo.id;
+ break;
+ case ICMP_ECHOREPLY:
+ icmph->type = ICMPV6_ECHO_REPLY;
+ *sport = *dport = icmph->un.echo.id;
+ break;
+ default:
+ nat46debug(3, "ICMP Error message can't be inside another ICMP Error messgae.");
+ *sport = *dport = 0;
+ return 0;
+ }
+ break;
+ }
+ default:
+ nat46debug(3, "[ICMPv4] Next header: %u. Only TCP, UDP, and ICMP are supported.", iiph->protocol);
+ *sport = *dport = 0;
+ return 0;
+ }
+
+ nat46debug(3, "Retrieved from pkt in error: dest port %d, and src port %d.", ntohs(*dport), ntohs(*sport));
+
+ if (!pairs_xlate_v4_to_v6_inner(nat46, iiph, *sport, *dport, v6saddr, v6daddr)) {
+ nat46debug(0, "[nat46] Could not translate inner header v4->v6");
+ *sport = *dport = 0;
+ return 0;
+ }
+
+ fill_v6hdr_from_v4hdr (iiph, &ip6h);
+ memcpy(&ip6h.saddr, v6saddr, sizeof(ip6h.saddr));
+ memcpy(&ip6h.daddr, v6daddr, sizeof(ip6h.daddr));
+
+ if (skb_tailroom(old_skb) >= IPV6V4HDRDELTA){
+ skb_put(old_skb, IPV6V4HDRDELTA);
+ /* ErrorICMP size is less than 576, the inner ipv4 packet will be trimmed */
+ memmove(((char *)iiph + IPV6HDRSIZE), (iiph + 1),
+ ntohs(iph->tot_len) - 2 * IPV4HDRSIZE - sizeof(struct icmphdr));
+ memcpy(iiph, &ip6h, IPV6HDRSIZE);
+ }
+ else {
+ ret = pskb_expand_head(old_skb, 0, IPV6V4HDRDELTA, GFP_ATOMIC);
+ if (unlikely(ret)) {
+ nat46debug(0, "[nat46] Could not copy v4 skb");
+ *sport = *dport = 0;
+ return 0;
+ }
+
+ skb_put(old_skb, IPV6V4HDRDELTA);
+ iiph = (struct iphdr *)(icmp_hdr(old_skb) + 1);
+ /* ErrorICMP size is less than 576, the inner ipv4 packet will be trimmed */
+ memmove(((char *)iiph + IPV6HDRSIZE), (iiph + 1),
+ ntohs(iph->tot_len) - 2 * IPV4HDRSIZE - sizeof(struct icmphdr));
+ memcpy(iiph, &ip6h, IPV6HDRSIZE);
+ }
+ iph->tot_len = htons(ntohs(iph->tot_len) + IPV6V4HDRDELTA);
+
+ /* Swapping Ports for outer header */
+ /* Another work-around till LPM is not present. */
+ temp_port = *sport;
+ *sport = *dport;
+ *dport = temp_port;
+
+ return 1;
+}
+
static uint16_t nat46_fixup_icmp_time_exceeded(nat46_instance_t *nat46, struct iphdr *iph,
struct icmphdr *icmph, struct sk_buff *old_skb, uint16_t *sport, uint16_t *dport) {
/*
@@ -2187,135 +2316,6 @@ int xlate_4_to_6(struct net_device *dev,
}
EXPORT_SYMBOL(xlate_4_to_6);
-/*
- * The sport & dport in inner header will be dport & sport of the outer header, respectively.
- * Hence, dest. and source ips of inner header will be found in local & remote rules, respectively.
- */
-int pairs_xlate_v4_to_v6_inner(nat46_instance_t *nat46, struct iphdr *iph,
- uint16_t sport, uint16_t dport, void *v6saddr, void *v6daddr) {
- int ipair = 0;
- nat46_xlate_rulepair_t *apair = NULL;
- int xlate_src = -1;
- int xlate_dst = -1;
-
- apair = nat46_lpm(nat46, NAT46_IPV4_REMOTE, &iph->saddr);
- if (!apair) {
- return 0;
- }
-
- if (xlate_v4_to_v6(nat46, &apair->local, &iph->daddr, v6daddr, &dport)) {
- nat46debug(3, "Dst addr %pI4 to %pI6 \n", &iph->daddr, v6daddr);
- xlate_dst = ipair;
- }
- if (xlate_v4_to_v6(nat46, &apair->remote, &iph->saddr, v6saddr, &sport)) {
- nat46debug(3, "Src addr %pI4 to %pI6 \n", &iph->saddr, v6saddr);
- xlate_src = ipair;
- }
- if ((xlate_src >= 0) && (xlate_dst >= 0)) {
- /* we did manage to translate it */
- nat46debug(5, "[nat46] Inner header xlate results: src %d dst %d", xlate_src, xlate_dst);
- return 1;
- } else {
- nat46debug(1, "[nat46] Could not find a translation pair v4->v6");
- }
-
- return 0;
-}
-
-static uint16_t xlate_pkt_in_err_v4_to_v6(nat46_instance_t *nat46, struct iphdr *iph,
- struct sk_buff *old_skb, uint16_t *sport, uint16_t *dport) {
- struct ipv6hdr ip6h;
- char v6saddr[16], v6daddr[16];
- uint16_t temp_port = 0;
- int ret = 0;
- struct icmphdr *icmph = (struct icmphdr *)(iph + 1);
- struct iphdr *iiph = (struct iphdr *)(icmph + 1);
-
- switch (iiph->protocol) {
- case IPPROTO_TCP: {
- struct tcphdr *th = (struct tcphdr *)(iiph + 1);
- *sport = th->source;
- *dport = th->dest;
- iiph->protocol = NEXTHDR_TCP;
- break;
- }
- case IPPROTO_UDP: {
- struct udphdr *udp = (struct udphdr *)(iiph + 1);
- *sport = udp->source;
- *dport = udp->dest;
- iiph->protocol = NEXTHDR_UDP;
- break;
- }
- case IPPROTO_ICMP: {
- struct icmphdr *icmph = (struct icmphdr *)(iiph + 1);
- iiph->protocol = NEXTHDR_ICMP;
- switch (icmph->type) {
- case ICMP_ECHO:
- icmph->type = ICMPV6_ECHO_REQUEST;
- *sport = *dport = icmph->un.echo.id;
- break;
- case ICMP_ECHOREPLY:
- icmph->type = ICMPV6_ECHO_REPLY;
- *sport = *dport = icmph->un.echo.id;
- break;
- default:
- nat46debug(3, "ICMP Error message can't be inside another ICMP Error messgae.");
- *sport = *dport = 0;
- return 0;
- }
- break;
- }
- default:
- nat46debug(3, "[ICMPv4] Next header: %u. Only TCP, UDP, and ICMP are supported.", iiph->protocol);
- *sport = *dport = 0;
- return 0;
- }
-
- nat46debug(3, "Retrieved from pkt in error: dest port %d, and src port %d.", ntohs(*dport), ntohs(*sport));
-
- if (!pairs_xlate_v4_to_v6_inner(nat46, iiph, *sport, *dport, v6saddr, v6daddr)) {
- nat46debug(0, "[nat46] Could not translate inner header v4->v6");
- *sport = *dport = 0;
- return 0;
- }
-
- fill_v6hdr_from_v4hdr (iiph, &ip6h);
- memcpy(&ip6h.saddr, v6saddr, sizeof(ip6h.saddr));
- memcpy(&ip6h.daddr, v6daddr, sizeof(ip6h.daddr));
-
- if (skb_tailroom(old_skb) >= IPV6V4HDRDELTA){
- skb_put(old_skb, IPV6V4HDRDELTA);
- /* ErrorICMP size is less than 576, the inner ipv4 packet will be trimmed */
- memmove(((char *)iiph + IPV6HDRSIZE), (iiph + 1),
- ntohs(iph->tot_len) - 2 * IPV4HDRSIZE - sizeof(struct icmphdr));
- memcpy(iiph, &ip6h, IPV6HDRSIZE);
- }
- else {
- ret = pskb_expand_head(old_skb, 0, IPV6V4HDRDELTA, GFP_ATOMIC);
- if (unlikely(ret)) {
- nat46debug(0, "[nat46] Could not copy v4 skb");
- *sport = *dport = 0;
- return 0;
- }
-
- skb_put(old_skb, IPV6V4HDRDELTA);
- iiph = (struct iphdr *)(icmp_hdr(old_skb) + 1);
- /* ErrorICMP size is less than 576, the inner ipv4 packet will be trimmed */
- memmove(((char *)iiph + IPV6HDRSIZE), (iiph + 1),
- ntohs(iph->tot_len) - 2 * IPV4HDRSIZE - sizeof(struct icmphdr));
- memcpy(iiph, &ip6h, IPV6HDRSIZE);
- }
- iph->tot_len = htons(ntohs(iph->tot_len) + IPV6V4HDRDELTA);
-
- /* Swapping Ports for outer header */
- /* Another work-around till LPM is not present. */
- temp_port = *sport;
- *sport = *dport;
- *dport = temp_port;
-
- return 1;
-}
-
/* Return the port number from CE's port set */
static uint16_t nat46_get_ce_port(nat46_xlate_rulepair_t *pair, uint16_t sport)
{


@ -81,8 +81,8 @@
__be16 vlan_proto, u16 vlan_id)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1690,6 +1690,36 @@ enum netdev_priv_flags {
IFF_NO_IP_ALIGN = BIT_ULL(32),
@@ -613,6 +613,36 @@ enum netdev_queue_state_t {
__QUEUE_STATE_FROZEN,
};
+/**
@ -115,9 +115,9 @@
+ IFF_EXT_ETH_L2TPV3 = 1<<9,
+};
+
/* Specifies the type of the struct net_device::ml_priv pointer */
enum netdev_ml_priv_type {
ML_PRIV_NONE,
#define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN)
@@ -2026,6 +2056,7 @@ struct net_device {
unsigned long priv_flags:32;
unsigned long lltx:1;
@ -985,11 +985,11 @@
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -994,6 +994,7 @@ static int tun_net_init(struct net_devic
dev->vlan_features = dev->features &
@@ -995,6 +995,7 @@ static int tun_net_init(struct net_devic
~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
+ dev->priv_flags_ext |= IFF_EXT_TUN_TAP;
dev->lltx = true;
+ dev->priv_flags_ext |= IFF_EXT_TUN_TAP;
tun->flags = (tun->flags & ~TUN_FEATURES) |
(ifr->ifr_flags & TUN_FEATURES);


@ -205,7 +205,21 @@
{
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -288,6 +288,21 @@ const char *bond_mode_name(int mode)
@@ -268,6 +268,13 @@ static bool bond_time_in_interval(struct
int mod);
static void bond_netdev_notify_work(struct work_struct *work);
+/* QCA NSS ECM bonding support */
+struct net_device *bond_get_tx_dev(struct sk_buff *skb, uint8_t *src_mac,
+ u8 *dst_mac, void *src,
+ void *dst, u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr);
+
/*---------------------------- General routines -----------------------------*/
const char *bond_mode_name(int mode)
@@ -288,6 +295,21 @@ const char *bond_mode_name(int mode)
return names[mode];
}
@ -227,7 +241,7 @@
/**
* bond_dev_queue_xmit - Prepare skb for xmit.
*
@@ -1306,6 +1321,23 @@ void bond_change_active_slave(struct bon
@@ -1306,6 +1328,23 @@ void bond_change_active_slave(struct bon
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
@ -251,7 +265,7 @@
if (bond_is_lb(bond))
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
@@ -1950,6 +1982,7 @@ int bond_enslave(struct net_device *bond
@@ -1950,6 +1989,7 @@ int bond_enslave(struct net_device *bond
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL, *prev_slave;
struct sockaddr_storage ss;
@ -259,7 +273,7 @@
int link_reporting;
int res = 0, i;
@@ -2399,6 +2432,15 @@ int bond_enslave(struct net_device *bond
@@ -2399,6 +2439,15 @@ int bond_enslave(struct net_device *bond
bond_is_active_slave(new_slave) ? "an active" : "a backup",
new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
@ -275,7 +289,7 @@
/* enslave is successful */
bond_queue_slave_event(new_slave);
return 0;
@@ -2464,6 +2506,15 @@ err_undo_flags:
@@ -2464,6 +2513,15 @@ err_undo_flags:
}
}
@ -291,7 +305,7 @@
return res;
}
@@ -2485,6 +2536,7 @@ static int __bond_release_one(struct net
@@ -2485,6 +2543,7 @@ static int __bond_release_one(struct net
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
struct sockaddr_storage ss;
@ -299,7 +313,7 @@
int old_flags = bond_dev->flags;
netdev_features_t old_features = bond_dev->features;
@@ -2507,6 +2559,15 @@ static int __bond_release_one(struct net
@@ -2507,6 +2566,15 @@ static int __bond_release_one(struct net
bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
@ -315,7 +329,7 @@
bond_sysfs_slave_del(slave);
/* recompute stats just before removing the slave */
@@ -2837,6 +2898,8 @@ static void bond_miimon_commit(struct bo
@@ -2837,6 +2905,8 @@ static void bond_miimon_commit(struct bo
struct slave *slave, *primary, *active;
bool do_failover = false;
struct list_head *iter;
@ -324,7 +338,7 @@
ASSERT_RTNL();
@@ -2876,6 +2939,12 @@ static void bond_miimon_commit(struct bo
@@ -2876,6 +2946,12 @@ static void bond_miimon_commit(struct bo
bond_set_active_slave(slave);
}
@ -337,7 +351,7 @@
slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
slave->duplex ? "full" : "half");
@@ -2924,6 +2993,16 @@ static void bond_miimon_commit(struct bo
@@ -2924,6 +3000,16 @@ static void bond_miimon_commit(struct bo
unblock_netpoll_tx();
}
@ -354,14 +368,14 @@
bond_set_carrier(bond);
}
@@ -4176,8 +4255,219 @@ static inline u32 bond_eth_hash(struct s
@@ -4176,9 +4262,220 @@ static inline u32 bond_eth_hash(struct s
return 0;
ep = (struct ethhdr *)(data + mhoff);
- return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto);
+ return ep->h_dest[5] ^ ep->h_source[5]; /* QCA NSS ECM bonding support */
+}
+
}
+/* QCA NSS ECM bonding support - Start */
+/* Extract the appropriate headers based on bond's xmit policy */
+static bool bond_flow_dissect_without_skb(struct bonding *bond,
@ -546,7 +560,7 @@
+ default:
+ return NULL;
+ }
}
+}
+EXPORT_SYMBOL(bond_get_tx_dev);
+
+/* In bond_xmit_xor() , we determine the output device by using a pre-
@ -572,10 +586,11 @@
+ return NETDEV_TX_OK;
+}
+/* QCA NSS ECM bonding support - End */
+
static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data,
int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34)
@@ -5306,15 +5596,18 @@ static netdev_tx_t bond_3ad_xor_xmit(str
{
@@ -5306,15 +5603,18 @@ static netdev_tx_t bond_3ad_xor_xmit(str
struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
@ -601,7 +616,7 @@
}
/* in broadcast mode, we send everything to all usable interfaces. */
@@ -5564,8 +5857,9 @@ static netdev_tx_t __bond_start_xmit(str
@@ -5564,8 +5864,9 @@ static netdev_tx_t __bond_start_xmit(str
return bond_xmit_roundrobin(skb, dev);
case BOND_MODE_ACTIVEBACKUP:
return bond_xmit_activebackup(skb, dev);
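For orientation, here is a rough, hypothetical sketch of how an external module such as NSS ECM might call the bond_get_tx_dev() helper exported above. The meaning of the src/dst/layer4hdr parameters (L3 addresses and a pointer to the L4 port pair) is an assumption inferred from the prototype in this patch, not something the patch itself documents.

/* Hypothetical caller sketch - not part of the patch.
 * Assumptions: src/dst carry the IPv4 source/destination addresses of the
 * flow, layer4hdr points at the L4 port pair, protocol is the ethertype.
 */
#include <linux/netdevice.h>
#include <linux/if_ether.h>

struct net_device *bond_get_tx_dev(struct sk_buff *skb, uint8_t *src_mac,
				   u8 *dst_mac, void *src, void *dst,
				   u16 protocol, struct net_device *bond_dev,
				   __be16 *layer4hdr);

static struct net_device *ecm_pick_bond_slave(struct net_device *bond_dev,
					      u8 *smac, u8 *dmac,
					      __be32 *sip, __be32 *dip,
					      __be16 *ports)
{
	/* No skb is available in this path, so the flow tuple is passed
	 * explicitly; the helper returns the slave to transmit on, or NULL.
	 */
	return bond_get_tx_dev(NULL, smac, dmac, sip, dip,
			       ETH_P_IP, bond_dev, ports);
}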


@ -336,7 +336,7 @@
}
+/* QCA NSS Qdisc Support - Start */
+bool tcf_destroy(struct tcf_proto *tp, bool force)
+static bool tcf_destroy(struct tcf_proto *tp, bool force)
+{
+ tp->ops->destroy(tp, force, NULL);
+ module_put(tp->ops->owner);
@ -451,3 +451,13 @@
__u8 remcsum_offload:1;
__u8 csum_complete_sw:1;
__u8 csum_level:2;
--- a/include/linux/timer_types.h
+++ b/include/linux/timer_types.h
@@ -14,6 +14,7 @@ struct timer_list {
unsigned long expires;
void (*function)(struct timer_list *);
u32 flags;
+ unsigned long cust_data;
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;


@ -0,0 +1,452 @@
From 94972ba5210ab34f071b011e9001f7234860fe89 Mon Sep 17 00:00:00 2001
From: "Hardik S. Panchal" <hpanchal@codeaurora.org>
Date: Tue, 8 May 2018 10:16:32 +0530
Subject: [PATCH 281/500] net: Add API to update L4 protocol registrant.
Change-Id: I0d01fe33a590bb3eec596de621f86537f60c7071
Signed-off-by: Hardik S. Panchal <hpanchal@codeaurora.org>
Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
---
include/net/protocol.h | 4 ++++
net/ipv4/protocol.c | 26 ++++++++++++++++++++++++++
net/ipv6/protocol.c | 26 ++++++++++++++++++++++++++
3 files changed, 56 insertions(+)
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -104,12 +104,16 @@ int inet_add_protocol(const struct net_p
int inet_del_protocol(const struct net_protocol *prot, unsigned char num);
int inet_add_offload(const struct net_offload *prot, unsigned char num);
int inet_del_offload(const struct net_offload *prot, unsigned char num);
+int inet_update_protocol(const struct net_protocol *new_prot,
+ unsigned char num, const struct net_protocol **old_prot);
void inet_register_protosw(struct inet_protosw *p);
void inet_unregister_protosw(struct inet_protosw *p);
#if IS_ENABLED(CONFIG_IPV6)
int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
+int inet6_update_protocol(const struct inet6_protocol *new_prot,
+ unsigned char num, const struct inet6_protocol **old_prot);
int inet6_register_protosw(struct inet_protosw *p);
void inet6_unregister_protosw(struct inet_protosw *p);
#endif
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -68,3 +68,29 @@ int inet_del_offload(const struct net_of
return ret;
}
EXPORT_SYMBOL(inet_del_offload);
+
+int inet_update_protocol(const struct net_protocol *new_prot,
+ unsigned char protocol, const struct net_protocol **old_prot)
+{
+ int ret;
+
+ rcu_read_lock();
+ *old_prot = rcu_dereference(inet_protos[protocol]);
+ if (!*old_prot) {
+ rcu_read_unlock();
+ return -1;
+ }
+ rcu_read_unlock();
+
+ /*
+ * old_prot is not protected as cmpxchg is successful only if
+ * old_prot matches with the value in inet_protos[protocol]
+ */
+ ret = (cmpxchg((const struct net_protocol **)&inet_protos[protocol],
+ *old_prot, new_prot) == *old_prot) ? 0 : -1;
+
+ synchronize_net();
+
+ return ret;
+}
+EXPORT_SYMBOL(inet_update_protocol);
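For context, a minimal, hypothetical sketch of how a module could use the new API to wrap the registered handler for a protocol and later restore it; the UDP example, handler body and names are illustrative assumptions only.

/* Hypothetical consumer of inet_update_protocol() - not part of the patch. */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <net/protocol.h>

static const struct net_protocol *orig_udp_prot;

static int my_udp_rcv(struct sk_buff *skb)
{
	/* Inspect or accelerate the packet here, then hand it back. */
	return orig_udp_prot->handler(skb);
}

static const struct net_protocol my_udp_protocol = {
	.handler	= my_udp_rcv,
	.no_policy	= 1,
};

static int __init my_init(void)
{
	/* Swap in the new handler; the old one is saved for chaining/restore. */
	return inet_update_protocol(&my_udp_protocol, IPPROTO_UDP,
				    &orig_udp_prot);
}

static void __exit my_exit(void)
{
	const struct net_protocol *cur;

	/* Restore the original handler on unload. */
	inet_update_protocol(orig_udp_prot, IPPROTO_UDP, &cur);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");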
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -44,6 +44,32 @@ int inet6_del_protocol(const struct inet
return ret;
}
EXPORT_SYMBOL(inet6_del_protocol);
+
+int inet6_update_protocol(const struct inet6_protocol *new_prot,
+ unsigned char protocol, const struct inet6_protocol **old_prot)
+{
+ int ret;
+
+ rcu_read_lock();
+ *old_prot = rcu_dereference(inet6_protos[protocol]);
+ if (!*old_prot) {
+ rcu_read_unlock();
+ return -1;
+ }
+ rcu_read_unlock();
+
+ /*
+ * old_prot is not protected as cmpxchg is successful only if
+ * old_prot matches with the value in inet6_protos[protocol]
+ */
+ ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol],
+ *old_prot, new_prot) == *old_prot) ? 0 : -1;
+
+ synchronize_net();
+
+ return ret;
+}
+EXPORT_SYMBOL(inet6_update_protocol);
#endif
const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS] __read_mostly;
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -84,6 +84,8 @@ struct netns_xfrm {
spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
struct delayed_work nat_keepalive_work;
+ spinlock_t xfrm_event_lock;
+ struct list_head event_notifier_list;
};
#endif
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -304,6 +304,21 @@ struct xfrm_state {
u8 dir;
};
+enum xfrm_event_type {
+ XFRM_EVENT_NONE = 0,
+ XFRM_EVENT_STATE_ADD,
+ XFRM_EVENT_STATE_DEL,
+ XFRM_EVENT_MAX
+};
+
+struct xfrm_event_notifier {
+ struct list_head list;
+ void (*state_notify)(struct xfrm_state *x, enum xfrm_event_type event);
+};
+
+int xfrm_event_register_notifier(struct net *net, struct xfrm_event_notifier *event);
+void xfrm_event_unregister_notifier(struct net *net, struct xfrm_event_notifier *event);
+
static inline struct net *xs_net(struct xfrm_state *x)
{
return read_pnet(&x->xs_net);
@@ -312,6 +327,7 @@ static inline struct net *xs_net(struct
/* xflags - make enum if more show up */
#define XFRM_TIME_DEFER 1
#define XFRM_SOFT_EXPIRE 2
+#define XFRM_STATE_OFFLOAD_NSS 4
enum {
XFRM_STATE_VOID,
@@ -413,6 +429,7 @@ int xfrm_state_register_afinfo(struct xf
int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
+struct xfrm_state_afinfo *xfrm_state_update_afinfo(unsigned int family, struct xfrm_state_afinfo *new);
struct xfrm_input_afinfo {
u8 family;
@@ -443,6 +460,8 @@ struct xfrm_type {
int (*output)(struct xfrm_state *, struct sk_buff *pskb);
int (*reject)(struct xfrm_state *, struct sk_buff *,
const struct flowi *);
+ /* Estimate maximal size of result of transformation of a dgram */
+ u32 (*get_mtu)(struct xfrm_state *, int size);
};
int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
@@ -1670,6 +1689,7 @@ struct xfrm_state *xfrm_state_lookup_bya
const xfrm_address_t *saddr,
u8 proto,
unsigned short family);
+void xfrm_state_change_notify(struct xfrm_state *x, enum xfrm_event_type);
#ifdef CONFIG_XFRM_SUB_POLICY
void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
unsigned short family);
@@ -1714,7 +1734,7 @@ void xfrm_sad_getinfo(struct net *net, s
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
-u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
+int xfrm_state_mtu(struct xfrm_state *x, int mtu);
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
struct netlink_ext_ack *extack);
int xfrm_init_state(struct xfrm_state *x);
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -4296,6 +4296,10 @@ static int __net_init xfrm_net_init(stru
spin_lock_init(&net->xfrm.xfrm_policy_lock);
seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
mutex_init(&net->xfrm.xfrm_cfg_mutex);
+ spin_lock_init(&net->xfrm.xfrm_event_lock);
+
+ INIT_LIST_HEAD(&net->xfrm.event_notifier_list);
+
net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -28,8 +28,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <crypto/aead.h>
-
#include "xfrm_hash.h"
#define xfrm_state_deref_prot(table, net) \
@@ -777,6 +775,7 @@ int __xfrm_state_delete(struct xfrm_stat
sock_put(rcu_dereference_raw(x->encap_sk));
xfrm_dev_state_delete(x);
+ xfrm_state_change_notify(x, XFRM_EVENT_STATE_DEL);
/* All xfrm_state objects are created by xfrm_state_alloc.
* The xfrm_state_alloc call gives a reference, and that
@@ -2990,6 +2989,20 @@ struct xfrm_state_afinfo *xfrm_state_get
return afinfo;
}
+struct xfrm_state_afinfo *xfrm_state_update_afinfo(unsigned int family, struct xfrm_state_afinfo *new)
+{
+ struct xfrm_state_afinfo *afinfo;
+
+ spin_lock_bh(&xfrm_state_afinfo_lock);
+ afinfo = rcu_dereference_protected(xfrm_state_afinfo[family], lockdep_is_held(&xfrm_state_afinfo_lock));
+ rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], new);
+ spin_unlock_bh(&xfrm_state_afinfo_lock);
+
+ synchronize_rcu();
+ return afinfo;
+}
+EXPORT_SYMBOL(xfrm_state_update_afinfo);
+
void xfrm_flush_gc(void)
{
flush_work(&xfrm_state_gc_work);
@@ -3011,38 +3024,16 @@ void xfrm_state_delete_tunnel(struct xfr
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
-u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
+int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
const struct xfrm_type *type = READ_ONCE(x->type);
- struct crypto_aead *aead;
- u32 blksize, net_adj = 0;
- if (x->km.state != XFRM_STATE_VALID ||
- !type || type->proto != IPPROTO_ESP)
- return mtu - x->props.header_len;
+ if (x->km.state == XFRM_STATE_VALID &&
+ type && type->get_mtu)
+ return type->get_mtu(x, mtu);
- aead = x->data;
- blksize = ALIGN(crypto_aead_blocksize(aead), 4);
-
- switch (x->props.mode) {
- case XFRM_MODE_TRANSPORT:
- case XFRM_MODE_BEET:
- if (x->props.family == AF_INET)
- net_adj = sizeof(struct iphdr);
- else if (x->props.family == AF_INET6)
- net_adj = sizeof(struct ipv6hdr);
- break;
- case XFRM_MODE_TUNNEL:
- break;
- default:
- WARN_ON_ONCE(1);
- break;
- }
-
- return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
- net_adj) & ~(blksize - 1)) + net_adj - 2;
+ return mtu - x->props.header_len;
}
-EXPORT_SYMBOL_GPL(xfrm_state_mtu);
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
struct netlink_ext_ack *extack)
@@ -3387,3 +3378,39 @@ void xfrm_audit_state_icvfail(struct xfr
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
#endif /* CONFIG_AUDITSYSCALL */
+
+void xfrm_state_change_notify(struct xfrm_state *x, enum xfrm_event_type type)
+{
+ struct xfrm_event_notifier *event;
+ struct net *net = xs_net(x);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(event, &net->xfrm.event_notifier_list, list) {
+ if (event->state_notify) {
+ event->state_notify(x, type);
+ }
+
+ BUG_ON(refcount_read(&x->refcnt) <= 0);
+ }
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(xfrm_state_change_notify);
+
+int xfrm_event_register_notifier(struct net *net, struct xfrm_event_notifier *event)
+{
+ spin_lock_bh(&net->xfrm.xfrm_event_lock);
+ list_add_tail_rcu(&event->list, &net->xfrm.event_notifier_list);
+ spin_unlock_bh(&net->xfrm.xfrm_event_lock);
+ return 0;
+}
+EXPORT_SYMBOL(xfrm_event_register_notifier);
+
+void xfrm_event_unregister_notifier(struct net *net, struct xfrm_event_notifier *event)
+{
+ spin_lock_bh(&net->xfrm.xfrm_event_lock);
+ list_del_rcu(&event->list);
+ spin_unlock_bh(&net->xfrm.xfrm_event_lock);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(xfrm_event_unregister_notifier);
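A minimal, hypothetical consumer of the notifier interface added above could look like the following; the callback body and names are assumptions for illustration.

/* Hypothetical xfrm event notifier user - not part of the patch. */
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/xfrm.h>

static void my_xfrm_state_notify(struct xfrm_state *x,
				 enum xfrm_event_type event)
{
	/* Called for XFRM_EVENT_STATE_ADD / XFRM_EVENT_STATE_DEL. */
	pr_debug("xfrm state %s, spi=0x%x\n",
		 event == XFRM_EVENT_STATE_ADD ? "added" : "deleted",
		 be32_to_cpu(x->id.spi));
}

static struct xfrm_event_notifier my_notifier = {
	.state_notify = my_xfrm_state_notify,
};

static int __init my_init(void)
{
	return xfrm_event_register_notifier(&init_net, &my_notifier);
}

static void __exit my_exit(void)
{
	xfrm_event_unregister_notifier(&init_net, &my_notifier);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");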
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -888,6 +888,8 @@ static struct xfrm_state *xfrm_state_con
goto error;
}
+ xfrm_state_change_notify(x, XFRM_EVENT_STATE_ADD);
+
return x;
error:
@@ -926,6 +928,7 @@ static int xfrm_add_sa(struct sk_buff *s
if (err < 0) {
x->km.state = XFRM_STATE_DEAD;
xfrm_dev_state_delete(x);
+ xfrm_state_change_notify(x, XFRM_EVENT_STATE_DEL);
__xfrm_state_put(x);
goto out;
}
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -37,6 +37,8 @@ struct esp_output_extra {
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
+static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
+
/*
* Allocate an AEAD request structure with extra space for SG and IV.
*
@@ -680,7 +682,7 @@ static int esp_output(struct xfrm_state
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
u32 padto;
- padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
+ padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
if (skb->len < padto)
esp.tfclen = padto - skb->len;
}
@@ -980,6 +982,28 @@ out:
return err;
}
+static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
+{
+ struct crypto_aead *aead = x->data;
+ u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+ unsigned int net_adj;
+
+ switch (x->props.mode) {
+ case XFRM_MODE_TRANSPORT:
+ case XFRM_MODE_BEET:
+ net_adj = sizeof(struct iphdr);
+ break;
+ case XFRM_MODE_TUNNEL:
+ net_adj = 0;
+ break;
+ default:
+ BUG();
+ }
+
+ return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
+ net_adj) & ~(blksize - 1)) + net_adj - 2;
+}
+
static int esp4_err(struct sk_buff *skb, u32 info)
{
struct net *net = dev_net(skb->dev);
@@ -1221,6 +1245,7 @@ static const struct xfrm_type esp_type =
.flags = XFRM_TYPE_REPLAY_PROT,
.init_state = esp_init_state,
.destructor = esp_destroy,
+ .get_mtu = esp4_get_mtu,
.input = esp_input,
.output = esp_output,
};
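To make the calculation concrete, a worked example with assumed values: in tunnel mode (net_adj = 0), with x->props.header_len = 24, a 16-byte ICV (crypto_aead_authsize) and a block size aligned to 4, esp4_get_mtu(x, 1500) evaluates to ((1500 - 24 - 16 - 0) & ~3) + 0 - 2 = 1458, i.e. the largest payload that still fits once ESP overhead and cipher-block alignment are accounted for.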
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -53,6 +53,8 @@ struct esp_output_extra {
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
+static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);
+
/*
* Allocate an AEAD request structure with extra space for SG and IV.
*
@@ -712,7 +714,7 @@ static int esp6_output(struct xfrm_state
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
u32 padto;
- padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
+ padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
if (skb->len < padto)
esp.tfclen = padto - skb->len;
}
@@ -1022,6 +1024,19 @@ out:
return ret;
}
+static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
+{
+ struct crypto_aead *aead = x->data;
+ u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+ unsigned int net_adj = 0;
+
+ if (x->props.mode != XFRM_MODE_TUNNEL)
+ net_adj = sizeof(struct ipv6hdr);
+
+ return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
+ net_adj) & ~(blksize - 1)) + net_adj - 2;
+}
+
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
@@ -1267,6 +1282,7 @@ static const struct xfrm_type esp6_type
.flags = XFRM_TYPE_REPLAY_PROT,
.init_state = esp6_init_state,
.destructor = esp6_destroy,
+ .get_mtu = esp6_get_mtu,
.input = esp6_input,
.output = esp6_output,
};
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -420,8 +420,9 @@ bool xfrm_dev_offload_ok(struct sk_buff
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
- !xdst->child->xfrm)) {
- mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
+ !(xdst->child->xfrm && x->type->get_mtu))) {
+ mtu = x->type->get_mtu(x, xdst->child_mtu_cached);
+
if (skb->len <= mtu)
goto ok;


@ -0,0 +1,60 @@
From 6363738ddf830c23fb41ea1c945b055180b6c356 Mon Sep 17 00:00:00 2001
From: Ram Chandra Jangir <quic_rjangir@quicinc.com>
Date: Mon, 7 Apr 2014 16:51:59 -0700
Subject: [PATCH 451/500] mtd: ubi: add a ubi forced detach ioctl
When running a UBI rootfs upgrade on a NAND-based OpenWrt device, the init
process is still "hooked" on the old file system after pivot_root, so the
old file system cannot be unmounted. While the filesystem stays mounted it
can neither be upgraded (for example with ubiupdatevol) nor removed with
ubirmvol. Forcing the unmount would allow running the aforementioned
commands.
Change-Id: I431c5957aea1b2f1f7cc7645ff7a1ae5b22e6d35
Signed-off-by: Ram Chandra Jangir <quic_rjangir@quicinc.com>
---
drivers/mtd/ubi/cdev.c | 8 ++++++--
include/uapi/mtd/ubi-user.h | 1 +
2 files changed, 7 insertions(+), 2 deletions(-)
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -1003,7 +1003,7 @@ static long ubi_cdev_ioctl(struct file *
static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- int err = 0;
+ int err = 0, force = 0;
void __user *argp = (void __user *)arg;
if (!capable(CAP_SYS_RESOURCE))
@@ -1054,6 +1054,10 @@ static long ctrl_cdev_ioctl(struct file
}
/* Detach an MTD device command */
+ case UBI_IOCFDET:
+ force = 1;
+ /* fallthrough */
+ break;
case UBI_IOCDET:
{
int ubi_num;
@@ -1066,7 +1070,7 @@ static long ctrl_cdev_ioctl(struct file
}
mutex_lock(&ubi_devices_mutex);
- err = ubi_detach_mtd_dev(ubi_num, 0);
+ err = ubi_detach_mtd_dev(ubi_num, force);
mutex_unlock(&ubi_devices_mutex);
break;
}
--- a/include/uapi/mtd/ubi-user.h
+++ b/include/uapi/mtd/ubi-user.h
@@ -184,6 +184,7 @@
#define UBI_IOCATT _IOW(UBI_CTRL_IOC_MAGIC, 64, struct ubi_attach_req)
/* Detach an MTD device */
#define UBI_IOCDET _IOW(UBI_CTRL_IOC_MAGIC, 65, __s32)
+#define UBI_IOCFDET _IOW(UBI_CTRL_IOC_MAGIC, 99, __s32)
/* ioctl commands of UBI volume character devices */
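For illustration, a minimal user-space sketch of issuing the new forced-detach ioctl; it follows the usual UBI_IOCDET calling convention, and the device path, device number and error handling are assumptions (the patched ubi-user.h header must be in use).

/* Hypothetical user-space sketch - not part of the patch. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>

int main(void)
{
	int32_t ubi_num = 0;			/* UBI device to force-detach */
	int fd = open("/dev/ubi_ctrl", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/ubi_ctrl");
		return 1;
	}
	if (ioctl(fd, UBI_IOCFDET, &ubi_num) < 0)
		perror("UBI_IOCFDET");
	close(fd);
	return 0;
}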


@ -10,10 +10,10 @@
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1015,6 +1015,20 @@ struct sk_buff {
__u8 csum_not_inet:1;
@@ -989,6 +989,20 @@ struct sk_buff {
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
__u8 unreadable:1;
+ /* Linear packets processed by dev_fast_xmit() */
+ __u8 fast_xmit:1;
+ __u8 fast_forwarded:1;
@ -28,9 +28,9 @@
+ /* Packets processed in dev_fast_xmit_qdisc() path */
+ __u8 int_pri:4;
+ /* Priority info for hardware qdiscs */
#if defined(CONFIG_NET_SCHED) || defined(CONFIG_NET_XGRESS)
__u16 tc_index; /* traffic control index */
#endif
#if IS_ENABLED(CONFIG_IP_VS)
__u8 ipvs_property:1;
@@ -1085,6 +1099,10 @@ struct sk_buff {
/* only usable after checking ->active_extensions != 0 */
struct skb_ext *extensions;
@ -51,20 +51,19 @@
void consume_skb(struct sk_buff *skb);
#else
static inline void consume_skb(struct sk_buff *skb)
@@ -1295,9 +1313,13 @@ static inline void consume_skb(struct sk
@@ -1295,8 +1313,12 @@ static inline void consume_skb(struct sk
}
#endif
+void consume_skb_list_fast(struct sk_buff_head *skb_list);
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
+extern void kfree_skbmem(struct sk_buff *skb);
+extern void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason);
+
+extern void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
+ bool napi_safe);
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
bool *fragstolen, int *delta_truesize);
@@ -1419,6 +1441,7 @@ static inline int skb_pad(struct sk_buff
return __skb_pad(skb, pad, true);
}
@ -140,16 +139,14 @@
config NET_PKTGEN
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -44,5 +44,6 @@ obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
@@ -46,3 +46,4 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_stor
obj-$(CONFIG_OF) += of_net.o
+obj-$(CONFIG_SKB_RECYCLER) += skbuff_recycle.o
obj-$(CONFIG_NET_TEST) += net_test.o
obj-$(CONFIG_NET_DEVMEM) += devmem.o
+obj-$(CONFIG_SKB_RECYCLER) += skbuff_recycle.o
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6183,12 +6183,17 @@ static int process_backlog(struct napi_s
@@ -6183,12 +6183,18 @@ static int process_backlog(struct napi_s
napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);
while (again) {
@ -165,6 +162,7 @@
+ if (likely(next_skb)) {
+ prefetch(next_skb->data);
+ }
+
__netif_receive_skb(skb);
rcu_read_unlock();
if (++work >= quota) {
@ -208,15 +206,7 @@
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
@@ -408,7 +439,6 @@ static inline void __finalize_skb_around
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
-
skb_set_kcov_handle(skb, kcov_common_handle());
}
@@ -600,19 +630,33 @@ static void *kmalloc_reserve(unsigned in
@@ -600,19 +631,33 @@ static void *kmalloc_reserve(unsigned in
bool ret_pfmemalloc = false;
size_t obj_size;
void *obj;
@ -255,12 +245,11 @@
goto out;
}
@@ -695,10 +739,12 @@ struct sk_buff *__alloc_skb(unsigned int
@@ -695,10 +740,11 @@ struct sk_buff *__alloc_skb(unsigned int
* aligned memory blocks, unless SLUB/SLAB debug is enabled.
* Both skb->head and skb_shared_info are cache line aligned.
*/
+ size = SKB_DATA_ALIGN(size);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ size = SKB_HEAD_ALIGN(size);
data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
if (unlikely(!data))
goto nodata;
@ -401,16 +390,17 @@
/**
* napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
* @napi: napi instance this buffer was allocated for
@@ -1120,7 +1248,7 @@ static void skb_free_head(struct sk_buff
@@ -1120,7 +1248,8 @@ static void skb_free_head(struct sk_buff
}
}
-static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
+void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
+void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
+ bool napi_safe)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
@@ -1160,7 +1288,7 @@ exit:
@@ -1160,7 +1289,7 @@ exit:
/*
* Free an skbuff by memory without cleaning the state.
*/
@ -419,7 +409,31 @@
{
struct sk_buff_fclones *fclones;
@@ -1259,8 +1387,13 @@ bool __sk_skb_reason_drop(struct sock *s
@@ -1204,11 +1333,12 @@ void skb_release_head_state(struct sk_bu
}
/* Free everything but the sk_buff shell. */
-static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason)
+static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason,
+ bool napi_safe)
{
skb_release_head_state(skb);
if (likely(skb->head))
- skb_release_data(skb, reason);
+ skb_release_data(skb, reason, napi_safe);
}
/**
@@ -1222,7 +1352,7 @@ static void skb_release_all(struct sk_bu
void __kfree_skb(struct sk_buff *skb)
{
- skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
+ skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED, false);
kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
@@ -1259,8 +1389,13 @@ bool __sk_skb_reason_drop(struct sock *s
void __fix_address
sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason)
{
@ -434,7 +448,16 @@
}
EXPORT_SYMBOL(sk_skb_reason_drop);
@@ -1439,7 +1572,6 @@ void skb_tx_error(struct sk_buff *skb)
@@ -1281,7 +1416,7 @@ static void kfree_skb_add_bulk(struct sk
return;
}
- skb_release_all(skb, reason);
+ skb_release_all(skb, reason, false);
sa->skb_array[sa->skb_count++] = skb;
if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
@@ -1439,7 +1574,6 @@ void skb_tx_error(struct sk_buff *skb)
}
EXPORT_SYMBOL(skb_tx_error);
@ -442,7 +465,7 @@
/**
* consume_skb - free an skbuff
* @skb: buffer to free
@@ -1448,18 +1580,93 @@ EXPORT_SYMBOL(skb_tx_error);
@@ -1448,18 +1582,93 @@ EXPORT_SYMBOL(skb_tx_error);
* Functions identically to kfree_skb, but kfree_skb assumes that the frame
* is being dropped after a failure and notes that
*/
@ -488,7 +511,7 @@
+ * that we already did).
+ */
+ if (likely(skb->head))
+ skb_release_data(skb, SKB_CONSUMED);
+ skb_release_data(skb, SKB_CONSUMED, false);
+
+ kfree_skbmem(skb);
}
@ -526,7 +549,7 @@
+ * that we already did).
+ */
+ if (likely(skb->head))
+ skb_release_data(skb, SKB_CONSUMED);
+ skb_release_data(skb, SKB_CONSUMED, false);
+
+ kfree_skbmem(skb);
+ }
@ -537,7 +560,34 @@
* __consume_stateless_skb - free an skbuff, assuming it is stateless
* @skb: buffer to free
*
@@ -1562,6 +1769,11 @@ static void __copy_skb_header(struct sk_
@@ -1469,7 +1678,7 @@ EXPORT_SYMBOL(consume_skb);
void __consume_stateless_skb(struct sk_buff *skb)
{
trace_consume_skb(skb, __builtin_return_address(0));
- skb_release_data(skb, SKB_CONSUMED);
+ skb_release_data(skb, SKB_CONSUMED, false);
kfree_skbmem(skb);
}
@@ -1498,7 +1707,7 @@ static void napi_skb_cache_put(struct sk
void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
{
- skb_release_all(skb, reason);
+ skb_release_all(skb, reason, true);
napi_skb_cache_put(skb);
}
@@ -1536,7 +1745,7 @@ void napi_consume_skb(struct sk_buff *sk
return;
}
- skb_release_all(skb, SKB_CONSUMED);
+ skb_release_all(skb, SKB_CONSUMED, !!budget);
napi_skb_cache_put(skb);
}
EXPORT_SYMBOL(napi_consume_skb);
@@ -1562,6 +1771,11 @@ static void __copy_skb_header(struct sk_
new->queue_mapping = old->queue_mapping;
memcpy(&new->headers, &old->headers, sizeof(new->headers));
@ -549,7 +599,33 @@
CHECK_SKB_FIELD(protocol);
CHECK_SKB_FIELD(csum);
CHECK_SKB_FIELD(hash);
@@ -2353,6 +2565,10 @@ int pskb_expand_head(struct sk_buff *skb
@@ -1667,7 +1881,7 @@ EXPORT_SYMBOL_GPL(alloc_skb_for_msg);
*/
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
- skb_release_all(dst, SKB_CONSUMED);
+ skb_release_all(dst, SKB_CONSUMED, false);
return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
@@ -2293,6 +2507,7 @@ int pskb_expand_head(struct sk_buff *skb
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
+ size = SKB_HEAD_ALIGN(size);
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
goto nodata;
@@ -2323,7 +2538,7 @@ int pskb_expand_head(struct sk_buff *skb
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
- skb_release_data(skb, SKB_CONSUMED);
+ skb_release_data(skb, SKB_CONSUMED, false);
} else {
skb_free_head(skb);
}
@@ -2353,6 +2568,10 @@ int pskb_expand_head(struct sk_buff *skb
if (!skb->sk || skb->destructor == sock_edemux)
skb->truesize += size - osize;
@ -560,7 +636,7 @@
return 0;
nofrags:
@@ -5122,6 +5338,17 @@ static void skb_extensions_init(void) {}
@@ -5122,6 +5341,17 @@ static void skb_extensions_init(void) {}
void __init skb_init(void)
{
@ -578,7 +654,7 @@
net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
sizeof(struct sk_buff),
0,
@@ -5147,6 +5374,7 @@ void __init skb_init(void)
@@ -5147,6 +5377,7 @@ void __init skb_init(void)
SKB_SMALL_HEAD_HEADROOM,
NULL);
skb_extensions_init();
@ -586,21 +662,37 @@
}
static int
@@ -6681,6 +6909,8 @@ static int pskb_carve_inside_header(stru
@@ -6681,6 +6912,7 @@ static int pskb_carve_inside_header(stru
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
+ size = SKB_DATA_ALIGN(size);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ size = SKB_HEAD_ALIGN(size);
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
return -ENOMEM;
@@ -6797,6 +7027,8 @@ static int pskb_carve_inside_nonlinear(s
@@ -6704,7 +6936,7 @@ static int pskb_carve_inside_header(stru
skb_frag_ref(skb, i);
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
- skb_release_data(skb, SKB_CONSUMED);
+ skb_release_data(skb, SKB_CONSUMED, false);
} else {
/* we can reuse existing recount- all we did was
* relocate values
@@ -6797,6 +7029,7 @@ static int pskb_carve_inside_nonlinear(s
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
+ size = SKB_DATA_ALIGN(size);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ size = SKB_HEAD_ALIGN(size);
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
return -ENOMEM;
@@ -6844,7 +7077,7 @@ static int pskb_carve_inside_nonlinear(s
skb_kfree_head(data, size);
return -ENOMEM;
}
- skb_release_data(skb, SKB_CONSUMED);
+ skb_release_data(skb, SKB_CONSUMED, false);
skb->head = data;
skb->head_frag = 0;