qualcommax: NSS: kernel 6.6 support (squash)
Signed-off-by: Sean Khan <datapronix@protonmail.com>
commit 4ba2dbda8d (parent d0af869c9e)
@@ -0,0 +1,875 @@
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -71,6 +71,9 @@ void brioctl_set(int (*hook)(struct net
 			      void __user *uarg));
 int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd,
 		  struct ifreq *ifr, void __user *uarg);
+extern void br_dev_update_stats(struct net_device *dev,
+				struct rtnl_link_stats64 *nlstats);
+extern bool br_is_hairpin_enabled(struct net_device *dev);

 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
 int br_multicast_list_adjacent(struct net_device *dev,
@@ -213,4 +216,42 @@ static inline clock_t br_get_ageing_time
 }
 #endif

+/* QCA NSS ECM support - Start */
+extern struct net_device *br_port_dev_get(struct net_device *dev,
+					  unsigned char *addr,
+					  struct sk_buff *skb,
+					  unsigned int cookie);
+extern void br_refresh_fdb_entry(struct net_device *dev, const char *addr);
+extern void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid);
+extern struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev,
+						     const char *addr,
+						     __u16 vid);
+extern void br_fdb_update_register_notify(struct notifier_block *nb);
+extern void br_fdb_update_unregister_notify(struct notifier_block *nb);
+
+typedef struct net_bridge_port *br_port_dev_get_hook_t(struct net_device *dev,
+						       struct sk_buff *skb,
+						       unsigned char *addr,
+						       unsigned int cookie);
+extern br_port_dev_get_hook_t __rcu *br_port_dev_get_hook;
+
+#define BR_FDB_EVENT_ADD	0x01
+#define BR_FDB_EVENT_DEL	0x02
+
+struct br_fdb_event {
+	struct net_device *dev;
+	unsigned char addr[6];
+	unsigned char is_local;
+	struct net_bridge *br;
+	struct net_device *orig_dev;
+};
+extern void br_fdb_register_notify(struct notifier_block *nb);
+extern void br_fdb_unregister_notify(struct notifier_block *nb);
+
+typedef struct net_bridge_port *br_get_dst_hook_t(
+		const struct net_bridge_port *src,
+		struct sk_buff **skb);
+extern br_get_dst_hook_t __rcu *br_get_dst_hook;
+/* QCA NSS ECM support - End */
+
 #endif
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -143,7 +143,10 @@ extern struct net_device *__vlan_find_de
 extern int vlan_for_each(struct net_device *dev,
 			 int (*action)(struct net_device *dev, int vid,
 				       void *arg), void *arg);
+extern void __vlan_dev_update_accel_stats(struct net_device *dev,
+					  struct rtnl_link_stats64 *stats); /* QCA NSS ECM support */
 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+extern struct net_device *vlan_dev_next_dev(const struct net_device *dev); /* QCA NSS ECM support */
 extern u16 vlan_dev_vlan_id(const struct net_device *dev);
 extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);

@@ -236,6 +239,12 @@ extern void vlan_vids_del_by_dev(struct
 extern bool vlan_uses_dev(const struct net_device *dev);

 #else
+static inline void __vlan_dev_update_accel_stats(struct net_device *dev,
+						 struct rtnl_link_stats64 *stats)
+{
+
+} /* QCA NSS ECM support */
+
 static inline struct net_device *
 __vlan_find_dev_deep_rcu(struct net_device *real_dev,
 			 __be16 vlan_proto, u16 vlan_id)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2936,6 +2936,10 @@ enum netdev_cmd {
 	NETDEV_OFFLOAD_XSTATS_REPORT_USED,
 	NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
 	NETDEV_XDP_FEAT_CHANGE,
+	/* QCA NSS ECM Support - Start */
+	NETDEV_BR_JOIN,
+	NETDEV_BR_LEAVE,
+	/* QCA NSS ECM Support - End */
 };
 const char *netdev_cmd_to_name(enum netdev_cmd cmd);

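Note: these two events fire from br_add_if()/br_del_if() (see the br_if.c hunks
below) and reach any handler registered with register_netdevice_notifier(). A
minimal consumer sketch, illustrative only; the handler name and module glue
are assumptions, not part of the patch:

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_BR_JOIN:	/* dev was just added to a bridge */
		pr_debug("%s joined a bridge\n", dev->name);
		break;
	case NETDEV_BR_LEAVE:	/* dev is being removed from a bridge */
		pr_debug("%s left a bridge\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_netdev_nb = {
	.notifier_call = my_netdev_event,
};

/* in module init/exit: register_netdevice_notifier(&my_netdev_nb); */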
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -207,6 +207,11 @@ void rt6_multipath_rebalance(struct fib6
 void rt6_uncached_list_add(struct rt6_info *rt);
 void rt6_uncached_list_del(struct rt6_info *rt);

+/* QCA NSS ECM support - Start */
+int rt6_register_notifier(struct notifier_block *nb);
+int rt6_unregister_notifier(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
 static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
 {
 	const struct dst_entry *dst = skb_dst(skb);
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -249,6 +249,13 @@ static inline int neigh_parms_family(str
 	return p->tbl->family;
 }

+/* QCA NSS ECM support - Start */
+struct neigh_mac_update {
+	unsigned char old_mac[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+	unsigned char update_mac[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+};
+/* QCA NSS ECM support - End */
+
 #define NEIGH_PRIV_ALIGN	sizeof(long long)
 #define NEIGH_ENTRY_SIZE(size)	ALIGN((size), NEIGH_PRIV_ALIGN)

@@ -395,6 +402,11 @@ void __neigh_for_each_release(struct nei
 			      int (*cb)(struct neighbour *));
 int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);

+/* QCA NSS ECM support - Start */
+extern void neigh_mac_update_register_notify(struct notifier_block *nb);
+extern void neigh_mac_update_unregister_notify(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
 struct neigh_seq_state {
 	struct seq_net_private p;
 	struct neigh_table *tbl;
@@ -600,4 +612,5 @@ static inline void neigh_update_is_route
 		*notify = 1;
 	}
 }
+
 #endif
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -237,6 +237,11 @@ struct rtable *rt_dst_alloc(struct net_d
 			    unsigned int flags, u16 type, bool noxfrm);
 struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt);

+/* QCA NSS ECM support - Start */
+int ip_rt_register_notifier(struct notifier_block *nb);
+int ip_rt_unregister_notifier(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
 struct in_ifaddr;
 void fib_add_ifaddr(struct in_ifaddr *);
 void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -2266,4 +2266,6 @@ void br_do_suppress_nd(struct sk_buff *s
 		       u16 vid, struct net_bridge_port *p, struct nd_msg *msg);
 struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *m);
 bool br_is_neigh_suppress_enabled(const struct net_bridge_port *p, u16 vid);
+#define __br_get(__hook, __default, __args ...) \
+		(__hook ? (__hook(__args)) : (__default)) /* QCA NSS ECM support */
 #endif
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -72,6 +72,28 @@ bool vlan_do_receive(struct sk_buff **sk
 	return true;
 }

+/* QCA NSS ECM support - Start */
+/* Update the VLAN device with statistics from network offload engines */
+void __vlan_dev_update_accel_stats(struct net_device *dev,
+				   struct rtnl_link_stats64 *nlstats)
+{
+	struct vlan_pcpu_stats *stats;
+
+	if (!is_vlan_dev(dev))
+		return;
+
+	stats = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, 0);
+
+	u64_stats_update_begin(&stats->syncp);
+	u64_stats_add(&stats->rx_packets, nlstats->rx_packets);
+	u64_stats_add(&stats->rx_bytes, nlstats->rx_bytes);
+	u64_stats_add(&stats->tx_packets, nlstats->tx_packets);
+	u64_stats_add(&stats->tx_bytes, nlstats->tx_bytes);
+	u64_stats_update_end(&stats->syncp);
+}
+EXPORT_SYMBOL(__vlan_dev_update_accel_stats);
+/* QCA NSS ECM support - End */
+
 /* Must be invoked with rcu_read_lock. */
 struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
 					    __be16 vlan_proto, u16 vlan_id)
@@ -110,6 +132,15 @@ struct net_device *vlan_dev_real_dev(con
 }
 EXPORT_SYMBOL(vlan_dev_real_dev);

+/* QCA NSS ECM support - Start */
+/* Caller is responsible to hold the reference of the returned device */
+struct net_device *vlan_dev_next_dev(const struct net_device *dev)
+{
+	return vlan_dev_priv(dev)->real_dev;
+}
+EXPORT_SYMBOL(vlan_dev_next_dev);
+/* QCA NSS ECM support - End */
+
 u16 vlan_dev_vlan_id(const struct net_device *dev)
 {
 	return vlan_dev_priv(dev)->vlan_id;
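Note: a usage sketch for the exported helper (the caller, the vlan_dev pointer
and the hw_* counter names are assumptions; only __vlan_dev_update_accel_stats()
itself comes from the patch). An offload engine that forwards packets without
traversing the VLAN driver would periodically push its hardware counters as a
delta:

	struct rtnl_link_stats64 delta = {
		.rx_packets = hw_rx_pkts,	/* hypothetical HW counters */
		.rx_bytes   = hw_rx_bytes,
		.tx_packets = hw_tx_pkts,
		.tx_bytes   = hw_tx_bytes,
	};

	__vlan_dev_update_accel_stats(vlan_dev, &delta);

The delta is folded into CPU 0's per-cpu counters (per_cpu_ptr(..., 0)), which
suffices because per-cpu totals are summed when the device stats are read.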
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -33,6 +33,20 @@ static const struct rhashtable_params br

 static struct kmem_cache *br_fdb_cache __read_mostly;

+ATOMIC_NOTIFIER_HEAD(br_fdb_notifier_list);
+
+void br_fdb_register_notify(struct notifier_block *nb)
+{
+	atomic_notifier_chain_register(&br_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_register_notify);
+
+void br_fdb_unregister_notify(struct notifier_block *nb)
+{
+	atomic_notifier_chain_unregister(&br_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_unregister_notify);
+
 int __init br_fdb_init(void)
 {
 	br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
@@ -195,6 +209,25 @@ static void fdb_notify(struct net_bridge
 	if (swdev_notify)
 		br_switchdev_fdb_notify(br, fdb, type);

+	/* QCA NSS ECM support - Start */
+	if (fdb->dst) {
+		int event;
+		struct br_fdb_event fdb_event;
+
+		if (type == RTM_NEWNEIGH)
+			event = BR_FDB_EVENT_ADD;
+		else
+			event = BR_FDB_EVENT_DEL;
+
+		fdb_event.dev = fdb->dst->dev;
+		ether_addr_copy(fdb_event.addr, fdb->key.addr.addr);
+		fdb_event.is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
+		atomic_notifier_call_chain(&br_fdb_notifier_list,
+					   event,
+					   (void *)&fdb_event);
+	}
+	/* QCA NSS ECM support - End */
+
 	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
 	if (skb == NULL)
 		goto errout;
@@ -519,6 +552,22 @@ out:
 	spin_unlock_bh(&br->hash_lock);
 }

+/* QCA NSS ECM support - Start */
+ATOMIC_NOTIFIER_HEAD(br_fdb_update_notifier_list);
+
+void br_fdb_update_register_notify(struct notifier_block *nb)
+{
+	atomic_notifier_chain_register(&br_fdb_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_update_register_notify);
+
+void br_fdb_update_unregister_notify(struct notifier_block *nb)
+{
+	atomic_notifier_chain_unregister(&br_fdb_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_update_unregister_notify);
+/* QCA NSS ECM support - End */
+
 void br_fdb_cleanup(struct work_struct *work)
 {
 	struct net_bridge *br = container_of(work, struct net_bridge,
@@ -527,6 +576,7 @@ void br_fdb_cleanup(struct work_struct *
 	unsigned long delay = hold_time(br);
 	unsigned long work_delay = delay;
 	unsigned long now = jiffies;
+	u8 mac_addr[6]; /* QCA NSS ECM support */

 	/* this part is tricky, in order to avoid blocking learning and
 	 * consequently forwarding, we rely on rcu to delete objects with
@@ -553,8 +603,15 @@ void br_fdb_cleanup(struct work_struct *
 			work_delay = min(work_delay, this_timer - now);
 		} else {
 			spin_lock_bh(&br->hash_lock);
-			if (!hlist_unhashed(&f->fdb_node))
+			if (!hlist_unhashed(&f->fdb_node)) {
+				ether_addr_copy(mac_addr, f->key.addr.addr);
 				fdb_delete(br, f, true);
+				/* QCA NSS ECM support - Start */
+				atomic_notifier_call_chain(
+					&br_fdb_update_notifier_list, 0,
+					(void *)mac_addr);
+				/* QCA NSS ECM support - End */
+			}
 			spin_unlock_bh(&br->hash_lock);
 		}
 	}
@@ -891,6 +948,12 @@ void br_fdb_update(struct net_bridge *br
 			 */
 			if (unlikely(test_bit(BR_FDB_LOCKED, &fdb->flags)))
 				clear_bit(BR_FDB_LOCKED, &fdb->flags);
+
+			/* QCA NSS ECM support - Start */
+			atomic_notifier_call_chain(
+				&br_fdb_update_notifier_list,
+				0, (void *)addr);
+			/* QCA NSS ECM support - End */
 		}

 		if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
@@ -914,6 +977,64 @@ void br_fdb_update(struct net_bridge *br
 	}
 }

+/* QCA NSS ECM support - Start */
+/* Refresh FDB entries for bridge packets being forwarded by offload engines */
+void br_refresh_fdb_entry(struct net_device *dev, const char *addr)
+{
+	struct net_bridge_port *p = br_port_get_rcu(dev);
+
+	if (!p || p->state == BR_STATE_DISABLED)
+		return;
+
+	if (!is_valid_ether_addr(addr)) {
+		pr_info("bridge: Attempt to refresh with invalid ether address %pM\n",
+			addr);
+		return;
+	}
+
+	rcu_read_lock();
+	br_fdb_update(p->br, p, addr, 0, true);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(br_refresh_fdb_entry);
+
+/* Update timestamp of FDB entries for bridge packets being forwarded by offload engines */
+void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid)
+{
+	struct net_bridge_fdb_entry *fdb;
+	struct net_bridge_port *p = br_port_get_rcu(dev);
+
+	if (!p || p->state == BR_STATE_DISABLED)
+		return;
+
+	rcu_read_lock();
+	fdb = fdb_find_rcu(&p->br->fdb_hash_tbl, addr, vid);
+	if (likely(fdb)) {
+		fdb->updated = jiffies;
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(br_fdb_entry_refresh);
+
+/* Look up the MAC address in the device's bridge fdb table */
+struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev,
+					      const char *addr, __u16 vid)
+{
+	struct net_bridge_port *p = br_port_get_rcu(dev);
+	struct net_bridge_fdb_entry *fdb;
+
+	if (!p || p->state == BR_STATE_DISABLED)
+		return NULL;
+
+	rcu_read_lock();
+	fdb = fdb_find_rcu(&p->br->fdb_hash_tbl, addr, vid);
+	rcu_read_unlock();
+
+	return fdb;
+}
+EXPORT_SYMBOL_GPL(br_fdb_has_entry);
+
+/* QCA NSS ECM support - End */
 /* Dump information about entries, in response to GETNEIGH */
 int br_fdb_dump(struct sk_buff *skb,
 		struct netlink_callback *cb,
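Note: a minimal consumer sketch for the FDB event chain added above
(illustrative only; the callback name and module glue are assumptions, while
br_fdb_register_notify(), struct br_fdb_event and the BR_FDB_EVENT_* codes come
from the patch). The chain is atomic, so the callback must not sleep:

static int my_fdb_event_cb(struct notifier_block *nb,
			   unsigned long event, void *data)
{
	struct br_fdb_event *fdb_event = data;

	/* event is BR_FDB_EVENT_ADD or BR_FDB_EVENT_DEL; an offload
	 * module would typically invalidate cached flows here. */
	pr_debug("fdb %s %pM on %s\n",
		 event == BR_FDB_EVENT_ADD ? "add" : "del",
		 fdb_event->addr, fdb_event->dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block my_fdb_nb = {
	.notifier_call = my_fdb_event_cb,
};

/* in module init/exit:
 *	br_fdb_register_notify(&my_fdb_nb);
 *	br_fdb_unregister_notify(&my_fdb_nb);
 */

The parallel br_fdb_update_notifier_list chain differs: it is invoked with
event 0 and passes the raw MAC address (u8 *) of the refreshed or expired
entry as the data pointer.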
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -26,6 +26,12 @@

 #include "br_private.h"

+/* QCA NSS ECM support - Start */
+/* Hook for external forwarding logic */
+br_port_dev_get_hook_t __rcu *br_port_dev_get_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_port_dev_get_hook);
+/* QCA NSS ECM support - End */
+
 /*
  * Determine initial path cost based on speed.
  * using recommendations from 802.1d standard
@@ -697,6 +703,8 @@ int br_add_if(struct net_bridge *br, str

 	kobject_uevent(&p->kobj, KOBJ_ADD);

+	call_netdevice_notifiers(NETDEV_BR_JOIN, dev); /* QCA NSS ECM support */
+
 	return 0;

 err6:
@@ -732,6 +740,8 @@ int br_del_if(struct net_bridge *br, str
 	if (!p || p->br != br)
 		return -EINVAL;

+	call_netdevice_notifiers(NETDEV_BR_LEAVE, dev); /* QCA NSS ECM support */
+
 	/* Since more than one interface can be attached to a bridge,
 	 * there still maybe an alternate path for netconsole to use;
 	 * therefore there is no reason for a NETDEV_RELEASE event.
@@ -775,3 +785,97 @@ bool br_port_flag_is_set(const struct ne
 	return p->flags & flag;
 }
 EXPORT_SYMBOL_GPL(br_port_flag_is_set);
+
+/* br_port_dev_get()
+ *	If a skb is provided, and the br_port_dev_get_hook_t hook exists,
+ *	use that to try and determine the egress port for that skb.
+ *	If not, or no egress port could be determined, use the given addr
+ *	to identify the port to which it is reachable,
+ *	returning a reference to the net device associated with that port.
+ *
+ *	NOTE: Return NULL if given dev is not a bridge or the mac has no
+ *	associated port.
+ */
+struct net_device *br_port_dev_get(struct net_device *dev, unsigned char *addr,
+				   struct sk_buff *skb,
+				   unsigned int cookie)
+{
+	struct net_bridge_fdb_entry *fdbe;
+	struct net_bridge *br;
+	struct net_device *netdev = NULL;
+
+	/* Is this a bridge? */
+	if (!(dev->priv_flags & IFF_EBRIDGE))
+		return NULL;
+
+	rcu_read_lock();
+
+	/* If the hook exists and the skb isn't NULL, try and get the port */
+	if (skb) {
+		br_port_dev_get_hook_t *port_dev_get_hook;
+
+		port_dev_get_hook = rcu_dereference(br_port_dev_get_hook);
+		if (port_dev_get_hook) {
+			struct net_bridge_port *pdst =
+				__br_get(port_dev_get_hook, NULL, dev, skb,
+					 addr, cookie);
+			if (pdst) {
+				dev_hold(pdst->dev);
+				netdev = pdst->dev;
+				goto out;
+			}
+		}
+	}
+
+	/* Either there is no hook, or can't
+	 * determine the port to use - fall back to using FDB
+	 */
+
+	br = netdev_priv(dev);
+
+	/* Lookup the fdb entry and get reference to the port dev */
+	fdbe = br_fdb_find_rcu(br, addr, 0);
+	if (fdbe && fdbe->dst) {
+		netdev = fdbe->dst->dev; /* port device */
+		dev_hold(netdev);
+	}
+out:
+	rcu_read_unlock();
+	return netdev;
+}
+EXPORT_SYMBOL_GPL(br_port_dev_get);
+
+/* Update bridge statistics for bridge packets processed by offload engines */
+void br_dev_update_stats(struct net_device *dev,
+			 struct rtnl_link_stats64 *nlstats)
+{
+	struct pcpu_sw_netstats *tstats;
+
+	/* Is this a bridge? */
+	if (!(dev->priv_flags & IFF_EBRIDGE))
+		return;
+
+	tstats = this_cpu_ptr(dev->tstats);
+
+	u64_stats_update_begin(&tstats->syncp);
+	u64_stats_add(&tstats->rx_packets, nlstats->rx_packets);
+	u64_stats_add(&tstats->rx_bytes, nlstats->rx_bytes);
+	u64_stats_add(&tstats->tx_packets, nlstats->tx_packets);
+	u64_stats_add(&tstats->tx_bytes, nlstats->tx_bytes);
+	u64_stats_update_end(&tstats->syncp);
+}
+EXPORT_SYMBOL_GPL(br_dev_update_stats);
+
+/* QCA NSS ECM support - Start */
+/* API to know if hairpin feature is enabled/disabled on this bridge port */
+bool br_is_hairpin_enabled(struct net_device *dev)
+{
+	struct net_bridge_port *port = br_port_get_check_rcu(dev);
+
+	if (likely(port))
+		return port->flags & BR_HAIRPIN_MODE;
+	return false;
+}
+EXPORT_SYMBOL_GPL(br_is_hairpin_enabled);
+
+/* QCA NSS ECM support - End */
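Note: br_port_dev_get() returns the port device with a reference already held,
so the caller must balance it with dev_put(). A hypothetical lookup sketch
(br_dev, dest_mac and the zero cookie are assumptions):

	struct net_device *port_dev;

	port_dev = br_port_dev_get(br_dev, dest_mac, skb, 0);
	if (port_dev) {
		/* ... record the egress interface for the flow ... */
		dev_put(port_dev);
	}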
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1275,6 +1275,22 @@ static void neigh_update_hhs(struct neig
 	}
 }

+/* QCA NSS ECM support - Start */
+ATOMIC_NOTIFIER_HEAD(neigh_mac_update_notifier_list);
+
+void neigh_mac_update_register_notify(struct notifier_block *nb)
+{
+	atomic_notifier_chain_register(&neigh_mac_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(neigh_mac_update_register_notify);
+
+void neigh_mac_update_unregister_notify(struct notifier_block *nb)
+{
+	atomic_notifier_chain_unregister(&neigh_mac_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(neigh_mac_update_unregister_notify);
+/* QCA NSS ECM support - End */
+
 /* Generic update routine.
    -- lladdr is new lladdr or NULL, if it is not supplied.
    -- new    is new state.
@@ -1303,6 +1319,7 @@ static int __neigh_update(struct neighbo
 	struct net_device *dev;
 	int err, notify = 0;
 	u8 old;
+	struct neigh_mac_update nmu; /* QCA NSS ECM support */

 	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

@@ -1317,7 +1334,10 @@ static int __neigh_update(struct neighbo
 		new = old;
 		goto out;
 	}
-	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+
+	memset(&nmu, 0, sizeof(struct neigh_mac_update)); /* QCA NSS ECM support */
+
+	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
 	    (old & (NUD_NOARP | NUD_PERMANENT)))
 		goto out;

@@ -1354,7 +1374,12 @@ static int __neigh_update(struct neighbo
 		   - compare new & old
 		   - if they are different, check override flag
 		 */
-		if ((old & NUD_VALID) &&
+		/* QCA NSS ECM update - Start */
+		memcpy(nmu.old_mac, neigh->ha, dev->addr_len);
+		memcpy(nmu.update_mac, lladdr, dev->addr_len);
+		/* QCA NSS ECM update - End */
+
+		if ((old & NUD_VALID) &&
 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
 			lladdr = neigh->ha;
 	} else {
@@ -1476,8 +1501,11 @@ out:
 		neigh_update_gc_list(neigh);
 	if (managed_update)
 		neigh_update_managed_list(neigh);
-	if (notify)
+	if (notify) {
 		neigh_update_notify(neigh, nlmsg_pid);
+		atomic_notifier_call_chain(&neigh_mac_update_notifier_list, 0,
+					   (struct neigh_mac_update *)&nmu); /* QCA NSS ECM support */
+	}
 	trace_neigh_update_done(neigh, err);
 	return err;
 }
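Note: a minimal consumer sketch for the new chain (callback name and module
glue are assumptions; the register API and struct neigh_mac_update come from
the patch). The chain is atomic and is invoked with event 0:

static int my_neigh_mac_cb(struct notifier_block *nb,
			   unsigned long event, void *data)
{
	struct neigh_mac_update *nmu = data;

	/* old_mac/update_mac hold the previous and newly learned
	 * link-layer addresses; no sleeping in atomic context. */
	pr_debug("neigh mac %pM -> %pM\n", nmu->old_mac, nmu->update_mac);
	return NOTIFY_DONE;
}

static struct notifier_block my_neigh_mac_nb = {
	.notifier_call = my_neigh_mac_cb,
};

/* neigh_mac_update_register_notify(&my_neigh_mac_nb); */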
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1211,6 +1211,9 @@ static bool fib_valid_key_len(u32 key, u
 static void fib_remove_alias(struct trie *t, struct key_vector *tp,
 			     struct key_vector *l, struct fib_alias *old);

+/* Define route change notification chain. */
+static BLOCKING_NOTIFIER_HEAD(iproute_chain);	/* QCA NSS ECM support */
+
 /* Caller must hold RTNL. */
 int fib_table_insert(struct net *net, struct fib_table *tb,
 		     struct fib_config *cfg, struct netlink_ext_ack *extack)
@@ -1404,6 +1407,9 @@ int fib_table_insert(struct net *net, st
 	rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
 		  &cfg->fc_nlinfo, nlflags);
 succeeded:
+	blocking_notifier_call_chain(&iproute_chain,
+				     RTM_NEWROUTE, fi);
+
 	return 0;

 out_remove_new_fa:
@@ -1775,6 +1781,9 @@ int fib_table_delete(struct net *net, st
 	if (fa_to_delete->fa_state & FA_S_ACCESSED)
 		rt_cache_flush(cfg->fc_nlinfo.nl_net);

+	blocking_notifier_call_chain(&iproute_chain,
+				     RTM_DELROUTE, fa_to_delete->fa_info);
+
 	fib_release_info(fa_to_delete->fa_info);
 	alias_free_mem_rcu(fa_to_delete);
 	return 0;
@@ -2407,6 +2416,20 @@ void __init fib_trie_init(void)
 					   0, SLAB_PANIC | SLAB_ACCOUNT, NULL);
 }

+/* QCA NSS ECM support - Start */
+int ip_rt_register_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&iproute_chain, nb);
+}
+EXPORT_SYMBOL(ip_rt_register_notifier);
+
+int ip_rt_unregister_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&iproute_chain, nb);
+}
+EXPORT_SYMBOL(ip_rt_unregister_notifier);
+/* QCA NSS ECM support - End */
+
 struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
 {
 	struct fib_table *tb;
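Note: a sketch of how an offload manager might watch IPv4 route changes via the
new chain (the callback is hypothetical; the register/unregister API, the
RTM_NEWROUTE/RTM_DELROUTE events and the struct fib_info payload come from the
hunks above). This chain is blocking, so the callback may sleep:

static int my_iproute_cb(struct notifier_block *nb,
			 unsigned long event, void *data)
{
	struct fib_info *fi = data;

	if (event == RTM_NEWROUTE)
		pr_debug("IPv4 route added (fib_info %p)\n", fi);
	else if (event == RTM_DELROUTE)
		pr_debug("IPv4 route deleted (fib_info %p)\n", fi);
	return NOTIFY_DONE;
}

static struct notifier_block my_iproute_nb = {
	.notifier_call = my_iproute_cb,
};

/* ip_rt_register_notifier(&my_iproute_nb); */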
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -666,6 +666,7 @@ void ndisc_send_ns(struct net_device *de
 	if (skb)
 		ndisc_send_skb(skb, daddr, saddr);
 }
+EXPORT_SYMBOL(ndisc_send_ns);

 void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
 		   const struct in6_addr *daddr)
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -197,6 +197,9 @@ static void rt6_uncached_list_flush_dev(
 	}
 }

+/* Define route change notification chain. */
+ATOMIC_NOTIFIER_HEAD(ip6route_chain);	/* QCA NSS ECM support */
+
 static inline const void *choose_neigh_daddr(const struct in6_addr *p,
 					     struct sk_buff *skb,
 					     const void *daddr)
@@ -3864,6 +3867,10 @@ int ip6_route_add(struct fib6_config *cf
 		return PTR_ERR(rt);

 	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
+	if (!err)
+		atomic_notifier_call_chain(&ip6route_chain,
+					   RTM_NEWROUTE, rt);
+
 	fib6_info_release(rt);

 	return err;
@@ -3885,6 +3892,9 @@ static int __ip6_del_rt(struct fib6_info
 	err = fib6_del(rt, info);
 	spin_unlock_bh(&table->tb6_lock);

+	if (!err)
+		atomic_notifier_call_chain(&ip6route_chain,
+					   RTM_DELROUTE, rt);
 out:
 	fib6_info_release(rt);
 	return err;
@@ -6329,6 +6339,20 @@ static int ip6_route_dev_notify(struct n
 	return NOTIFY_OK;
 }

+/* QCA NSS ECM support - Start */
+int rt6_register_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&ip6route_chain, nb);
+}
+EXPORT_SYMBOL(rt6_register_notifier);
+
+int rt6_unregister_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&ip6route_chain, nb);
+}
+EXPORT_SYMBOL(rt6_unregister_notifier);
+/* QCA NSS ECM support - End */
+
 /*
  *	/proc
 */
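Note: unlike the IPv4 chain in fib_trie.c above (a blocking notifier),
ip6route_chain is an atomic notifier, so rt6 callbacks must not sleep. In these
hunks the payload passed to the chain is the struct fib6_info being inserted or
deleted; a follow-up patch later in this series moves the call sites and
changes that payload.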
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1673,6 +1673,7 @@ const char *netdev_cmd_to_name(enum netd
 	N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
 	N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
 	N(XDP_FEAT_CHANGE)
+	N(BR_JOIN) N(BR_LEAVE)
 	}
 #undef N
 	return "UNKNOWN_NETDEV_EVENT";
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1002,6 +1002,7 @@ void inet6_ifa_finish_destroy(struct ine

 	kfree_rcu(ifp, rcu);
 }
+EXPORT_SYMBOL(inet6_ifa_finish_destroy);

 static void
 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -440,6 +440,15 @@ static inline __be32 vxlan_compute_rco(u
 	return vni_field;
 }

+/*
+ * vxlan_get_vni()
+ *	Returns the vni corresponding to tunnel
+ */
+static inline u32 vxlan_get_vni(struct vxlan_dev *vxlan_tun)
+{
+	return be32_to_cpu(vxlan_tun->cfg.vni);
+}
+
 static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
 {
 	return vs->sock->sk->sk_family;
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -63,6 +63,8 @@ enum {
 #define IPPROTO_MTP		IPPROTO_MTP
   IPPROTO_BEETPH = 94,		/* IP option pseudo header for BEET */
 #define IPPROTO_BEETPH		IPPROTO_BEETPH
+  IPPROTO_ETHERIP = 97,		/* ETHERIP protocol number */
+#define IPPROTO_ETHERIP		IPPROTO_ETHERIP
   IPPROTO_ENCAP = 98,		/* Encapsulation Header */
 #define IPPROTO_ENCAP		IPPROTO_ENCAP
   IPPROTO_PIM = 103,		/* Protocol Independent Multicast */
@@ -327,7 +329,7 @@ struct sockaddr_in {
 #endif

 /* <asm/byteorder.h> contains the htonl type stuff.. */
-#include <asm/byteorder.h>
+#include <asm/byteorder.h>


 #endif /* _UAPI_LINUX_IN_H */
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -63,6 +63,8 @@ enum {
 #define IPPROTO_MTP		IPPROTO_MTP
   IPPROTO_BEETPH = 94,		/* IP option pseudo header for BEET */
 #define IPPROTO_BEETPH		IPPROTO_BEETPH
+  IPPROTO_ETHERIP = 97,		/* ETHERIP protocol number */
+#define IPPROTO_ETHERIP		IPPROTO_ETHERIP
   IPPROTO_ENCAP = 98,		/* Encapsulation Header */
 #define IPPROTO_ENCAP		IPPROTO_ENCAP
   IPPROTO_PIM = 103,		/* Protocol Independent Multicast */
@@ -327,7 +329,7 @@ struct sockaddr_in {
 #endif

 /* <asm/byteorder.h> contains the htonl type stuff.. */
-#include <asm/byteorder.h>
+#include <asm/byteorder.h>


 #endif /* _UAPI_LINUX_IN_H */
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -266,7 +266,6 @@ void nf_conntrack_register_notifier(stru
 	mutex_lock(&nf_ct_ecache_mutex);
 	notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
 					   lockdep_is_held(&nf_ct_ecache_mutex));
-	WARN_ON_ONCE(notify);
 	rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
 	mutex_unlock(&nf_ct_ecache_mutex);
 }
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -26,6 +26,7 @@ struct nf_tcp_net {
 	unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX];
 	u8 tcp_loose;
 	u8 tcp_be_liberal;
+	u8 tcp_no_window_check;
 	u8 tcp_max_retrans;
 	u8 tcp_ignore_invalid_rst;
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -515,11 +515,15 @@ tcp_in_window(struct nf_conn *ct, enum i
 	struct ip_ct_tcp *state = &ct->proto.tcp;
 	struct ip_ct_tcp_state *sender = &state->seen[dir];
 	struct ip_ct_tcp_state *receiver = &state->seen[!dir];
+	const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct));
 	__u32 seq, ack, sack, end, win, swin;
 	bool in_recv_win, seq_ok;
 	s32 receiver_offset;
 	u16 win_raw;

+	if (tn->tcp_no_window_check)
+		return NFCT_TCP_ACCEPT;
+
 	/*
 	 * Get the required data from the packet.
 	 */
@@ -1285,7 +1289,7 @@ int nf_conntrack_tcp_packet(struct nf_co
 		    IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
 		    timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
 			timeout = timeouts[TCP_CONNTRACK_UNACK];
-	else if (ct->proto.tcp.last_win == 0 &&
+	else if (!tn->tcp_no_window_check && ct->proto.tcp.last_win == 0 &&
 		 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
 		timeout = timeouts[TCP_CONNTRACK_RETRANS];
 	else
@@ -1601,6 +1605,9 @@ void nf_conntrack_tcp_init_net(struct ne
 	 */
 	tn->tcp_be_liberal = 0;

+	/* Skip window check */
+	tn->tcp_no_window_check = 0;
+
 	/* If it's non-zero, we turn off RST sequence number check */
 	tn->tcp_ignore_invalid_rst = 0;

--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -633,6 +633,7 @@ enum nf_ct_sysctl_index {
 #endif
 	NF_SYSCTL_CT_PROTO_TCP_LOOSE,
 	NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
+	NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK,
 	NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST,
 	NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS,
 	NF_SYSCTL_CT_PROTO_TIMEOUT_UDP,
@@ -840,6 +841,14 @@ static struct ctl_table nf_ct_sysctl_tab
 		.extra1		= SYSCTL_ZERO,
 		.extra2		= SYSCTL_ONE,
 	},
+	[NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK] = {
+		.procname	= "nf_conntrack_tcp_no_window_check",
+		.maxlen		= sizeof(u8),
+		.mode		= 0644,
+		.proc_handler	= proc_dou8vec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
+	},
 	[NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST] = {
 		.procname	= "nf_conntrack_tcp_ignore_invalid_rst",
 		.maxlen		= sizeof(u8),
@@ -1050,6 +1059,7 @@ static void nf_conntrack_standalone_init

 	XASSIGN(LOOSE, &tn->tcp_loose);
 	XASSIGN(LIBERAL, &tn->tcp_be_liberal);
+	XASSIGN(NO_WINDOW_CHECK, &tn->tcp_no_window_check);
 	XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans);
 	XASSIGN(IGNORE_INVALID_RST, &tn->tcp_ignore_invalid_rst);
 #undef XASSIGN
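Note: with these hunks applied, the knob is exposed per-netns as
net.netfilter.nf_conntrack_tcp_no_window_check and defaults to 0 (window
checking stays enabled, matching upstream behaviour). Setting it to 1, e.g.
with "sysctl -w net.netfilter.nf_conntrack_tcp_no_window_check=1", makes
tcp_in_window() return NFCT_TCP_ACCEPT unconditionally; the usual motivation is
that an offload engine may forward mid-flow segments that conntrack never sees,
which would otherwise desynchronize the tracked window state.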
@@ -0,0 +1,87 @@
From ce18a6fdff6a39a01111d74f513d2ef66142047c Mon Sep 17 00:00:00 2001
From: Murat Sezgin <msezgin@codeaurora.org>
Date: Wed, 5 Aug 2020 13:21:27 -0700
Subject: [PATCH 246/281] net:ipv6: Fix IPv6 user route change event calls

These events should be raised only when the route table is changed
from userspace, so call them in the ioctl handler and in the netlink
message handler functions.

Change-Id: If7ec615014cfc79d5fa72878e49eaf99c2560c32
Signed-off-by: Murat Sezgin <msezgin@codeaurora.org>
---
 net/ipv6/route.c | 31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3867,10 +3867,6 @@ int ip6_route_add(struct fib6_config *cf
 		return PTR_ERR(rt);

 	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
-	if (!err)
-		atomic_notifier_call_chain(&ip6route_chain,
-					   RTM_NEWROUTE, rt);
-
 	fib6_info_release(rt);

 	return err;
@@ -3892,9 +3888,6 @@ static int __ip6_del_rt(struct fib6_info
 	err = fib6_del(rt, info);
 	spin_unlock_bh(&table->tb6_lock);

-	if (!err)
-		atomic_notifier_call_chain(&ip6route_chain,
-					   RTM_DELROUTE, rt);
 out:
 	fib6_info_release(rt);
 	return err;
@@ -4500,6 +4493,10 @@ int ipv6_route_ioctl(struct net *net, un
 			break;
 		}
 		rtnl_unlock();
+		if (!err)
+			atomic_notifier_call_chain(&ip6route_chain,
+				(cmd == SIOCADDRT) ? RTM_NEWROUTE : RTM_DELROUTE, &cfg);
+
 		return err;
 	}

@@ -5518,11 +5515,17 @@ static int inet6_rtm_delroute(struct sk_
 	}

 	if (cfg.fc_mp)
-		return ip6_route_multipath_del(&cfg, extack);
+		err = ip6_route_multipath_del(&cfg, extack);
 	else {
 		cfg.fc_delete_all_nh = 1;
-		return ip6_route_del(&cfg, extack);
+		err = ip6_route_del(&cfg, extack);
 	}
+
+	if (!err)
+		atomic_notifier_call_chain(&ip6route_chain,
+					   RTM_DELROUTE, &cfg);
+
+	return err;
 }

 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -5539,9 +5542,15 @@ static int inet6_rtm_newroute(struct sk_
 	cfg.fc_metric = IP6_RT_PRIO_USER;

 	if (cfg.fc_mp)
-		return ip6_route_multipath_add(&cfg, extack);
+		err = ip6_route_multipath_add(&cfg, extack);
 	else
-		return ip6_route_add(&cfg, GFP_KERNEL, extack);
+		err = ip6_route_add(&cfg, GFP_KERNEL, extack);
+
+	if (!err)
+		atomic_notifier_call_chain(&ip6route_chain,
+					   RTM_NEWROUTE, &cfg);
+
+	return err;
 }

 /* add the overhead of this fib6_nh to nexthop_len */
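Note: after this follow-up, the ip6route_chain payload for user-triggered
changes is the struct fib6_config describing the request (&cfg), not the
struct fib6_info that the earlier hunks passed, so rt6 notifier consumers must
be written against the fib6_config layout.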
@@ -0,0 +1,127 @@
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -417,6 +417,8 @@ static int crypto_authenc_create(struct
 		     enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_free_inst;

+	inst->alg.base.cra_flags |= (auth_base->cra_flags |
+				     enc->base.cra_flags) & CRYPTO_ALG_NOSUPP_SG;
 	inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
 				      auth_base->cra_priority;
 	inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -101,6 +101,11 @@
 #define CRYPTO_NOLOAD			0x00008000

 /*
+ * Set this flag if algorithm does not support SG list transforms
+ */
+#define CRYPTO_ALG_NOSUPP_SG		0x0000c000
+
+/*
  * The algorithm may allocate memory during request processing, i.e. during
  * encryption, decryption, or hashing. Users can request an algorithm with this
  * flag unset if they can't handle memory allocation failures.
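Note: an illustrative algorithm definition (the driver and all names below are
hypothetical; only CRYPTO_ALG_NOSUPP_SG comes from the patch). A hardware AEAD
that cannot walk scatter-gather lists would advertise the flag so that
authenc() instances built on it inherit it (per the authenc.c hunk above) and
the ESP paths below linearize the skb before handing it over:

static struct aead_alg my_hw_aead = {
	.base = {
		.cra_name		= "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name	= "my-hw-authenc",
		.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NOSUPP_SG,
	},
	/* ... setkey/encrypt/decrypt callbacks elided ... */
};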
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -3,6 +3,7 @@

 #include <crypto/aead.h>
 #include <crypto/authenc.h>
+#include <crypto/algapi.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
@@ -658,6 +658,7 @@ static int esp_output(struct xfrm_state
 	struct ip_esp_hdr *esph;
 	struct crypto_aead *aead;
 	struct esp_info esp;
+	bool nosupp_sg;

 	esp.inplace = true;

@@ -669,6 +670,11 @@ static int esp_output(struct xfrm_state
 	aead = x->data;
 	alen = crypto_aead_authsize(aead);

+	nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+	if (nosupp_sg && skb_linearize(skb)) {
+		return -ENOMEM;
+	}
+
 	esp.tfclen = 0;
 	if (x->tfcpad) {
 		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
@@ -890,6 +896,7 @@ static int esp_input(struct xfrm_state *
 	u8 *iv;
 	struct scatterlist *sg;
 	int err = -EINVAL;
+	bool nosupp_sg;

 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
 		goto out;
@@ -897,6 +904,12 @@ static int esp_input(struct xfrm_state *
 	if (elen <= 0)
 		goto out;

+	nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+	if (nosupp_sg && skb_linearize(skb)) {
+		err = -ENOMEM;
+		goto out;
+	}
+
 	assoclen = sizeof(struct ip_esp_hdr);
 	seqhilen = 0;

--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -15,6 +15,7 @@

 #include <crypto/aead.h>
 #include <crypto/authenc.h>
+#include <crypto/algapi.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
@@ -696,6 +696,7 @@ static int esp6_output(struct xfrm_state
 	struct ip_esp_hdr *esph;
 	struct crypto_aead *aead;
 	struct esp_info esp;
+	bool nosupp_sg;

 	esp.inplace = true;

@@ -707,6 +708,11 @@ static int esp6_output(struct xfrm_state
 	aead = x->data;
 	alen = crypto_aead_authsize(aead);

+	nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+	if (nosupp_sg && skb_linearize(skb)) {
+		return -ENOMEM;
+	}
+
 	esp.tfclen = 0;
 	if (x->tfcpad) {
 		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
@@ -934,6 +940,7 @@ static int esp6_input(struct xfrm_state
 	__be32 *seqhi;
 	u8 *iv;
 	struct scatterlist *sg;
+	bool nosupp_sg;

 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
 		ret = -EINVAL;
@@ -945,6 +952,12 @@ static int esp6_input(struct xfrm_state
 		goto out;
 	}

+	nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+	if (nosupp_sg && skb_linearize(skb)) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
 	assoclen = sizeof(struct ip_esp_hdr);
 	seqhilen = 0;

@@ -0,0 +1,384 @@
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -94,6 +94,7 @@ enum cpuhp_state {
 	CPUHP_RADIX_DEAD,
 	CPUHP_PAGE_ALLOC,
 	CPUHP_NET_DEV_DEAD,
+	CPUHP_SKB_RECYCLER_DEAD,
 	CPUHP_PCI_XGENE_DEAD,
 	CPUHP_IOMMU_IOVA_DEAD,
 	CPUHP_LUSTRE_CFS_DEAD,
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1065,6 +1065,10 @@ struct sk_buff {
 	/* only useable after checking ->active_extensions != 0 */
 	struct skb_ext *extensions;
 #endif
+
+#ifdef CONFIG_DEBUG_OBJECTS_SKBUFF
+	void *free_addr;
+#endif
 };

 /* if you move pkt_type around you also must adapt those constants */
@@ -1250,7 +1254,7 @@ static inline void kfree_skb_list(struct sk_buff *segs)
 	kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED);
 }

-#ifdef CONFIG_TRACEPOINTS
+#ifdef CONFIG_SKB_RECYCLER
 void consume_skb(struct sk_buff *skb);
 #else
 static inline void consume_skb(struct sk_buff *skb)
@@ -1262,6 +1266,9 @@ static inline void consume_skb(struct sk_buff *skb)
 void __consume_stateless_skb(struct sk_buff *skb);
 void __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_cache;
+extern void kfree_skbmem(struct sk_buff *skb);
+extern void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
+			     bool napi_safe);

 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -369,6 +369,27 @@ config NET_FLOW_LIMIT
 	  with many clients some protection against DoS by a single (spoofed)
 	  flow that greatly exceeds average workload.

+config SKB_RECYCLER
+	bool "Generic skb recycling"
+	default y
+	help
+	  SKB_RECYCLER is used to implement RX-to-RX skb recycling.
+	  This config enables the recycling scheme for bridging and
+	  routing workloads. It can reduce skbuff freeing or
+	  reallocation overhead.
+
+config SKB_RECYCLER_MULTI_CPU
+	bool "Cross-CPU recycling for CPU-locked workloads"
+	depends on SMP && SKB_RECYCLER
+	default n
+
+config ALLOC_SKB_PAGE_FRAG_DISABLE
+	bool "Disable page fragment based skbuff payload allocations"
+	depends on !SKB_RECYCLER
+	default n
+	help
+	  Disable page fragment based allocations for skbuff payloads.
+
 menu "Network testing"

 config NET_PKTGEN
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
 obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
 obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
 obj-$(CONFIG_OF) += of_net.o
+obj-$(CONFIG_SKB_RECYCLER) += skbuff_recycle.o
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6016,10 +6016,16 @@ static int process_backlog(struct napi_struct *napi, int quota)

 	napi->weight = READ_ONCE(dev_rx_weight);
 	while (again) {
-		struct sk_buff *skb;
+		struct sk_buff *skb, *next_skb;

 		while ((skb = __skb_dequeue(&sd->process_queue))) {
 			rcu_read_lock();
+
+			next_skb = skb_peek(&sd->process_queue);
+			if (likely(next_skb)) {
+				prefetch(next_skb->data);
+			}
+
 			__netif_receive_skb(skb);
 			rcu_read_unlock();
 			input_queue_head_incr(sd);
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -87,6 +87,31 @@

 #include "dev.h"
 #include "sock_destructor.h"
+#include "skbuff_recycle.h"
+
+struct kmem_cache *skb_data_cache;
+/*
+ * For low memory profile, NSS_SKB_FIXED_SIZE_2K is enabled and
+ * CONFIG_SKB_RECYCLER is disabled. For premium and enterprise profile
+ * CONFIG_SKB_RECYCLER is enabled and NSS_SKB_FIXED_SIZE_2K is disabled.
+ * Irrespective of NSS_SKB_FIXED_SIZE_2K enabled/disabled, the
+ * CONFIG_SKB_RECYCLER and __LP64__ determines the value of SKB_DATA_CACHE_SIZE
+ */
+#if defined(CONFIG_SKB_RECYCLER)
+/*
+ * 2688 for 64bit arch, 2624 for 32bit arch
+ */
+#define SKB_DATA_CACHE_SIZE (SKB_DATA_ALIGN(SKB_RECYCLE_SIZE + NET_SKB_PAD) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#else
+/*
+ * 2368 for 64bit arch, 2176 for 32bit arch
+ */
+#if defined(__LP64__)
+#define SKB_DATA_CACHE_SIZE ((SKB_DATA_ALIGN(1984 + NET_SKB_PAD)) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#else
+#define SKB_DATA_CACHE_SIZE ((SKB_DATA_ALIGN(1856 + NET_SKB_PAD)) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#endif
+#endif

 struct kmem_cache *skbuff_cache __ro_after_init;
 static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
@@ -551,21 +576,20 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
 			     bool *pfmemalloc)
 {
 	bool ret_pfmemalloc = false;
-	size_t obj_size;
+	unsigned int obj_size = *size;
 	void *obj;

 	obj_size = SKB_HEAD_ALIGN(*size);
-	if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
-	    !(flags & KMALLOC_NOT_NORMAL_BITS)) {
-		obj = kmem_cache_alloc_node(skb_small_head_cache,
-				flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
-				node);
-		*size = SKB_SMALL_HEAD_CACHE_SIZE;
+	if (obj_size > SZ_2K && obj_size <= SKB_DATA_CACHE_SIZE) {
+		obj = kmem_cache_alloc_node(skb_data_cache,
+				flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
+				node);
+		*size = SKB_DATA_CACHE_SIZE;
 		if (obj || !(gfp_pfmemalloc_allowed(flags)))
 			goto out;
 		/* Try again but now we are using pfmemalloc reserves */
 		ret_pfmemalloc = true;
-		obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
+		obj = kmem_cache_alloc_node(skb_data_cache, flags, node);
 		goto out;
 	}

@@ -648,10 +671,12 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
 	 * Both skb->head and skb_shared_info are cache line aligned.
 	 */
+	size = SKB_DATA_ALIGN(size);
+	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
 	if (unlikely(!data))
 		goto nodata;
-	/* kmalloc_size_roundup() might give us more room than requested.
+	/* kmalloc_reserve(size) might give us more room than requested.
 	 * Put skb_shared_info exactly at the end of allocated zone,
 	 * to allow max possible filling before reallocation.
 	 */
@@ -686,7 +711,7 @@ EXPORT_SYMBOL(__alloc_skb);
 /**
  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
  *	@dev: network device to receive on
- *	@len: length to allocate
+ *	@length: length to allocate
  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
  *
  *	Allocate a new &sk_buff and assign it a usage count of one. The
@@ -696,29 +721,53 @@ EXPORT_SYMBOL(__alloc_skb);
  *
  *	%NULL is returned if there is no free memory.
  */
-struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
-				   gfp_t gfp_mask)
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+				   unsigned int length, gfp_t gfp_mask)
 {
-	struct page_frag_cache *nc;
 	struct sk_buff *skb;
+	unsigned int len = length;
+
+#ifdef CONFIG_SKB_RECYCLER
+	skb = skb_recycler_alloc(dev, length);
+	if (likely(skb))
+		return skb;
+
+	len = SKB_RECYCLE_SIZE;
+	if (unlikely(length > SKB_RECYCLE_SIZE))
+		len = length;
+
+	skb = __alloc_skb(len + NET_SKB_PAD, gfp_mask,
+			  SKB_ALLOC_RX, NUMA_NO_NODE);
+	if (!skb)
+		goto skb_fail;
+	goto skb_success;
+#else
+	struct page_frag_cache *nc;
 	bool pfmemalloc;
+	bool page_frag_alloc_enable = true;
 	void *data;

 	len += NET_SKB_PAD;

+
+#ifdef CONFIG_ALLOC_SKB_PAGE_FRAG_DISABLE
+	page_frag_alloc_enable = false;
+#endif
 	/* If requested length is either too small or too big,
 	 * we use kmalloc() for skb->head allocation.
 	 */
 	if (len <= SKB_WITH_OVERHEAD(1024) ||
 	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
-	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
+	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA)) ||
+	    !page_frag_alloc_enable) {
 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
 		if (!skb)
 			goto skb_fail;
 		goto skb_success;
 	}

-	len = SKB_HEAD_ALIGN(len);
+	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	len = SKB_DATA_ALIGN(len);

 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
@@ -747,6 +796,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 	if (pfmemalloc)
 		skb->pfmemalloc = 1;
 	skb->head_frag = 1;
+#endif

 skb_success:
 	skb_reserve(skb, NET_SKB_PAD);
@@ -817,7 +867,8 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 		data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
 		pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
 	} else {
-		len = SKB_HEAD_ALIGN(len);
+		len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		len = SKB_DATA_ALIGN(len);

 		data = page_frag_alloc(&nc->page, len, gfp_mask);
 		pfmemalloc = nc->page.pfmemalloc;
@@ -975,7 +1026,7 @@ static void skb_free_head(struct sk_buff *skb, bool napi_safe)
 	}
 }

-static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
+void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
 			     bool napi_safe)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -1018,7 +1069,7 @@ static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
 /*
  * Free an skbuff by memory without cleaning the state.
  */
-static void kfree_skbmem(struct sk_buff *skb)
+void kfree_skbmem(struct sk_buff *skb)
 {
 	struct sk_buff_fclones *fclones;

@@ -1282,7 +1333,6 @@ void skb_tx_error(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(skb_tx_error);

-#ifdef CONFIG_TRACEPOINTS
 /**
  * consume_skb - free an skbuff
  * @skb: buffer to free
@@ -1291,13 +1341,48 @@ EXPORT_SYMBOL(skb_tx_error);
  * Functions identically to kfree_skb, but kfree_skb assumes that the frame
  * is being dropped after a failure and notes that
  */
+#ifdef CONFIG_SKB_RECYCLER
 void consume_skb(struct sk_buff *skb)
 {
 	if (!skb_unref(skb))
 		return;
+	prefetch(&skb->destructor);
+
+	/* Tian: Not sure if we need to continue using this,
+	 * since unref does the work in 5.4
+	 */
+
+	/*
+	if (likely(atomic_read(&skb->users) == 1))
+		smp_rmb();
+	else if (likely(!atomic_dec_and_test(&skb->users)))
+		return;
+	*/

+	/* If possible we'd like to recycle any skb rather than just free it,
+	 * but in order to do that we need to release any head state too.
+	 * We don't want to do this later because we'll be in a pre-emption
+	 * disabled state.
+	 */
+	skb_release_head_state(skb);
+
+	/* Can we recycle this skb?  If we can then it will be much faster
+	 * for us to recycle this one later than to allocate a new one
+	 * from scratch.
+	 */
+	if (likely(skb->head) && likely(skb_recycler_consume(skb)))
+		return;
+
+#ifdef CONFIG_TRACEPOINTS
 	trace_consume_skb(skb, __builtin_return_address(0));
-	__kfree_skb(skb);
+#endif
+	/* We're not recycling so now we need to do the rest of what we would
+	 * have done in __kfree_skb (above and beyond the skb_release_head_state
+	 * that we already did).
+	 */
+	if (likely(skb->head))
+		skb_release_data(skb, SKB_CONSUMED, false);
+	kfree_skbmem(skb);
 }
 EXPORT_SYMBOL(consume_skb);
 #endif
@@ -2107,6 +2192,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	if (skb_pfmemalloc(skb))
 		gfp_mask |= __GFP_MEMALLOC;

+	size = SKB_DATA_ALIGN(size);
+	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
 	if (!data)
 		goto nodata;
@@ -4854,6 +4941,10 @@ static void skb_extensions_init(void) {}

 void __init skb_init(void)
 {
+	skb_data_cache = kmem_cache_create_usercopy("skb_data_cache",
+						SKB_DATA_CACHE_SIZE,
+						0, SLAB_PANIC, 0, SKB_DATA_CACHE_SIZE,
+						NULL);
 	skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
 						  sizeof(struct sk_buff),
 						  0,
@@ -4879,6 +4970,7 @@ void __init skb_init(void)
 						  SKB_SMALL_HEAD_HEADROOM,
 						  NULL);
 	skb_extensions_init();
+	skb_recycler_init();
 }

 static int
@@ -6382,6 +6474,8 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
 	if (skb_pfmemalloc(skb))
 		gfp_mask |= __GFP_MEMALLOC;

+	size = SKB_DATA_ALIGN(size);
+	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
 	if (!data)
 		return -ENOMEM;
@@ -6498,6 +6592,8 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
 	if (skb_pfmemalloc(skb))
 		gfp_mask |= __GFP_MEMALLOC;

+	size = SKB_DATA_ALIGN(size);
+	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
 	if (!data)
 		return -ENOMEM;
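Note: the recycler itself (skb_recycler_alloc(), skb_recycler_consume(),
skb_recycler_init() and the skbuff_recycle.{c,h} files referenced above) is not
included in this excerpt of the squashed commit. With CONFIG_SKB_RECYCLER=y,
__netdev_alloc_skb() first tries to pull a previously used RX buffer from the
recycle list, and consume_skb() tries to park freed buffers on that list
instead of releasing them, which is the RX-to-RX recycling described in the
Kconfig help text.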