qualcommax: skbuff_recycle: add proc option to enable/disable

There are instances where `skbuff_recycle` corrupts skb headers, causing
various network-related issues. The recycler was originally meant to
resolve memory usage problems on kernel 6.1.

Introduce a proc option to enable or disable skbuff_recycle. It is
enabled by default. Users on 512 MB platforms should test whether memory
leaks still occur with this option disabled.

Disable:
echo 0 > /proc/net/skb_recycler/skb_recycler_enable

Enable:
echo 1 > /proc/net/skb_recycler/skb_recycler_enable
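
The current setting can be read back from the same entry, since the proc file is registered with both read and write handlers:

cat /proc/net/skb_recycler/skb_recycler_enable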

NOTE: A new script in `qca-nss-drv` will automatically manage this as a
UCI-configurable option.
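
For illustration only, the script's behaviour could amount to roughly the shell sketch below; the `nss.general.skb_recycler` UCI path is a hypothetical name, not necessarily the one the qca-nss-drv script actually uses:

# hypothetical UCI option name; disable the recycler if set to 0
[ "$(uci -q get nss.general.skb_recycler)" = "0" ] && \
	echo 0 > /proc/net/skb_recycler/skb_recycler_enable
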

commit 217a4fa43ba2ad0a02e8080dfde11ad23534aa8c (HEAD -> NSS-12.5-K6.x)
Author:     Sean Khan <datapronix@protonmail.com>
AuthorDate: Sat Jul 13 18:43:52 2024 -0400
Commit:     Sean Khan <datapronix@protonmail.com>
CommitDate: Sat Jul 13 18:43:52 2024 -0400

    nss-drv: add skb_recycler init script

Signed-off-by: Sean Khan <datapronix@protonmail.com>
---
 2 files changed, 87 insertions(+), 9 deletions(-)

@@ -33,14 +33,20 @@ static struct global_recycler glob_recycler;
 static int skb_recycle_spare_max_skbs = SKB_RECYCLE_SPARE_MAX_SKBS;
 #endif
 
+static int skb_recycling_enable = 1;
+
 inline struct sk_buff *skb_recycler_alloc(struct net_device *dev,
-					  unsigned int length)
+					  unsigned int length, bool reset_skb)
 {
 	unsigned long flags;
 	struct sk_buff_head *h;
 	struct sk_buff *skb = NULL;
 	struct sk_buff *ln = NULL;
 
+	/* Allocate the recycled skbs if the skb_recycling_enable */
+	if (unlikely(!skb_recycling_enable)) {
+		return NULL;
+	}
+
 	if (unlikely(length > SKB_RECYCLE_SIZE))
 		return NULL;
@@ -106,10 +112,15 @@ inline struct sk_buff *skb_recycler_alloc(struct net_device *dev,
 		 * zero most of the structure so prefetch the start
 		 * of the shinfo region now so it's in the D-cache
 		 * before we start to write that.
+		 * For buffers recycled by PPE DS rings, the packets wouldnt
+		 * have been processed by host and hence shinfo reset can be
+		 * avoided. Avoid it if specifically requested for it
+		 * (by DS rings), and the buffer is found to be recycled by
+		 * DS previously
 		 */
 		shinfo = skb_shinfo(skb);
 		prefetchw(shinfo);
 
 		zero_struct(skb, offsetof(struct sk_buff, tail));
 		refcount_set(&skb->users, 1);
 		skb->mac_header = (typeof(skb->mac_header))~0U;
@@ -131,6 +142,12 @@ inline bool skb_recycler_consume(struct sk_buff *skb)
 	unsigned long flags;
 	struct sk_buff_head *h;
 	struct sk_buff *ln = NULL;
 
+	/* Consume the skbs if the skb_recycling_enable */
+	if (unlikely(!skb_recycling_enable)) {
+		return false;
+	}
+
 	/* Can we recycle this skb? If not, simply return that we cannot */
 	if (unlikely(!consume_skb_can_recycle(skb, SKB_RECYCLE_MIN_SIZE,
 					      SKB_RECYCLE_MAX_SIZE)))
@@ -237,7 +254,9 @@ inline bool skb_recycler_consume(struct sk_buff *skb)
 inline bool skb_recycler_consume_list_fast(struct sk_buff_head *skb_list)
 {
 	struct sk_buff *skb = NULL, *next = NULL;
+
+	if (unlikely(!skb_recycling_enable)) {
+		return false;
+	}
 
 	skb_queue_walk_safe(skb_list, skb, next) {
 		if (skb) {
 			__skb_unlink(skb, skb_list);
@@ -253,6 +272,11 @@ inline bool skb_recycler_consume_list_fast(struct sk_buff_head *skb_list)
 	unsigned long flags;
 	struct sk_buff_head *h;
 
+	/* Allocate the recycled skbs if the skb_recycling_enable */
+	if (unlikely(!skb_recycling_enable)) {
+		return false;
+	}
+
 	h = &get_cpu_var(recycle_list);
 	local_irq_save(flags);
 
 	/* Attempt to enqueue the CPU hot recycle list first */
@@ -537,6 +561,46 @@ static const struct proc_ops proc_skb_max_spare_skbs_fops = {
 };
 #endif /* CONFIG_SKB_RECYCLER_MULTI_CPU */
 
+/* procfs: skb_recycler_enable
+ * By default, recycler is disabled for QSDK_512 profile.
+ * Can be enabled for alder/miami QSDK_512 profile.
+ */
+static int proc_skb_recycler_enable_show(struct seq_file *seq, void *v) {
+	seq_printf(seq, "%d\n", skb_recycling_enable);
+	return 0;
+}
+
+static int proc_skb_recycle_enable_open(struct inode *inode, struct file *file) {
+	return single_open(file, proc_skb_recycler_enable_show,
+			   pde_data(inode));
+}
+
+static ssize_t proc_skb_recycle_enable_write(struct file *file,
+					     const char __user *buf,
+					     size_t count, loff_t *ppos) {
+	int ret;
+	int enable;
+	char buffer[13];
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count) != 0)
+		return -EFAULT;
+
+	ret = kstrtoint(strstrip(buffer), 10, &enable);
+	if (ret == 0 && enable >= 0)
+		skb_recycling_enable = enable;
+
+	return count;
+}
+
+static const struct proc_ops proc_skb_recycle_enable_fops = {
+	.proc_open = proc_skb_recycle_enable_open,
+	.proc_read = seq_read,
+	.proc_write = proc_skb_recycle_enable_write,
+	.proc_release = single_release,
+};
+
 static void skb_recycler_init_procfs(void)
 {
 	proc_net_skbrecycler = proc_mkdir("skb_recycler", init_net.proc_net);
@@ -570,6 +634,13 @@ static void skb_recycler_init_procfs(void)
 			 &proc_skb_max_spare_skbs_fops))
 		pr_err("cannot create proc net skb_recycle max_spare_skbs\n");
 #endif
+
+	if (!proc_create("skb_recycler_enable",
+			 S_IRUGO | S_IWUGO,
+			 proc_net_skbrecycler,
+			 &proc_skb_recycle_enable_fops))
+		pr_err("cannot create proc net skb_recycle enable\n");
 }
 
 void __init skb_recycler_init(void)
@@ -607,6 +678,7 @@ void __init skb_recycler_init(void)
 void skb_recycler_print_all_lists(void)
 {
+	unsigned long flags;
 	int cpu;
 
 #ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
@@ -637,6 +709,7 @@ void skb_recycler_print_all_lists(void)
 	local_irq_restore(flags);
 	preempt_enable();
 }
 
 #ifdef CONFIG_SKB_FAST_RECYCLABLE_DEBUG_ENABLE
@@ -717,11 +790,11 @@ static inline bool consume_skb_can_fast_recycle_debug(const struct sk_buff *skb,
 }
 
 /**
  * check_skb_fast_recyclable - Debug API to flag any sanity check failures
  * on a fast recycled skb
  * @skb: buffer to be checked
  *
  * Checks skb recyclability
  */
 void check_skb_fast_recyclable(struct sk_buff *skb)
 {

@@ -162,20 +162,25 @@ static inline bool consume_skb_can_recycle(const struct sk_buff *skb,
 	if (unlikely(skb_pfmemalloc(skb)))
 		return false;
 
+	if (skb->active_extensions)
+		return false;
+
 	return true;
 }
 
 #ifdef CONFIG_SKB_RECYCLER
 void __init skb_recycler_init(void);
-struct sk_buff *skb_recycler_alloc(struct net_device *dev, unsigned int length);
+struct sk_buff *skb_recycler_alloc(struct net_device *dev, unsigned int length, bool reset_skb);
 bool skb_recycler_consume(struct sk_buff *skb);
 bool skb_recycler_consume_list_fast(struct sk_buff_head *skb_list);
 void skb_recycler_print_all_lists(void);
+void skb_recycler_clear_flags(struct sk_buff *skb);
 #else
 #define skb_recycler_init() {}
-#define skb_recycler_alloc(dev, len) NULL
+#define skb_recycler_alloc(dev, len, reset_skb) NULL
 #define skb_recycler_consume(skb) false
 #define skb_recycler_consume_list_fast(skb_list) false
 #define skb_recycler_print_all_lists() false
+#define skb_recycler_clear_flags(skb) {}
 #endif
 
 #endif