diff --git a/target/linux/qualcommax/files/net/core/skbuff_recycle.c b/target/linux/qualcommax/files/net/core/skbuff_recycle.c
index 3f1afcb198..fc126abab0 100644
--- a/target/linux/qualcommax/files/net/core/skbuff_recycle.c
+++ b/target/linux/qualcommax/files/net/core/skbuff_recycle.c
@@ -33,14 +33,20 @@ static struct global_recycler glob_recycler;
 static int skb_recycle_spare_max_skbs = SKB_RECYCLE_SPARE_MAX_SKBS;
 #endif
 
+static int skb_recycling_enable = 1;
 inline struct sk_buff *skb_recycler_alloc(struct net_device *dev,
-					  unsigned int length)
+					  unsigned int length, bool reset_skb)
 {
 	unsigned long flags;
 	struct sk_buff_head *h;
 	struct sk_buff *skb = NULL;
 	struct sk_buff *ln = NULL;
 
+	/* Hand out recycled skbs only while skb_recycling_enable is set. */
+	if (unlikely(!skb_recycling_enable)) {
+		return NULL;
+	}
+
 	if (unlikely(length > SKB_RECYCLE_SIZE))
 		return NULL;
 
@@ -106,10 +112,15 @@ inline struct sk_buff *skb_recycler_alloc(struct net_device *dev,
 	 * zero most of the structure so prefetch the start
 	 * of the shinfo region now so it's in the D-cache
 	 * before we start to write that.
+	 * For buffers recycled by the PPE DS rings, the packets would
+	 * not have been processed by the host, so the shinfo reset can
+	 * be skipped. Skip it only when the caller (a DS ring)
+	 * specifically requests it and the buffer was previously
+	 * recycled by DS.
 	 */
+	shinfo = skb_shinfo(skb);
 	prefetchw(shinfo);
-
 	zero_struct(skb, offsetof(struct sk_buff, tail));
 	refcount_set(&skb->users, 1);
 	skb->mac_header = (typeof(skb->mac_header))~0U;
 
@@ -131,6 +142,12 @@ inline bool skb_recycler_consume(struct sk_buff *skb)
 	unsigned long flags;
 	struct sk_buff_head *h;
 	struct sk_buff *ln = NULL;
+
+	/* Recycle consumed skbs only while skb_recycling_enable is set. */
+	if (unlikely(!skb_recycling_enable)) {
+		return false;
+	}
+
 	/* Can we recycle this skb? If not, simply return that we cannot */
 	if (unlikely(!consume_skb_can_recycle(skb, SKB_RECYCLE_MIN_SIZE,
 					      SKB_RECYCLE_MAX_SIZE)))
@@ -237,7 +254,9 @@ inline bool skb_recycler_consume(struct sk_buff *skb)
 inline bool skb_recycler_consume_list_fast(struct sk_buff_head *skb_list)
 {
 	struct sk_buff *skb = NULL, *next = NULL;
-
+	if (unlikely(!skb_recycling_enable)) {
+		return false;
+	}
 	skb_queue_walk_safe(skb_list, skb, next) {
 		if (skb) {
 			__skb_unlink(skb, skb_list);
@@ -253,6 +272,11 @@ inline bool skb_recycler_consume_list_fast(struct sk_buff_head *skb_list)
 	unsigned long flags;
 	struct sk_buff_head *h;
 
+	/* Recycle the list only while skb_recycling_enable is set. */
+	if (unlikely(!skb_recycling_enable)) {
+		return false;
+	}
+
 	h = &get_cpu_var(recycle_list);
 	local_irq_save(flags);
 	/* Attempt to enqueue the CPU hot recycle list first */
@@ -537,6 +561,46 @@ static const struct proc_ops proc_skb_max_spare_skbs_fops = {
 };
 #endif /* CONFIG_SKB_RECYCLER_MULTI_CPU */
 
+/* procfs: skb_recycler_enable
+ * By default, the recycler is disabled for the QSDK_512 profile.
+ * It can be enabled for the alder/miami QSDK_512 profiles.
+ */
+static int proc_skb_recycler_enable_show(struct seq_file *seq, void *v) {
+	seq_printf(seq, "%d\n", skb_recycling_enable);
+	return 0;
+}
+
+static int proc_skb_recycle_enable_open(struct inode *inode, struct file *file) {
+	return single_open(file, proc_skb_recycler_enable_show,
+			   pde_data(inode));
+}
+
+static ssize_t proc_skb_recycle_enable_write(struct file *file,
+					     const char __user *buf,
+					     size_t count, loff_t *ppos) {
+	int ret;
+	int enable;
+	char buffer[13];
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count) != 0)
+		return -EFAULT;
+	ret = kstrtoint(strstrip(buffer), 10, &enable);
+	if (ret == 0 && enable >= 0)
+		skb_recycling_enable = enable;
+
+	return count;
+}
+
+static const struct proc_ops proc_skb_recycle_enable_fops = {
+	.proc_open = proc_skb_recycle_enable_open,
+	.proc_read = seq_read,
+	.proc_write = proc_skb_recycle_enable_write,
+	.proc_release = single_release,
+};
+
 static void skb_recycler_init_procfs(void)
 {
 	proc_net_skbrecycler = proc_mkdir("skb_recycler", init_net.proc_net);
@@ -570,6 +634,13 @@ static void skb_recycler_init_procfs(void)
 		       &proc_skb_max_spare_skbs_fops))
 		pr_err("cannot create proc net skb_recycle max_spare_skbs\n");
 #endif
+
+	if (!proc_create("skb_recycler_enable",
+			 S_IRUGO | S_IWUGO,
+			 proc_net_skbrecycler,
+			 &proc_skb_recycle_enable_fops))
+		pr_err("cannot create proc net skb_recycle enable\n");
+
 }
 
 void __init skb_recycler_init(void)
@@ -607,6 +678,7 @@ void __init skb_recycler_init(void)
 
 void skb_recycler_print_all_lists(void)
 {
+	unsigned long flags;
 	int cpu;
 
 #ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
@@ -637,6 +709,7 @@ void skb_recycler_print_all_lists(void)
 
 	local_irq_restore(flags);
 	preempt_enable();
+
 }
 
 #ifdef CONFIG_SKB_FAST_RECYCLABLE_DEBUG_ENABLE
@@ -717,11 +790,11 @@ static inline bool consume_skb_can_fast_recycle_debug(const struct sk_buff *skb,
 }
 
 /**
- * check_skb_fast_recyclable - Debug API to flag any sanity check failures
- * on a fast recycled skb
- * @skb: buffer to be checked
+ * check_skb_fast_recyclable - Debug API to flag any sanity check failures
+ *			       on a fast recycled skb
+ * @skb: buffer to be checked
  *
- * Checks skb recyclability
+ * Checks skb recyclability.
  */
 void check_skb_fast_recyclable(struct sk_buff *skb)
 {
diff --git a/target/linux/qualcommax/files/net/core/skbuff_recycle.h b/target/linux/qualcommax/files/net/core/skbuff_recycle.h
index dd4b5b39d2..bfc4692017 100644
--- a/target/linux/qualcommax/files/net/core/skbuff_recycle.h
+++ b/target/linux/qualcommax/files/net/core/skbuff_recycle.h
@@ -162,20 +162,25 @@ static inline bool consume_skb_can_recycle(const struct sk_buff *skb,
 	if (unlikely(skb_pfmemalloc(skb)))
 		return false;
 
+	if (skb->active_extensions)
+		return false;
+
 	return true;
 }
 
 #ifdef CONFIG_SKB_RECYCLER
 void __init skb_recycler_init(void);
-struct sk_buff *skb_recycler_alloc(struct net_device *dev, unsigned int length);
+struct sk_buff *skb_recycler_alloc(struct net_device *dev, unsigned int length, bool reset_skb);
 bool skb_recycler_consume(struct sk_buff *skb);
 bool skb_recycler_consume_list_fast(struct sk_buff_head *skb_list);
 void skb_recycler_print_all_lists(void);
+void skb_recycler_clear_flags(struct sk_buff *skb);
 #else
 #define skb_recycler_init() {}
-#define skb_recycler_alloc(dev, len) NULL
+#define skb_recycler_alloc(dev, len, reset_skb) NULL
 #define skb_recycler_consume(skb) false
 #define skb_recycler_consume_list_fast(skb_list) false
 #define skb_recycler_print_all_lists() false
+#define skb_recycler_clear_flags(skb) {}
 #endif
 #endif
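
Usage note: with this patch applied, the toggle lands at /proc/net/skb_recycler/skb_recycler_enable (the directory comes from the existing proc_mkdir("skb_recycler", init_net.proc_net) call, the file from the proc_create() added above). Below is a minimal userspace sketch for reading and flipping the knob; the proc path follows directly from the patch, but the program itself, including its helper names, is illustrative only and not part of the change.

/* Minimal sketch: query/toggle the skb recycler via the new procfs knob.
 * Assumes a kernel built with CONFIG_SKB_RECYCLER and this patch, so that
 * /proc/net/skb_recycler/skb_recycler_enable exists.
 */
#include <stdio.h>
#include <stdlib.h>

#define RECYCLER_PROC "/proc/net/skb_recycler/skb_recycler_enable"

static int read_enable(void)
{
	FILE *f = fopen(RECYCLER_PROC, "r");
	int val = -1;

	if (!f) {
		perror("fopen " RECYCLER_PROC);
		return -1;
	}
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

static int write_enable(int enable)
{
	FILE *f = fopen(RECYCLER_PROC, "w");

	if (!f) {
		perror("fopen " RECYCLER_PROC);
		return -1;
	}
	fprintf(f, "%d\n", enable);
	fclose(f);
	return 0;
}

int main(int argc, char **argv)
{
	if (argc > 1)
		write_enable(atoi(argv[1]));	/* e.g. 0 = off, 1 = on */
	printf("skb_recycler_enable = %d\n", read_enable());
	return 0;
}

Writing 0 makes both the alloc and consume paths bail out early (each now checks skb_recycling_enable first); any value >= 1 re-enables them, since the write handler accepts every non-negative integer.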
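
For context on the new reset_skb parameter: the comment added in skb_recycler_alloc() says the shinfo reset can be skipped for buffers that PPE DS rings recycle without host processing. A hypothetical caller might look like the sketch below; rx_refill_alloc() and from_ds_ring are invented names for illustration, and the exact reset_skb semantics should be confirmed against the real NSS/PPE callers.

/* Hypothetical RX-refill helper (not part of this patch). A DS ring that
 * knows the host never touched the buffer can ask skb_recycler_alloc()
 * to skip the shinfo reset by passing reset_skb = false; all other
 * callers pass true. When the recycler is empty, or recycling has been
 * turned off via procfs (skb_recycler_alloc() then returns NULL), fall
 * back to the regular allocator.
 */
static struct sk_buff *rx_refill_alloc(struct net_device *dev,
				       unsigned int len, bool from_ds_ring)
{
	struct sk_buff *skb;

	skb = skb_recycler_alloc(dev, len, !from_ds_ring);
	if (skb)
		return skb;

	return netdev_alloc_skb(dev, len);
}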