qualcommax: Introduce skbuff_recycle for performance

QSDK NSS builds utilize skbuff recycling for more efficient memory handling.

On a Dynalink DL-WRX36 (with the pbuf script set to 'auto'), a significant drop in
memory usage was observed, as well as consistent, sustained RX/TX speeds.

BEFORE:
echo 3 >! /proc/sys/vm/drop_caches

free -m
               total        used        free      shared  buff/cache   available
Mem:             867         338         547          90         101         528
Swap:              0           0           0

AFTER:
               total        used        free      shared  buff/cache   available
Mem:             867         242         594           1          81         624
Swap:              0           0           0
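
The recycler added below also exposes its pools through procfs (created by
skb_recycler_init_procfs() in the recycler code further down), so pool
occupancy can be inspected, and the pools flushed, while reproducing numbers
like the ones above:

cat /proc/net/skb_recycler/count
echo 1 > /proc/net/skb_recycler/flush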

NOTE:

For 512 MB platforms, users need to test the following scenarios, as the
patch `999-233-ath11k-Disable-rx_header-tlv-for-2K-SKB.patch` can really
only be exercised on platforms with 512 MB of RAM or less.

1.) Explicitly set 'ATH11K_MEM_PROFILE_512M' and check whether the system
crashes on boot.

2.) Explicitly set 'ATH11K_MEM_PROFILE_1G' and repeat the check.

3.) Remove the patches
999-233-ath11k-Disable-rx_header-tlv-for-2K-SKB.patch
999-311-ath11k-configure-nss-thread-priority-during-pdev_ini.patch
and re-test scenarios #1 and #2.

It was incorrectly assumed that selecting the 512M profile on 1G platforms
would save memory; instead, the profile must be set explicitly so the
firmware knows the proper memory regions, otherwise it crashes.
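
As an illustration only, pinning the profile might look like the config
fragment below; the exact symbol names and where they live depend on how the
ath11k NSS driver exposes them (the CONFIG_ prefix here is an assumption), so
treat this as a sketch rather than the definitive switch:

CONFIG_ATH11K_MEM_PROFILE_512M=y
# CONFIG_ATH11K_MEM_PROFILE_1G is not set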

Signed-off-by: Sean Khan <datapronix@protonmail.com>

7 changed files with 1391 additions and 0 deletions


@@ -506,6 +506,11 @@ CONFIG_SERIAL_MSM=y
CONFIG_SERIAL_MSM_CONSOLE=y
CONFIG_SGL_ALLOC=y
CONFIG_SG_POOL=y
# CONFIG_ALLOC_SKB_PAGE_FRAG_DISABLE is not set
# CONFIG_DEBUG_OBJECTS_SKBUFF is not set
CONFIG_SKB_RECYCLER=y
CONFIG_SKB_RECYCLER_MULTI_CPU=y
# CONFIG_SKB_FAST_RECYCLABLE_DEBUG_ENABLE is not set
CONFIG_SMP=y
# CONFIG_SM_CAMCC_6350 is not set
# CONFIG_SM_CAMCC_8450 is not set


@@ -0,0 +1,332 @@
/*
* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <asm/stacktrace.h>
#include <asm/current.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/smp.h>
#include "skbuff_debug.h"
#include "skbuff_notifier.h"
#include "skbuff_recycle.h"
static int skbuff_debugobj_enabled __read_mostly = 1;
static int skbuff_debug_event_handler(struct notifier_block *nb,
unsigned long action, void *data);
static struct notifier_block skbuff_debug_notify = {
.notifier_call = skbuff_debug_event_handler,
.priority = 0
};
inline u32 skbuff_debugobj_sum(struct sk_buff *skb)
{
int pos = offsetof(struct sk_buff, free_addr);
u32 sum = 0;
while (pos--)
sum += ((u8 *)skb)[pos];
return sum;
}
struct skbuff_debugobj_walking {
int pos;
void **d;
};
#ifdef CONFIG_ARM
static int skbuff_debugobj_walkstack(struct stackframe *frame, void *p)
{
struct skbuff_debugobj_walking *w = (struct skbuff_debugobj_walking *)p;
unsigned long pc = frame->pc;
if (w->pos < DEBUG_OBJECTS_SKBUFF_STACKSIZE - 1) {
w->d[w->pos++] = (void *)pc;
return 0;
}
return -ENOENT;
}
#else
static bool skbuff_debugobj_walkstack(void *p, unsigned long pc)
{
struct skbuff_debugobj_walking *w = (struct skbuff_debugobj_walking *)p;
if (w->pos < DEBUG_OBJECTS_SKBUFF_STACKSIZE - 1) {
w->d[w->pos++] = (void *)pc;
return true;
}
return false;
}
#endif
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
static void skbuff_debugobj_get_stack(void **ret)
{
struct skbuff_debugobj_walking w = {0, ret};
void *p = &w;
#ifdef CONFIG_ARM
struct stackframe frame;
register unsigned long current_sp asm ("sp");
frame.lr = (unsigned long)__builtin_return_address(0);
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;
frame.pc = (unsigned long)skbuff_debugobj_get_stack;
walk_stackframe(&frame, skbuff_debugobj_walkstack, p);
#else
arch_stack_walk(skbuff_debugobj_walkstack, p, current, NULL);
#endif
ret[w.pos] = NULL;
}
#else
#error
static void skbuff_debugobj_get_stack(void **ret)
{
/* not supported */
ret[0] = 0xdeadbeef;
}
#endif
void skbuff_debugobj_print_stack(void *const *stack)
{
int i;
for (i = 0; stack[i]; i++)
pr_emerg("\t %pS (0x%p)\n", stack[i], stack[i]);
}
static const char *skbuff_debugobj_state_name(const struct sk_buff *skb)
{
int obj_state;
obj_state = debug_object_get_state((struct sk_buff *)skb);
switch (obj_state) {
case ODEBUG_STATE_NONE:
return "none";
case ODEBUG_STATE_INIT:
return "init";
case ODEBUG_STATE_INACTIVE:
return "inactive";
case ODEBUG_STATE_ACTIVE:
return "active";
case ODEBUG_STATE_DESTROYED:
return "destroyed";
case ODEBUG_STATE_NOTAVAILABLE:
return "not available";
default:
return "invalid";
}
}
void skbuff_debugobj_print_skb(const struct sk_buff *skb)
{
pr_emerg("skb_debug: current process = %s (pid %i)\n",
current->comm, current->pid);
pr_emerg("skb_debug: skb 0x%p, next 0x%p, prev 0x%p, state = %s\n", skb,
skb->next, skb->prev, skbuff_debugobj_state_name(skb));
pr_emerg("skb_debug: free stack:\n");
skbuff_debugobj_print_stack(skb->free_addr);
pr_emerg("skb_debug: alloc stack:\n");
skbuff_debugobj_print_stack(skb->alloc_addr);
}
EXPORT_SYMBOL(skbuff_debugobj_print_skb);
/* skbuff_debugobj_fixup():
* Called when an error is detected in the state machine for
* the objects
*/
static bool skbuff_debugobj_fixup(void *addr, enum debug_obj_state state)
{
struct sk_buff *skb = (struct sk_buff *)addr;
ftrace_dump(DUMP_ALL);
WARN(1, "skb_debug: state = %d, skb = 0x%p sum = %d (now %d)\n",
state, skb, skb->sum, skbuff_debugobj_sum(skb));
skb_recycler_notifier_send_event(SKB_RECYCLER_NOTIFIER_FSM, skb);
return true;
}
static struct debug_obj_descr skbuff_debug_descr = {
.name = "sk_buff_struct",
.fixup_init = skbuff_debugobj_fixup,
.fixup_activate = skbuff_debugobj_fixup,
.fixup_destroy = skbuff_debugobj_fixup,
.fixup_free = skbuff_debugobj_fixup,
};
inline void skbuff_debugobj_activate(struct sk_buff *skb)
{
int ret = 0;
if (!skbuff_debugobj_enabled)
return;
skbuff_debugobj_get_stack(skb->alloc_addr);
ret = debug_object_activate(skb, &skbuff_debug_descr);
if (ret)
goto err_act;
skbuff_debugobj_sum_validate(skb);
return;
err_act:
ftrace_dump(DUMP_ALL);
WARN(1, "skb_debug: failed to activate err = %d skb = 0x%p sum = %d (now %d)\n",
ret, skb, skb->sum, skbuff_debugobj_sum(skb));
skb_recycler_notifier_send_event(SKB_RECYCLER_NOTIFIER_DBLALLOC, skb);
}
inline void skbuff_debugobj_init_and_activate(struct sk_buff *skb)
{
if (!skbuff_debugobj_enabled)
return;
/* if we're coming from the slab, the skb->sum might
* be invalid anyways
*/
skb->sum = skbuff_debugobj_sum(skb);
debug_object_init(skb, &skbuff_debug_descr);
skbuff_debugobj_activate(skb);
}
inline void skbuff_debugobj_deactivate(struct sk_buff *skb)
{
int obj_state;
if (!skbuff_debugobj_enabled)
return;
skb->sum = skbuff_debugobj_sum(skb);
obj_state = debug_object_get_state(skb);
if (obj_state == ODEBUG_STATE_ACTIVE) {
debug_object_deactivate(skb, &skbuff_debug_descr);
skbuff_debugobj_get_stack(skb->free_addr);
return;
}
ftrace_dump(DUMP_ALL);
WARN(1, "skb_debug: deactivating inactive object skb=0x%p state=%d sum = %d (now %d)\n",
skb, obj_state, skb->sum, skbuff_debugobj_sum(skb));
skb_recycler_notifier_send_event(SKB_RECYCLER_NOTIFIER_DBLFREE, skb);
}
inline void _skbuff_debugobj_sum_validate(struct sk_buff *skb,
const char *var, const char *src,
int line, const char *fxn)
{
if (!skbuff_debugobj_enabled || !skb)
return;
if (skb->sum == skbuff_debugobj_sum(skb))
return;
ftrace_dump(DUMP_ALL);
WARN(1, "skb_debug: skb sum changed skb = 0x%p sum = %d (now %d)\n",
skb, skb->sum, skbuff_debugobj_sum(skb));
pr_emerg("skb_debug: %s() checking %s in %s:%d\n", fxn, var, src, line);
skb_recycler_notifier_send_event(SKB_RECYCLER_NOTIFIER_SUMERR, skb);
}
inline void skbuff_debugobj_sum_update(struct sk_buff *skb)
{
if (!skbuff_debugobj_enabled || !skb)
return;
skb->sum = skbuff_debugobj_sum(skb);
}
inline void skbuff_debugobj_destroy(struct sk_buff *skb)
{
if (!skbuff_debugobj_enabled)
return;
debug_object_destroy(skb, &skbuff_debug_descr);
}
static int __init disable_object_debug(char *str)
{
skbuff_debugobj_enabled = 0;
pr_info("skb_debug: debug objects is disabled\n");
return 0;
}
early_param("no_skbuff_debug_objects", disable_object_debug);
void skbuff_debugobj_print_skb_list(const struct sk_buff *skb_list,
const char *list_title, int cpu)
{
int count;
struct sk_buff *skb_i = (struct sk_buff *)skb_list;
u32 sum_i, sum_now;
int obj_state;
if (cpu < 0) {
cpu = get_cpu();
put_cpu();
}
pr_emerg("skb_debug: start skb list '%s' [CPU#%d]\n", list_title, cpu);
count = 0;
if (skb_list) {
do {
obj_state =
debug_object_get_state(skb_i);
if (obj_state < ODEBUG_STATE_NOTAVAILABLE) {
sum_i = skb_i->sum;
sum_now = skbuff_debugobj_sum(skb_i);
} else {
sum_i = 0;
sum_now = 0;
}
if (sum_i != sum_now) {
pr_emerg("skb_debug: [%02d] skb 0x%p, next 0x%p, prev 0x%p, state %d (%s), sum %d (now %d)\n",
count, skb_i, skb_i->next, skb_i->prev,
obj_state, skbuff_debugobj_state_name(skb_i),
sum_i, sum_now);
}
skb_i = skb_i->next;
count++;
} while (skb_list != skb_i);
}
pr_emerg("skb_debug: end skb list '%s'. In total %d skbs iterated.\n", list_title, count);
}
void skbuff_debugobj_register_callback(void)
{
skb_recycler_notifier_register(&skbuff_debug_notify);
}
int skbuff_debug_event_handler(struct notifier_block *nb, unsigned long action,
void *data)
{
struct sk_buff *skb = (struct sk_buff *)data;
pr_emerg("skb_debug: notifier event %lu\n", action);
skbuff_debugobj_print_skb(skb);
skb_recycler_print_all_lists();
return NOTIFY_DONE;
}


@@ -0,0 +1,53 @@
/*
* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/skbuff.h>
#include <linux/debugobjects.h>
#ifndef _LINUX_SKBBUFF_DEBUG_OBJECTS
#define _LINUX_SKBBUFF_DEBUG_OBJECTS
#ifdef CONFIG_DEBUG_OBJECTS_SKBUFF
void skbuff_debugobj_init_and_activate(struct sk_buff *skb);
void skbuff_debugobj_activate(struct sk_buff *skb);
void skbuff_debugobj_deactivate(struct sk_buff *skb);
void skbuff_debugobj_destroy(struct sk_buff *skb);
#define skbuff_debugobj_sum_validate(skb) _skbuff_debugobj_sum_validate(skb, \
#skb, __FILE__, __LINE__, __func__)
void _skbuff_debugobj_sum_validate(struct sk_buff *skb, const char *var,
const char *src, int line, const char *fxn);
void skbuff_debugobj_sum_update(struct sk_buff *skb);
void skbuff_debugobj_print_skb(const struct sk_buff *skb);
void skbuff_debugobj_print_skb_list(const struct sk_buff *skb_list,
const char *list_title, int cpu);
void skbuff_debugobj_register_callback(void);
#else
static inline void skbuff_debugobj_init_and_activate(struct sk_buff *skb) { }
static inline void skbuff_debugobj_activate(struct sk_buff *skb) { }
static inline void skbuff_debugobj_deactivate(struct sk_buff *skb) { }
static inline void skbuff_debugobj_destroy(struct sk_buff *skb) { }
static inline void skbuff_debugobj_sum_validate(struct sk_buff *skb) { }
static inline void skbuff_debugobj_sum_update(struct sk_buff *skb) { }
static inline void skbuff_debugobj_print_skb(const struct sk_buff *skb) { }
static inline void skbuff_debugobj_print_skb_list(const struct sk_buff *skb_list,
const char *list_title, int cpu) { }
static inline void skbuff_debugobj_register_callback(void) { }
#endif
#endif /* _LINUX_SKBBUFF_DEBUG_OBJECTS */


@@ -0,0 +1,42 @@
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Notifier interface for the SKB Recycler */
#include "skbuff_notifier.h"
static BLOCKING_NOTIFIER_HEAD(skb_recycler_notifier);
int skb_recycler_notifier_register(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&skb_recycler_notifier, nb);
}
EXPORT_SYMBOL(skb_recycler_notifier_register);
int skb_recycler_notifier_unregister(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&skb_recycler_notifier, nb);
}
EXPORT_SYMBOL(skb_recycler_notifier_unregister);
int skb_recycler_notifier_send_event(unsigned long action, struct sk_buff *skb)
{
int ret;
ret = blocking_notifier_call_chain(&skb_recycler_notifier, action, skb);
return 0;
}


@@ -0,0 +1,52 @@
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef SKBUFF_NOTIFIER_H
#define SKBUFF_NOTIFIER_H
#include <linux/notifier.h>
#include <linux/skbuff.h>
/* notifier events */
#define SKB_RECYCLER_NOTIFIER_SUMERR 0x0001
#define SKB_RECYCLER_NOTIFIER_DBLFREE 0x0002
#define SKB_RECYCLER_NOTIFIER_DBLALLOC 0x0004
#define SKB_RECYCLER_NOTIFIER_FSM 0x0008
#if defined(CONFIG_DEBUG_OBJECTS_SKBUFF)
int skb_recycler_notifier_register(struct notifier_block *nb);
int skb_recycler_notifier_unregister(struct notifier_block *nb);
int skb_recycler_notifier_send_event(unsigned long action,
struct sk_buff *skb);
#else
static inline int skb_recycler_notifier_register(struct notifier_block *nb)
{
return 0;
}
static inline int skb_recycler_notifier_unregister(struct notifier_block *nb)
{
return 0;
}
static inline int skb_recycler_notifier_send_event(unsigned long action,
struct sk_buff *skb)
{
return 1;
}
#endif /* CONFIG_DEBUG_OBJECTS_SKBUFF */
#endif /* SKBUFF_NOTIFIER_H */


@@ -0,0 +1,729 @@
/*
* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Generic skb recycler */
#include "skbuff_recycle.h"
#include <linux/proc_fs.h>
#include <linux/string.h>
#include "skbuff_debug.h"
static struct proc_dir_entry *proc_net_skbrecycler;
static DEFINE_PER_CPU(struct sk_buff_head, recycle_list);
static int skb_recycle_max_skbs = SKB_RECYCLE_MAX_SKBS;
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
static DEFINE_PER_CPU(struct sk_buff_head, recycle_spare_list);
static struct global_recycler glob_recycler;
static int skb_recycle_spare_max_skbs = SKB_RECYCLE_SPARE_MAX_SKBS;
#endif
inline struct sk_buff *skb_recycler_alloc(struct net_device *dev,
unsigned int length)
{
unsigned long flags;
struct sk_buff_head *h;
struct sk_buff *skb = NULL;
struct sk_buff *ln = NULL;
if (unlikely(length > SKB_RECYCLE_SIZE))
return NULL;
h = &get_cpu_var(recycle_list);
local_irq_save(flags);
skb = skb_peek(h);
if (skb) {
ln = skb_peek_next(skb, h);
skbuff_debugobj_activate(skb);
/* Recalculate the sum for skb->next as next and prev pointers
* of skb->next will be updated in __skb_unlink
*/
skbuff_debugobj_sum_validate(ln);
__skb_unlink(skb, h);
skbuff_debugobj_sum_update(ln);
}
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
if (unlikely(!skb)) {
u8 head;
spin_lock(&glob_recycler.lock);
/* If global recycle list is not empty, use global buffers */
head = glob_recycler.head;
if (unlikely(head == glob_recycler.tail)) {
spin_unlock(&glob_recycler.lock);
} else {
struct sk_buff *gn = glob_recycler.pool[head].next;
struct sk_buff *gp = glob_recycler.pool[head].prev;
/* Move SKBs from global list to CPU pool */
skbuff_debugobj_sum_validate(gn);
skbuff_debugobj_sum_validate(gp);
skb_queue_splice_init(&glob_recycler.pool[head], h);
skbuff_debugobj_sum_update(gn);
skbuff_debugobj_sum_update(gp);
head = (head + 1) & SKB_RECYCLE_MAX_SHARED_POOLS_MASK;
glob_recycler.head = head;
spin_unlock(&glob_recycler.lock);
/* We have refilled the CPU pool - dequeue */
skb = skb_peek(h);
if (skb) {
/* Recalculate the sum for skb->next as next and
* prev pointers of skb->next will be updated
* in __skb_unlink
*/
ln = skb_peek_next(skb, h);
skbuff_debugobj_activate(skb);
skbuff_debugobj_sum_validate(ln);
__skb_unlink(skb, h);
skbuff_debugobj_sum_update(ln);
}
}
}
#endif
local_irq_restore(flags);
put_cpu_var(recycle_list);
if (likely(skb)) {
struct skb_shared_info *shinfo;
/* We're about to write a large amount to the skb to
* zero most of the structure so prefetch the start
* of the shinfo region now so it's in the D-cache
* before we start to write that.
*/
shinfo = skb_shinfo(skb);
prefetchw(shinfo);
zero_struct(skb, offsetof(struct sk_buff, tail));
refcount_set(&skb->users, 1);
skb->mac_header = (typeof(skb->mac_header))~0U;
skb->transport_header = (typeof(skb->transport_header))~0U;
zero_struct(shinfo, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
skb->data = skb->head + NET_SKB_PAD;
skb_reset_tail_pointer(skb);
skb->dev = dev;
}
return skb;
}
inline bool skb_recycler_consume(struct sk_buff *skb)
{
unsigned long flags;
struct sk_buff_head *h;
struct sk_buff *ln = NULL;
/* Can we recycle this skb? If not, simply return that we cannot */
if (unlikely(!consume_skb_can_recycle(skb, SKB_RECYCLE_MIN_SIZE,
SKB_RECYCLE_MAX_SIZE)))
return false;
/* If we can, then it will be much faster for us to recycle this one
* later than to allocate a new one from scratch.
*/
h = &get_cpu_var(recycle_list);
local_irq_save(flags);
/* Attempt to enqueue the CPU hot recycle list first */
if (likely(skb_queue_len(h) < skb_recycle_max_skbs)) {
ln = skb_peek(h);
/* Recalculate the sum for peek of list as next and prev
* pointers of skb->next will be updated in __skb_queue_head
*/
skbuff_debugobj_sum_validate(ln);
__skb_queue_head(h, skb);
skbuff_debugobj_deactivate(skb);
skbuff_debugobj_sum_update(ln);
local_irq_restore(flags);
preempt_enable();
return true;
}
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
h = this_cpu_ptr(&recycle_spare_list);
/* The CPU hot recycle list was full; if the spare list is also full,
* attempt to move the spare list to the global list for other CPUs to
* use.
*/
if (unlikely(skb_queue_len(h) >= skb_recycle_spare_max_skbs)) {
u8 cur_tail, next_tail;
spin_lock(&glob_recycler.lock);
cur_tail = glob_recycler.tail;
next_tail = (cur_tail + 1) & SKB_RECYCLE_MAX_SHARED_POOLS_MASK;
if (next_tail != glob_recycler.head) {
struct sk_buff_head *p = &glob_recycler.pool[cur_tail];
struct sk_buff *hn = h->next, *hp = h->prev;
/* Move SKBs from CPU pool to Global pool*/
skbuff_debugobj_sum_validate(hp);
skbuff_debugobj_sum_validate(hn);
skb_queue_splice_init(h, p);
skbuff_debugobj_sum_update(hp);
skbuff_debugobj_sum_update(hn);
/* Done with global list init */
glob_recycler.tail = next_tail;
spin_unlock(&glob_recycler.lock);
/* Recalculate the sum for peek of list as next and prev
* pointers of skb->next will be updated in
* __skb_queue_head
*/
ln = skb_peek(h);
skbuff_debugobj_sum_validate(ln);
/* We have now cleared room in the spare;
* Initialize and enqueue skb into spare
*/
__skb_queue_head(h, skb);
skbuff_debugobj_sum_update(ln);
skbuff_debugobj_deactivate(skb);
local_irq_restore(flags);
preempt_enable();
return true;
}
/* We still have a full spare because the global is also full */
spin_unlock(&glob_recycler.lock);
} else {
/* We have room in the spare list; enqueue to spare list */
ln = skb_peek(h);
/* Recalculate the sum for peek of list as next and prev
* pointers of skb->next will be updated in __skb_queue_head
*/
skbuff_debugobj_sum_validate(ln);
__skb_queue_head(h, skb);
skbuff_debugobj_deactivate(skb);
skbuff_debugobj_sum_update(ln);
local_irq_restore(flags);
preempt_enable();
return true;
}
#endif
local_irq_restore(flags);
preempt_enable();
return false;
}
/**
* skb_recycler_consume_list_fast - free a list of skbs
* @skb_list: head of the buffer list
*
* Add the given list of SKBs to the CPU list. The assumption is that these
* buffers were originally allocated from the recycler and were transmitted
* through a controlled fast xmit path, so no additional checks are needed
* before recycling them back to the pool
*/
#ifdef CONFIG_DEBUG_OBJECTS_SKBUFF
inline bool skb_recycler_consume_list_fast(struct sk_buff_head *skb_list)
{
struct sk_buff *skb = NULL, *next = NULL;
skb_queue_walk_safe(skb_list, skb, next) {
if (skb) {
__skb_unlink(skb, skb_list);
skb_recycler_consume(skb);
}
}
return true;
}
#else
inline bool skb_recycler_consume_list_fast(struct sk_buff_head *skb_list)
{
unsigned long flags;
struct sk_buff_head *h;
h = &get_cpu_var(recycle_list);
local_irq_save(flags);
/* Attempt to enqueue the CPU hot recycle list first */
if (likely(skb_queue_len(h) < skb_recycle_max_skbs)) {
skb_queue_splice(skb_list, h);
local_irq_restore(flags);
preempt_enable();
return true;
}
local_irq_restore(flags);
preempt_enable();
return false;
}
#endif
static void skb_recycler_free_skb(struct sk_buff_head *list)
{
struct sk_buff *skb = NULL, *next = NULL;
unsigned long flags;
spin_lock_irqsave(&list->lock, flags);
while ((skb = skb_peek(list)) != NULL) {
skbuff_debugobj_activate(skb);
next = skb->next;
__skb_unlink(skb, list);
skb_release_data(skb);
kfree_skbmem(skb);
/*
* Update skb->sum for next, since the __skb_unlink above changed its links
*/
if (next) {
skbuff_debugobj_sum_update(next);
}
}
spin_unlock_irqrestore(&list->lock, flags);
}
static int skb_cpu_callback(unsigned int ocpu)
{
unsigned long oldcpu = (unsigned long)ocpu;
skb_recycler_free_skb(&per_cpu(recycle_list, oldcpu));
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
spin_lock(&glob_recycler.lock);
skb_recycler_free_skb(&per_cpu(recycle_spare_list, oldcpu));
spin_unlock(&glob_recycler.lock);
#endif
return NOTIFY_DONE;
}
#ifdef CONFIG_SKB_RECYCLER_PREALLOC
static int __init skb_prealloc_init_list(void)
{
int i;
struct sk_buff *skb;
for (i = 0; i < SKB_RECYCLE_MAX_PREALLOC_SKBS; i++) {
skb = __alloc_skb(SKB_RECYCLE_MAX_SIZE + NET_SKB_PAD,
GFP_KERNEL, 0, NUMA_NO_NODE);
if (unlikely(!skb))
return -ENOMEM;
skb_reserve(skb, NET_SKB_PAD);
skb_recycler_consume(skb);
}
return 0;
}
#endif
/* procfs: count
* Show skb counts
*/
static int proc_skb_count_show(struct seq_file *seq, void *v)
{
int cpu;
int len;
int total;
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
unsigned int i;
unsigned long flags;
#endif
total = 0;
for_each_online_cpu(cpu) {
len = skb_queue_len(&per_cpu(recycle_list, cpu));
seq_printf(seq, "recycle_list[%d]: %d\n", cpu, len);
total += len;
}
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
for_each_online_cpu(cpu) {
len = skb_queue_len(&per_cpu(recycle_spare_list, cpu));
seq_printf(seq, "recycle_spare_list[%d]: %d\n", cpu, len);
total += len;
}
for (i = 0; i < SKB_RECYCLE_MAX_SHARED_POOLS; i++) {
spin_lock_irqsave(&glob_recycler.lock, flags);
len = skb_queue_len(&glob_recycler.pool[i]);
spin_unlock_irqrestore(&glob_recycler.lock, flags);
seq_printf(seq, "global_list[%d]: %d\n", i, len);
total += len;
}
#endif
seq_printf(seq, "total: %d\n", total);
return 0;
}
static int proc_skb_count_open(struct inode *inode, struct file *file)
{
return single_open(file, proc_skb_count_show, pde_data(inode));
}
static const struct proc_ops proc_skb_count_fops = {
.proc_open = proc_skb_count_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
};
/* procfs: flush
* Flush skbs
*/
static void skb_recycler_flush_task(struct work_struct *work)
{
unsigned long flags;
struct sk_buff_head *h;
struct sk_buff_head tmp;
struct sk_buff *skb = NULL;
skb_queue_head_init(&tmp);
h = &get_cpu_var(recycle_list);
local_irq_save(flags);
skb_queue_splice_init(h, &tmp);
/*
* Update the sum for the first skb in the tmp list, since its
* links are changed by the splice above
*/
skb = skb_peek(&tmp);
skbuff_debugobj_sum_update(skb);
local_irq_restore(flags);
put_cpu_var(recycle_list);
skb_recycler_free_skb(&tmp);
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
h = &get_cpu_var(recycle_spare_list);
local_irq_save(flags);
skb_queue_splice_init(h, &tmp);
skb = skb_peek(&tmp);
skbuff_debugobj_sum_update(skb);
local_irq_restore(flags);
put_cpu_var(recycle_spare_list);
skb_recycler_free_skb(&tmp);
#endif
}
static ssize_t proc_skb_flush_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
unsigned int i;
unsigned long flags;
#endif
schedule_on_each_cpu(&skb_recycler_flush_task);
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
spin_lock_irqsave(&glob_recycler.lock, flags);
for (i = 0; i < SKB_RECYCLE_MAX_SHARED_POOLS; i++)
skb_recycler_free_skb(&glob_recycler.pool[i]);
glob_recycler.head = 0;
glob_recycler.tail = 0;
spin_unlock_irqrestore(&glob_recycler.lock, flags);
#endif
return count;
}
static const struct proc_ops proc_skb_flush_fops = {
.proc_write = proc_skb_flush_write,
.proc_open = simple_open,
.proc_lseek = noop_llseek,
};
/* procfs: max_skbs
* Show max skbs
*/
static int proc_skb_max_skbs_show(struct seq_file *seq, void *v)
{
seq_printf(seq, "%d\n", skb_recycle_max_skbs);
return 0;
}
static int proc_skb_max_skbs_open(struct inode *inode, struct file *file)
{
return single_open(file, proc_skb_max_skbs_show, pde_data(inode));
}
static ssize_t proc_skb_max_skbs_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret;
int max;
char buffer[13];
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count) != 0)
return -EFAULT;
ret = kstrtoint(strstrip(buffer), 10, &max);
if (ret == 0 && max >= 0)
skb_recycle_max_skbs = max;
return count;
}
static const struct proc_ops proc_skb_max_skbs_fops = {
.proc_open = proc_skb_max_skbs_open,
.proc_read = seq_read,
.proc_write = proc_skb_max_skbs_write,
.proc_release = single_release,
};
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
/* procfs: max_spare_skbs
* Show max spare skbs
*/
static int proc_skb_max_spare_skbs_show(struct seq_file *seq, void *v)
{
seq_printf(seq, "%d\n", skb_recycle_spare_max_skbs);
return 0;
}
static int proc_skb_max_spare_skbs_open(struct inode *inode, struct file *file)
{
return single_open(file,
proc_skb_max_spare_skbs_show,
pde_data(inode));
}
static ssize_t
proc_skb_max_spare_skbs_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret;
int max;
char buffer[13];
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count) != 0)
return -EFAULT;
ret = kstrtoint(strstrip(buffer), 10, &max);
if (ret == 0 && max >= 0)
skb_recycle_spare_max_skbs = max;
return count;
}
static const struct proc_ops proc_skb_max_spare_skbs_fops = {
.proc_open = proc_skb_max_spare_skbs_open,
.proc_read = seq_read,
.proc_write = proc_skb_max_spare_skbs_write,
.proc_release = single_release,
};
#endif /* CONFIG_SKB_RECYCLER_MULTI_CPU */
static void skb_recycler_init_procfs(void)
{
proc_net_skbrecycler = proc_mkdir("skb_recycler", init_net.proc_net);
if (!proc_net_skbrecycler) {
pr_err("cannot create skb_recycle proc dir");
return;
}
if (!proc_create("count",
S_IRUGO,
proc_net_skbrecycler,
&proc_skb_count_fops))
pr_err("cannot create proc net skb_recycle held\n");
if (!proc_create("flush",
S_IWUGO,
proc_net_skbrecycler,
&proc_skb_flush_fops))
pr_err("cannot create proc net skb_recycle flush\n");
if (!proc_create("max_skbs",
S_IRUGO | S_IWUGO,
proc_net_skbrecycler,
&proc_skb_max_skbs_fops))
pr_err("cannot create proc net skb_recycle max_skbs\n");
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
if (!proc_create("max_spare_skbs",
S_IRUGO | S_IWUGO,
proc_net_skbrecycler,
&proc_skb_max_spare_skbs_fops))
pr_err("cannot create proc net skb_recycle max_spare_skbs\n");
#endif
}
void __init skb_recycler_init(void)
{
int cpu;
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
unsigned int i;
#endif
for_each_possible_cpu(cpu) {
skb_queue_head_init(&per_cpu(recycle_list, cpu));
}
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
for_each_possible_cpu(cpu) {
skb_queue_head_init(&per_cpu(recycle_spare_list, cpu));
}
spin_lock_init(&glob_recycler.lock);
for (i = 0; i < SKB_RECYCLE_MAX_SHARED_POOLS; i++)
skb_queue_head_init(&glob_recycler.pool[i]);
glob_recycler.head = 0;
glob_recycler.tail = 0;
#endif
#ifdef CONFIG_SKB_RECYCLER_PREALLOC
if (skb_prealloc_init_list())
pr_err("Failed to preallocate SKBs for recycle list\n");
#endif
cpuhp_setup_state_nocalls(CPUHP_SKB_RECYCLER_DEAD, "net/skbuff_recycler:dead:", NULL, skb_cpu_callback);
skbuff_debugobj_register_callback();
skb_recycler_init_procfs();
}
void skb_recycler_print_all_lists(void)
{
unsigned long flags;
int cpu;
struct sk_buff_head *h; /* used by both the MULTI_CPU and base sections below */
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
int i;
#endif
cpu = get_cpu();
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
spin_lock_irqsave(&glob_recycler.lock, flags);
for (i = 0; i < SKB_RECYCLE_MAX_SHARED_POOLS; i++)
skbuff_debugobj_print_skb_list((&glob_recycler.pool[i])->next,
"Global Pool", -1);
spin_unlock_irqrestore(&glob_recycler.lock, flags);
preempt_disable();
local_irq_save(flags);
h = &per_cpu(recycle_spare_list, cpu);
skbuff_debugobj_print_skb_list(h->next, "Recycle Spare", cpu);
local_irq_restore(flags);
preempt_enable();
#endif
preempt_disable();
local_irq_save(flags);
h = &per_cpu(recycle_list, cpu);
skbuff_debugobj_print_skb_list(h->next, "Recycle List", cpu);
local_irq_restore(flags);
preempt_enable();
}
#ifdef SKB_FAST_RECYCLABLE_DEBUG_ENABLE
/**
* consume_skb_can_fast_recycle_debug - Debug API to flag any sanity check
* failures on a fast recycled skb
* @skb: buffer to be checked
* @min_skb_size: minimum skb size allowed
* @max_skb_size: maximum skb size allowed
*
* Returns false with warning message if any of the checks fail
*/
static inline bool consume_skb_can_fast_recycle_debug(const struct sk_buff *skb,
int min_skb_size, int max_skb_size)
{
if (unlikely(irqs_disabled())) {
WARN(1, "skb_debug: irqs_disabled for skb = 0x%p \n", skb);
return false;
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)) {
WARN(1, "skb_debug: ZEROCOPY flag set for skb = 0x%p \n", skb);
return false;
}
if (unlikely(skb_is_nonlinear(skb))) {
WARN(1, "skb_debug: non-linear skb = 0x%p \n", skb);
return false;
}
if (unlikely(skb_shinfo(skb)->frag_list)) {
WARN(1, "skb_debug: set frag_list for skb = 0x%p \n", skb);
return false;
}
if (unlikely(skb_shinfo(skb)->nr_frags)) {
WARN(1, "skb_debug: set nr_frags for skb = 0x%p \n", skb);
return false;
}
if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) {
WARN(1, "skb_debug: FCLONE available for skb = 0x%p \n", skb);
return false;
}
min_skb_size = SKB_DATA_ALIGN(min_skb_size + NET_SKB_PAD);
if (unlikely(skb_end_pointer(skb) - skb->head < min_skb_size)) {
WARN(1, "skb_debug: invalid min size for skb = 0x%p \n", skb);
return false;
}
max_skb_size = SKB_DATA_ALIGN(max_skb_size + NET_SKB_PAD);
if (unlikely(skb_end_pointer(skb) - skb->head > max_skb_size)) {
WARN(1, "skb_debug: invalid max size for skb = 0x%p \n", skb);
return false;
}
if (unlikely(skb_cloned(skb))) {
WARN(1, "skb_debug: cloned skb = 0x%p \n", skb);
return false;
}
if (unlikely(skb_pfmemalloc(skb))) {
WARN(1, "skb_debug: enabled pfmemalloc for skb = 0x%p \n", skb);
return false;
}
if (skb->_skb_refdst) {
WARN(1, "skb_debug: _skb_refdst flag enabled = 0x%p \n", skb);
return false;
}
if (skb->destructor) {
WARN(1, "skb_debug: destructor flag enabled = 0x%p \n", skb);
return false;
}
if (skb->active_extensions) {
WARN(1, "skb_debug: active_extensions flag enabled = 0x%p \n",
skb);
return false;
}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (skb->_nfct & NFCT_PTRMASK) {
WARN(1, "skb_debug: nfctinfo bits set for skb = 0x%p \n", skb);
return false;
}
#endif
return true;
}
/**
* check_skb_fast_recyclable - Debug API to flag any sanity check failures
* on a fast recycled skb
* @skb: buffer to be checked
*
* Checks skb recyclability
*/
void check_skb_fast_recyclable(struct sk_buff *skb)
{
bool check = true;
check = consume_skb_can_fast_recycle_debug(skb, SKB_RECYCLE_MIN_SIZE, SKB_RECYCLE_MAX_SIZE);
if (!check)
BUG_ON(1);
}
EXPORT_SYMBOL(check_skb_fast_recyclable);
#endif


@@ -0,0 +1,178 @@
/*
* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/* Definitions for the skb recycler functions */
#ifndef _LINUX_SKBUFF_RECYCLE_H
#define _LINUX_SKBUFF_RECYCLE_H
#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/init.h>
#include <linux/prefetch.h>
#include <linux/if.h>
#define SKB_RECYCLE_SIZE 2304
#define SKB_RECYCLE_MIN_SIZE SKB_RECYCLE_SIZE
#define SKB_RECYCLE_MAX_SIZE SKB_RECYCLE_SIZE
#define SKB_RECYCLE_MAX_SKBS 1024
#define SKB_RECYCLE_SPARE_MAX_SKBS 256
#ifdef CONFIG_SKB_RECYCLER_PREALLOC
#define SKB_RECYCLE_MAX_PREALLOC_SKBS CONFIG_SKB_RECYCLE_MAX_PREALLOC_SKBS
#define SKB_RECYCLE_MAX_SHARED_POOLS \
DIV_ROUND_UP(SKB_RECYCLE_MAX_PREALLOC_SKBS, \
SKB_RECYCLE_SPARE_MAX_SKBS)
#else
#define SKB_RECYCLE_MAX_SHARED_POOLS 8
#endif
#define SKB_RECYCLE_MAX_SHARED_POOLS_MASK \
(SKB_RECYCLE_MAX_SHARED_POOLS - 1)
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
struct global_recycler {
/* Global circular list which holds the shared skb pools */
struct sk_buff_head pool[SKB_RECYCLE_MAX_SHARED_POOLS];
u8 head; /* head of the circular list */
u8 tail; /* tail of the circular list */
spinlock_t lock;
};
#endif
static __always_inline void zero_struct(void *v, int size)
{
u32 *s = (u32 *)v;
/* We assume that size is word aligned; in fact, it's constant */
WARN_ON((size & 3) != 0);
/* This looks odd but we "know" size is a constant, and so the
* compiler can fold away all of the conditionals. The compiler is
* pretty smart here, and can fold away the loop, too!
*/
while (size > 0) {
if (size >= 4)
s[0] = 0;
if (size >= 8)
s[1] = 0;
if (size >= 12)
s[2] = 0;
if (size >= 16)
s[3] = 0;
if (size >= 20)
s[4] = 0;
if (size >= 24)
s[5] = 0;
if (size >= 28)
s[6] = 0;
if (size >= 32)
s[7] = 0;
if (size >= 36)
s[8] = 0;
if (size >= 40)
s[9] = 0;
if (size >= 44)
s[10] = 0;
if (size >= 48)
s[11] = 0;
if (size >= 52)
s[12] = 0;
if (size >= 56)
s[13] = 0;
if (size >= 60)
s[14] = 0;
if (size >= 64)
s[15] = 0;
size -= 64;
s += 16;
}
}
static inline bool consume_skb_can_recycle(const struct sk_buff *skb,
int min_skb_size, int max_skb_size)
{
if (unlikely(irqs_disabled()))
return false;
if (unlikely(skb_shinfo(skb)->tx_flags & SKBFL_ZEROCOPY_ENABLE))
return false;
if (unlikely(skb->head_frag))
return false;
if (unlikely(skb_is_nonlinear(skb)))
return false;
if (unlikely(skb_shinfo(skb)->frag_list))
return false;
if (unlikely(skb_shinfo(skb)->nr_frags))
return false;
if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE))
return false;
min_skb_size = SKB_DATA_ALIGN(min_skb_size + NET_SKB_PAD);
if (unlikely(skb_end_pointer(skb) - skb->head < min_skb_size))
return false;
max_skb_size = SKB_DATA_ALIGN(max_skb_size + NET_SKB_PAD);
if (unlikely(skb_end_pointer(skb) - skb->head > max_skb_size))
return false;
if (unlikely(skb_cloned(skb)))
return false;
if (unlikely(skb_pfmemalloc(skb)))
return false;
return true;
}
#ifdef CONFIG_SKB_RECYCLER
void __init skb_recycler_init(void);
struct sk_buff *skb_recycler_alloc(struct net_device *dev, unsigned int length);
bool skb_recycler_consume(struct sk_buff *skb);
bool skb_recycler_consume_list_fast(struct sk_buff_head *skb_list);
void skb_recycler_print_all_lists(void);
#else
#define skb_recycler_init() {}
#define skb_recycler_alloc(dev, len) NULL
#define skb_recycler_consume(skb) false
#define skb_recycler_consume_list_fast(skb_list) false
#define skb_recycler_print_all_lists() false
#endif
#endif
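
The hunks above add the recycler itself plus its debug and notifier helpers;
the hooks that actually call it from the skb allocation and free paths (in
QSDK these live in net/core/skbuff.c) are not part of the hunks shown here.
Purely as a hedged sketch of how a caller might use the API declared in
skbuff_recycle.h above (the example_* function names are assumptions, not
code from this commit):

/* Hypothetical caller-side sketch, not part of this commit's hunks. */
#include "skbuff_recycle.h"

struct sk_buff *example_netdev_alloc_skb(struct net_device *dev,
					 unsigned int length)
{
	struct sk_buff *skb;

	/* Try the per-CPU recycle pool first; fall back to the normal path. */
	skb = skb_recycler_alloc(dev, length);
	if (skb)
		return skb;

	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

void example_consume_skb(struct sk_buff *skb)
{
	/* If the skb qualifies, park it in the recycle pool instead of freeing. */
	if (skb_recycler_consume(skb))
		return;

	__kfree_skb(skb);	/* normal free path */
}

When CONFIG_SKB_RECYCLER is disabled, the stub macros in the header above turn
skb_recycler_alloc() into NULL and skb_recycler_consume() into false, so the
fallback branches in such callers are always taken.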