From d30297828bd54324ca19412ae8097d878319556b Mon Sep 17 00:00:00 2001
From: Tian Yang
Date: Mon, 21 Sep 2015 18:50:13 -0500
Subject: [PATCH] net: skbuff: use debug objects to track skb allocations

* tracks skb allocations and frees, and warns or errors if re-use occurs
* init/destroy on slab allocation and free
* activate/deactivate while an skb is in use

Change-Id: Ia2dd0c7549d765a282295daf27bee6f99e5c7a43
Signed-off-by: Matthew McClintock
Signed-off-by: Casey Chen
Signed-off-by: Tian Yang
---
 MAINTAINERS       | 1 +
 lib/Kconfig.debug | 6 ++++++
 net/core/Makefile | 1 +
 net/core/dev.c    | 1 +
 net/core/skbuff.c | 9 ++++++++-
 5 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index 85b77e2deb99..f474b10b98a3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -147,6 +147,7 @@ SKB RECYCLER SUPPORT
 M:	Casey Chen
 S:	Maintained
 F:	net/core/skbuff_recycle.*
+F:	net/core/skbuff_debug.*
 
 3C59X NETWORK DRIVER
 M:	Steffen Klassert
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 12dfe6691dd5..8a3d0b3c91e7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -701,6 +701,12 @@ config DEBUG_OBJECTS_PERCPU_COUNTER
 	  percpu counter routines to track the life time of percpu counter
 	  objects and validate the percpu counter operations.
 
+config DEBUG_OBJECTS_SKBUFF
+	bool "Debug sk_buff allocations"
+	depends on DEBUG_OBJECTS
+	help
+	  Enable this to turn on debug object tracking of sk_buff's (incl. recycler)
+
 config DEBUG_OBJECTS_ENABLE_DEFAULT
 	int "debug_objects bootup default value (0-1)"
 	range 0 1
diff --git a/net/core/Makefile b/net/core/Makefile
index eec6d32d44de..2f2e6042facf 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
 obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
 obj-$(CONFIG_OF)	+= of_net.o
 obj-$(CONFIG_SKB_RECYCLER) += skbuff_recycle.o
+obj-$(CONFIG_DEBUG_OBJECTS_SKBUFF) += skbuff_debug.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 806406df0d43..d78e342c162a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -153,6 +153,7 @@
 
 #include "dev.h"
 #include "net-sysfs.h"
+#include "skbuff_debug.h"
 
 static DEFINE_SPINLOCK(ptype_lock);
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4a6d1feb64fc..79e7be538e61 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -110,6 +110,7 @@ struct kmem_cache *skb_data_cache;
 #endif
 
 #include "skbuff_recycle.h"
+#include "skbuff_debug.h"
 
 struct kmem_cache *skbuff_head_cache __ro_after_init;
 static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
@@ -320,8 +321,8 @@ static void __build_skb_around(struct sk_buff *skb, void *data,
 	shinfo = skb_shinfo(skb);
 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 	atomic_set(&shinfo->dataref, 1);
-
 	skb_set_kcov_handle(skb, kcov_common_handle());
+	skbuff_debugobj_init_and_activate(skb);
 }
 
 /**
@@ -571,6 +572,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 		refcount_set(&fclones->fclone_ref, 1);
 	}
 
+	skbuff_debugobj_init_and_activate(skb);
 	return skb;
 
 nodata:
@@ -879,6 +881,7 @@ void kfree_skbmem(struct sk_buff *skb)
 
 	switch (skb->fclone) {
 	case SKB_FCLONE_UNAVAILABLE:
+		skbuff_debugobj_deactivate(skb);
 		kmem_cache_free(skbuff_head_cache, skb);
 		return;
 
@@ -899,7 +902,9 @@ void kfree_skbmem(struct sk_buff *skb)
 	}
 	if (!refcount_dec_and_test(&fclones->fclone_ref))
 		return;
+
 fastpath:
+	skbuff_debugobj_deactivate(&fclones->skb1);
 	kmem_cache_free(skbuff_fclone_cache, fclones);
 }
 
@@ -1763,6 +1768,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 			return NULL;
 
 		n->fclone = SKB_FCLONE_UNAVAILABLE;
+		skbuff_debugobj_init_and_activate(n);
 	}
 
 	return __skb_clone(n, skb);
@@ -5509,6 +5515,7 @@ void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
 	if (head_stolen) {
 		skb_release_head_state(skb);
+		skbuff_debugobj_deactivate(skb);
 		kmem_cache_free(skbuff_head_cache, skb);
 	} else {
 		__kfree_skb(skb);
 	}
-- 
2.34.1
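Note for reviewers: the new helpers called above live in net/core/skbuff_debug.{h,c}, which
are not included in this diff. The sketch below is a minimal illustration of what that
interface could look like if the helpers are thin wrappers around the generic debugobjects
infrastructure (<linux/debugobjects.h>). The function names and CONFIG_DEBUG_OBJECTS_SKBUFF
come from the call sites added by this patch; skbuff_debug_descr, the header-guard name, and
the ifdef arrangement are assumptions, not the shipped implementation.

/* --- net/core/skbuff_debug.h (sketch, not the real file) --- */
#ifndef _SKBUFF_DEBUG_H
#define _SKBUFF_DEBUG_H

#include <linux/skbuff.h>

#ifdef CONFIG_DEBUG_OBJECTS_SKBUFF
void skbuff_debugobj_init_and_activate(struct sk_buff *skb);
void skbuff_debugobj_deactivate(struct sk_buff *skb);
#else
static inline void skbuff_debugobj_init_and_activate(struct sk_buff *skb) { }
static inline void skbuff_debugobj_deactivate(struct sk_buff *skb) { }
#endif

#endif /* _SKBUFF_DEBUG_H */

/* --- net/core/skbuff_debug.c (sketch, not the real file) --- */
#include <linux/debugobjects.h>
#include <linux/skbuff.h>

#include "skbuff_debug.h"

/* Hypothetical descriptor; a full implementation may add fixup callbacks. */
static const struct debug_obj_descr skbuff_debug_descr = {
	.name = "sk_buff",
};

/* Track a freshly allocated skb head and mark it as in use. */
void skbuff_debugobj_init_and_activate(struct sk_buff *skb)
{
	debug_object_init(skb, &skbuff_debug_descr);
	debug_object_activate(skb, &skbuff_debug_descr);
}

/*
 * Mark the skb head as no longer in use just before it goes back to the
 * slab; a double free or re-use of the same address then triggers a
 * debugobjects warning.
 */
void skbuff_debugobj_deactivate(struct sk_buff *skb)
{
	debug_object_deactivate(skb, &skbuff_debug_descr);
}

Combining init and activate in a single helper matches how the call sites use it: an skb head
goes straight from the slab into active use, so there is no separate "initialized but idle"
window to track.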