wlan-ap-Telecominfraproject/feeds/qca-wifi-7/ipq53xx/patches-6.1/0417-skb_recycler-Add-an-API-to-free-skb-recycler-list.patch
John Crispin 68cf54d9f7 qca-wifi-7: update to ath12.5.5
Signed-off-by: John Crispin <john@phrozen.org>
2025-02-27 12:45:52 +01:00

From 4f8a1b77e9ef835d3ddf5a830dda2d051299f0eb Mon Sep 17 00:00:00 2001
From: Neha Bisht <quic_nbisht@quicinc.com>
Date: Mon, 18 Jul 2022 17:09:00 +0530
Subject: [PATCH] skb_recycler: Add an API to free skb recycler list

Currently, consume_skb() takes a single skb buffer as input and
processes buffers one by one. To avoid this per-buffer processing and
the associated local_irq calls, we add an API that takes a list of skbs
as input and frees them at once, saving CPU cycles.

Global and spare list processing is also avoided here by assuming that
the hot skb recycle list is always available.

Change-Id: I5c99b9281ec6d7ab189da4e8d30c6cb8a8817d7c
Signed-off-by: Neha Bisht <quic_nbisht@quicinc.com>
---
 include/linux/skbuff.h |  2 ++
 net/core/skbuff.c      | 38 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 40 insertions(+)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bade53e3b92d..99b9ad9f35ae 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1253,6 +1253,7 @@ static inline void consume_skb(struct sk_buff *skb)
 }
 #endif
 
+void consume_skb_list_fast(struct sk_buff_head *skb_list);
 void __consume_stateless_skb(struct sk_buff *skb);
 void __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
@@ -1379,6 +1380,7 @@ static inline int skb_pad(struct sk_buff *skb, int pad)
 	return __skb_pad(skb, pad, true);
 }
 #define dev_kfree_skb(a)	consume_skb(a)
+#define dev_kfree_skb_list_fast(a)	consume_skb_list_fast(a)
 
 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
 			 int offset, size_t size);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ead714006d6c..c1fae017ab81 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1152,6 +1152,44 @@ void consume_skb(struct sk_buff *skb)
 EXPORT_SYMBOL(consume_skb);
 #endif
 
+/**
+ * consume_skb_list_fast - free a list of skbs
+ * @skb_list: head of the buffer list
+ *
+ * Add the given list of SKBs to the per-CPU recycle list. The assumption is
+ * that these buffers were originally allocated from the skb recycler and were
+ * transmitted through a controlled fast xmit path, removing the need for
+ * additional checks before recycling the buffers back to the pool.
+ */
+void consume_skb_list_fast(struct sk_buff_head *skb_list)
+{
+	struct sk_buff *skb = NULL;
+
+	if (likely(skb_recycler_consume_list_fast(skb_list))) {
+		return;
+	}
+
+	while ((skb = skb_dequeue(skb_list)) != NULL) {
+		/*
+		 * Check if release head state is needed
+		 */
+		skb_release_head_state(skb);
+
+		trace_consume_skb(skb);
+
+		/*
+		 * We're not recycling so now we need to do the rest of what
+		 * we would have done in __kfree_skb (above and beyond the
+		 * skb_release_head_state that we already did).
+		 */
+		if (likely(skb->head))
+			skb_release_data(skb);
+
+		kfree_skbmem(skb);
+	}
+}
+EXPORT_SYMBOL(consume_skb_list_fast);
+
 /**
  * __consume_stateless_skb - free an skbuff, assuming it is stateless
  * @skb: buffer to free
--
2.34.1
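
A minimal usage sketch, not part of the diff above: the batch-free API is aimed at callers such as a driver tx-completion path that drains many recycler-allocated skbs at once. struct example_txc_ring and example_fetch_completed_skb() below are hypothetical placeholders for the caller's completion state; skb_queue_head_init(), __skb_queue_tail() and skb_queue_empty() are existing kernel helpers, and dev_kfree_skb_list_fast() is the macro added by this patch.

/*
 * Usage sketch (assumed caller, not part of the patch): collect
 * recycler-allocated skbs into an sk_buff_head and free the whole
 * batch with one call instead of per-buffer consume_skb().
 */
#include <linux/skbuff.h>

struct example_txc_ring;	/* hypothetical driver completion ring */
struct sk_buff *example_fetch_completed_skb(struct example_txc_ring *ring);

static void example_tx_complete(struct example_txc_ring *ring, int budget)
{
	struct sk_buff_head free_list;
	struct sk_buff *skb;

	/*
	 * Use skb_queue_head_init() rather than the lockless variant:
	 * the fallback path in consume_skb_list_fast() dequeues with
	 * skb_dequeue(), which takes the list lock.
	 */
	skb_queue_head_init(&free_list);

	/* Only queue buffers that came from the recycler fast-xmit path. */
	while (budget-- > 0 && (skb = example_fetch_completed_skb(ring)) != NULL)
		__skb_queue_tail(&free_list, skb);

	/*
	 * One call frees the whole batch: either the recycler absorbs the
	 * list in a single per-CPU operation, or each skb is released by
	 * the fallback loop in consume_skb_list_fast().
	 */
	if (!skb_queue_empty(&free_list))
		dev_kfree_skb_list_fast(&free_list);
}

If the list may contain skbs that did not come from the recycler fast-xmit path, the caller should fall back to per-buffer dev_kfree_skb(), since consume_skb_list_fast() skips the usual per-skb checks by design.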