author    Jason Xing <kernelxing@tencent.com>    2025-11-18 15:06:45 +0800
committer Jakub Kicinski <kuba@kernel.org>       2025-11-19 20:29:24 -0800
commit    2d67b5c5c67f934c54a55e00ee291a8587c5b4fe
tree      8afb369c0d379acde657f06112a39d3450f3c9e7 /net/core
parent    01d7385618992a9017198534870aec62f54f963a
net: use NAPI_SKB_CACHE_FREE to keep 32 as default to do bulk free
- Replace NAPI_SKB_CACHE_HALF with NAPI_SKB_CACHE_FREE
- Only free 32 skbs in napi_skb_cache_put()

Since the first patch in this series raised NAPI_SKB_CACHE_SIZE to 128, the number of skbs freed from the cache in one softirq pass grew from 32 to 64. A subsequent net_rx_action() calling napi_poll() a few times can easily consume 64 free slots, and we can afford to keep more sk_buffs in per-CPU storage, so set NAPI_SKB_CACHE_FREE to 32, the amount freed before. The logic is now: 1) keep 96 skbs cached, 2) free 32 skbs at a time.

Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jason Xing <kernelxing@tencent.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://patch.msgid.link/20251118070646.61344-4-kerneljasonxing@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
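To make the flush policy concrete, here is a minimal userspace C sketch of the same idea. It is an illustration only, not kernel code: CACHE_SIZE, CACHE_FREE, cache_put(), and the malloc()/free() stand-ins for the slab bulk API are hypothetical names chosen to mirror the patch.

#include <stdio.h>
#include <stdlib.h>

/* Mirror the constants from the patch: a 128-slot cache that, once
 * full, bulk-frees its 32 most recently added entries and keeps 96.
 */
#define CACHE_SIZE	128
#define CACHE_FREE	32

static void *skb_cache[CACHE_SIZE];
static unsigned int skb_count;

static void cache_put(void *obj)
{
	skb_cache[skb_count++] = obj;

	if (skb_count == CACHE_SIZE) {
		unsigned int i, remaining = CACHE_SIZE - CACHE_FREE;

		/* Stand-in for kmem_cache_free_bulk(): release the
		 * CACHE_FREE newest entries, keep `remaining` cached.
		 */
		for (i = remaining; i < CACHE_SIZE; i++)
			free(skb_cache[i]);
		skb_count = remaining;
	}
}

int main(void)
{
	/* Fill the cache to the brim once to trigger a bulk free. */
	for (int n = 0; n < CACHE_SIZE; n++)
		cache_put(malloc(64));

	printf("cached after flush: %u (expected %u)\n",
	       skb_count, CACHE_SIZE - CACHE_FREE);

	/* Drain what is still cached before exiting. */
	while (skb_count)
		free(skb_cache[--skb_count]);
	return 0;
}

Running this prints "cached after flush: 96 (expected 96)", matching the keep-96 / free-32 behavior described above.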
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/skbuff.c  14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b6fe7ab85c4a..d81ac78c32ff 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -225,7 +225,7 @@ static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 
 #define NAPI_SKB_CACHE_SIZE	128
 #define NAPI_SKB_CACHE_BULK	32
-#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)
+#define NAPI_SKB_CACHE_FREE	32
 
 struct napi_alloc_cache {
 	local_lock_t	bh_lock;
@@ -1445,7 +1445,6 @@ void __consume_stateless_skb(struct sk_buff *skb)
 static void napi_skb_cache_put(struct sk_buff *skb)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
-	u32 i;
 
 	if (!kasan_mempool_poison_object(skb))
 		return;
@@ -1454,13 +1453,16 @@ static void napi_skb_cache_put(struct sk_buff *skb)
 	nc->skb_cache[nc->skb_count++] = skb;
 
 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
-		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
+		u32 i, remaining = NAPI_SKB_CACHE_SIZE - NAPI_SKB_CACHE_FREE;
+
+		for (i = remaining; i < NAPI_SKB_CACHE_SIZE; i++)
 			kasan_mempool_unpoison_object(nc->skb_cache[i],
 						      skbuff_cache_size);
-		kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
-				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
-		nc->skb_count = NAPI_SKB_CACHE_HALF;
+		kmem_cache_free_bulk(net_hotdata.skbuff_cache,
+				     NAPI_SKB_CACHE_FREE,
+				     nc->skb_cache + remaining);
+		nc->skb_count = remaining;
 	}
 	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
 }
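Two details of the final hunk are worth noting. The flush releases the newest NAPI_SKB_CACHE_FREE entries (indices remaining through NAPI_SKB_CACHE_SIZE - 1) and keeps the oldest 96; and because each entry was KASAN-poisoned when it entered the cache (via kasan_mempool_poison_object() above), it must be unpoisoned before kmem_cache_free_bulk() hands it back to the slab allocator, which is what the loop does. The kept 96 also happens to equal three NAPI_SKB_CACHE_BULK (32) refills, though the commit message only states the 96/32 split, not that ratio as an explicit design goal.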