author	Jason Xing <kernelxing@tencent.com>	2025-11-18 15:06:46 +0800
committer	Jakub Kicinski <kuba@kernel.org>	2025-11-19 20:29:25 -0800
commit	5d7fc63ab84182f75d5e73e299ce0e7c9c20c092 (patch)
tree	04605effebde6df055d414f699255777996ec4d0
parent	2d67b5c5c67f934c54a55e00ee291a8587c5b4fe (diff)
net: prefetch the next skb in napi_skb_cache_get()
After getting the current skb in napi_skb_cache_get(), the next skb in
the cache is highly likely to be used soon, so a prefetch would be
helpful.

Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jason Xing <kernelxing@tencent.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://patch.msgid.link/20251118070646.61344-5-kerneljasonxing@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--	net/core/skbuff.c	2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d81ac78c32ff..5a1d123e7ef7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -299,6 +299,8 @@ static struct sk_buff *napi_skb_cache_get(bool alloc)
 	}
 
 	skb = nc->skb_cache[--nc->skb_count];
+	if (nc->skb_count)
+		prefetch(nc->skb_cache[nc->skb_count - 1]);
 	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
 	kasan_mempool_unpoison_object(skb, skbuff_cache_size);
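
For readers unfamiliar with the pattern, here is a minimal user-space
sketch of the same idea. The names struct obj_cache and cache_get() are
illustrative, not kernel API, and __builtin_prefetch() stands in for
the kernel's prefetch():

#include <stddef.h>
#include <stdio.h>

#define CACHE_SIZE 64

/* Hypothetical stand-in for the per-CPU napi_alloc_cache. */
struct obj_cache {
	void *slots[CACHE_SIZE];
	size_t count;
};

/* Pop the most recently cached object. Before returning, issue a
 * prefetch for the entry the *next* call will pop, so its cache line
 * is already warm when the caller comes back. */
static void *cache_get(struct obj_cache *c)
{
	void *obj;

	if (!c->count)
		return NULL;

	obj = c->slots[--c->count];
	if (c->count)	/* skip the prefetch once the cache is drained */
		__builtin_prefetch(c->slots[c->count - 1]);
	return obj;
}

int main(void)
{
	static int storage[3] = { 1, 2, 3 };
	struct obj_cache c = {
		.slots = { &storage[0], &storage[1], &storage[2] },
		.count = 3,
	};
	void *p;

	while ((p = cache_get(&c)))
		printf("%d\n", *(int *)p);
	return 0;
}

The point of the guard is twofold: the prefetch overlaps the memory
latency of the next pop with the caller's work on the current object,
and skipping it when count reaches zero avoids touching a slot that no
longer holds a valid pointer.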