author		Eric Dumazet <edumazet@google.com>	2025-09-28 08:49:33 +0000
committer	Paolo Abeni <pabeni@redhat.com>	2025-09-30 15:45:53 +0200
commit		844c9db7f7f5fe1b0b53ed9f1c2bc7313b3021c8
tree		9dd9c2e17b08ae6dd785ea2e7dbf7ac65c3f70f6	/net/core/skbuff.c
parent		9c94ae6bb0b2895024b6e29fcc1cbec968b4776a
net: use llist for sd->defer_list
Get rid of sd->defer_lock and adopt llist operations.

We optimize skb_attempt_defer_free() for the common case, where the
packet is queued. Otherwise sd->defer_count keeps increasing until
skb_defer_free_flush() clears it.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Jason Xing <kerneljasonxing@gmail.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250928084934.3266948-3-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
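As background, the llist pattern this patch adopts can be shown with a
minimal userspace C analogue (a hypothetical demo, not kernel code;
push() and del_all() are invented names standing in for llist_add()
and llist_del_all()): producers publish nodes with a lock-free
compare-exchange loop, and the consumer detaches the whole list with a
single atomic exchange.

/* Hypothetical userspace demo of the llist pattern; build: cc -std=c11 demo.c */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int value;
};

/* Plays the role of sd->defer_list: a lock-free LIFO head pointer. */
static _Atomic(struct node *) list_head;

/* Analogue of llist_add(): publish a node without taking any lock. */
static void push(struct node *n)
{
	struct node *first = atomic_load(&list_head);

	do {
		n->next = first;
		/* On CAS failure, 'first' is reloaded with the current head. */
	} while (!atomic_compare_exchange_weak(&list_head, &first, n));
}

/* Analogue of llist_del_all(): steal the entire list in one step. */
static struct node *del_all(void)
{
	return atomic_exchange(&list_head, NULL);
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));

		n->value = i;
		push(n);
	}

	/* The consumer walks the detached batch without synchronization. */
	for (struct node *n = del_all(); n;) {
		struct node *next = n->next;

		printf("%d\n", n->value);	/* prints 3 2 1 0: LIFO order */
		free(n);
		n = next;
	}
	return 0;
}

Because producers only ever swing one head pointer and the consumer
takes the entire batch at once, no spinlock such as sd->defer_lock is
needed on either side.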
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--	net/core/skbuff.c	15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 16cd357d62a6..17455fc1e692 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -7185,6 +7185,7 @@ static void kfree_skb_napi_cache(struct sk_buff *skb)
  */
 void skb_attempt_defer_free(struct sk_buff *skb)
 {
+	unsigned long defer_count;
 	int cpu = skb->alloc_cpu;
 	struct softnet_data *sd;
 	unsigned int defer_max;
@@ -7202,17 +7203,15 @@ nodefer:	kfree_skb_napi_cache(skb);
 
 	sd = &per_cpu(softnet_data, cpu);
 	defer_max = READ_ONCE(net_hotdata.sysctl_skb_defer_max);
-	if (atomic_read(&sd->defer_count) >= defer_max)
+	defer_count = atomic_long_inc_return(&sd->defer_count);
+
+	if (defer_count >= defer_max)
 		goto nodefer;
 
-	spin_lock_bh(&sd->defer_lock);
-	/* Send an IPI every time queue reaches half capacity. */
-	kick = (atomic_inc_return(&sd->defer_count) - 1) == (defer_max >> 1);
+	llist_add(&skb->ll_node, &sd->defer_list);
 
-	skb->next = sd->defer_list;
-	/* Paired with READ_ONCE() in skb_defer_free_flush() */
-	WRITE_ONCE(sd->defer_list, skb);
-	spin_unlock_bh(&sd->defer_lock);
+	/* Send an IPI every time queue reaches half capacity. */
+	kick = (defer_count - 1) == (defer_max >> 1);
 
 	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
 	 * if we are unlucky enough (this seems very unlikely).
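The consumer side, skb_defer_free_flush(), lives in net/core/dev.c and
is therefore outside this file's diff. A sketch of its likely shape
after this change, assuming the llist API and the atomic defer_count
from this series (not the verbatim upstream code):

/* Sketch only: the real function lives in net/core/dev.c and may differ. */
static void skb_defer_free_flush(struct softnet_data *sd)
{
	struct llist_node *free_list;
	struct sk_buff *skb, *next;

	if (llist_empty(&sd->defer_list))
		return;

	/* Let producers start counting a fresh batch. */
	atomic_long_set(&sd->defer_count, 0);

	/* Detach the whole list in one atomic operation; no lock needed. */
	free_list = llist_del_all(&sd->defer_list);

	llist_for_each_entry_safe(skb, next, free_list, ll_node)
		napi_consume_skb(skb, 1);
}

A small race between resetting defer_count and detaching the list is
presumably tolerated here, since defer_max is only a soft bound on
queue occupancy rather than a hard limit.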