summaryrefslogtreecommitdiff
path: root/include/net
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2025-09-09 12:19:42 +0000
committerJakub Kicinski <kuba@kernel.org>2025-09-14 11:35:17 -0700
commitfdae0ab67d57d480dc61e9fb45678bbdc3786711 (patch)
tree9a45268e106c211e75c55a285db8569a9849747b /include/net
parent278289bcec901663868048497e36c92560bd1b14 (diff)
net: use NUMA drop counters for softnet_data.dropped
Hosts under DOS attack can suffer from false sharing in enqueue_to_backlog() : atomic_inc(&sd->dropped). This is because sd->dropped can be touched from many cpus, possibly residing on different NUMA nodes. Generalize the sk_drop_counters infrastructure added in commit c51613fa276f ("net: add sk->sk_drop_counters") and use it to replace softnet_data.dropped with NUMA friendly softnet_data.drop_counters. This adds 64 bytes per cpu, maybe more in the future if we increase the number of counters (currently 2) per 'struct numa_drop_counters'. Signed-off-by: Eric Dumazet <edumazet@google.com> Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com> Link: https://patch.msgid.link/20250909121942.1202585-1-edumazet@google.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'include/net')
-rw-r--r--include/net/raw.h2
-rw-r--r--include/net/sock.h37
2 files changed, 13 insertions, 26 deletions
diff --git a/include/net/raw.h b/include/net/raw.h
index d52709139060..66c0ffeada2e 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -81,7 +81,7 @@ struct raw_sock {
struct inet_sock inet;
struct icmp_filter filter;
u32 ipmr_table;
- struct socket_drop_counters drop_counters;
+ struct numa_drop_counters drop_counters;
};
#define raw_sk(ptr) container_of_const(ptr, struct raw_sock, inet.sk)
diff --git a/include/net/sock.h b/include/net/sock.h
index 896bec2d2176..0fd465935334 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -102,11 +102,6 @@ struct net;
typedef __u32 __bitwise __portpair;
typedef __u64 __bitwise __addrpair;
-struct socket_drop_counters {
- atomic_t drops0 ____cacheline_aligned_in_smp;
- atomic_t drops1 ____cacheline_aligned_in_smp;
-};
-
/**
* struct sock_common - minimal network layer representation of sockets
* @skc_daddr: Foreign IPv4 addr
@@ -287,7 +282,7 @@ struct sk_filter;
* @sk_err_soft: errors that don't cause failure but are the cause of a
* persistent failure not just 'timed out'
* @sk_drops: raw/udp drops counter
- * @sk_drop_counters: optional pointer to socket_drop_counters
+ * @sk_drop_counters: optional pointer to numa_drop_counters
* @sk_ack_backlog: current listen backlog
* @sk_max_ack_backlog: listen backlog set in listen()
* @sk_uid: user id of owner
@@ -456,7 +451,7 @@ struct sock {
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
#endif
- struct socket_drop_counters *sk_drop_counters;
+ struct numa_drop_counters *sk_drop_counters;
__cacheline_group_end(sock_read_rxtx);
__cacheline_group_begin(sock_write_rxtx);
@@ -2698,18 +2693,12 @@ struct sock_skb_cb {
static inline void sk_drops_add(struct sock *sk, int segs)
{
- struct socket_drop_counters *sdc = sk->sk_drop_counters;
+ struct numa_drop_counters *ndc = sk->sk_drop_counters;
- if (sdc) {
- int n = numa_node_id() % 2;
-
- if (n)
- atomic_add(segs, &sdc->drops1);
- else
- atomic_add(segs, &sdc->drops0);
- } else {
+ if (ndc)
+ numa_drop_add(ndc, segs);
+ else
atomic_add(segs, &sk->sk_drops);
- }
}
static inline void sk_drops_inc(struct sock *sk)
@@ -2719,23 +2708,21 @@ static inline void sk_drops_inc(struct sock *sk)
static inline int sk_drops_read(const struct sock *sk)
{
- const struct socket_drop_counters *sdc = sk->sk_drop_counters;
+ const struct numa_drop_counters *ndc = sk->sk_drop_counters;
- if (sdc) {
+ if (ndc) {
DEBUG_NET_WARN_ON_ONCE(atomic_read(&sk->sk_drops));
- return atomic_read(&sdc->drops0) + atomic_read(&sdc->drops1);
+ return numa_drop_read(ndc);
}
return atomic_read(&sk->sk_drops);
}
static inline void sk_drops_reset(struct sock *sk)
{
- struct socket_drop_counters *sdc = sk->sk_drop_counters;
+ struct numa_drop_counters *ndc = sk->sk_drop_counters;
- if (sdc) {
- atomic_set(&sdc->drops0, 0);
- atomic_set(&sdc->drops1, 0);
- }
+ if (ndc)
+ numa_drop_reset(ndc);
atomic_set(&sk->sk_drops, 0);
}