author	Kuniyuki Iwashima <kuniyu@google.com>	2025-10-23 23:16:53 +0000
committer	Jakub Kicinski <kuba@kernel.org>	2025-10-27 18:04:57 -0700
commit	151b98d10ef7c3174465e409b99d8762e7e8de60 (patch)
tree	f6510eab756cae61ea6886ef44ad07864fa1b3fb /net
parent	b7185792f80a0069aa4eab87cb4263a1fb611a4e (diff)
net: Add sk_clone().
sctp_accept() will use sk_clone_lock(), but it will be called with the
parent socket locked, and sctp_sock_migrate() acquires the child lock
later. Let's add a no-lock version of sk_clone_lock().

Note that lockdep complains if we simply use bh_lock_sock_nested().

Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
Reviewed-by: Xin Long <lucien.xin@gmail.com>
Link: https://patch.msgid.link/20251023231751.4168390-5-kuniyu@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
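Existing sk_clone_lock() callers presumably keep their old behaviour through
a thin wrapper around the new function. A minimal sketch of such a wrapper,
assuming the header-side change keeps sk_clone_lock() as an inline helper
(the header change is outside this net/-limited diffstat, so the exact form
shown here is an assumption):

/* Hypothetical compatibility wrapper (e.g. in include/net/sock.h);
 * not part of this net/-only diff.
 */
static inline struct sock *sk_clone_lock(const struct sock *sk,
					 const gfp_t priority)
{
	/* Clone and return with the child bh-locked, as before. */
	return sk_clone(sk, priority, true);
}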
Diffstat (limited to 'net')
-rw-r--r--	net/core/sock.c	24
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index a99132cc0965..7a9bbc2afcf0 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2462,13 +2462,16 @@ static void sk_init_common(struct sock *sk)
}
/**
- * sk_clone_lock - clone a socket, and lock its clone
- * @sk: the socket to clone
- * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ * sk_clone - clone a socket
+ * @sk: the socket to clone
+ * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ * @lock: if true, lock the cloned sk
*
- * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
+ * If @lock is true, the clone is locked by bh_lock_sock(), and
+ * caller must unlock socket even in error path by bh_unlock_sock().
*/
-struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+struct sock *sk_clone(const struct sock *sk, const gfp_t priority,
+ bool lock)
{
struct proto *prot = READ_ONCE(sk->sk_prot);
struct sk_filter *filter;
@@ -2497,9 +2500,13 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
__netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker,
false, priority);
}
+
sk_node_init(&newsk->sk_node);
sock_lock_init(newsk);
- bh_lock_sock(newsk);
+
+ if (lock)
+ bh_lock_sock(newsk);
+
newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
newsk->sk_backlog.len = 0;
@@ -2590,12 +2597,13 @@ free:
/* It is still raw copy of parent, so invalidate
* destructor and make plain sk_free()
*/
newsk->sk_destruct = NULL;
- bh_unlock_sock(newsk);
+ if (lock)
+ bh_unlock_sock(newsk);
sk_free(newsk);
newsk = NULL;
goto out;
}
-EXPORT_SYMBOL_GPL(sk_clone_lock);
+EXPORT_SYMBOL_GPL(sk_clone);
static u32 sk_dst_gso_max_size(struct sock *sk, const struct net_device *dev)
{
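The call pattern this enables on the SCTP side, per the commit message above,
looks roughly like the sketch below. The function name and locking flow are
illustrative only, not the actual sctp_accept() code:

/* Illustrative sketch of a caller that clones while the parent is
 * already locked; names here are hypothetical.
 */
static struct sock *example_accept_clone(struct sock *parent)
{
	struct sock *child;

	lock_sock(parent);	/* parent held, as in sctp_accept() */

	/* Clone without bh_lock_sock() on the child; the child lock is
	 * taken later (e.g. during socket migration), which sidesteps
	 * the lockdep splat that bh_lock_sock_nested() would trigger
	 * if the clone were locked here under the parent lock.
	 */
	child = sk_clone(parent, GFP_KERNEL, false);

	release_sock(parent);
	return child;
}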