author    Kuniyuki Iwashima <kuniyu@google.com>    2025-11-15 02:08:38 +0000
committer Jakub Kicinski <kuba@kernel.org>         2025-11-18 19:19:32 -0800
commit    24fa77dad25c2f55cc4615c09df2201ef72c66f4 (patch)
tree      806c03df24aa522a3dc108cbecddb2e12c2adb18 /net/unix/garbage.c
parent    ab8b23150abccd34fddc3effe7776ad32c44b6c9 (diff)
af_unix: Consolidate unix_schedule_gc() and wait_for_unix_gc().
unix_schedule_gc() and wait_for_unix_gc() share some code.

Let's consolidate the two.

Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20251115020935.2643121-8-kuniyu@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
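For context, here is a sketch of the two functions as they stood before this patch, reconstructed from the removed lines and old context in the hunks below (no comments added). Both open with the same UNIX_GRAPH_NOT_CYCLIC early return, and wait_for_unix_gc() ends up kicking the same work item through unix_schedule_gc(); that overlap is the duplication the patch removes:

void unix_schedule_gc(void)
{
        if (READ_ONCE(unix_graph_state) == UNIX_GRAPH_NOT_CYCLIC)
                return;

        if (READ_ONCE(gc_in_progress))
                return;

        WRITE_ONCE(gc_in_progress, true);
        queue_work(system_dfl_wq, &unix_gc_work);
}

static void wait_for_unix_gc(struct scm_fp_list *fpl)
{
        if (READ_ONCE(unix_graph_state) == UNIX_GRAPH_NOT_CYCLIC)
                return;

        /* Penalise users who want to send AF_UNIX sockets
         * but whose sockets have not been received yet.
         */
        if (READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
                return;

        unix_schedule_gc();

        if (READ_ONCE(unix_graph_cyclic_sccs))
                flush_work(&unix_gc_work);
}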
Diffstat (limited to 'net/unix/garbage.c')
-rw-r--r--    net/unix/garbage.c    28
1 file changed, 9 insertions, 19 deletions
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index fe1f74345b66..78323d43e63e 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -279,8 +279,6 @@ void unix_update_edges(struct unix_sock *receiver)
        }
}

-static void wait_for_unix_gc(struct scm_fp_list *fpl);
-
int unix_prepare_fpl(struct scm_fp_list *fpl)
{
        struct unix_vertex *vertex;
@@ -302,7 +300,7 @@ int unix_prepare_fpl(struct scm_fp_list *fpl)
        if (!fpl->edges)
                goto err;

-       wait_for_unix_gc(fpl);
+       unix_schedule_gc(fpl->user);

        return 0;
@@ -614,21 +612,9 @@ skip_gc:

static DECLARE_WORK(unix_gc_work, unix_gc);

-void unix_schedule_gc(void)
-{
-       if (READ_ONCE(unix_graph_state) == UNIX_GRAPH_NOT_CYCLIC)
-               return;
-
-       if (READ_ONCE(gc_in_progress))
-               return;
-
-       WRITE_ONCE(gc_in_progress, true);
-       queue_work(system_dfl_wq, &unix_gc_work);
-}
-
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)

-static void wait_for_unix_gc(struct scm_fp_list *fpl)
+void unix_schedule_gc(struct user_struct *user)
{
        if (READ_ONCE(unix_graph_state) == UNIX_GRAPH_NOT_CYCLIC)
                return;
@@ -636,11 +622,15 @@ static void wait_for_unix_gc(struct scm_fp_list *fpl)
        /* Penalise users who want to send AF_UNIX sockets
         * but whose sockets have not been received yet.
         */
-       if (READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
+       if (user &&
+           READ_ONCE(user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
                return;

-       unix_schedule_gc();
+       if (!READ_ONCE(gc_in_progress)) {
+               WRITE_ONCE(gc_in_progress, true);
+               queue_work(system_dfl_wq, &unix_gc_work);
+       }

-       if (READ_ONCE(unix_graph_cyclic_sccs))
+       if (user && READ_ONCE(unix_graph_cyclic_sccs))
                flush_work(&unix_gc_work);
}