summaryrefslogtreecommitdiff
path: root/kernel/sched/core.c
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2025-11-19 18:27:11 +0100
committerThomas Gleixner <tglx@linutronix.de>2025-11-25 19:45:41 +0100
commit51dd92c71a38647803478fb81e1812286a8998b1 (patch)
tree9307edac0d7b5e58d362cffe824bc726c2103113 /kernel/sched/core.c
parentb0c3d51b54f8a4f4c809432d210c0c983d5cd97e (diff)
sched/mmcid: Serialize sched_mm_cid_fork()/exit() with a mutex
Prepare for the new CID management scheme which puts the CID ownership transition into the fork() and exit() slow path by serializing sched_mm_cid_fork()/exit() with it, so task list and cpu mask walks can be done in interruptible and preemptible code. The contention on it is not worse than on other concurrency controls in the fork()/exit() machinery. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Link: https://patch.msgid.link/20251119172549.895826703@linutronix.de
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--kernel/sched/core.c22
1 file changed, 22 insertions, 0 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f9295c42da22..01903cf03ab2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10370,6 +10370,25 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count)
#ifdef CONFIG_SCHED_MM_CID
/*
+ * Concurrency IDentifier management
+ *
+ * Serialization rules:
+ *
+ * mm::mm_cid::mutex: Serializes fork() and exit() and therefore
+ * protects mm::mm_cid::users.
+ *
+ * mm::mm_cid::lock: Serializes mm_update_max_cids() and
+ * mm_update_cpus_allowed(). Nests in mm_cid::mutex
+ * and runqueue lock.
+ *
+ * The mm_cidmask bitmap is not protected by any of the mm::mm_cid locks
+ * and can only be modified with atomic operations.
+ *
+ * The mm::mm_cid::pcpu per CPU storage is protected by the CPU's runqueue
+ * lock.
+ */
+
+/*
* Update the CID range properties when the constraints change. Invoked via
* fork(), exit() and affinity changes
*/
@@ -10412,6 +10431,7 @@ void sched_mm_cid_fork(struct task_struct *t)
WARN_ON_ONCE(!mm || t->mm_cid.cid != MM_CID_UNSET);
+ guard(mutex)(&mm->mm_cid.mutex);
guard(raw_spinlock)(&mm->mm_cid.lock);
t->mm_cid.active = 1;
mm->mm_cid.users++;
@@ -10431,6 +10451,7 @@ void sched_mm_cid_exit(struct task_struct *t)
if (!mm || !t->mm_cid.active)
return;
+ guard(mutex)(&mm->mm_cid.mutex);
guard(raw_spinlock)(&mm->mm_cid.lock);
t->mm_cid.active = 0;
mm->mm_cid.users--;
@@ -10467,6 +10488,7 @@ void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
mm->mm_cid.users = 0;
raw_spin_lock_init(&mm->mm_cid.lock);
+ mutex_init(&mm->mm_cid.mutex);
cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
bitmap_zero(mm_cidmask(mm), num_possible_cpus());
}