Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--   kernel/sched/core.c   22
1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f9295c42da22..01903cf03ab2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10370,6 +10370,25 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count)
#ifdef CONFIG_SCHED_MM_CID
/*
+ * Concurrency IDentifier management
+ *
+ * Serialization rules:
+ *
+ * mm::mm_cid::mutex: Serializes fork() and exit() and therefore
+ * protects mm::mm_cid::users.
+ *
+ * mm::mm_cid::lock: Serializes mm_update_max_cids() and
+ * mm_update_cpus_allowed(). Nests inside mm::mm_cid::mutex
+ * and the runqueue lock.
+ *
+ * The mm_cidmask bitmap is not protected by any of the mm::mm_cid locks
+ * and can only be modified with atomic operations.
+ *
+ * The mm::mm_cid::pcpu per-CPU storage is protected by the owning CPU's
+ * runqueue lock.
+ */
+
+/*
* Update the CID range properties when the constraints change. Invoked via
* fork(), exit() and affinity changes
*/
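
As an aside, here is a rough sketch of how the fields named in the serialization-rules comment above fit together. Only the members actually touched in this diff (mutex, lock, users, nr_cpus_allowed) are taken from the patch; the struct name, the pcpu pointer type and the placement of the bitmap are assumptions made purely for illustration, not the definition from mm_types.h.

#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Illustrative sketch only -- not the layout added by this series. */
struct mm_cid_sketch {
	struct mutex		mutex;		/* serializes fork()/exit(); protects 'users' */
	raw_spinlock_t		lock;		/* serializes mm_update_max_cids() and
						 * mm_update_cpus_allowed(); nests inside
						 * 'mutex' and the runqueue lock */
	unsigned int		users;		/* tasks using a CID in this mm */
	unsigned int		nr_cpus_allowed;/* affinity constraint, from p->nr_cpus_allowed */
	void __percpu		*pcpu;		/* per-CPU CID state, protected by the
						 * owning CPU's runqueue lock */
	/* the mm_cidmask() bitmap is separate and modified only with atomic bitops */
};
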
@@ -10412,6 +10431,7 @@ void sched_mm_cid_fork(struct task_struct *t)
WARN_ON_ONCE(!mm || t->mm_cid.cid != MM_CID_UNSET);
+ guard(mutex)(&mm->mm_cid.mutex);
guard(raw_spinlock)(&mm->mm_cid.lock);
t->mm_cid.active = 1;
mm->mm_cid.users++;
@@ -10431,6 +10451,7 @@ void sched_mm_cid_exit(struct task_struct *t)
if (!mm || !t->mm_cid.active)
return;
+ guard(mutex)(&mm->mm_cid.mutex);
guard(raw_spinlock)(&mm->mm_cid.lock);
t->mm_cid.active = 0;
mm->mm_cid.users--;
@@ -10467,6 +10488,7 @@ void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
mm->mm_cid.users = 0;
raw_spin_lock_init(&mm->mm_cid.lock);
+ mutex_init(&mm->mm_cid.mutex);
cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
bitmap_zero(mm_cidmask(mm), num_possible_cpus());
}
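
To make the bitmap rule above concrete (mm_cidmask is touched only with atomic operations, without holding any mm::mm_cid lock), here is a minimal sketch of how a CID could be claimed and dropped. mm_cid_try_claim() and mm_cid_release() are hypothetical helper names invented for this example and are not functions introduced by this patch; mm_cidmask() is only assumed to return the mm's CID bitmap, as in the mm_init_cid() hunk above.

#include <linux/bitops.h>
#include <linux/mm_types.h>

/* Hypothetical helpers for illustration -- not part of this patch. */
static int mm_cid_try_claim(struct mm_struct *mm, unsigned int max_cids)
{
	unsigned int cid;

	/* Atomic bitops only; no mm::mm_cid lock is held here. */
	for (cid = 0; cid < max_cids; cid++) {
		if (!test_and_set_bit(cid, mm_cidmask(mm)))
			return cid;
	}
	return -1;
}

static void mm_cid_release(struct mm_struct *mm, unsigned int cid)
{
	/* clear_bit() is atomic as well, so release needs no lock either. */
	clear_bit(cid, mm_cidmask(mm));
}
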