| author | Thomas Gleixner <tglx@linutronix.de> | 2025-11-19 18:27:09 +0100 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2025-11-25 19:45:40 +0100 |
| commit | b0c3d51b54f8a4f4c809432d210c0c983d5cd97e | (patch) |
| tree | 7c76e8d9f0631969fa9a2833d950b7394fbb4787 | /kernel/sched/core.c |
| parent | bf070520e398679cd582b3c3e44107bf22c143ba | (diff) |
sched/mmcid: Provide precomputed maximal value
Reading mm::mm_users and mm::mm_cid::nr_cpus_allowed every time to compute
the maximal CID value is wasteful, as that value only changes on fork(),
exit() and occasionally when the affinity changes.

So it can easily be precomputed at those points and provided in mm::mm_cid
for consumption in the hot path.
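To illustrate the idea outside the kernel, here is a minimal user-space C sketch of the precomputation. It is hypothetical: the struct and helper names (mm_mm_cid_sketch, update_max_cids, read_max_cids) merely mirror the patch, C11 atomics stand in for the kernel's WRITE_ONCE()/READ_ONCE(), and the locking is omitted. The slow path recomputes min(nr_cpus_allowed, users) only at the change points; the hot path is reduced to a single load.

```c
#include <stdatomic.h>
#include <stdio.h>

/*
 * Hypothetical user-space model; field names mirror struct mm_mm_cid
 * from the patch, but the types are simplified and the lock is omitted.
 */
struct mm_mm_cid_sketch {
	unsigned int nr_cpus_allowed;	/* weight of the mm-wide affinity mask */
	unsigned int users;		/* user space tasks sharing the MM */
	_Atomic unsigned int max_cids;	/* precomputed min() of the above */
};

/* Slow path: run only on fork(), exit() and affinity changes */
static void update_max_cids(struct mm_mm_cid_sketch *mc)
{
	unsigned int max_cids = mc->users < mc->nr_cpus_allowed ?
				mc->users : mc->nr_cpus_allowed;

	/* Models the kernel's WRITE_ONCE() */
	atomic_store_explicit(&mc->max_cids, max_cids, memory_order_relaxed);
}

/* Hot path: one plain load instead of two loads plus a min() */
static unsigned int read_max_cids(struct mm_mm_cid_sketch *mc)
{
	/* Models the kernel's READ_ONCE() */
	return atomic_load_explicit(&mc->max_cids, memory_order_relaxed);
}

int main(void)
{
	struct mm_mm_cid_sketch mc = { .nr_cpus_allowed = 8, .users = 3 };

	update_max_cids(&mc);				/* e.g. after a fork() */
	printf("max_cids = %u\n", read_max_cids(&mc));	/* prints 3 */
	return 0;
}
```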
There is an issue with using mm::mm_users for this accounting, though: it
does not necessarily reflect the number of user space tasks, as other kernel
code can take temporary references on the MM, which skews the picture.

Solve that by adding a users counter to struct mm_mm_cid, which is modified
by fork() and exit() and used for the precomputation under mm_mm_cid::lock.
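A second hypothetical user-space sketch models the accounting itself: the dedicated counter is only ever touched by the fork() and exit() paths, under the same lock that guards the precomputation, so temporary kernel references (which do bump mm_users) never distort the result. The names (cid_accounting, account_fork, account_exit) are invented for illustration, and a pthread mutex stands in for the raw spinlock.

```c
#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for struct mm_mm_cid; the pthread mutex models
 * the kernel's raw spinlock mm_mm_cid::lock. */
struct cid_accounting {
	pthread_mutex_t lock;
	unsigned int users;		/* user space tasks only, unlike mm_users */
	unsigned int nr_cpus_allowed;
	unsigned int max_cids;
};

static void update_max_cids(struct cid_accounting *mc)
{
	/* Caller holds mc->lock, matching lockdep_assert_held() in the patch */
	mc->max_cids = mc->users < mc->nr_cpus_allowed ?
		       mc->users : mc->nr_cpus_allowed;
}

/* fork(): a new user space task joins the MM */
static void account_fork(struct cid_accounting *mc)
{
	pthread_mutex_lock(&mc->lock);
	mc->users++;
	update_max_cids(mc);
	pthread_mutex_unlock(&mc->lock);
}

/* exit(): the task can no longer return to user space */
static void account_exit(struct cid_accounting *mc)
{
	pthread_mutex_lock(&mc->lock);
	mc->users--;
	update_max_cids(mc);
	pthread_mutex_unlock(&mc->lock);
}

int main(void)
{
	struct cid_accounting mc = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.nr_cpus_allowed = 4,
	};

	account_fork(&mc);			/* first task */
	account_fork(&mc);			/* second task */
	printf("max_cids = %u\n", mc.max_cids);	/* prints 2 */
	account_exit(&mc);
	printf("max_cids = %u\n", mc.max_cids);	/* prints 1 */
	return 0;
}
```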
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://patch.msgid.link/20251119172549.832764634@linutronix.de
Diffstat (limited to 'kernel/sched/core.c')
| -rw-r--r-- | kernel/sched/core.c | 59 |
1 file changed, 42 insertions(+), 17 deletions(-)
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 34b6c31eca3a..f9295c42da22 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4485,7 +4485,6 @@ static void __sched_fork(u64 clone_flags, struct task_struct *p)
 	init_numa_balancing(clone_flags, p);
 	p->wake_entry.u_flags = CSD_TYPE_TTWU;
 	p->migration_pending = NULL;
-	init_sched_mm_cid(p);
 }
 
 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
@@ -10371,15 +10370,27 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count)
 #ifdef CONFIG_SCHED_MM_CID
 
 /*
- * When a task exits, the MM CID held by the task is not longer required as
- * the task cannot return to user space.
+ * Update the CID range properties when the constraints change. Invoked via
+ * fork(), exit() and affinity changes
  */
+static void mm_update_max_cids(struct mm_struct *mm)
+{
+	struct mm_mm_cid *mc = &mm->mm_cid;
+	unsigned int max_cids;
+
+	lockdep_assert_held(&mm->mm_cid.lock);
+
+	/* Calculate the new maximum constraint */
+	max_cids = min(mc->nr_cpus_allowed, mc->users);
+	WRITE_ONCE(mc->max_cids, max_cids);
+}
+
 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk)
 {
 	struct cpumask *mm_allowed;
 	unsigned int weight;
 
-	if (!mm)
+	if (!mm || !READ_ONCE(mm->mm_cid.users))
 		return;
 
 	/*
@@ -10389,9 +10400,30 @@ static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpu
 	guard(raw_spinlock)(&mm->mm_cid.lock);
 	mm_allowed = mm_cpus_allowed(mm);
 	weight = cpumask_weighted_or(mm_allowed, mm_allowed, affmsk);
+	if (weight == mm->mm_cid.nr_cpus_allowed)
+		return;
 	WRITE_ONCE(mm->mm_cid.nr_cpus_allowed, weight);
+	mm_update_max_cids(mm);
+}
+
+void sched_mm_cid_fork(struct task_struct *t)
+{
+	struct mm_struct *mm = t->mm;
+
+	WARN_ON_ONCE(!mm || t->mm_cid.cid != MM_CID_UNSET);
+
+	guard(raw_spinlock)(&mm->mm_cid.lock);
+	t->mm_cid.active = 1;
+	mm->mm_cid.users++;
+	/* Preset last_cid for mm_cid_select() */
+	t->mm_cid.last_cid = READ_ONCE(mm->mm_cid.max_cids) - 1;
+	mm_update_max_cids(mm);
 }
 
+/*
+ * When a task exits, the MM CID held by the task is not longer required as
+ * the task cannot return to user space.
+ */
 void sched_mm_cid_exit(struct task_struct *t)
 {
 	struct mm_struct *mm = t->mm;
@@ -10399,12 +10431,14 @@ void sched_mm_cid_exit(struct task_struct *t)
 	if (!mm || !t->mm_cid.active)
 		return;
 
-	guard(preempt)();
+	guard(raw_spinlock)(&mm->mm_cid.lock);
 	t->mm_cid.active = 0;
+	mm->mm_cid.users--;
 	if (t->mm_cid.cid != MM_CID_UNSET) {
 		clear_bit(t->mm_cid.cid, mm_cidmask(mm));
 		t->mm_cid.cid = MM_CID_UNSET;
 	}
+	mm_update_max_cids(mm);
 }
 
 /* Deactivate MM CID allocation across execve() */
@@ -10416,22 +10450,11 @@ void sched_mm_cid_before_execve(struct task_struct *t)
 /* Reactivate MM CID after successful execve() */
 void sched_mm_cid_after_execve(struct task_struct *t)
 {
-	struct mm_struct *mm = t->mm;
-
-	if (!mm)
-		return;
-
+	sched_mm_cid_fork(t);
 	guard(preempt)();
-	t->mm_cid.active = 1;
 	mm_cid_select(t);
 }
 
-void sched_mm_cid_fork(struct task_struct *t)
-{
-	WARN_ON_ONCE(!t->mm || t->mm_cid.cid != MM_CID_UNSET);
-	t->mm_cid.active = 1;
-}
-
 void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
 {
 	struct mm_cid_pcpu __percpu *pcpu = mm->mm_cid.pcpu;
@@ -10440,7 +10463,9 @@ void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
 	for_each_possible_cpu(cpu)
 		per_cpu_ptr(pcpu, cpu)->cid = MM_CID_UNSET;
 
+	mm->mm_cid.max_cids = 0;
 	mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
+	mm->mm_cid.users = 0;
 	raw_spin_lock_init(&mm->mm_cid.lock);
 	cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
 	bitmap_zero(mm_cidmask(mm), num_possible_cpus());
```