author	Thomas Gleixner <tglx@linutronix.de>	2025-11-19 18:27:22 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2025-11-25 19:45:42 +0100
commit	653fda7ae73d8033dedb65537acac0c2c287dc3f (patch)
tree	767d8491def78517d86b1d064b613888693d9fad	/kernel/sched/sched.h
parent	9da6ccbcea3de1fa704202e3346fe6c0226bfc18 (diff)
sched/mmcid: Switch over to the new mechanism
Now that all pieces are in place, change the implementations of sched_mm_cid_fork() and sched_mm_cid_exit() to adhere to the new strict ownership scheme and switch context_switch() over to use the new mm_cid_schedin() functionality.

The common case is that there is no mode change required, which makes fork() and exit() just update the user count and the constraints.

In case a new user would exceed the CID space limit, the fork() context handles the transition to per CPU mode with mm::mm_cid::mutex held. exit() handles the transition back to per task mode when the user count drops below the switch back threshold.

fork() might also be forced to handle a deferred switch back to per task mode, when an affinity change increased the number of allowed CPUs enough.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://patch.msgid.link/20251119172550.280380631@linutronix.de
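To make the described mode switching concrete, below is a small self-contained sketch of the fork()/exit() decision flow. It is purely illustrative and not the kernel implementation: the struct mm_cid_sketch type, its fields (users, max_cids, switch_back, percpu_mode) and the cid_fork()/cid_exit() helpers are made-up stand-ins for the bookkeeping the changelog describes.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model only. All names and thresholds here are invented for
 * illustration; this is not kernel code.
 */
struct mm_cid_sketch {
	unsigned int users;		/* tasks sharing this mm */
	unsigned int max_cids;		/* CID space limit */
	unsigned int switch_back;	/* threshold for returning to per task mode */
	bool percpu_mode;		/* false: per task mode, true: per CPU mode */
};

/*
 * fork(): the common case is pure bookkeeping. Switch to per CPU mode when
 * the new user would exceed the CID space, or do the deferred switch back
 * once the users fit into a (possibly grown) CID space again.
 */
static void cid_fork(struct mm_cid_sketch *s)
{
	s->users++;
	if (!s->percpu_mode && s->users > s->max_cids)
		s->percpu_mode = true;
	else if (s->percpu_mode && s->users <= s->max_cids)
		s->percpu_mode = false;
}

/*
 * exit(): drop a user and switch back to per task mode when the user
 * count falls below the switch back threshold.
 */
static void cid_exit(struct mm_cid_sketch *s)
{
	s->users--;
	if (s->percpu_mode && s->users < s->switch_back)
		s->percpu_mode = false;
}

int main(void)
{
	struct mm_cid_sketch s = { .users = 4, .max_cids = 4, .switch_back = 2, .percpu_mode = false };

	cid_fork(&s);		/* 5 users > 4 CIDs: per CPU mode */
	printf("users=%u percpu=%d\n", s.users, (int)s.percpu_mode);

	cid_exit(&s); cid_exit(&s); cid_exit(&s);
	cid_exit(&s);		/* 1 user < switch_back: per task mode again */
	printf("users=%u percpu=%d\n", s.users, (int)s.percpu_mode);
	return 0;
}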
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	76
1 files changed, 0 insertions, 76 deletions
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 82c7978d548e..f9d0515db130 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3745,83 +3745,7 @@ static inline void mm_cid_switch_to(struct task_struct *prev, struct task_struct
 	mm_cid_schedin(next);
 }
-/* Active implementation */
-static inline void init_sched_mm_cid(struct task_struct *t)
-{
-	struct mm_struct *mm = t->mm;
-	unsigned int max_cid;
-
-	if (!mm)
-		return;
-
-	/* Preset last_mm_cid */
-	max_cid = min_t(int, READ_ONCE(mm->mm_cid.nr_cpus_allowed), atomic_read(&mm->mm_users));
-	t->mm_cid.last_cid = max_cid - 1;
-}
-
-static inline bool __mm_cid_get(struct task_struct *t, unsigned int cid, unsigned int max_cids)
-{
-	struct mm_struct *mm = t->mm;
-
-	if (cid >= max_cids)
-		return false;
-	if (test_and_set_bit(cid, mm_cidmask(mm)))
-		return false;
-	t->mm_cid.cid = t->mm_cid.last_cid = cid;
-	__this_cpu_write(mm->mm_cid.pcpu->cid, cid);
-	return true;
-}
-
-static inline bool mm_cid_get(struct task_struct *t)
-{
-	struct mm_struct *mm = t->mm;
-	unsigned int max_cids;
-
-	max_cids = READ_ONCE(mm->mm_cid.max_cids);
-
-	/* Try to reuse the last CID of this task */
-	if (__mm_cid_get(t, t->mm_cid.last_cid, max_cids))
-		return true;
-
-	/* Try to reuse the last CID of this mm on this CPU */
-	if (__mm_cid_get(t, __this_cpu_read(mm->mm_cid.pcpu->cid), max_cids))
-		return true;
-
-	/* Try the first zero bit in the cidmask. */
-	return __mm_cid_get(t, find_first_zero_bit(mm_cidmask(mm), num_possible_cpus()), max_cids);
-}
-
-static inline void mm_cid_select(struct task_struct *t)
-{
-	/*
-	 * mm_cid_get() can fail when the maximum CID, which is determined
-	 * by min(mm->nr_cpus_allowed, mm->mm_users) changes concurrently.
-	 * That's a transient failure as there cannot be more tasks
-	 * concurrently on a CPU (or about to be scheduled in) than that.
-	 */
-	for (;;) {
-		if (mm_cid_get(t))
-			break;
-	}
-}
-
-static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *next)
-{
-	if (prev->mm_cid.active) {
-		if (prev->mm_cid.cid != MM_CID_UNSET)
-			clear_bit(prev->mm_cid.cid, mm_cidmask(prev->mm));
-		prev->mm_cid.cid = MM_CID_UNSET;
-	}
-
-	if (next->mm_cid.active) {
-		mm_cid_select(next);
-		rseq_sched_set_task_mm_cid(next, next->mm_cid.cid);
-	}
-}
-
 #else /* !CONFIG_SCHED_MM_CID: */
-static inline void mm_cid_select(struct task_struct *t) { }
-static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *next) { }
 static inline void mm_cid_switch_to(struct task_struct *prev, struct task_struct *next) { }
 #endif /* !CONFIG_SCHED_MM_CID */