diff options
| author | Thomas Gleixner <tglx@linutronix.de> | 2025-11-19 18:26:47 +0100 |
|---|---|---|
| committer | Peter Zijlstra <peterz@infradead.org> | 2025-11-20 12:14:52 +0100 |
| commit | 8cea569ca785060b8c5cc7800713ddc3b1548a94 (patch) | |
| tree | ea8ae59120864ce14db53e93e87473230fc5eeb0 /kernel/sched/sched.h | |
| parent | 77d7dc8bef482e987036bc204136bbda552d95cd (diff) | |
sched/mmcid: Use proper data structures
Having a lot of CID functionality specific members in struct task_struct
and struct mm_struct is not really making the code easier to read.
Encapsulate the CID specific parts in data structures and keep them
separate from the stuff they are embedded in.
No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://patch.msgid.link/20251119172549.131573768@linutronix.de
Diffstat (limited to 'kernel/sched/sched.h')
| -rw-r--r-- | kernel/sched/sched.h | 26 |
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index bf227c27b889..a17f04f075e1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3549,8 +3549,8 @@ static inline void init_sched_mm_cid(struct task_struct *t)
 		return;

 	/* Preset last_mm_cid */
-	max_cid = min_t(int, READ_ONCE(mm->nr_cpus_allowed), atomic_read(&mm->mm_users));
-	t->last_mm_cid = max_cid - 1;
+	max_cid = min_t(int, READ_ONCE(mm->mm_cid.nr_cpus_allowed), atomic_read(&mm->mm_users));
+	t->mm_cid.last_cid = max_cid - 1;
 }

 static inline bool __mm_cid_get(struct task_struct *t, unsigned int cid, unsigned int max_cids)
@@ -3561,8 +3561,8 @@ static inline bool __mm_cid_get(struct task_struct *t, unsigned int cid, unsigned int max_cids)
 		return false;
 	if (cpumask_test_and_set_cpu(cid, mm_cidmask(mm)))
 		return false;
-	t->mm_cid = t->last_mm_cid = cid;
-	__this_cpu_write(mm->pcpu_cid->cid, cid);
+	t->mm_cid.cid = t->mm_cid.last_cid = cid;
+	__this_cpu_write(mm->mm_cid.pcpu->cid, cid);
 	return true;
 }
@@ -3571,14 +3571,14 @@ static inline bool mm_cid_get(struct task_struct *t)
 	struct mm_struct *mm = t->mm;
 	unsigned int max_cids;

-	max_cids = min_t(int, READ_ONCE(mm->nr_cpus_allowed), atomic_read(&mm->mm_users));
+	max_cids = min_t(int, READ_ONCE(mm->mm_cid.nr_cpus_allowed), atomic_read(&mm->mm_users));

 	/* Try to reuse the last CID of this task */
-	if (__mm_cid_get(t, t->last_mm_cid, max_cids))
+	if (__mm_cid_get(t, t->mm_cid.last_cid, max_cids))
 		return true;

 	/* Try to reuse the last CID of this mm on this CPU */
-	if (__mm_cid_get(t, __this_cpu_read(mm->pcpu_cid->cid), max_cids))
+	if (__mm_cid_get(t, __this_cpu_read(mm->mm_cid.pcpu->cid), max_cids))
 		return true;

 	/* Try the first zero bit in the cidmask. */
@@ -3601,15 +3601,15 @@ static inline void mm_cid_select(struct task_struct *t)
 static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *next)
 {
-	if (prev->mm_cid_active) {
-		if (prev->mm_cid != MM_CID_UNSET)
-			cpumask_clear_cpu(prev->mm_cid, mm_cidmask(prev->mm));
-		prev->mm_cid = MM_CID_UNSET;
+	if (prev->mm_cid.active) {
+		if (prev->mm_cid.cid != MM_CID_UNSET)
+			cpumask_clear_cpu(prev->mm_cid.cid, mm_cidmask(prev->mm));
+		prev->mm_cid.cid = MM_CID_UNSET;
 	}
-	if (next->mm_cid_active) {
+	if (next->mm_cid.active) {
 		mm_cid_select(next);
-		rseq_sched_set_task_mm_cid(next, next->mm_cid);
+		rseq_sched_set_task_mm_cid(next, next->mm_cid.cid);
 	}
 }