author    Thomas Gleixner <tglx@linutronix.de>    2025-10-27 09:45:14 +0100
committer Ingo Molnar <mingo@kernel.org>          2025-11-04 08:34:03 +0100
commit    39a167560a61f913560ba803a96dbe6c15239f5c (patch)
tree      eb6459ef532e42617b26670d36343e3b34ec20c1 /kernel
parent    e2d4f42271155045a49b89530f2c06ad8e9f1a1e (diff)
rseq: Optimize event setting
After removing the various condition bits earlier, it turns out that one
extra piece of information is needed to avoid setting event::sched_switch
and TIF_NOTIFY_RESUME unconditionally on every context switch.

The update of the RSEQ user space memory is only required when either the
task was interrupted in user space and schedules, or the CPU or MM CID
changes in schedule() independent of the entry mode. Right now only the
interrupted-in-user-space information is available.

Add an event flag, which is set when the CPU, the MM CID, or both change.
Evaluate this event in the scheduler to decide whether the sched_switch
event and the TIF bit need to be set. It's an extra conditional in
context_switch(), but the downside of unconditionally handling RSEQ after
a context switch to user space is way more significant. The utilized
boolean logic minimizes this to a single conditional branch.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://patch.msgid.link/20251027084307.578058898@linutronix.de
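For illustration only, here is a small user-space sketch of the boolean
logic described above: an extra "IDs changed" flag lets the scheduler fold
both update triggers into one conditional branch. All names
(rseq_event_sketch, record_ids_changed, etc.) are hypothetical stand-ins,
not the kernel's actual rseq internals.

/* Illustrative sketch only -- hypothetical names, not the kernel's rseq code. */
#include <stdbool.h>
#include <stdio.h>

struct rseq_event_sketch {
	bool has_rseq;     /* task has a registered rseq area */
	bool user_irq;     /* task was interrupted in user space */
	bool ids_changed;  /* CPU and/or MM CID changed in schedule() */
	bool sched_switch; /* rseq area must be updated before returning to user */
};

/* Producer: a CPU or MM CID change arms the event. */
static void record_ids_changed(struct rseq_event_sketch *ev)
{
	ev->ids_changed = true;
}

/*
 * Consumer, run once per context switch. Bitwise &/| on the bools keeps
 * this a single conditional branch instead of a short-circuit chain.
 */
static void sched_switch_event(struct rseq_event_sketch *ev)
{
	if (ev->has_rseq & (ev->user_irq | ev->ids_changed))
		ev->sched_switch = true;
}

int main(void)
{
	struct rseq_event_sketch ev = { .has_rseq = true };

	sched_switch_event(&ev);   /* no trigger fired: stays false */
	printf("no trigger:       sched_switch=%d\n", ev.sched_switch);

	record_ids_changed(&ev);   /* e.g. the task migrated to another CPU */
	sched_switch_event(&ev);
	printf("after CID change: sched_switch=%d\n", ev.sched_switch);
	return 0;
}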
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rseq.c         2
-rw-r--r--  kernel/sched/core.c   7
-rw-r--r--  kernel/sched/sched.h  5
3 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/kernel/rseq.c b/kernel/rseq.c
index 148fb2103023..183dde756808 100644
--- a/kernel/rseq.c
+++ b/kernel/rseq.c
@@ -464,7 +464,7 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32
	 * are updated before returning to user-space.
	 */
	current->rseq.event.has_rseq = true;
-	rseq_sched_switch_event(current);
+	rseq_force_update();
	return 0;

efault:
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b75e8e1eca4a..579a8e93578f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5118,7 +5118,6 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
	kcov_prepare_switch(prev);
	sched_info_switch(rq, prev, next);
	perf_event_task_sched_out(prev, next);
-	rseq_sched_switch_event(prev);
	fire_sched_out_preempt_notifiers(prev, next);
	kmap_local_sched_out();
	prepare_task(next);
@@ -5316,6 +5315,12 @@ context_switch(struct rq *rq, struct task_struct *prev,
	/* switch_mm_cid() requires the memory barriers above. */
	switch_mm_cid(rq, prev, next);

+	/*
+	 * Tell rseq that the task was scheduled in. Must be after
+	 * switch_mm_cid() to get the TIF flag set.
+	 */
+	rseq_sched_switch_event(next);
+
	prepare_lock_switch(rq, next, rf);

	/* Here we just switch the register state and the stack. */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index adfb6e3409d7..4838dda75b10 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2209,6 +2209,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
	smp_wmb();
	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
	p->wake_cpu = cpu;
+	rseq_sched_set_task_cpu(p, cpu);
#endif /* CONFIG_SMP */
}
@@ -3807,8 +3808,10 @@ static inline void switch_mm_cid(struct rq *rq,
		mm_cid_put_lazy(prev);
		prev->mm_cid = -1;
	}
-	if (next->mm_cid_active)
+	if (next->mm_cid_active) {
		next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next, next->mm);
+		rseq_sched_set_task_mm_cid(next, next->mm_cid);
+	}
}

#else /* !CONFIG_SCHED_MM_CID: */
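On the ordering constraint called out in the context_switch() hunk above:
switch_mm_cid() and __set_task_cpu() act as producers that record a CID or
CPU change, and rseq_sched_switch_event(next) is the consumer that turns a
recorded change into TIF_NOTIFY_RESUME, which is why it must run after
them. A hypothetical, simplified sketch of such a change-detecting setter
(names are stand-ins, not the kernel's internals):

/* Hypothetical, simplified shape of a change-detecting setter; not kernel code. */
#include <stdbool.h>

struct task_sketch {
	int cached_mm_cid;  /* last CID value propagated to the rseq area */
	bool ids_changed;   /* consumed later by the context-switch check */
};

static void set_task_mm_cid_sketch(struct task_sketch *t, int new_cid)
{
	/*
	 * Arm the event only on a real change, so the single branch in
	 * context_switch() stays untaken on the common, unchanged path.
	 */
	if (t->cached_mm_cid != new_cid) {
		t->cached_mm_cid = new_cid;
		t->ids_changed = true;
	}
}

Since only an actual CPU or MM CID change arms the flag, the extra
conditional added in context_switch() costs nothing on the common path.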