path: root/kernel/sched/core.c
author     Peter Zijlstra <peterz@infradead.org>    2025-09-11 12:09:19 +0200
committer  Peter Zijlstra <peterz@infradead.org>    2025-10-16 11:13:54 +0200
commit     d4c64207b88a60dd15a38c790bb73c0b6f9a8c40 (patch)
tree       789ee282a2ee696cc29148051c6f2ceeffef4914 /kernel/sched/core.c
parent     5892cbd85dbf9059b8a3a7dd8ab64c0fce671029 (diff)
sched: Cleanup the sched_change NOCLOCK usage
Teach the sched_change pattern how to do update_rq_clock(); this allows
for some simplifications / cleanups.

Suggested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
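The change is easiest to see in the sched_change_begin() hunk near the end of the diff: when a caller does not pass DEQUEUE_NOCLOCK, the helper now performs update_rq_clock() itself and then sets the flag, so call sites no longer have to open-code the clock update before entering the scope. The standalone sketch below models that lazy, do-it-once pattern outside the kernel; every name in it (change_begin, change_end, rq_clock_update, NOCLOCK, SAVE) is an illustrative stand-in, not the kernel API.

/*
 * Toy model of folding update_rq_clock() into sched_change_begin():
 * the begin helper refreshes the clock at most once per scope unless
 * the caller states it is already current (NOCLOCK), and records that
 * in the flags so everything downstream may rely on a fresh clock.
 * Illustrative standalone C, not kernel code.
 */
#include <stdio.h>

#define NOCLOCK 0x1     /* clock already updated, do not do it again */
#define SAVE    0x2     /* unrelated flag, mirrors DEQUEUE_SAVE */

struct runqueue {
        unsigned long long clock;       /* stand-in for rq->clock */
};

static void rq_clock_update(struct runqueue *rq)
{
        rq->clock++;    /* pretend to sample a hardware clock */
}

struct change_ctx {
        struct runqueue *rq;
        unsigned int flags;
};

/* Begin a change scope: lazily update the clock exactly once. */
static struct change_ctx change_begin(struct runqueue *rq, unsigned int flags)
{
        if (!(flags & NOCLOCK)) {
                rq_clock_update(rq);
                flags |= NOCLOCK;       /* later code may assume a fresh clock */
        }
        return (struct change_ctx){ .rq = rq, .flags = flags };
}

static void change_end(struct change_ctx *ctx)
{
        (void)ctx;      /* nothing clock-related left to undo */
}

int main(void)
{
        struct runqueue rq = { .clock = 0 };

        /* Caller no longer opens the scope with an explicit clock update. */
        struct change_ctx ctx = change_begin(&rq, SAVE);
        /* ... perform the state change under the scope ... */
        change_end(&ctx);

        printf("clock=%llu flags=%#x\n", rq.clock, ctx.flags);
        return 0;
}

With the clock handled centrally in sched_change_begin(), the call sites shown in the hunks below can drop both their explicit update_rq_clock() calls and the DEQUEUE_NOCLOCK / ENQUEUE_NOCLOCK bits.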
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--   kernel/sched/core.c | 33
1 file changed, 11 insertions(+), 22 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e715147c31b2..3d5659f13624 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2346,10 +2346,8 @@ static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
         if (p->cpus_ptr != &p->cpus_mask)
                 return;

-        scoped_guard (task_rq_lock, p) {
-                update_rq_clock(scope.rq);
+        scoped_guard (task_rq_lock, p)
                 do_set_cpus_allowed(p, &ac);
-        }
 }

 void ___migrate_enable(void)
@@ -2666,9 +2664,7 @@ void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx
 static void
 do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
 {
-        u32 flags = DEQUEUE_SAVE | DEQUEUE_NOCLOCK;
-
-        scoped_guard (sched_change, p, flags) {
+        scoped_guard (sched_change, p, DEQUEUE_SAVE) {
                 p->sched_class->set_cpus_allowed(p, ctx);
                 mm_set_cpus_allowed(p->mm, ctx->new_mask);
         }
@@ -2690,10 +2686,8 @@ void set_cpus_allowed_force(struct task_struct *p, const struct cpumask *new_mas
                 struct rcu_head rcu;
         };

-        scoped_guard (__task_rq_lock, p) {
-                update_rq_clock(scope.rq);
+        scoped_guard (__task_rq_lock, p)
                 do_set_cpus_allowed(p, &ac);
-        }

         /*
          * Because this is called with p->pi_lock held, it is not possible
@@ -9108,16 +9102,13 @@ static void sched_change_group(struct task_struct *tsk)
  */
 void sched_move_task(struct task_struct *tsk, bool for_autogroup)
 {
-        unsigned int queue_flags =
-                DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
+        unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
         bool resched = false;
         struct rq *rq;

         CLASS(task_rq_lock, rq_guard)(tsk);
         rq = rq_guard.rq;

-        update_rq_clock(rq);
-
         scoped_guard (sched_change, tsk, queue_flags) {
                 sched_change_group(tsk);
                 if (!for_autogroup)
@@ -10792,16 +10783,14 @@ struct sched_change_ctx *sched_change_begin(struct task_struct *p, unsigned int
         lockdep_assert_rq_held(rq);

+        if (!(flags & DEQUEUE_NOCLOCK)) {
+                update_rq_clock(rq);
+                flags |= DEQUEUE_NOCLOCK;
+        }
+
         if (flags & DEQUEUE_CLASS) {
-                if (p->sched_class->switching_from) {
-                        /*
-                         * switching_from_fair() assumes CLASS implies NOCLOCK;
-                         * fixing this assumption would mean switching_from()
-                         * would need to be able to change flags.
-                         */
-                        WARN_ON(!(flags & DEQUEUE_NOCLOCK));
+                if (p->sched_class->switching_from)
                         p->sched_class->switching_from(rq, p);
-                }
         }

         *ctx = (struct sched_change_ctx){
@@ -10840,7 +10829,7 @@ void sched_change_end(struct sched_change_ctx *ctx)
                 p->sched_class->switching_to(rq, p);

         if (ctx->queued)
-                enqueue_task(rq, p, ctx->flags | ENQUEUE_NOCLOCK);
+                enqueue_task(rq, p, ctx->flags);
         if (ctx->running)
                 set_next_task(rq, p);