author	Peter Zijlstra <peterz@infradead.org>	2024-10-30 15:47:46 +0100
committer	Peter Zijlstra <peterz@infradead.org>	2025-10-16 11:13:51 +0200
commit	1ae5f5dfe5adc64a90b1b0ab5bd9bd7c9d140c28 (patch)
tree	62c65ad77a698c1dbb920100bf94330a7ffa0d08 /kernel/sched/core.c
parent	637b0682821b144d5993211cf0a768b322138a69 (diff)
sched: Cleanup sched_delayed handling for class switches
Use the new sched_class::switching_from() method to dequeue delayed
tasks before switching to another class.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
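For context, a hedged sketch (not taken from this commit) of what the fair-class hook could look like; the name switching_from_fair() is borrowed from the comment added in the second hunk below, and the body simply relocates the dequeue_task() call this patch removes from rt_mutex_setprio():

/*
 * Hypothetical sketch, assuming the fair-class switching_from() hook
 * only needs to flush a delayed task before the class change. The
 * flags mirror the dequeue_task() call removed below; NOCLOCK relies
 * on the caller having already updated the rq clock.
 */
static void switching_from_fair(struct rq *rq, struct task_struct *p)
{
	if (p->se.sched_delayed)
		dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
}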
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4dbd2068f435..bd2c551de6d7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7366,9 +7366,6 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
 	if (prev_class != next_class)
 		queue_flag |= DEQUEUE_CLASS;
 
-	if (prev_class != next_class && p->se.sched_delayed)
-		dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
-
 	scoped_guard (sched_change, p, queue_flag) {
 		/*
 		 * Boosting condition are:
@@ -10840,8 +10837,15 @@ struct sched_change_ctx *sched_change_begin(struct task_struct *p, unsigned int
 	lockdep_assert_rq_held(rq);
 
 	if (flags & DEQUEUE_CLASS) {
-		if (p->sched_class->switching_from)
+		if (p->sched_class->switching_from) {
+			/*
+			 * switching_from_fair() assumes CLASS implies NOCLOCK;
+			 * fixing this assumption would mean switching_from()
+			 * would need to be able to change flags.
+			 */
+			WARN_ON(!(flags & DEQUEUE_NOCLOCK));
 			p->sched_class->switching_from(rq, p);
+		}
 	}
 
 	*ctx = (struct sched_change_ctx){
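The WARN_ON() added above documents the assumption that a DEQUEUE_CLASS change always carries DEQUEUE_NOCLOCK, i.e. the caller has updated the rq clock once up front. A hedged sketch of that assumed caller pattern (update_rq_clock() and the sched_change scoped_guard exist in this file; the exact flag combination shown is illustrative):

	/*
	 * Assumed caller pattern, not taken verbatim from this commit:
	 * update the clock once, then let the class-change path pass
	 * DEQUEUE_NOCLOCK so switching_from() need not touch the clock.
	 */
	update_rq_clock(rq);
	queue_flag |= DEQUEUE_CLASS | DEQUEUE_NOCLOCK;
	scoped_guard (sched_change, p, queue_flag) {
		/* change p's scheduling class here */
	}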