path: root/kernel/sched/sched.h
author		Peter Zijlstra <peterz@infradead.org>	2024-11-01 14:16:10 +0100
committer	Peter Zijlstra <peterz@infradead.org>	2025-10-16 11:13:52 +0200
commit		6455ad5346c9cf755fa9dda6e326c4028fb3c853 (patch)
tree		b6f122e57fdac235588b6bec950c824ec5d424f4 /kernel/sched/sched.h
parent		1ae5f5dfe5adc64a90b1b0ab5bd9bd7c9d140c28 (diff)
sched: Move sched_class::prio_changed() into the change pattern
Move sched_class::prio_changed() into the change pattern. And while
there, extend it with sched_class::get_prio() in order to fix the
deadline situation.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
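For illustration only (not part of this commit): a minimal sketch of how a
change-pattern caller might use the two hooks together, with get_prio()
sampling the class-specific priority before the property is modified and
prio_changed() receiving that value afterwards. The helper name
change_task_prio() and the elided dequeue/enqueue step are assumptions for
the sketch, not kernel API.

	/*
	 * Hypothetical helper, shown only to illustrate the intended
	 * ordering of the new hooks around a priority change.
	 */
	static void change_task_prio(struct rq *rq, struct task_struct *p, int new_prio)
	{
		struct sched_change_ctx ctx = { .p = p };

		/* Sample the class-specific priority before anything changes. */
		if (p->sched_class->get_prio)
			ctx.prio = p->sched_class->get_prio(rq, p);

		/* ... dequeue_task(), update the priority, re-enqueue_task() ... */

		/* Let the (possibly new) class react to the old priority. */
		if (p->sched_class->prio_changed)
			p->sched_class->prio_changed(rq, p, ctx.prio);
	}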
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	| 7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e3f4215e84f7..bcde43deb8e9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2451,8 +2451,10 @@ struct sched_class {
void (*reweight_task)(struct rq *this_rq, struct task_struct *task,
const struct load_weight *lw);
+
+ u64 (*get_prio) (struct rq *this_rq, struct task_struct *task);
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
- int oldprio);
+ u64 oldprio);
unsigned int (*get_rr_interval)(struct rq *rq,
struct task_struct *task);
@@ -3877,8 +3879,6 @@ extern void set_load_weight(struct task_struct *p, bool update_load);
extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
-extern void check_prio_changed(struct rq *rq, struct task_struct *p, int oldprio);
-
extern struct balance_callback *splice_balance_callbacks(struct rq *rq);
extern void balance_callbacks(struct rq *rq, struct balance_callback *head);
@@ -3899,6 +3899,7 @@ extern void balance_callbacks(struct rq *rq, struct balance_callback *head);
* the task's queueing state is idempotent across the operation.
*/
struct sched_change_ctx {
+ u64 prio;
struct task_struct *p;
int flags;
bool queued;