| field | value | date |
|---|---|---|
| author | Peter Zijlstra <peterz@infradead.org> | 2025-10-01 15:50:15 +0200 |
| committer | Peter Zijlstra <peterz@infradead.org> | 2025-10-16 11:13:55 +0200 |
| commit | 1e900f415c6082cd4bcdae4c92515d21fb389473 (patch) | |
| tree | fb9ce0ff87f639b50dab4cfc6d7ff23deec07cae /kernel/sched/sched.h | |
| parent | 73ec89a1ce4bce98f74b6520a95e64cd9986aae5 (diff) | |
sched: Detect per-class runqueue changes
Have enqueue/dequeue set a per-class bit in rq->queue_mask. This enables
easy tracking of which per-class runqueues were modified over a
lock-break.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Tejun Heo <tj@kernel.org>
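To see how the mask is meant to be consumed, here is a minimal sketch of a pick loop using the helpers this patch introduces around a `pick_task()` lock-break. The function name, restart policy, and surrounding details are illustrative, not taken from this series; only `rq_modified_clear()`, `rq_modified_above()`, `for_each_class()` and the `pick_task()` method are real kernel names.

```c
/*
 * Illustrative sketch only -- not code from this series. Shows how a
 * pick loop could use the new helpers around a pick_task() lock-break.
 */
static struct task_struct *pick_task_sketch(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

restart:
	rq_modified_clear(rq);

	for_each_class(class) {
		/* pick_task() may drop and re-take rq->lock internally */
		p = class->pick_task(rq);

		/*
		 * If a higher class enqueued something while the lock
		 * was dropped, this pick is stale; try again.
		 */
		if (rq_modified_above(rq, class))
			goto restart;

		if (p)
			return p;
	}

	BUG(); /* the idle class should always have a runnable task */
}
```

The mask makes the staleness check O(1): a single AND against precomputed per-class bits, rather than re-walking the higher classes after every lock-break.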
Diffstat (limited to 'kernel/sched/sched.h')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | kernel/sched/sched.h | 25 |

1 file changed, 25 insertions, 0 deletions
```diff
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e3d271013c8b..f4a323007dce 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1118,6 +1118,8 @@ struct rq {
 	/* runqueue lock: */
 	raw_spinlock_t		__lock;
 
+	/* Per class runqueue modification mask; bits in class order. */
+	unsigned int		queue_mask;
 	unsigned int		nr_running;
 #ifdef CONFIG_NUMA_BALANCING
 	unsigned int		nr_numa_running;
@@ -2414,6 +2416,15 @@ struct sched_class {
 #ifdef CONFIG_UCLAMP_TASK
 	int			uclamp_enabled;
 #endif
+	/*
+	 * idle:  0
+	 * ext:   1
+	 * fair:  2
+	 * rt:    4
+	 * dl:    8
+	 * stop: 16
+	 */
+	unsigned int		queue_mask;
 
 	/*
 	 * move_queued_task/activate_task/enqueue_task: rq->lock
@@ -2571,6 +2582,20 @@ struct sched_class {
 #endif
 };
 
+/*
+ * Does not nest; only used around sched_class::pick_task() rq-lock-breaks.
+ */
+static inline void rq_modified_clear(struct rq *rq)
+{
+	rq->queue_mask = 0;
+}
+
+static inline bool rq_modified_above(struct rq *rq, const struct sched_class *class)
+{
+	unsigned int mask = class->queue_mask;
+	return rq->queue_mask & ~((mask << 1) - 1);
+}
+
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
 	WARN_ON_ONCE(rq->donor != prev);
```
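The trick in `rq_modified_above()` is that `(mask << 1) - 1` yields the class's own bit plus every bit below it, so its complement selects exactly the higher classes. Below is a standalone userspace demo of that arithmetic, using the bit values from the comment in the diff; the `QM_*` names and `modified_above()` helper are made up for the demo, and idle is omitted since its mask of 0 contributes no bit.

```c
#include <assert.h>
#include <stdio.h>

/* Per-class bits as listed in the patch comment (demo-only names). */
enum { QM_EXT = 1, QM_FAIR = 2, QM_RT = 4, QM_DL = 8, QM_STOP = 16 };

/* Same arithmetic as rq_modified_above(): bits strictly above @mask. */
static unsigned int modified_above(unsigned int queue_mask, unsigned int mask)
{
	return queue_mask & ~((mask << 1) - 1);
}

int main(void)
{
	/* An rt enqueue is above fair: 4 & ~0b011 != 0. */
	assert(modified_above(QM_RT, QM_FAIR));

	/* An ext enqueue is below fair and does not trip the check. */
	assert(!modified_above(QM_EXT, QM_FAIR));

	/* A fair enqueue is not *above* fair either. */
	assert(!modified_above(QM_FAIR, QM_FAIR));

	printf("rq_modified_above() arithmetic checks out\n");
	return 0;
}
```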