author		Tejun Heo <tj@kernel.org>	2025-11-11 09:18:15 -1000
committer	Tejun Heo <tj@kernel.org>	2025-11-12 06:43:44 -1000
commit		d18b96ce12becf3f3aa3556ba722c2de61aca94e (patch)
tree		f96e8811d5af7b736862b2686de2cbc685de4297 /kernel/sched/ext.c
parent		d2974cc79f7139cc851b84ad4f77805e93c40fe1 (diff)
sched_ext: Factor out abbreviated dispatch dequeue into dispatch_dequeue_locked()
move_task_between_dsqs() contains open-coded abbreviated dequeue logic when
moving tasks between non-local DSQs. Factor this out into
dispatch_dequeue_locked() which can be used when both the task's rq and dsq
locks are already held.

Add lockdep assertions to both dispatch_dequeue() and the new helper to verify
locking requirements.

This prepares for the load balancer which will need the same abbreviated
dequeue pattern.

Cc: Andrea Righi <arighi@nvidia.com>
Cc: Dan Schatzberg <schatzberg.dan@gmail.com>
Cc: Emil Tsalapatis <etsal@meta.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
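For orientation, here is a minimal sketch of the two locking contracts that the
added lockdep assertions document. The example_* wrappers are hypothetical and
made up for illustration; only dispatch_dequeue(), dispatch_dequeue_locked(),
and the locking primitives come from the patch below.

/*
 * Sketch only: which dequeue flavor applies under which locking state.
 * The example_* wrappers are not part of the patch.
 */

/* Caller holds only @p's rq lock: dispatch_dequeue() deals with
 * @p->scx.dsq itself, including the DSQ lock for non-local DSQs. */
static void example_full_dequeue(struct rq *rq, struct task_struct *p)
{
	lockdep_assert_rq_held(rq);		/* asserted by this patch */
	dispatch_dequeue(rq, p);
}

/* Caller already holds both @p's rq lock and @dsq->lock, as
 * move_task_between_dsqs() does: re-taking the DSQ lock is not an
 * option, so the abbreviated helper only unlinks @p and clears
 * p->scx.dsq. */
static void example_abbreviated_dequeue(struct task_struct *p,
					struct scx_dispatch_q *dsq)
{
	lockdep_assert_rq_held(task_rq(p));	/* asserted by the helper */
	lockdep_assert_held(&dsq->lock);
	dispatch_dequeue_locked(p, dsq);
}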
Diffstat (limited to 'kernel/sched/ext.c')
-rw-r--r--	kernel/sched/ext.c	19
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 56946aceeb28..10d8532f8d9b 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1106,6 +1106,8 @@ static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
 	struct scx_dispatch_q *dsq = p->scx.dsq;
 	bool is_local = dsq == &rq->scx.local_dsq;
 
+	lockdep_assert_rq_held(rq);
+
 	if (!dsq) {
 		/*
 		 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
@@ -1152,6 +1154,20 @@ static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
 	raw_spin_unlock(&dsq->lock);
 }
 
+/*
+ * Abbreviated version of dispatch_dequeue() that can be used when both @p's rq
+ * and dsq are locked.
+ */
+static void dispatch_dequeue_locked(struct task_struct *p,
+				    struct scx_dispatch_q *dsq)
+{
+	lockdep_assert_rq_held(task_rq(p));
+	lockdep_assert_held(&dsq->lock);
+
+	task_unlink_from_dsq(p, dsq);
+	p->scx.dsq = NULL;
+}
+
 static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch,
 						     struct rq *rq, u64 dsq_id,
 						     struct task_struct *p)
@@ -1812,8 +1828,7 @@ static struct rq *move_task_between_dsqs(struct scx_sched *sch,
 		 * @p is going from a non-local DSQ to a non-local DSQ. As
 		 * $src_dsq is already locked, do an abbreviated dequeue.
 		 */
-		task_unlink_from_dsq(p, src_dsq);
-		p->scx.dsq = NULL;
+		dispatch_dequeue_locked(p, src_dsq);
 		raw_spin_unlock(&src_dsq->lock);
 
 		dispatch_enqueue(sch, dst_dsq, p, enq_flags);
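The commit message notes this prepares for the load balancer, which will need
the same abbreviated dequeue. Below is a hedged sketch of what such a caller
could look like, modeled directly on the move_task_between_dsqs() hunk above;
the function name and parameters are assumptions, not code from this series.

/* Hypothetical load-balancer-style migration, sketch only. Assumes the
 * caller already holds @p's rq lock and that @src_dsq and @dst_dsq are
 * non-local DSQs. */
static void example_lb_move_task(struct scx_sched *sch, struct task_struct *p,
				 struct scx_dispatch_q *src_dsq,
				 struct scx_dispatch_q *dst_dsq, u64 enq_flags)
{
	raw_spin_lock(&src_dsq->lock);

	/* rq and DSQ locks both held: same abbreviated dequeue as above. */
	dispatch_dequeue_locked(p, src_dsq);
	raw_spin_unlock(&src_dsq->lock);

	/* As in the hunk above, enqueue to the destination with no DSQ
	 * lock held; dispatch_enqueue() handles @dst_dsq's locking. */
	dispatch_enqueue(sch, dst_dsq, p, enq_flags);
}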