path: root/kernel/sched/ext_internal.h
author	Linus Torvalds <torvalds@linux-foundation.org>	2025-12-03 13:25:39 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-12-03 13:25:39 -0800
commit	02baaa67d9afc2e56c6e1ac6a1fb1f1dd2be366f (patch)
tree	13ae2fec8be92b2f774cfb3fd725c027740be3ac /kernel/sched/ext_internal.h
parent	8449d3252c2603a51ffc7c36cb5bd94874378b7d (diff)
parent	1dd6c84f1c544e552848a8968599220bd464e338 (diff)
Merge tag 'sched_ext-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext
Pull sched_ext updates from Tejun Heo:

 - Improve recovery from misbehaving BPF schedulers. When a scheduler puts
   many tasks with varying affinity restrictions on a shared DSQ, CPUs
   scanning through tasks they cannot run can overwhelm the system, causing
   lockups. Bypass mode now uses per-CPU DSQs with a load balancer to avoid
   this, and hooks into the hardlockup detector to attempt recovery. Add
   scx_cpu0 example scheduler to demonstrate this scenario.

 - Add lockless peek operation for DSQs to reduce lock contention for
   schedulers that need to query queue state during load balancing.

 - Allow scx_bpf_reenqueue_local() to be called from anywhere in preparation
   for deprecating cpu_acquire/release() callbacks in favor of generic BPF
   hooks.

 - Prepare for hierarchical scheduler support: add scx_bpf_task_set_slice()
   and scx_bpf_task_set_dsq_vtime() kfuncs, make scx_bpf_dsq_insert*()
   return bool, and wrap kfunc args in structs for future aux__prog
   parameter.

 - Implement cgroup_set_idle() callback to notify BPF schedulers when a
   cgroup's idle state changes.

 - Fix migration tasks being incorrectly downgraded from stop_sched_class to
   rt_sched_class across sched_ext enable/disable. Applied late as the fix
   is low risk and the bug subtle but needs stable backporting.

 - Various fixes and cleanups including cgroup exit ordering, SCX_KICK_WAIT
   reliability, and backward compatibility improvements.

* tag 'sched_ext-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext: (44 commits)
  sched_ext: Fix incorrect sched_class settings for per-cpu migration tasks
  sched_ext: tools: Removing duplicate targets during non-cross compilation
  sched_ext: Use kvfree_rcu() to release per-cpu ksyncs object
  sched_ext: Pass locked CPU parameter to scx_hardlockup() and add docs
  sched_ext: Update comments replacing breather with aborting mechanism
  sched_ext: Implement load balancer for bypass mode
  sched_ext: Factor out abbreviated dispatch dequeue into dispatch_dequeue_locked()
  sched_ext: Factor out scx_dsq_list_node cursor initialization into INIT_DSQ_LIST_CURSOR
  sched_ext: Add scx_cpu0 example scheduler
  sched_ext: Hook up hardlockup detector
  sched_ext: Make handle_lockup() propagate scx_verror() result
  sched_ext: Refactor lockup handlers into handle_lockup()
  sched_ext: Make scx_exit() and scx_vexit() return bool
  sched_ext: Exit dispatch and move operations immediately when aborting
  sched_ext: Simplify breather mechanism with scx_aborting flag
  sched_ext: Use per-CPU DSQs instead of per-node global DSQs in bypass mode
  sched_ext: Refactor do_enqueue_task() local and global DSQ paths
  sched_ext: Use shorter slice in bypass mode
  sched_ext: Mark racy bitfields to prevent adding fields that can't tolerate races
  sched_ext: Minor cleanups to scx_task_iter
  ...
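The cgroup_set_idle() callback mentioned above is added to struct sched_ext_ops in the diff below. As a rough illustration only (not part of this merge), a BPF scheduler could record per-cgroup idle state from that callback. The demo_ops/idle_cgrps names are hypothetical, and the sketch assumes the helper macros from tools/sched_ext/include/scx/common.bpf.h:

/* SPDX-License-Identifier: GPL-2.0 */
/* Illustrative sketch, not from this series. */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

/* Track which cgroups are currently idle, keyed by cgroup ID. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 4096);
	__type(key, u64);
	__type(value, u8);
} idle_cgrps SEC(".maps");

void BPF_STRUCT_OPS(demo_cgroup_set_idle, struct cgroup *cgrp, bool idle)
{
	u64 cgid = cgrp->kn->id;
	u8 one = 1;

	if (idle)
		/* Cgroup went idle - remember it so other paths can skip it. */
		bpf_map_update_elem(&idle_cgrps, &cgid, &one, BPF_ANY);
	else
		bpf_map_delete_elem(&idle_cgrps, &cgid);
}

SCX_OPS_DEFINE(demo_ops,
	       .cgroup_set_idle	= (void *)demo_cgroup_set_idle,
	       .name		= "cgroup_idle_demo");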
Diffstat (limited to 'kernel/sched/ext_internal.h')
-rw-r--r--	kernel/sched/ext_internal.h	29
1 file changed, 26 insertions, 3 deletions
diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h
index b3617abed510..386c677e4c9a 100644
--- a/kernel/sched/ext_internal.h
+++ b/kernel/sched/ext_internal.h
@@ -23,6 +23,11 @@ enum scx_consts {
* scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
*/
SCX_TASK_ITER_BATCH = 32,
+
+ SCX_BYPASS_LB_DFL_INTV_US = 500 * USEC_PER_MSEC,
+ SCX_BYPASS_LB_DONOR_PCT = 125,
+ SCX_BYPASS_LB_MIN_DELTA_DIV = 4,
+ SCX_BYPASS_LB_BATCH = 256,
};
enum scx_exit_kind {
@@ -697,12 +702,23 @@ struct sched_ext_ops {
* 2_500_000. @cgrp is entitled to 2.5 CPUs. @burst_us can be
* interpreted in the same fashion and specifies how much @cgrp can
* burst temporarily. The specific control mechanism and thus the
- * interpretation of @period_us and burstiness is upto to the BPF
+ * interpretation of @period_us and burstiness is up to the BPF
* scheduler.
*/
void (*cgroup_set_bandwidth)(struct cgroup *cgrp,
u64 period_us, u64 quota_us, u64 burst_us);
+ /**
+ * @cgroup_set_idle: A cgroup's idle state is being changed
+ * @cgrp: cgroup whose idle state is being updated
+ * @idle: whether the cgroup is entering or exiting idle state
+ *
+ * Update @cgrp's idle state to @idle. This callback is invoked when
+ * a cgroup transitions between idle and non-idle states, allowing the
+ * BPF scheduler to adjust its behavior accordingly.
+ */
+ void (*cgroup_set_idle)(struct cgroup *cgrp, bool idle);
+
#endif /* CONFIG_EXT_GROUP_SCHED */
/*
@@ -884,6 +900,10 @@ struct scx_sched {
struct scx_dispatch_q **global_dsqs;
struct scx_sched_pcpu __percpu *pcpu;
+ /*
+ * Updates to the following warned bitfields can race causing RMW issues
+ * but it doesn't really matter.
+ */
bool warned_zero_slice:1;
bool warned_deprecated_rq:1;
@@ -948,6 +968,7 @@ enum scx_enq_flags {
SCX_ENQ_CLEAR_OPSS = 1LLU << 56,
SCX_ENQ_DSQ_PRIQ = 1LLU << 57,
+ SCX_ENQ_NESTED = 1LLU << 58,
};
enum scx_deq_flags {
@@ -986,8 +1007,10 @@ enum scx_kick_flags {
SCX_KICK_PREEMPT = 1LLU << 1,
/*
- * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
- * return after the target CPU finishes picking the next task.
+ * The scx_bpf_kick_cpu() call will return after the current SCX task of
+ * the target CPU switches out. This can be used to implement e.g. core
+ * scheduling. This has no effect if the current task on the target CPU
+ * is not on SCX.
*/
SCX_KICK_WAIT = 1LLU << 2,
};