author    Andrea Righi <arighi@nvidia.com>    2025-02-14 20:40:04 +0100
committer Tejun Heo <tj@kernel.org>           2025-02-16 06:52:20 -1000
commit    d73249f88743df63a2bdce0e3153369963113710
tree      66d137741c8a2ab1d2ca978d7f574ba91f135ebb /kernel/sched/ext_idle.c
parent    f09177ca5f242a32368a2e9414dce4c90dc1d405
sched_ext: idle: Make idle static keys private
Make all the static keys used by the idle CPU selection policy private to ext_idle.c. This avoids unnecessary exposure in headers and improves code encapsulation.

Cc: Yury Norov <yury.norov@gmail.com>
Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
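The pattern here is the usual one for confining a static key to a single translation unit: define the key static, and export small helper functions instead of the key itself. A minimal sketch of that pattern, using hypothetical foo_* names rather than the scx_* symbols from this patch:

#include <linux/jump_label.h>
#include <linux/types.h>

/* Private to this file; no longer declared in any header. */
static DEFINE_STATIC_KEY_FALSE(foo_feature_enabled);

/* Code outside this file toggles the key through helpers... */
void foo_feature_enable(void)
{
	static_branch_enable(&foo_feature_enabled);
}

void foo_feature_disable(void)
{
	static_branch_disable(&foo_feature_enabled);
}

/* ...and queries it through a helper as well. */
bool foo_feature_active(void)
{
	return static_branch_likely(&foo_feature_enabled);
}

This is what the patch does for scx_builtin_idle_enabled: the key becomes static, and scx_idle_enable()/scx_idle_disable() become the only entry points for flipping it from outside ext_idle.c.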
Diffstat (limited to 'kernel/sched/ext_idle.c')
-rw-r--r--    kernel/sched/ext_idle.c    39
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index cb981956005b..ed1804506585 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -12,7 +12,7 @@
#include "ext_idle.h"
/* Enable/disable built-in idle CPU selection policy */
-DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
+static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
#ifdef CONFIG_SMP
#ifdef CONFIG_CPUMASK_OFFSTACK
@@ -22,10 +22,10 @@ DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
#endif
/* Enable/disable LLC aware optimizations */
-DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
+static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
/* Enable/disable NUMA aware optimizations */
-DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
+static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
static struct {
cpumask_var_t cpu;
@@ -441,16 +441,6 @@ cpu_found:
return cpu;
}
-void scx_idle_reset_masks(void)
-{
- /*
- * Consider all online cpus idle. Should converge to the actual state
- * quickly.
- */
- cpumask_copy(idle_masks.cpu, cpu_online_mask);
- cpumask_copy(idle_masks.smt, cpu_online_mask);
-}
-
void scx_idle_init_masks(void)
{
BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
@@ -532,6 +522,29 @@ void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
}
#endif /* CONFIG_SMP */
+void scx_idle_enable(struct sched_ext_ops *ops)
+{
+ if (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
+ static_branch_disable(&scx_builtin_idle_enabled);
+ return;
+ }
+ static_branch_enable(&scx_builtin_idle_enabled);
+
+#ifdef CONFIG_SMP
+ /*
+ * Consider all online cpus idle. Should converge to the actual state
+ * quickly.
+ */
+ cpumask_copy(idle_masks.cpu, cpu_online_mask);
+ cpumask_copy(idle_masks.smt, cpu_online_mask);
+#endif
+}
+
+void scx_idle_disable(void)
+{
+ static_branch_disable(&scx_builtin_idle_enabled);
+}
+
/********************************************************************************
* Helpers that can be called from the BPF scheduler.
*/
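With the key now local to ext_idle.c, the BPF-facing helpers below this comment can branch on it directly. A hedged sketch of how one such helper might gate on the key (scx_bpf_get_idle_cpumask() is a real sched_ext kfunc, but this body is illustrative, not the verbatim upstream code):

__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
{
	/* Built-in idle tracking disabled: report no idle CPUs. */
	if (!static_branch_likely(&scx_builtin_idle_enabled))
		return cpu_none_mask;

#ifdef CONFIG_SMP
	return idle_masks.cpu;
#else
	return cpu_none_mask;
#endif
}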