Diffstat (limited to 'kernel/sched/sched.h')
 kernel/sched/sched.h | 36 ++++++++++++++++++++++++++++--------
 1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 76f5f53a645f..c7cf4cc57cdd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -362,7 +362,7 @@ extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern int dl_bw_check_overflow(int cpu);
+extern int dl_bw_deactivate(int cpu);
extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec);
/*
* SCHED_DEADLINE supports servers (nested scheduling) with the following
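The rename above suggests the helper is now tied to the CPU-deactivation path rather than being a generic overflow check. A minimal sketch of the kind of hotplug caller this serves; the surrounding function is hypothetical, only dl_bw_deactivate() comes from this patch:

	/* Hypothetical caller: refuse to offline a CPU if doing so would
	 * overcommit the SCHED_DEADLINE bandwidth already admitted. */
	static int try_deactivate_cpu(int cpu)
	{
		int ret = dl_bw_deactivate(cpu);

		if (ret)
			return ret;	/* typically -EBUSY */
		/* ... proceed with taking the CPU offline ... */
		return 0;
	}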
@@ -398,6 +398,11 @@ extern void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq
extern int dl_server_apply_params(struct sched_dl_entity *dl_se,
u64 runtime, u64 period, bool init);
+static inline bool dl_server_active(struct sched_dl_entity *dl_se)
+{
+ return dl_se->dl_server_active;
+}
+
#ifdef CONFIG_CGROUP_SCHED
extern struct list_head task_groups;
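The new dl_server_active() accessor wraps the dl_server_active flag so callers outside deadline.c don't have to dereference the field directly. A hedged usage sketch; the caller below is invented for illustration:

	/* Hypothetical helper: only stop a deadline server that was
	 * actually started, using the new accessor as the guard. */
	static void maybe_stop_server(struct sched_dl_entity *dl_se)
	{
		if (dl_server_active(dl_se))
			dl_server_stop(dl_se);
	}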
@@ -645,10 +650,10 @@ struct balance_callback {
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct load_weight load;
- unsigned int nr_running;
- unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */
- unsigned int idle_nr_running; /* SCHED_IDLE */
- unsigned int idle_h_nr_running; /* SCHED_IDLE */
+ unsigned int nr_queued;
+ unsigned int h_nr_queued; /* SCHED_{NORMAL,BATCH,IDLE} */
+ unsigned int h_nr_runnable; /* SCHED_{NORMAL,BATCH,IDLE} */
+ unsigned int h_nr_idle; /* SCHED_IDLE */
s64 avg_vruntime;
u64 avg_load;
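The counter renames split "queued" from "runnable": nr_queued is the local count of queued entities, h_nr_queued counts every task queued anywhere in this hierarchy, and h_nr_runnable excludes tasks that are still queued but no longer runnable (apparently because, with delayed dequeue, a blocking task can stay queued until its lag is consumed), so h_nr_runnable <= h_nr_queued always holds. A hypothetical debug helper, not part of the patch, makes the relationship concrete:

	/* Hypothetical debug helper: dump the renamed cfs_rq counters. */
	static inline void cfs_rq_dump_counters(const struct cfs_rq *cfs_rq)
	{
		pr_info("queued=%u h_queued=%u h_runnable=%u h_idle=%u\n",
			cfs_rq->nr_queued, cfs_rq->h_nr_queued,
			cfs_rq->h_nr_runnable, cfs_rq->h_nr_idle);
	}

This is also why se_update_runnable() in the next hunk switches to h_nr_runnable: runnable_weight should track tasks that can actually run, not ones parked by delayed dequeue.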
@@ -899,7 +904,7 @@ struct dl_rq {
static inline void se_update_runnable(struct sched_entity *se)
{
if (!entity_is_task(se))
- se->runnable_weight = se->my_q->h_nr_running;
+ se->runnable_weight = se->my_q->h_nr_runnable;
}
static inline long se_runnable(struct sched_entity *se)
@@ -2271,7 +2276,7 @@ static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
static inline int task_on_rq_queued(struct task_struct *p)
{
- return p->on_rq == TASK_ON_RQ_QUEUED;
+ return READ_ONCE(p->on_rq) == TASK_ON_RQ_QUEUED;
}
static inline int task_on_rq_migrating(struct task_struct *p)
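Switching task_on_rq_queued() to READ_ONCE() matters for lockless readers: p->on_rq is written by other CPUs (enqueue, dequeue, migration), and the annotation keeps the compiler from tearing or re-fetching the load. A sketch of the pairing; the function below is an assumption for illustration, not part of this patch:

	/* Hypothetical writer side: publish the new state with WRITE_ONCE()
	 * under the rq lock so lockless readers see either the old or the
	 * new value, never a torn one. */
	static void mark_task_migrating(struct task_struct *p)
	{
		lockdep_assert_rq_held(task_rq(p));
		WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
	}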
@@ -2565,7 +2570,7 @@ static inline bool sched_rt_runnable(struct rq *rq)
static inline bool sched_fair_runnable(struct rq *rq)
{
- return rq->cfs.nr_running > 0;
+ return rq->cfs.nr_queued > 0;
}
extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
@@ -3233,6 +3238,12 @@ struct irqtime {
};
DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
+DECLARE_STATIC_KEY_FALSE(sched_clock_irqtime);
+
+static inline int irqtime_enabled(void)
+{
+ return static_branch_likely(&sched_clock_irqtime);
+}
/*
* Returns the irqtime minus the softirq time computed by ksoftirqd.
@@ -3253,6 +3264,13 @@ static inline u64 irq_time_read(int cpu)
return total;
}
+#else
+
+static inline int irqtime_enabled(void)
+{
+ return 0;
+}
+
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
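irqtime_enabled() gives callers a single predicate for IRQ-time accounting: with CONFIG_IRQ_TIME_ACCOUNTING=y it tests the sched_clock_irqtime static key, and with the feature compiled out the stub returns 0 so the compiler can drop the guarded code entirely. A hedged caller sketch; the function name and body are assumptions:

	/* Hypothetical caller: skip IRQ-time bookkeeping when the feature
	 * is compiled out or was never enabled at boot. */
	static void account_irq_delta(u64 delta)
	{
		if (!irqtime_enabled())
			return;
		/* ... charge delta to this CPU's struct irqtime ... */
	}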
#ifdef CONFIG_CPU_FREQ
@@ -3500,6 +3518,8 @@ unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned
#endif /* !CONFIG_HAVE_SCHED_AVG_IRQ */
+extern void __setparam_fair(struct task_struct *p, const struct sched_attr *attr);
+
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))