1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Scheduler internal types and methods:
4 */
5#ifndef _KERNEL_SCHED_SCHED_H
6#define _KERNEL_SCHED_SCHED_H
7
8#include <linux/sched/affinity.h>
9#include <linux/sched/autogroup.h>
10#include <linux/sched/cpufreq.h>
11#include <linux/sched/deadline.h>
12#include <linux/sched.h>
13#include <linux/sched/loadavg.h>
14#include <linux/sched/mm.h>
15#include <linux/sched/rseq_api.h>
16#include <linux/sched/signal.h>
17#include <linux/sched/smt.h>
18#include <linux/sched/stat.h>
19#include <linux/sched/sysctl.h>
20#include <linux/sched/task_flags.h>
21#include <linux/sched/task.h>
22#include <linux/sched/topology.h>
23
24#include <linux/atomic.h>
25#include <linux/bitmap.h>
26#include <linux/bug.h>
27#include <linux/capability.h>
28#include <linux/cgroup_api.h>
29#include <linux/cgroup.h>
30#include <linux/context_tracking.h>
31#include <linux/cpufreq.h>
32#include <linux/cpumask_api.h>
33#include <linux/ctype.h>
34#include <linux/file.h>
35#include <linux/fs_api.h>
36#include <linux/hrtimer_api.h>
37#include <linux/interrupt.h>
38#include <linux/irq_work.h>
39#include <linux/jiffies.h>
40#include <linux/kref_api.h>
41#include <linux/kthread.h>
42#include <linux/ktime_api.h>
43#include <linux/lockdep_api.h>
44#include <linux/lockdep.h>
45#include <linux/minmax.h>
46#include <linux/mm.h>
47#include <linux/module.h>
48#include <linux/mutex_api.h>
49#include <linux/plist.h>
50#include <linux/poll.h>
51#include <linux/proc_fs.h>
52#include <linux/profile.h>
53#include <linux/psi.h>
54#include <linux/rcupdate.h>
55#include <linux/seq_file.h>
56#include <linux/seqlock.h>
57#include <linux/softirq.h>
58#include <linux/spinlock_api.h>
59#include <linux/static_key.h>
60#include <linux/stop_machine.h>
61#include <linux/syscalls_api.h>
62#include <linux/syscalls.h>
63#include <linux/tick.h>
64#include <linux/topology.h>
65#include <linux/types.h>
66#include <linux/u64_stats_sync_api.h>
67#include <linux/uaccess.h>
68#include <linux/wait_api.h>
69#include <linux/wait_bit.h>
70#include <linux/workqueue_api.h>
71
72#include <trace/events/power.h>
73#include <trace/events/sched.h>
74
75#include "../workqueue_internal.h"
76
77#ifdef CONFIG_CGROUP_SCHED
78#include <linux/cgroup.h>
79#include <linux/psi.h>
80#endif
81
82#ifdef CONFIG_SCHED_DEBUG
83# include <linux/static_key.h>
84#endif
85
86#ifdef CONFIG_PARAVIRT
87# include <asm/paravirt.h>
88# include <asm/paravirt_api_clock.h>
89#endif
90
91#include "cpupri.h"
92#include "cpudeadline.h"
93
94#ifdef CONFIG_SCHED_DEBUG
95# define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
96#else
97# define SCHED_WARN_ON(x) ({ (void)(x), 0; })
98#endif
99
100struct rq;
101struct cpuidle_state;
102
103/* task_struct::on_rq states: */
104#define TASK_ON_RQ_QUEUED 1
105#define TASK_ON_RQ_MIGRATING 2
106
107extern __read_mostly int scheduler_running;
108
109extern unsigned long calc_load_update;
110extern atomic_long_t calc_load_tasks;
111
112extern unsigned int sysctl_sched_child_runs_first;
113
114extern void calc_global_load_tick(struct rq *this_rq);
115extern long calc_load_fold_active(struct rq *this_rq, long adjust);
116
117extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
118
119extern unsigned int sysctl_sched_rt_period;
120extern int sysctl_sched_rt_runtime;
121extern int sched_rr_timeslice;
122
123/*
124 * Helpers for converting nanosecond timing to jiffy resolution
125 */
126#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
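/*
 * Worked example (illustrative only): with HZ == 1000 there are
 * NSEC_PER_SEC / HZ == 1,000,000 ns per jiffy, so
 * NS_TO_JIFFIES(2500000) == 2 (the division truncates).
 */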
127
128/*
129 * Increase resolution of nice-level calculations for 64-bit architectures.
130 * The extra resolution improves shares distribution and load balancing of
131 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
132 * hierarchies, especially on larger systems. This is not a user-visible change
133 * and does not change the user-interface for setting shares/weights.
134 *
135 * We increase resolution only if we have enough bits to allow this increased
136 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
137 * are pretty high and the returns do not justify the increased costs.
138 *
139 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
140 * increase coverage and consistency always enable it on 64-bit platforms.
141 */
142#ifdef CONFIG_64BIT
143# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
144# define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
145# define scale_load_down(w) \
146({ \
147 unsigned long __w = (w); \
148 if (__w) \
149 __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
150 __w; \
151})
152#else
153# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
154# define scale_load(w) (w)
155# define scale_load_down(w) (w)
156#endif
157
158/*
159 * Task weight (visible to users) and its load (invisible to users) have
160 * independent resolution, but they should be well calibrated. We use
161 * scale_load() and scale_load_down(w) to convert between them. The
162 * following must be true:
163 *
164 * scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD
165 *
166 */
167#define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT)
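/*
 * Illustrative example (not part of the original source): on 64-bit,
 * where SCHED_FIXEDPOINT_SHIFT is 10, the nice-0 weight of 1024 becomes
 *
 *	scale_load(1024)         == 1024 << 10 == 1048576 == NICE_0_LOAD
 *	scale_load_down(1048576) == max(2, 1048576 >> 10) == 1024
 *
 * i.e. the two helpers round-trip user-visible weights to/from the
 * high-resolution load values, with scale_load_down() clamping any
 * non-zero weight to at least 2 to avoid the 0/1 corner cases.
 */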
168
169/*
170 * Single value that decides SCHED_DEADLINE internal math precision.
171 * 10 -> just above 1us
172 * 9 -> just above 0.5us
173 */
174#define DL_SCALE 10
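/*
 * Illustrative arithmetic: DL_SCALE == 10 gives an internal granularity of
 * 1 << 10 == 1024 ns, i.e. "just above 1us" as noted above; a value of 9
 * would give 512 ns, "just above 0.5us".
 */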
175
176/*
177 * Single value that denotes runtime == period, i.e. unlimited time.
178 */
179#define RUNTIME_INF ((u64)~0ULL)
180
181static inline int idle_policy(int policy)
182{
183 return policy == SCHED_IDLE;
184}
185static inline int fair_policy(int policy)
186{
187 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
188}
189
190static inline int rt_policy(int policy)
191{
192 return policy == SCHED_FIFO || policy == SCHED_RR;
193}
194
195static inline int dl_policy(int policy)
196{
197 return policy == SCHED_DEADLINE;
198}
199static inline bool valid_policy(int policy)
200{
201 return idle_policy(policy) || fair_policy(policy) ||
202 rt_policy(policy) || dl_policy(policy);
203}
204
205static inline int task_has_idle_policy(struct task_struct *p)
206{
207 return idle_policy(p->policy);
208}
209
210static inline int task_has_rt_policy(struct task_struct *p)
211{
212 return rt_policy(p->policy);
213}
214
215static inline int task_has_dl_policy(struct task_struct *p)
216{
217 return dl_policy(p->policy);
218}
219
220#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
221
222static inline void update_avg(u64 *avg, u64 sample)
223{
224 s64 diff = sample - *avg;
225 *avg += diff / 8;
226}
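/*
 * Illustrative example: update_avg() keeps a crude exponential moving
 * average with a weight of 1/8 for each new sample; starting from
 * *avg == 1000, update_avg(&avg, 1800) yields 1000 + (1800 - 1000)/8 == 1100.
 */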
227
228/*
229 * Shifting a value by an exponent greater *or equal* to the size of said value
230 * is UB; cap at size-1.
231 */
232#define shr_bound(val, shift) \
233 (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
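/*
 * Illustrative example: for a 32-bit 'val', shr_bound(val, 40) shifts by
 * min(40, 31) == 31 instead of invoking undefined behaviour, while
 * shr_bound(val, 5) behaves exactly like val >> 5.
 */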
234
235/*
236 * !! For sched_setattr_nocheck() (kernel) only !!
237 *
238 * This is actually gross. :(
239 *
240 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
241 * tasks, but still be able to sleep. We need this on platforms that cannot
242 * atomically change clock frequency. Remove this once fast switching is
243 * available on such platforms.
244 *
245 * SUGOV stands for SchedUtil GOVernor.
246 */
247#define SCHED_FLAG_SUGOV 0x10000000
248
249#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
250
251static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
252{
253#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
254 return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
255#else
256 return false;
257#endif
258}
259
260/*
261 * Tells if entity @a should preempt entity @b.
262 */
263static inline bool
264dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
265{
266 return dl_entity_is_special(a) ||
267 dl_time_before(a->deadline, b->deadline);
268}
269
270/*
271 * This is the priority-queue data structure of the RT scheduling class:
272 */
273struct rt_prio_array {
274 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
275 struct list_head queue[MAX_RT_PRIO];
276};
277
278struct rt_bandwidth {
279 /* nests inside the rq lock: */
280 raw_spinlock_t rt_runtime_lock;
281 ktime_t rt_period;
282 u64 rt_runtime;
283 struct hrtimer rt_period_timer;
284 unsigned int rt_period_active;
285};
286
287void __dl_clear_params(struct task_struct *p);
288
289struct dl_bandwidth {
290 raw_spinlock_t dl_runtime_lock;
291 u64 dl_runtime;
292 u64 dl_period;
293};
294
295static inline int dl_bandwidth_enabled(void)
296{
297 return sysctl_sched_rt_runtime >= 0;
298}
299
300/*
301 * To keep the bandwidth of -deadline tasks under control
302 * we need some place where we:
303 * - store the maximum -deadline bandwidth of each CPU;
304 * - cache the fraction of bandwidth that is currently allocated in
305 * each root domain;
306 *
307 * This is all done in the data structure below. It is similar to the
308 * one used for RT-throttling (rt_bandwidth), with the main difference
309 * that, since here we are only interested in admission control, we
310 * do not decrease any runtime while the group "executes", nor do we
311 * need a timer to replenish it.
312 *
313 * With respect to SMP, bandwidth is given on a per root domain basis,
314 * meaning that:
315 * - bw (< 100%) is the deadline bandwidth of each CPU;
316 * - total_bw is the currently allocated bandwidth in each root domain;
317 */
318struct dl_bw {
319 raw_spinlock_t lock;
320 u64 bw;
321 u64 total_bw;
322};
323
324extern void init_dl_bw(struct dl_bw *dl_b);
325extern int sched_dl_global_validate(void);
326extern void sched_dl_do_global(void);
327extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
328extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
329extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
330extern bool __checkparam_dl(const struct sched_attr *attr);
331extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
332extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
333extern int dl_cpu_busy(int cpu, struct task_struct *p);
334
335#ifdef CONFIG_CGROUP_SCHED
336
337struct cfs_rq;
338struct rt_rq;
339
340extern struct list_head task_groups;
341
342struct cfs_bandwidth {
343#ifdef CONFIG_CFS_BANDWIDTH
344 raw_spinlock_t lock;
345 ktime_t period;
346 u64 quota;
347 u64 runtime;
348 u64 burst;
349 u64 runtime_snap;
350 s64 hierarchical_quota;
351
352 u8 idle;
353 u8 period_active;
354 u8 slack_started;
355 struct hrtimer period_timer;
356 struct hrtimer slack_timer;
357 struct list_head throttled_cfs_rq;
358
359 /* Statistics: */
360 int nr_periods;
361 int nr_throttled;
362 int nr_burst;
363 u64 throttled_time;
364 u64 burst_time;
365#endif
366};
367
368/* Task group related information */
369struct task_group {
370 struct cgroup_subsys_state css;
371
372#ifdef CONFIG_FAIR_GROUP_SCHED
373 /* schedulable entities of this group on each CPU */
374 struct sched_entity **se;
375 /* runqueue "owned" by this group on each CPU */
376 struct cfs_rq **cfs_rq;
377 unsigned long shares;
378
379 /* A positive value indicates that this is a SCHED_IDLE group. */
380 int idle;
381
382#ifdef CONFIG_SMP
383 /*
384 * load_avg can be heavily contended at clock tick time, so put
385 * it in its own cacheline separated from the fields above which
386 * will also be accessed at each tick.
387 */
388 atomic_long_t load_avg ____cacheline_aligned;
389#endif
390#endif
391
392#ifdef CONFIG_RT_GROUP_SCHED
393 struct sched_rt_entity **rt_se;
394 struct rt_rq **rt_rq;
395
396 struct rt_bandwidth rt_bandwidth;
397#endif
398
399 struct rcu_head rcu;
400 struct list_head list;
401
402 struct task_group *parent;
403 struct list_head siblings;
404 struct list_head children;
405
406#ifdef CONFIG_SCHED_AUTOGROUP
407 struct autogroup *autogroup;
408#endif
409
410 struct cfs_bandwidth cfs_bandwidth;
411
412#ifdef CONFIG_UCLAMP_TASK_GROUP
413 /* The two decimal precision [%] value requested from user-space */
414 unsigned int uclamp_pct[UCLAMP_CNT];
415 /* Clamp values requested for a task group */
416 struct uclamp_se uclamp_req[UCLAMP_CNT];
417 /* Effective clamp values used for a task group */
418 struct uclamp_se uclamp[UCLAMP_CNT];
419#endif
420
421};
422
423#ifdef CONFIG_FAIR_GROUP_SCHED
424#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
425
426/*
427 * A weight of 0 or 1 can cause arithmetic problems.
428 * The weight of a cfs_rq is the sum of the weights of the entities
429 * queued on it, so the weight of an entity should not be too large,
430 * and neither should the shares value of a task group.
431 * (The default weight is 1024 - so there's no practical
432 * limitation from this.)
433 */
434#define MIN_SHARES (1UL << 1)
435#define MAX_SHARES (1UL << 18)
436#endif
437
438typedef int (*tg_visitor)(struct task_group *, void *);
439
440extern int walk_tg_tree_from(struct task_group *from,
441 tg_visitor down, tg_visitor up, void *data);
442
443/*
444 * Iterate the full tree, calling @down when first entering a node and @up when
445 * leaving it for the final time.
446 *
447 * Caller must hold the RCU read lock or a sufficient equivalent.
448 */
449static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
450{
451 return walk_tg_tree_from(&root_task_group, down, up, data);
452}
453
454extern int tg_nop(struct task_group *tg, void *data);
455
456extern void free_fair_sched_group(struct task_group *tg);
457extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
458extern void online_fair_sched_group(struct task_group *tg);
459extern void unregister_fair_sched_group(struct task_group *tg);
460extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
461 struct sched_entity *se, int cpu,
462 struct sched_entity *parent);
463extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
464
465extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
466extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
467extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
468
469extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
470 struct sched_rt_entity *rt_se, int cpu,
471 struct sched_rt_entity *parent);
472extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
473extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
474extern long sched_group_rt_runtime(struct task_group *tg);
475extern long sched_group_rt_period(struct task_group *tg);
476extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
477
478extern struct task_group *sched_create_group(struct task_group *parent);
479extern void sched_online_group(struct task_group *tg,
480 struct task_group *parent);
481extern void sched_destroy_group(struct task_group *tg);
482extern void sched_release_group(struct task_group *tg);
483
484extern void sched_move_task(struct task_struct *tsk);
485
486#ifdef CONFIG_FAIR_GROUP_SCHED
487extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
488
489extern int sched_group_set_idle(struct task_group *tg, long idle);
490
491#ifdef CONFIG_SMP
492extern void set_task_rq_fair(struct sched_entity *se,
493 struct cfs_rq *prev, struct cfs_rq *next);
494#else /* !CONFIG_SMP */
495static inline void set_task_rq_fair(struct sched_entity *se,
496 struct cfs_rq *prev, struct cfs_rq *next) { }
497#endif /* CONFIG_SMP */
498#endif /* CONFIG_FAIR_GROUP_SCHED */
499
500#else /* CONFIG_CGROUP_SCHED */
501
502struct cfs_bandwidth { };
503
504#endif /* CONFIG_CGROUP_SCHED */
505
506extern void unregister_rt_sched_group(struct task_group *tg);
507extern void free_rt_sched_group(struct task_group *tg);
508extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
509
510/*
511 * u64_u32_load/u64_u32_store
512 *
513 * Use a copy of a u64 value to protect against a data race. This is only
514 * applicable to 32-bit architectures.
515 */
516#ifdef CONFIG_64BIT
517# define u64_u32_load_copy(var, copy) var
518# define u64_u32_store_copy(var, copy, val) (var = val)
519#else
520# define u64_u32_load_copy(var, copy) \
521({ \
522 u64 __val, __val_copy; \
523 do { \
524 __val_copy = copy; \
525 /* \
526 * paired with u64_u32_store_copy(), ordering access \
527 * to var and copy. \
528 */ \
529 smp_rmb(); \
530 __val = var; \
531 } while (__val != __val_copy); \
532 __val; \
533})
534# define u64_u32_store_copy(var, copy, val) \
535do { \
536 typeof(val) __val = (val); \
537 var = __val; \
538 /* \
539 * paired with u64_u32_load_copy(), ordering access to var and \
540 * copy. \
541 */ \
542 smp_wmb(); \
543 copy = __val; \
544} while (0)
545#endif
546# define u64_u32_load(var) u64_u32_load_copy(var, var##_copy)
547# define u64_u32_store(var, val) u64_u32_store_copy(var, var##_copy, val)
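/*
 * Illustrative usage (assuming a field 'min_vruntime' with a 32-bit
 * companion 'min_vruntime_copy', as in struct cfs_rq below; 'new_vruntime'
 * is a placeholder value):
 *
 *	u64 v = u64_u32_load(cfs_rq->min_vruntime);
 *	u64_u32_store(cfs_rq->min_vruntime, new_vruntime);
 *
 * On 64-bit this compiles down to a plain load/store; on 32-bit the reader
 * spins until 'var' and 'var_copy' agree, so it never observes a torn,
 * half-written value.
 */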
548
549/* CFS-related fields in a runqueue */
550struct cfs_rq {
551 struct load_weight load;
552 unsigned int nr_running;
553 unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */
554 unsigned int idle_nr_running; /* SCHED_IDLE */
555 unsigned int idle_h_nr_running; /* SCHED_IDLE */
556
557 u64 exec_clock;
558 u64 min_vruntime;
559#ifdef CONFIG_SCHED_CORE
560 unsigned int forceidle_seq;
561 u64 min_vruntime_fi;
562#endif
563
564#ifndef CONFIG_64BIT
565 u64 min_vruntime_copy;
566#endif
567
568 struct rb_root_cached tasks_timeline;
569
570 /*
571 * 'curr' points to the currently running entity on this cfs_rq.
572 * It is set to NULL otherwise (i.e. when none are currently running).
573 */
574 struct sched_entity *curr;
575 struct sched_entity *next;
576 struct sched_entity *last;
577 struct sched_entity *skip;
578
579#ifdef CONFIG_SCHED_DEBUG
580 unsigned int nr_spread_over;
581#endif
582
583#ifdef CONFIG_SMP
584 /*
585 * CFS load tracking
586 */
587 struct sched_avg avg;
588#ifndef CONFIG_64BIT
589 u64 last_update_time_copy;
590#endif
591 struct {
592 raw_spinlock_t lock ____cacheline_aligned;
593 int nr;
594 unsigned long load_avg;
595 unsigned long util_avg;
596 unsigned long runnable_avg;
597 } removed;
598
599#ifdef CONFIG_FAIR_GROUP_SCHED
600 unsigned long tg_load_avg_contrib;
601 long propagate;
602 long prop_runnable_sum;
603
604 /*
605 * h_load = weight * f(tg)
606 *
607 * Where f(tg) is the recursive weight fraction assigned to
608 * this group.
609 */
610 unsigned long h_load;
611 u64 last_h_load_update;
612 struct sched_entity *h_load_next;
613#endif /* CONFIG_FAIR_GROUP_SCHED */
614#endif /* CONFIG_SMP */
615
616#ifdef CONFIG_FAIR_GROUP_SCHED
617 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */
618
619 /*
620 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
621 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
622 * (like users, containers etc.)
623 *
624 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
625 * This list is used during load balance.
626 */
627 int on_list;
628 struct list_head leaf_cfs_rq_list;
629 struct task_group *tg; /* group that "owns" this runqueue */
630
631 /* Locally cached copy of our task_group's idle value */
632 int idle;
633
634#ifdef CONFIG_CFS_BANDWIDTH
635 int runtime_enabled;
636 s64 runtime_remaining;
637
638 u64 throttled_pelt_idle;
639#ifndef CONFIG_64BIT
640 u64 throttled_pelt_idle_copy;
641#endif
642 u64 throttled_clock;
643 u64 throttled_clock_pelt;
644 u64 throttled_clock_pelt_time;
645 int throttled;
646 int throttle_count;
647 struct list_head throttled_list;
648#endif /* CONFIG_CFS_BANDWIDTH */
649#endif /* CONFIG_FAIR_GROUP_SCHED */
650};
651
652static inline int rt_bandwidth_enabled(void)
653{
654 return sysctl_sched_rt_runtime >= 0;
655}
656
657/* RT IPI pull logic requires IRQ_WORK */
658#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
659# define HAVE_RT_PUSH_IPI
660#endif
661
662/* Real-Time classes' related field in a runqueue: */
663struct rt_rq {
664 struct rt_prio_array active;
665 unsigned int rt_nr_running;
666 unsigned int rr_nr_running;
667#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
668 struct {
669 int curr; /* highest queued rt task prio */
670#ifdef CONFIG_SMP
671 int next; /* next highest */
672#endif
673 } highest_prio;
674#endif
675#ifdef CONFIG_SMP
676 unsigned int rt_nr_migratory;
677 unsigned int rt_nr_total;
678 int overloaded;
679 struct plist_head pushable_tasks;
680
681#endif /* CONFIG_SMP */
682 int rt_queued;
683
684 int rt_throttled;
685 u64 rt_time;
686 u64 rt_runtime;
687 /* Nests inside the rq lock: */
688 raw_spinlock_t rt_runtime_lock;
689
690#ifdef CONFIG_RT_GROUP_SCHED
691 unsigned int rt_nr_boosted;
692
693 struct rq *rq;
694 struct task_group *tg;
695#endif
696};
697
698static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
699{
700 return rt_rq->rt_queued && rt_rq->rt_nr_running;
701}
702
703/* Deadline class' related fields in a runqueue */
704struct dl_rq {
705 /* runqueue is an rbtree, ordered by deadline */
706 struct rb_root_cached root;
707
708 unsigned int dl_nr_running;
709
710#ifdef CONFIG_SMP
711 /*
712 * Deadline values of the currently executing and the
713 * earliest ready task on this rq. Caching these facilitates
714 * the decision whether or not a ready but not running task
715 * should migrate somewhere else.
716 */
717 struct {
718 u64 curr;
719 u64 next;
720 } earliest_dl;
721
722 unsigned int dl_nr_migratory;
723 int overloaded;
724
725 /*
726 * Tasks on this rq that can be pushed away. They are kept in
727 * an rb-tree, ordered by tasks' deadlines, with caching
728 * of the leftmost (earliest deadline) element.
729 */
730 struct rb_root_cached pushable_dl_tasks_root;
731#else
732 struct dl_bw dl_bw;
733#endif
734 /*
735 * "Active utilization" for this runqueue: increased when a
736 * task wakes up (becomes TASK_RUNNING) and decreased when a
737 * task blocks
738 */
739 u64 running_bw;
740
741 /*
742 * Utilization of the tasks "assigned" to this runqueue (including
743 * the tasks that are in runqueue and the tasks that executed on this
744 * CPU and blocked). Increased when a task moves to this runqueue, and
745 * decreased when the task moves away (migrates, changes scheduling
746 * policy, or terminates).
747 * This is needed to compute the "inactive utilization" for the
748 * runqueue (inactive utilization = this_bw - running_bw).
749 */
750 u64 this_bw;
751 u64 extra_bw;
752
753 /*
754 * Inverse of the fraction of CPU utilization that can be reclaimed
755 * by the GRUB algorithm.
756 */
757 u64 bw_ratio;
758};
759
760#ifdef CONFIG_FAIR_GROUP_SCHED
761/* An entity is a task if it doesn't "own" a runqueue */
762#define entity_is_task(se) (!se->my_q)
763
764static inline void se_update_runnable(struct sched_entity *se)
765{
766 if (!entity_is_task(se))
767 se->runnable_weight = se->my_q->h_nr_running;
768}
769
770static inline long se_runnable(struct sched_entity *se)
771{
772 if (entity_is_task(se))
773 return !!se->on_rq;
774 else
775 return se->runnable_weight;
776}
777
778#else
779#define entity_is_task(se) 1
780
781static inline void se_update_runnable(struct sched_entity *se) {}
782
783static inline long se_runnable(struct sched_entity *se)
784{
785 return !!se->on_rq;
786}
787#endif
788
789#ifdef CONFIG_SMP
790/*
791 * XXX we want to get rid of these helpers and use the full load resolution.
792 */
793static inline long se_weight(struct sched_entity *se)
794{
795 return scale_load_down(se->load.weight);
796}
797
798
799static inline bool sched_asym_prefer(int a, int b)
800{
801 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
802}
803
804struct perf_domain {
805 struct em_perf_domain *em_pd;
806 struct perf_domain *next;
807 struct rcu_head rcu;
808};
809
810/* Scheduling group status flags */
811#define SG_OVERLOAD 0x1 /* More than one runnable task on a CPU. */
812#define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */
813
814/*
815 * We add the notion of a root-domain which will be used to define per-domain
816 * variables. Each exclusive cpuset essentially defines an island domain by
817 * fully partitioning the member CPUs from any other cpuset. Whenever a new
818 * exclusive cpuset is created, we also create and attach a new root-domain
819 * object.
820 *
821 */
822struct root_domain {
823 atomic_t refcount;
824 atomic_t rto_count;
825 struct rcu_head rcu;
826 cpumask_var_t span;
827 cpumask_var_t online;
828
829 /*
830 * Indicate pullable load on at least one CPU, e.g:
831 * - More than one runnable task
832 * - Running task is misfit
833 */
834 int overload;
835
836 /* Indicate one or more cpus over-utilized (tipping point) */
837 int overutilized;
838
839 /*
840 * The bit corresponding to a CPU gets set here if such CPU has more
841 * than one runnable -deadline task (as it is below for RT tasks).
842 */
843 cpumask_var_t dlo_mask;
844 atomic_t dlo_count;
845 struct dl_bw dl_bw;
846 struct cpudl cpudl;
847
848 /*
849 * Indicate whether a root_domain's dl_bw has been checked or
850 * updated. It's a monotonically increasing value.
851 *
852 * Corner cases such as wrap-around are theoretically dangerous, but
853 * given that u64 is 'big enough', they shouldn't be a concern.
854 */
855 u64 visit_gen;
856
857#ifdef HAVE_RT_PUSH_IPI
858 /*
859 * For IPI pull requests, loop across the rto_mask.
860 */
861 struct irq_work rto_push_work;
862 raw_spinlock_t rto_lock;
863 /* These are only updated and read within rto_lock */
864 int rto_loop;
865 int rto_cpu;
866 /* These atomics are updated outside of a lock */
867 atomic_t rto_loop_next;
868 atomic_t rto_loop_start;
869#endif
870 /*
871 * The "RT overload" flag: it gets set if a CPU has more than
872 * one runnable RT task.
873 */
874 cpumask_var_t rto_mask;
875 struct cpupri cpupri;
876
877 unsigned long max_cpu_capacity;
878
879 /*
880 * NULL-terminated list of performance domains intersecting with the
881 * CPUs of the rd. Protected by RCU.
882 */
883 struct perf_domain __rcu *pd;
884};
885
886extern void init_defrootdomain(void);
887extern int sched_init_domains(const struct cpumask *cpu_map);
888extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
889extern void sched_get_rd(struct root_domain *rd);
890extern void sched_put_rd(struct root_domain *rd);
891
892#ifdef HAVE_RT_PUSH_IPI
893extern void rto_push_irq_work_func(struct irq_work *work);
894#endif
895#endif /* CONFIG_SMP */
896
897#ifdef CONFIG_UCLAMP_TASK
898/*
899 * struct uclamp_bucket - Utilization clamp bucket
900 * @value: utilization clamp value for tasks on this clamp bucket
901 * @tasks: number of RUNNABLE tasks on this clamp bucket
902 *
903 * Keep track of how many tasks are RUNNABLE for a given utilization
904 * clamp value.
905 */
906struct uclamp_bucket {
907 unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
908 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
909};
910
911/*
912 * struct uclamp_rq - rq's utilization clamp
913 * @value: currently active clamp values for a rq
914 * @bucket: utilization clamp buckets affecting a rq
915 *
916 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
917 * A clamp value is affecting a rq when there is at least one task RUNNABLE
918 * (or actually running) with that value.
919 *
920 * There are up to UCLAMP_CNT possible different clamp values, currently there
921 * are only two: minimum utilization and maximum utilization.
922 *
923 * All utilization clamping values are MAX aggregated, since:
924 * - for util_min: we want to run the CPU at least at the max of the minimum
925 * utilization required by its currently RUNNABLE tasks.
926 * - for util_max: we want to allow the CPU to run up to the max of the
927 * maximum utilization allowed by its currently RUNNABLE tasks.
928 *
929 * Since on each system we expect only a limited number of different
930 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track
931 * the metrics required to compute all the per-rq utilization clamp values.
932 */
933struct uclamp_rq {
934 unsigned int value;
935 struct uclamp_bucket bucket[UCLAMP_BUCKETS];
936};
937
938DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
939#endif /* CONFIG_UCLAMP_TASK */
940
941struct rq;
942struct balance_callback {
943 struct balance_callback *next;
944 void (*func)(struct rq *rq);
945};
946
947/*
948 * This is the main, per-CPU runqueue data structure.
949 *
950 * Locking rule: code that wants to lock multiple runqueues (such as
951 * the load balancing or thread migration code) must acquire the locks
952 * in ascending &runqueue order.
953 */
954struct rq {
955 /* runqueue lock: */
956 raw_spinlock_t __lock;
957
958 /*
959 * nr_running and cpu_load should be in the same cacheline because
960 * remote CPUs use both these fields when doing load calculation.
961 */
962 unsigned int nr_running;
963#ifdef CONFIG_NUMA_BALANCING
964 unsigned int nr_numa_running;
965 unsigned int nr_preferred_running;
966 unsigned int numa_migrate_on;
967#endif
968#ifdef CONFIG_NO_HZ_COMMON
969#ifdef CONFIG_SMP
970 unsigned long last_blocked_load_update_tick;
971 unsigned int has_blocked_load;
972 call_single_data_t nohz_csd;
973#endif /* CONFIG_SMP */
974 unsigned int nohz_tick_stopped;
975 atomic_t nohz_flags;
976#endif /* CONFIG_NO_HZ_COMMON */
977
978#ifdef CONFIG_SMP
979 unsigned int ttwu_pending;
980#endif
981 u64 nr_switches;
982
983#ifdef CONFIG_UCLAMP_TASK
984 /* Utilization clamp values based on CPU's RUNNABLE tasks */
985 struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned;
986 unsigned int uclamp_flags;
987#define UCLAMP_FLAG_IDLE 0x01
988#endif
989
990 struct cfs_rq cfs;
991 struct rt_rq rt;
992 struct dl_rq dl;
993
994#ifdef CONFIG_FAIR_GROUP_SCHED
995 /* list of leaf cfs_rq on this CPU: */
996 struct list_head leaf_cfs_rq_list;
997 struct list_head *tmp_alone_branch;
998#endif /* CONFIG_FAIR_GROUP_SCHED */
999
1000 /*
1001 * This is part of a global counter where only the total sum
1002 * over all CPUs matters. A task can increase this counter on
1003 * one CPU and if it got migrated afterwards it may decrease
1004 * it on another CPU. Always updated under the runqueue lock:
1005 */
1006 unsigned int nr_uninterruptible;
1007
1008 struct task_struct __rcu *curr;
1009 struct task_struct *idle;
1010 struct task_struct *stop;
1011 unsigned long next_balance;
1012 struct mm_struct *prev_mm;
1013
1014 unsigned int clock_update_flags;
1015 u64 clock;
1016 /* Ensure that all clocks are in the same cache line */
1017 u64 clock_task ____cacheline_aligned;
1018 u64 clock_pelt;
1019 unsigned long lost_idle_time;
1020 u64 clock_pelt_idle;
1021 u64 clock_idle;
1022#ifndef CONFIG_64BIT
1023 u64 clock_pelt_idle_copy;
1024 u64 clock_idle_copy;
1025#endif
1026
1027 atomic_t nr_iowait;
1028
1029#ifdef CONFIG_SCHED_DEBUG
1030 u64 last_seen_need_resched_ns;
1031 int ticks_without_resched;
1032#endif
1033
1034#ifdef CONFIG_MEMBARRIER
1035 int membarrier_state;
1036#endif
1037
1038#ifdef CONFIG_SMP
1039 struct root_domain *rd;
1040 struct sched_domain __rcu *sd;
1041
1042 unsigned long cpu_capacity;
1043 unsigned long cpu_capacity_orig;
1044 unsigned long cpu_capacity_inverted;
1045
1046 struct balance_callback *balance_callback;
1047
1048 unsigned char nohz_idle_balance;
1049 unsigned char idle_balance;
1050
1051 unsigned long misfit_task_load;
1052
1053 /* For active balancing */
1054 int active_balance;
1055 int push_cpu;
1056 struct cpu_stop_work active_balance_work;
1057
1058 /* CPU of this runqueue: */
1059 int cpu;
1060 int online;
1061
1062 struct list_head cfs_tasks;
1063
1064 struct sched_avg avg_rt;
1065 struct sched_avg avg_dl;
1066#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
1067 struct sched_avg avg_irq;
1068#endif
1069#ifdef CONFIG_SCHED_THERMAL_PRESSURE
1070 struct sched_avg avg_thermal;
1071#endif
1072 u64 idle_stamp;
1073 u64 avg_idle;
1074
1075 unsigned long wake_stamp;
1076 u64 wake_avg_idle;
1077
1078 /* This is used to determine avg_idle's max value */
1079 u64 max_idle_balance_cost;
1080
1081#ifdef CONFIG_HOTPLUG_CPU
1082 struct rcuwait hotplug_wait;
1083#endif
1084#endif /* CONFIG_SMP */
1085
1086#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1087 u64 prev_irq_time;
1088#endif
1089#ifdef CONFIG_PARAVIRT
1090 u64 prev_steal_time;
1091#endif
1092#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
1093 u64 prev_steal_time_rq;
1094#endif
1095
1096 /* calc_load related fields */
1097 unsigned long calc_load_update;
1098 long calc_load_active;
1099
1100#ifdef CONFIG_SCHED_HRTICK
1101#ifdef CONFIG_SMP
1102 call_single_data_t hrtick_csd;
1103#endif
1104 struct hrtimer hrtick_timer;
1105 ktime_t hrtick_time;
1106#endif
1107
1108#ifdef CONFIG_SCHEDSTATS
1109 /* latency stats */
1110 struct sched_info rq_sched_info;
1111 unsigned long long rq_cpu_time;
1112 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
1113
1114 /* sys_sched_yield() stats */
1115 unsigned int yld_count;
1116
1117 /* schedule() stats */
1118 unsigned int sched_count;
1119 unsigned int sched_goidle;
1120
1121 /* try_to_wake_up() stats */
1122 unsigned int ttwu_count;
1123 unsigned int ttwu_local;
1124#endif
1125
1126#ifdef CONFIG_CPU_IDLE
1127 /* Must be inspected within a rcu lock section */
1128 struct cpuidle_state *idle_state;
1129#endif
1130
1131#ifdef CONFIG_SMP
1132 unsigned int nr_pinned;
1133#endif
1134 unsigned int push_busy;
1135 struct cpu_stop_work push_work;
1136
1137#ifdef CONFIG_SCHED_CORE
1138 /* per rq */
1139 struct rq *core;
1140 struct task_struct *core_pick;
1141 unsigned int core_enabled;
1142 unsigned int core_sched_seq;
1143 struct rb_root core_tree;
1144
1145 /* shared state -- careful with sched_core_cpu_deactivate() */
1146 unsigned int core_task_seq;
1147 unsigned int core_pick_seq;
1148 unsigned long core_cookie;
1149 unsigned int core_forceidle_count;
1150 unsigned int core_forceidle_seq;
1151 unsigned int core_forceidle_occupation;
1152 u64 core_forceidle_start;
1153#endif
1154
1155 /* Scratch cpumask to be temporarily used under rq_lock */
1156 cpumask_var_t scratch_mask;
1157};
1158
1159#ifdef CONFIG_FAIR_GROUP_SCHED
1160
1161/* CPU runqueue to which this cfs_rq is attached */
1162static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1163{
1164 return cfs_rq->rq;
1165}
1166
1167#else
1168
1169static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1170{
1171 return container_of(cfs_rq, struct rq, cfs);
1172}
1173#endif
1174
1175static inline int cpu_of(struct rq *rq)
1176{
1177#ifdef CONFIG_SMP
1178 return rq->cpu;
1179#else
1180 return 0;
1181#endif
1182}
1183
1184#define MDF_PUSH 0x01
1185
1186static inline bool is_migration_disabled(struct task_struct *p)
1187{
1188#ifdef CONFIG_SMP
1189 return p->migration_disabled;
1190#else
1191 return false;
1192#endif
1193}
1194
1195DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1196
1197#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
1198#define this_rq() this_cpu_ptr(&runqueues)
1199#define task_rq(p) cpu_rq(task_cpu(p))
1200#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1201#define raw_rq() raw_cpu_ptr(&runqueues)
1202
1203struct sched_group;
1204#ifdef CONFIG_SCHED_CORE
1205static inline struct cpumask *sched_group_span(struct sched_group *sg);
1206
1207DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);
1208
1209static inline bool sched_core_enabled(struct rq *rq)
1210{
1211 return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
1212}
1213
1214static inline bool sched_core_disabled(void)
1215{
1216 return !static_branch_unlikely(&__sched_core_enabled);
1217}
1218
1219/*
1220 * Be careful with this function; not for general use. The return value isn't
1221 * stable unless you actually hold a relevant rq->__lock.
1222 */
1223static inline raw_spinlock_t *rq_lockp(struct rq *rq)
1224{
1225 if (sched_core_enabled(rq))
1226 return &rq->core->__lock;
1227
1228 return &rq->__lock;
1229}
1230
1231static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
1232{
1233 if (rq->core_enabled)
1234 return &rq->core->__lock;
1235
1236 return &rq->__lock;
1237}
1238
1239bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi);
1240
1241/*
1242 * Helpers to check if the CPU's core cookie matches with the task's cookie
1243 * when core scheduling is enabled.
1244 * A special case is that the task's cookie always matches with CPU's core
1245 * cookie if the CPU is in an idle core.
1246 */
1247static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
1248{
1249 /* Ignore cookie match if core scheduler is not enabled on the CPU. */
1250 if (!sched_core_enabled(rq))
1251 return true;
1252
1253 return rq->core->core_cookie == p->core_cookie;
1254}
1255
1256static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
1257{
1258 bool idle_core = true;
1259 int cpu;
1260
1261 /* Ignore cookie match if core scheduler is not enabled on the CPU. */
1262 if (!sched_core_enabled(rq))
1263 return true;
1264
1265 for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
1266 if (!available_idle_cpu(cpu)) {
1267 idle_core = false;
1268 break;
1269 }
1270 }
1271
1272 /*
1273 * A CPU in an idle core is always the best choice for tasks with
1274 * cookies.
1275 */
1276 return idle_core || rq->core->core_cookie == p->core_cookie;
1277}
1278
1279static inline bool sched_group_cookie_match(struct rq *rq,
1280 struct task_struct *p,
1281 struct sched_group *group)
1282{
1283 int cpu;
1284
1285 /* Ignore cookie match if core scheduler is not enabled on the CPU. */
1286 if (!sched_core_enabled(rq))
1287 return true;
1288
1289 for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
1290 if (sched_core_cookie_match(cpu_rq(cpu), p))
1291 return true;
1292 }
1293 return false;
1294}
1295
1296static inline bool sched_core_enqueued(struct task_struct *p)
1297{
1298 return !RB_EMPTY_NODE(&p->core_node);
1299}
1300
1301extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
1302extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags);
1303
1304extern void sched_core_get(void);
1305extern void sched_core_put(void);
1306
1307#else /* !CONFIG_SCHED_CORE */
1308
1309static inline bool sched_core_enabled(struct rq *rq)
1310{
1311 return false;
1312}
1313
1314static inline bool sched_core_disabled(void)
1315{
1316 return true;
1317}
1318
1319static inline raw_spinlock_t *rq_lockp(struct rq *rq)
1320{
1321 return &rq->__lock;
1322}
1323
1324static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
1325{
1326 return &rq->__lock;
1327}
1328
1329static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
1330{
1331 return true;
1332}
1333
1334static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
1335{
1336 return true;
1337}
1338
1339static inline bool sched_group_cookie_match(struct rq *rq,
1340 struct task_struct *p,
1341 struct sched_group *group)
1342{
1343 return true;
1344}
1345#endif /* CONFIG_SCHED_CORE */
1346
1347static inline void lockdep_assert_rq_held(struct rq *rq)
1348{
1349 lockdep_assert_held(__rq_lockp(rq));
1350}
1351
1352extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
1353extern bool raw_spin_rq_trylock(struct rq *rq);
1354extern void raw_spin_rq_unlock(struct rq *rq);
1355
1356static inline void raw_spin_rq_lock(struct rq *rq)
1357{
1358 raw_spin_rq_lock_nested(rq, 0);
1359}
1360
1361static inline void raw_spin_rq_lock_irq(struct rq *rq)
1362{
1363 local_irq_disable();
1364 raw_spin_rq_lock(rq);
1365}
1366
1367static inline void raw_spin_rq_unlock_irq(struct rq *rq)
1368{
1369 raw_spin_rq_unlock(rq);
1370 local_irq_enable();
1371}
1372
1373static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
1374{
1375 unsigned long flags;
1376 local_irq_save(flags);
1377 raw_spin_rq_lock(rq);
1378 return flags;
1379}
1380
1381static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
1382{
1383 raw_spin_rq_unlock(rq);
1384 local_irq_restore(flags);
1385}
1386
1387#define raw_spin_rq_lock_irqsave(rq, flags) \
1388do { \
1389 flags = _raw_spin_rq_lock_irqsave(rq); \
1390} while (0)
1391
1392#ifdef CONFIG_SCHED_SMT
1393extern void __update_idle_core(struct rq *rq);
1394
1395static inline void update_idle_core(struct rq *rq)
1396{
1397 if (static_branch_unlikely(&sched_smt_present))
1398 __update_idle_core(rq);
1399}
1400
1401#else
1402static inline void update_idle_core(struct rq *rq) { }
1403#endif
1404
1405#ifdef CONFIG_FAIR_GROUP_SCHED
1406static inline struct task_struct *task_of(struct sched_entity *se)
1407{
1408 SCHED_WARN_ON(!entity_is_task(se));
1409 return container_of(se, struct task_struct, se);
1410}
1411
1412static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
1413{
1414 return p->se.cfs_rq;
1415}
1416
1417/* runqueue on which this entity is (to be) queued */
1418static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
1419{
1420 return se->cfs_rq;
1421}
1422
1423/* runqueue "owned" by this group */
1424static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
1425{
1426 return grp->my_q;
1427}
1428
1429#else
1430
1431static inline struct task_struct *task_of(struct sched_entity *se)
1432{
1433 return container_of(se, struct task_struct, se);
1434}
1435
1436static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
1437{
1438 return &task_rq(p)->cfs;
1439}
1440
1441static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
1442{
1443 struct task_struct *p = task_of(se);
1444 struct rq *rq = task_rq(p);
1445
1446 return &rq->cfs;
1447}
1448
1449/* runqueue "owned" by this group */
1450static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
1451{
1452 return NULL;
1453}
1454#endif
1455
1456extern void update_rq_clock(struct rq *rq);
1457
1458/*
1459 * rq::clock_update_flags bits
1460 *
1461 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
1462 * call to __schedule(). This is an optimisation to avoid
1463 * neighbouring rq clock updates.
1464 *
1465 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
1466 * in effect and calls to update_rq_clock() are being ignored.
1467 *
1468 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
1469 * made to update_rq_clock() since the last time rq::lock was pinned.
1470 *
1471 * If inside of __schedule(), clock_update_flags will have been
1472 * shifted left (a left shift is a cheap operation for the fast path
1473 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
1474 *
1475 * if (rq->clock_update_flags >= RQCF_UPDATED)
1476 *
1477 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
1478 * one position though, because the next rq_unpin_lock() will shift it
1479 * back.
1480 */
1481#define RQCF_REQ_SKIP 0x01
1482#define RQCF_ACT_SKIP 0x02
1483#define RQCF_UPDATED 0x04
1484
1485static inline void assert_clock_updated(struct rq *rq)
1486{
1487 /*
1488 * The only reason for not seeing a clock update since the
1489 * last rq_pin_lock() is if we're currently skipping updates.
1490 */
1491 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
1492}
1493
1494static inline u64 rq_clock(struct rq *rq)
1495{
1496 lockdep_assert_rq_held(rq);
1497 assert_clock_updated(rq);
1498
1499 return rq->clock;
1500}
1501
1502static inline u64 rq_clock_task(struct rq *rq)
1503{
1504 lockdep_assert_rq_held(rq);
1505 assert_clock_updated(rq);
1506
1507 return rq->clock_task;
1508}
1509
1510/**
1511 * By default the decay period is the PELT decay period (32 ms).
1512 * The decay shift lengthens the decay period in
1513 * multiples of 32.
1514 * Decay shift Decay period(ms)
1515 * 0 32
1516 * 1 64
1517 * 2 128
1518 * 3 256
1519 * 4 512
1520 */
1521extern int sched_thermal_decay_shift;
1522
1523static inline u64 rq_clock_thermal(struct rq *rq)
1524{
1525 return rq_clock_task(rq) >> sched_thermal_decay_shift;
1526}
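/*
 * Illustrative example: with sched_thermal_decay_shift == 2 the thermal
 * clock advances at 1/4 the rate of rq_clock_task(), so thermal pressure
 * decays over 4 * 32 ms == 128 ms, matching the table above.
 */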
1527
1528static inline void rq_clock_skip_update(struct rq *rq)
1529{
1530 lockdep_assert_rq_held(rq);
1531 rq->clock_update_flags |= RQCF_REQ_SKIP;
1532}
1533
1534/*
1535 * See rt task throttling, which is the only time a skip
1536 * request is canceled.
1537 */
1538static inline void rq_clock_cancel_skipupdate(struct rq *rq)
1539{
1540 lockdep_assert_rq_held(rq);
1541 rq->clock_update_flags &= ~RQCF_REQ_SKIP;
1542}
1543
1544struct rq_flags {
1545 unsigned long flags;
1546 struct pin_cookie cookie;
1547#ifdef CONFIG_SCHED_DEBUG
1548 /*
1549 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
1550 * current pin context is stashed here in case it needs to be
1551 * restored in rq_repin_lock().
1552 */
1553 unsigned int clock_update_flags;
1554#endif
1555};
1556
1557extern struct balance_callback balance_push_callback;
1558
1559/*
1560 * Lockdep annotation that avoids accidental unlocks; it's like a
1561 * sticky/continuous lockdep_assert_held().
1562 *
1563 * This prevents code that has access to 'struct rq *rq' (basically everything in
1564 * the scheduler) from accidentally unlocking the rq if it does not also have a
1565 * copy of the (on-stack) 'struct rq_flags rf'.
1566 *
1567 * Also see Documentation/locking/lockdep-design.rst.
1568 */
1569static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
1570{
1571 rf->cookie = lockdep_pin_lock(__rq_lockp(rq));
1572
1573#ifdef CONFIG_SCHED_DEBUG
1574 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
1575 rf->clock_update_flags = 0;
1576#ifdef CONFIG_SMP
1577 SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback);
1578#endif
1579#endif
1580}
1581
1582static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
1583{
1584#ifdef CONFIG_SCHED_DEBUG
1585 if (rq->clock_update_flags > RQCF_ACT_SKIP)
1586 rf->clock_update_flags = RQCF_UPDATED;
1587#endif
1588
1589 lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
1590}
1591
1592static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
1593{
1594 lockdep_repin_lock(__rq_lockp(rq), rf->cookie);
1595
1596#ifdef CONFIG_SCHED_DEBUG
1597 /*
1598 * Restore the value we stashed in @rf for this pin context.
1599 */
1600 rq->clock_update_flags |= rf->clock_update_flags;
1601#endif
1602}
1603
1604struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1605 __acquires(rq->lock);
1606
1607struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1608 __acquires(p->pi_lock)
1609 __acquires(rq->lock);
1610
1611static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1612 __releases(rq->lock)
1613{
1614 rq_unpin_lock(rq, rf);
1615 raw_spin_rq_unlock(rq);
1616}
1617
1618static inline void
1619task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1620 __releases(rq->lock)
1621 __releases(p->pi_lock)
1622{
1623 rq_unpin_lock(rq, rf);
1624 raw_spin_rq_unlock(rq);
1625 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
1626}
1627
1628static inline void
1629rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
1630 __acquires(rq->lock)
1631{
1632 raw_spin_rq_lock_irqsave(rq, rf->flags);
1633 rq_pin_lock(rq, rf);
1634}
1635
1636static inline void
1637rq_lock_irq(struct rq *rq, struct rq_flags *rf)
1638 __acquires(rq->lock)
1639{
1640 raw_spin_rq_lock_irq(rq);
1641 rq_pin_lock(rq, rf);
1642}
1643
1644static inline void
1645rq_lock(struct rq *rq, struct rq_flags *rf)
1646 __acquires(rq->lock)
1647{
1648 raw_spin_rq_lock(rq);
1649 rq_pin_lock(rq, rf);
1650}
1651
1652static inline void
1653rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
1654 __releases(rq->lock)
1655{
1656 rq_unpin_lock(rq, rf);
1657 raw_spin_rq_unlock_irqrestore(rq, rf->flags);
1658}
1659
1660static inline void
1661rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
1662 __releases(rq->lock)
1663{
1664 rq_unpin_lock(rq, rf);
1665 raw_spin_rq_unlock_irq(rq);
1666}
1667
1668static inline void
1669rq_unlock(struct rq *rq, struct rq_flags *rf)
1670 __releases(rq->lock)
1671{
1672 rq_unpin_lock(rq, rf);
1673 raw_spin_rq_unlock(rq);
1674}
1675
1676static inline struct rq *
1677this_rq_lock_irq(struct rq_flags *rf)
1678 __acquires(rq->lock)
1679{
1680 struct rq *rq;
1681
1682 local_irq_disable();
1683 rq = this_rq();
1684 rq_lock(rq, rf);
1685 return rq;
1686}
1687
1688#ifdef CONFIG_NUMA
1689enum numa_topology_type {
1690 NUMA_DIRECT,
1691 NUMA_GLUELESS_MESH,
1692 NUMA_BACKPLANE,
1693};
1694extern enum numa_topology_type sched_numa_topology_type;
1695extern int sched_max_numa_distance;
1696extern bool find_numa_distance(int distance);
1697extern void sched_init_numa(int offline_node);
1698extern void sched_update_numa(int cpu, bool online);
1699extern void sched_domains_numa_masks_set(unsigned int cpu);
1700extern void sched_domains_numa_masks_clear(unsigned int cpu);
1701extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
1702#else
1703static inline void sched_init_numa(int offline_node) { }
1704static inline void sched_update_numa(int cpu, bool online) { }
1705static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
1706static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
1707static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
1708{
1709 return nr_cpu_ids;
1710}
1711#endif
1712
1713#ifdef CONFIG_NUMA_BALANCING
1714/* The regions in numa_faults array from task_struct */
1715enum numa_faults_stats {
1716 NUMA_MEM = 0,
1717 NUMA_CPU,
1718 NUMA_MEMBUF,
1719 NUMA_CPUBUF
1720};
1721extern void sched_setnuma(struct task_struct *p, int node);
1722extern int migrate_task_to(struct task_struct *p, int cpu);
1723extern int migrate_swap(struct task_struct *p, struct task_struct *t,
1724 int cpu, int scpu);
1725extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
1726#else
1727static inline void
1728init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
1729{
1730}
1731#endif /* CONFIG_NUMA_BALANCING */
1732
1733#ifdef CONFIG_SMP
1734
1735static inline void
1736queue_balance_callback(struct rq *rq,
1737 struct balance_callback *head,
1738 void (*func)(struct rq *rq))
1739{
1740 lockdep_assert_rq_held(rq);
1741
1742 /*
1743 * Don't (re)queue an already queued item; nor queue anything when
1744 * balance_push() is active, see the comment with
1745 * balance_push_callback.
1746 */
1747 if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
1748 return;
1749
1750 head->func = func;
1751 head->next = rq->balance_callback;
1752 rq->balance_callback = head;
1753}
1754
1755#define rcu_dereference_check_sched_domain(p) \
1756 rcu_dereference_check((p), \
1757 lockdep_is_held(&sched_domains_mutex))
1758
1759/*
1760 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1761 * See destroy_sched_domains: call_rcu for details.
1762 *
1763 * The domain tree of any CPU may only be accessed from within
1764 * preempt-disabled sections.
1765 */
1766#define for_each_domain(cpu, __sd) \
1767 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
1768 __sd; __sd = __sd->parent)
1769
1770/**
1771 * highest_flag_domain - Return highest sched_domain containing flag.
1772 * @cpu: The CPU whose highest level of sched domain is to
1773 * be returned.
1774 * @flag: The flag to check for the highest sched_domain
1775 * for the given CPU.
1776 *
1777 * Returns the highest sched_domain of a CPU which contains the given flag.
1778 */
1779static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
1780{
1781 struct sched_domain *sd, *hsd = NULL;
1782
1783 for_each_domain(cpu, sd) {
1784 if (!(sd->flags & flag))
1785 break;
1786 hsd = sd;
1787 }
1788
1789 return hsd;
1790}
1791
1792static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
1793{
1794 struct sched_domain *sd;
1795
1796 for_each_domain(cpu, sd) {
1797 if (sd->flags & flag)
1798 break;
1799 }
1800
1801 return sd;
1802}
1803
1804DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
1805DECLARE_PER_CPU(int, sd_llc_size);
1806DECLARE_PER_CPU(int, sd_llc_id);
1807DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
1808DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
1809DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
1810DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
1811extern struct static_key_false sched_asym_cpucapacity;
1812
1813static __always_inline bool sched_asym_cpucap_active(void)
1814{
1815 return static_branch_unlikely(&sched_asym_cpucapacity);
1816}
1817
1818struct sched_group_capacity {
1819 atomic_t ref;
1820 /*
1821 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
1822 * for a single CPU.
1823 */
1824 unsigned long capacity;
1825 unsigned long min_capacity; /* Min per-CPU capacity in group */
1826 unsigned long max_capacity; /* Max per-CPU capacity in group */
1827 unsigned long next_update;
1828 int imbalance; /* XXX unrelated to capacity but shared group state */
1829
1830#ifdef CONFIG_SCHED_DEBUG
1831 int id;
1832#endif
1833
1834 unsigned long cpumask[]; /* Balance mask */
1835};
1836
1837struct sched_group {
1838 struct sched_group *next; /* Must be a circular list */
1839 atomic_t ref;
1840
1841 unsigned int group_weight;
1842 struct sched_group_capacity *sgc;
1843 int asym_prefer_cpu; /* CPU of highest priority in group */
1844 int flags;
1845
1846 /*
1847 * The CPUs this group covers.
1848 *
1849 * NOTE: this field is variable length. (Allocated dynamically
1850 * by attaching extra space to the end of the structure,
1851 * depending on how many CPUs the kernel has booted up with)
1852 */
1853 unsigned long cpumask[];
1854};
1855
1856static inline struct cpumask *sched_group_span(struct sched_group *sg)
1857{
1858 return to_cpumask(sg->cpumask);
1859}
1860
1861/*
1862 * See build_balance_mask().
1863 */
1864static inline struct cpumask *group_balance_mask(struct sched_group *sg)
1865{
1866 return to_cpumask(sg->sgc->cpumask);
1867}
1868
1869extern int group_balance_cpu(struct sched_group *sg);
1870
1871#ifdef CONFIG_SCHED_DEBUG
1872void update_sched_domain_debugfs(void);
1873void dirty_sched_domain_sysctl(int cpu);
1874#else
1875static inline void update_sched_domain_debugfs(void)
1876{
1877}
1878static inline void dirty_sched_domain_sysctl(int cpu)
1879{
1880}
1881#endif
1882
1883extern int sched_update_scaling(void);
1884
1885static inline const struct cpumask *task_user_cpus(struct task_struct *p)
1886{
1887 if (!p->user_cpus_ptr)
1888 return cpu_possible_mask; /* &init_task.cpus_mask */
1889 return p->user_cpus_ptr;
1890}
1891#endif /* CONFIG_SMP */
1892
1893#include "stats.h"
1894
1895#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS)
1896
1897extern void __sched_core_account_forceidle(struct rq *rq);
1898
1899static inline void sched_core_account_forceidle(struct rq *rq)
1900{
1901 if (schedstat_enabled())
1902 __sched_core_account_forceidle(rq);
1903}
1904
1905extern void __sched_core_tick(struct rq *rq);
1906
1907static inline void sched_core_tick(struct rq *rq)
1908{
1909 if (sched_core_enabled(rq) && schedstat_enabled())
1910 __sched_core_tick(rq);
1911}
1912
1913#else
1914
1915static inline void sched_core_account_forceidle(struct rq *rq) {}
1916
1917static inline void sched_core_tick(struct rq *rq) {}
1918
1919#endif /* CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS */
1920
1921#ifdef CONFIG_CGROUP_SCHED
1922
1923/*
1924 * Return the group to which this task belongs.
1925 *
1926 * We cannot use task_css() and friends because the cgroup subsystem
1927 * changes that value before the cgroup_subsys::attach() method is called,
1928 * therefore we cannot pin it and might observe the wrong value.
1929 *
1930 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
1931 * core changes this before calling sched_move_task().
1932 *
1933 * Instead we use a 'copy' which is updated from sched_move_task() while
1934 * holding both task_struct::pi_lock and rq::lock.
1935 */
1936static inline struct task_group *task_group(struct task_struct *p)
1937{
1938 return p->sched_task_group;
1939}
1940
1941/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
1942static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
1943{
1944#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
1945 struct task_group *tg = task_group(p);
1946#endif
1947
1948#ifdef CONFIG_FAIR_GROUP_SCHED
1949 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
1950 p->se.cfs_rq = tg->cfs_rq[cpu];
1951 p->se.parent = tg->se[cpu];
1952 p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0;
1953#endif
1954
1955#ifdef CONFIG_RT_GROUP_SCHED
1956 p->rt.rt_rq = tg->rt_rq[cpu];
1957 p->rt.parent = tg->rt_se[cpu];
1958#endif
1959}
1960
1961#else /* CONFIG_CGROUP_SCHED */
1962
1963static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
1964static inline struct task_group *task_group(struct task_struct *p)
1965{
1966 return NULL;
1967}
1968
1969#endif /* CONFIG_CGROUP_SCHED */
1970
1971static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1972{
1973 set_task_rq(p, cpu);
1974#ifdef CONFIG_SMP
1975 /*
1976 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1977 * successfully executed on another CPU. We must ensure that updates of
1978 * per-task data have been completed by this moment.
1979 */
1980 smp_wmb();
1981 WRITE_ONCE(task_thread_info(p)->cpu, cpu);
1982 p->wake_cpu = cpu;
1983#endif
1984}
1985
1986/*
1987 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
1988 */
1989#ifdef CONFIG_SCHED_DEBUG
1990# define const_debug __read_mostly
1991#else
1992# define const_debug const
1993#endif
1994
1995#define SCHED_FEAT(name, enabled) \
1996 __SCHED_FEAT_##name ,
1997
1998enum {
1999#include "features.h"
2000 __SCHED_FEAT_NR,
2001};
2002
2003#undef SCHED_FEAT
2004
2005#ifdef CONFIG_SCHED_DEBUG
2006
2007/*
2008 * To support run-time toggling of sched features, all the translation units
 * (except core.c) reference the sysctl_sched_features defined in core.c.
2010 */
2011extern const_debug unsigned int sysctl_sched_features;
2012
2013#ifdef CONFIG_JUMP_LABEL
2014#define SCHED_FEAT(name, enabled) \
2015static __always_inline bool static_branch_##name(struct static_key *key) \
2016{ \
2017 return static_key_##enabled(key); \
2018}
2019
2020#include "features.h"
2021#undef SCHED_FEAT
2022
2023extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
2024#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
2025
2026#else /* !CONFIG_JUMP_LABEL */
2027
2028#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
2029
2030#endif /* CONFIG_JUMP_LABEL */
2031
2032#else /* !SCHED_DEBUG */
2033
2034/*
 * Each translation unit has its own copy of sysctl_sched_features to allow
 * constant propagation at compile time and compiler optimizations based on
 * the feature defaults.
2038 */
2039#define SCHED_FEAT(name, enabled) \
2040 (1UL << __SCHED_FEAT_##name) * enabled |
2041static const_debug __maybe_unused unsigned int sysctl_sched_features =
2042#include "features.h"
2043 0;
2044#undef SCHED_FEAT
2045
2046#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
2047
2048#endif /* SCHED_DEBUG */
2049
2050extern struct static_key_false sched_numa_balancing;
2051extern struct static_key_false sched_schedstats;
2052
2053static inline u64 global_rt_period(void)
2054{
2055 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
2056}
2057
2058static inline u64 global_rt_runtime(void)
2059{
2060 if (sysctl_sched_rt_runtime < 0)
2061 return RUNTIME_INF;
2062
2063 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
2064}
2065
2066static inline int task_current(struct rq *rq, struct task_struct *p)
2067{
2068 return rq->curr == p;
2069}
2070
2071static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
2072{
2073#ifdef CONFIG_SMP
2074 return p->on_cpu;
2075#else
2076 return task_current(rq, p);
2077#endif
2078}
2079
2080static inline int task_on_rq_queued(struct task_struct *p)
2081{
2082 return p->on_rq == TASK_ON_RQ_QUEUED;
2083}
2084
2085static inline int task_on_rq_migrating(struct task_struct *p)
2086{
2087 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
2088}
2089
2090/* Wake flags. The first three directly map to some SD flag value */
2091#define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */
2092#define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */
2093#define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */
2094
2095#define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */
2096#define WF_MIGRATED 0x20 /* Internal use, task got migrated */
2097
2098#ifdef CONFIG_SMP
2099static_assert(WF_EXEC == SD_BALANCE_EXEC);
2100static_assert(WF_FORK == SD_BALANCE_FORK);
2101static_assert(WF_TTWU == SD_BALANCE_WAKE);
2102#endif
2103
2104/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its run queue's load is weighted according to its
2108 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
2109 * scaled version of the new time slice allocation that they receive on time
2110 * slice expiry etc.
2111 */
2112
2113#define WEIGHT_IDLEPRIO 3
2114#define WMULT_IDLEPRIO 1431655765
2115
2116extern const int sched_prio_to_weight[40];
2117extern const u32 sched_prio_to_wmult[40];
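
/*
 * Editor's note (illustrative, not part of the original header): the weight
 * tables are laid out so that nice 0 maps to a weight of 1024 and each nice
 * step changes the CPU share by roughly 1.25x, while sched_prio_to_wmult[]
 * caches the inverse 2^32 / weight so the hot path can replace a division by
 * a multiply and shift, roughly (see __calc_delta() in fair.c):
 *
 *	delta_exec * weight / lw->weight
 *	    ~= (delta_exec * weight * lw->inv_weight) >> 32
 *
 * Consistently, WMULT_IDLEPRIO above is 2^32 / WEIGHT_IDLEPRIO = 2^32 / 3.
 */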
2118
2119/*
2120 * {de,en}queue flags:
2121 *
2122 * DEQUEUE_SLEEP - task is no longer runnable
2123 * ENQUEUE_WAKEUP - task just became runnable
2124 *
2125 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
2126 * are in a known state which allows modification. Such pairs
2127 * should preserve as much state as possible.
2128 *
2129 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
2130 * in the runqueue.
2131 *
2132 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified)
2133 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
2134 * ENQUEUE_MIGRATED - the task was migrated during wakeup
2135 *
2136 */
2137
2138#define DEQUEUE_SLEEP 0x01
2139#define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */
2140#define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */
2141#define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */
2142
2143#define ENQUEUE_WAKEUP 0x01
2144#define ENQUEUE_RESTORE 0x02
2145#define ENQUEUE_MOVE 0x04
2146#define ENQUEUE_NOCLOCK 0x08
2147
2148#define ENQUEUE_HEAD 0x10
2149#define ENQUEUE_REPLENISH 0x20
2150#ifdef CONFIG_SMP
2151#define ENQUEUE_MIGRATED 0x40
2152#else
2153#define ENQUEUE_MIGRATED 0x00
2154#endif
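
/*
 * Editor's note: a minimal sketch (not built) of the SAVE/RESTORE pattern the
 * flags above support when core.c changes a queued task's scheduling
 * properties in place; dequeue_task()/enqueue_task() are static helpers in
 * core.c and the exact flag mix varies per call site:
 *
 *	queued = task_on_rq_queued(p);
 *	if (queued)
 *		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
 *
 *	...change p's policy / priority / group...
 *
 *	if (queued)
 *		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 */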
2155
2156#define RETRY_TASK ((void *)-1UL)
2157
2158struct affinity_context {
2159 const struct cpumask *new_mask;
2160 struct cpumask *user_mask;
2161 unsigned int flags;
2162};
2163
2164struct sched_class {
2165
2166#ifdef CONFIG_UCLAMP_TASK
2167 int uclamp_enabled;
2168#endif
2169
2170 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
2171 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
2172 void (*yield_task) (struct rq *rq);
2173 bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
2174
2175 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
2176
2177 struct task_struct *(*pick_next_task)(struct rq *rq);
2178
2179 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
2180 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
2181
2182#ifdef CONFIG_SMP
2183 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
2184 int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
2185
2186 struct task_struct * (*pick_task)(struct rq *rq);
2187
2188 void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
2189
2190 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
2191
2192 void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx);
2193
2194 void (*rq_online)(struct rq *rq);
2195 void (*rq_offline)(struct rq *rq);
2196
2197 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
2198#endif
2199
2200 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
2201 void (*task_fork)(struct task_struct *p);
2202 void (*task_dead)(struct task_struct *p);
2203
2204 /*
2205 * The switched_from() call is allowed to drop rq->lock, therefore we
2206 * cannot assume the switched_from/switched_to pair is serialized by
2207 * rq->lock. They are however serialized by p->pi_lock.
2208 */
2209 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
2210 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
2211 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
2212 int oldprio);
2213
2214 unsigned int (*get_rr_interval)(struct rq *rq,
2215 struct task_struct *task);
2216
2217 void (*update_curr)(struct rq *rq);
2218
2219#ifdef CONFIG_FAIR_GROUP_SCHED
2220 void (*task_change_group)(struct task_struct *p);
2221#endif
2222};
2223
2224static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
2225{
2226 WARN_ON_ONCE(rq->curr != prev);
2227 prev->sched_class->put_prev_task(rq, prev);
2228}
2229
2230static inline void set_next_task(struct rq *rq, struct task_struct *next)
2231{
2232 next->sched_class->set_next_task(rq, next, false);
2233}
2234
2235
2236/*
2237 * Helper to define a sched_class instance; each one is placed in a separate
2238 * section which is ordered by the linker script:
2239 *
2240 * include/asm-generic/vmlinux.lds.h
2241 *
2242 * *CAREFUL* they are laid out in *REVERSE* order!!!
2243 *
2244 * Also enforce alignment on the instance, not the type, to guarantee layout.
2245 */
2246#define DEFINE_SCHED_CLASS(name) \
2247const struct sched_class name##_sched_class \
2248 __aligned(__alignof__(struct sched_class)) \
2249 __section("__" #name "_sched_class")
2250
2251/* Defined in include/asm-generic/vmlinux.lds.h */
2252extern struct sched_class __sched_class_highest[];
2253extern struct sched_class __sched_class_lowest[];
2254
2255#define for_class_range(class, _from, _to) \
2256 for (class = (_from); class < (_to); class++)
2257
2258#define for_each_class(class) \
2259 for_class_range(class, __sched_class_highest, __sched_class_lowest)
2260
2261#define sched_class_above(_a, _b) ((_a) < (_b))
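
/*
 * Editor's note: an illustrative sketch (not built) of how the core picking
 * path walks the class hierarchy with the macros above, highest priority
 * first; the real loop lives in __pick_next_task() in core.c:
 *
 *	const struct sched_class *class;
 *	struct task_struct *p;
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *	}
 *
 *	BUG();	(the idle class should always have a runnable task)
 */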
2262
2263extern const struct sched_class stop_sched_class;
2264extern const struct sched_class dl_sched_class;
2265extern const struct sched_class rt_sched_class;
2266extern const struct sched_class fair_sched_class;
2267extern const struct sched_class idle_sched_class;
2268
2269static inline bool sched_stop_runnable(struct rq *rq)
2270{
2271 return rq->stop && task_on_rq_queued(rq->stop);
2272}
2273
2274static inline bool sched_dl_runnable(struct rq *rq)
2275{
2276 return rq->dl.dl_nr_running > 0;
2277}
2278
2279static inline bool sched_rt_runnable(struct rq *rq)
2280{
2281 return rq->rt.rt_queued > 0;
2282}
2283
2284static inline bool sched_fair_runnable(struct rq *rq)
2285{
2286 return rq->cfs.nr_running > 0;
2287}
2288
2289extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
2290extern struct task_struct *pick_next_task_idle(struct rq *rq);
2291
2292#define SCA_CHECK 0x01
2293#define SCA_MIGRATE_DISABLE 0x02
2294#define SCA_MIGRATE_ENABLE 0x04
2295#define SCA_USER 0x08
2296
2297#ifdef CONFIG_SMP
2298
2299extern void update_group_capacity(struct sched_domain *sd, int cpu);
2300
2301extern void trigger_load_balance(struct rq *rq);
2302
2303extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx);
2304
2305static inline struct task_struct *get_push_task(struct rq *rq)
2306{
2307 struct task_struct *p = rq->curr;
2308
2309 lockdep_assert_rq_held(rq);
2310
2311 if (rq->push_busy)
2312 return NULL;
2313
2314 if (p->nr_cpus_allowed == 1)
2315 return NULL;
2316
2317 if (p->migration_disabled)
2318 return NULL;
2319
2320 rq->push_busy = true;
2321 return get_task_struct(p);
2322}
2323
2324extern int push_cpu_stop(void *arg);
2325
2326#endif
2327
2328#ifdef CONFIG_CPU_IDLE
2329static inline void idle_set_state(struct rq *rq,
2330 struct cpuidle_state *idle_state)
2331{
2332 rq->idle_state = idle_state;
2333}
2334
2335static inline struct cpuidle_state *idle_get_state(struct rq *rq)
2336{
2337 SCHED_WARN_ON(!rcu_read_lock_held());
2338
2339 return rq->idle_state;
2340}
2341#else
2342static inline void idle_set_state(struct rq *rq,
2343 struct cpuidle_state *idle_state)
2344{
2345}
2346
2347static inline struct cpuidle_state *idle_get_state(struct rq *rq)
2348{
2349 return NULL;
2350}
2351#endif
2352
2353extern void schedule_idle(void);
2354
2355extern void sysrq_sched_debug_show(void);
2356extern void sched_init_granularity(void);
2357extern void update_max_interval(void);
2358
2359extern void init_sched_dl_class(void);
2360extern void init_sched_rt_class(void);
2361extern void init_sched_fair_class(void);
2362
2363extern void reweight_task(struct task_struct *p, int prio);
2364
2365extern void resched_curr(struct rq *rq);
2366extern void resched_cpu(int cpu);
2367
2368extern struct rt_bandwidth def_rt_bandwidth;
2369extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
2370extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
2371
2372extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
2373extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
2374extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
2375
2376#define BW_SHIFT 20
2377#define BW_UNIT (1 << BW_SHIFT)
2378#define RATIO_SHIFT 8
2379#define MAX_BW_BITS (64 - BW_SHIFT)
2380#define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
2381unsigned long to_ratio(u64 period, u64 runtime);
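
/*
 * Editor's note: a worked example of the fixed-point encoding above, assuming
 * to_ratio() computes (runtime << BW_SHIFT) / period as in core.c: a deadline
 * reservation of runtime = 10ms out of period = 100ms maps to
 * 0.1 * BW_UNIT = 0.1 * 2^20 ~= 104857, i.e. roughly 10% of one CPU.
 */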
2382
2383extern void init_entity_runnable_average(struct sched_entity *se);
2384extern void post_init_entity_util_avg(struct task_struct *p);
2385
2386#ifdef CONFIG_NO_HZ_FULL
2387extern bool sched_can_stop_tick(struct rq *rq);
2388extern int __init sched_tick_offload_init(void);
2389
2390/*
 * The tick may be needed by tasks in the runqueue depending on their policy
 * and requirements. If the tick is needed, send the target CPU an IPI to kick
 * it out of nohz mode if necessary.
2394 */
2395static inline void sched_update_tick_dependency(struct rq *rq)
2396{
2397 int cpu = cpu_of(rq);
2398
2399 if (!tick_nohz_full_cpu(cpu))
2400 return;
2401
2402 if (sched_can_stop_tick(rq))
2403 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
2404 else
2405 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
2406}
2407#else
2408static inline int sched_tick_offload_init(void) { return 0; }
2409static inline void sched_update_tick_dependency(struct rq *rq) { }
2410#endif
2411
2412static inline void add_nr_running(struct rq *rq, unsigned count)
2413{
2414 unsigned prev_nr = rq->nr_running;
2415
2416 rq->nr_running = prev_nr + count;
2417 if (trace_sched_update_nr_running_tp_enabled()) {
2418 call_trace_sched_update_nr_running(rq, count);
2419 }
2420
2421#ifdef CONFIG_SMP
2422 if (prev_nr < 2 && rq->nr_running >= 2) {
2423 if (!READ_ONCE(rq->rd->overload))
2424 WRITE_ONCE(rq->rd->overload, 1);
2425 }
2426#endif
2427
2428 sched_update_tick_dependency(rq);
2429}
2430
2431static inline void sub_nr_running(struct rq *rq, unsigned count)
2432{
2433 rq->nr_running -= count;
2434 if (trace_sched_update_nr_running_tp_enabled()) {
2435 call_trace_sched_update_nr_running(rq, -count);
2436 }
2437
2438 /* Check if we still need preemption */
2439 sched_update_tick_dependency(rq);
2440}
2441
2442extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
2443extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
2444
2445extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
2446
2447#ifdef CONFIG_PREEMPT_RT
2448#define SCHED_NR_MIGRATE_BREAK 8
2449#else
2450#define SCHED_NR_MIGRATE_BREAK 32
2451#endif
2452
2453extern const_debug unsigned int sysctl_sched_nr_migrate;
2454extern const_debug unsigned int sysctl_sched_migration_cost;
2455
2456#ifdef CONFIG_SCHED_DEBUG
2457extern unsigned int sysctl_sched_latency;
2458extern unsigned int sysctl_sched_min_granularity;
2459extern unsigned int sysctl_sched_idle_min_granularity;
2460extern unsigned int sysctl_sched_wakeup_granularity;
2461extern int sysctl_resched_latency_warn_ms;
2462extern int sysctl_resched_latency_warn_once;
2463
2464extern unsigned int sysctl_sched_tunable_scaling;
2465
2466extern unsigned int sysctl_numa_balancing_scan_delay;
2467extern unsigned int sysctl_numa_balancing_scan_period_min;
2468extern unsigned int sysctl_numa_balancing_scan_period_max;
2469extern unsigned int sysctl_numa_balancing_scan_size;
2470extern unsigned int sysctl_numa_balancing_hot_threshold;
2471#endif
2472
2473#ifdef CONFIG_SCHED_HRTICK
2474
2475/*
2476 * Use hrtick when:
2477 * - enabled by features
2478 * - hrtimer is actually high res
2479 */
2480static inline int hrtick_enabled(struct rq *rq)
2481{
2482 if (!cpu_active(cpu_of(rq)))
2483 return 0;
2484 return hrtimer_is_hres_active(&rq->hrtick_timer);
2485}
2486
2487static inline int hrtick_enabled_fair(struct rq *rq)
2488{
2489 if (!sched_feat(HRTICK))
2490 return 0;
2491 return hrtick_enabled(rq);
2492}
2493
2494static inline int hrtick_enabled_dl(struct rq *rq)
2495{
2496 if (!sched_feat(HRTICK_DL))
2497 return 0;
2498 return hrtick_enabled(rq);
2499}
2500
2501void hrtick_start(struct rq *rq, u64 delay);
2502
2503#else
2504
2505static inline int hrtick_enabled_fair(struct rq *rq)
2506{
2507 return 0;
2508}
2509
2510static inline int hrtick_enabled_dl(struct rq *rq)
2511{
2512 return 0;
2513}
2514
2515static inline int hrtick_enabled(struct rq *rq)
2516{
2517 return 0;
2518}
2519
2520#endif /* CONFIG_SCHED_HRTICK */
2521
2522#ifndef arch_scale_freq_tick
2523static __always_inline
2524void arch_scale_freq_tick(void)
2525{
2526}
2527#endif
2528
2529#ifndef arch_scale_freq_capacity
2530/**
2531 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
2532 * @cpu: the CPU in question.
2533 *
2534 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
2535 *
2536 * f_curr
2537 * ------ * SCHED_CAPACITY_SCALE
2538 * f_max
2539 */
2540static __always_inline
2541unsigned long arch_scale_freq_capacity(int cpu)
2542{
2543 return SCHED_CAPACITY_SCALE;
2544}
2545#endif
2546
2547#ifdef CONFIG_SCHED_DEBUG
2548/*
2549 * In double_lock_balance()/double_rq_lock(), we use raw_spin_rq_lock() to
 * acquire the rq lock instead of rq_lock(). So at the end of these two functions
2551 * we need to call double_rq_clock_clear_update() to clear RQCF_UPDATED of
2552 * rq->clock_update_flags to avoid the WARN_DOUBLE_CLOCK warning.
2553 */
2554static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2)
2555{
2556 rq1->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
2557 /* rq1 == rq2 for !CONFIG_SMP, so just clear RQCF_UPDATED once. */
2558#ifdef CONFIG_SMP
2559 rq2->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
2560#endif
2561}
2562#else
2563static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) {}
2564#endif
2565
2566#ifdef CONFIG_SMP
2567
2568static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
2569{
2570#ifdef CONFIG_SCHED_CORE
2571 /*
 * In order to not have {0,2},{1,3} turn into an AB-BA,
2573 * order by core-id first and cpu-id second.
2574 *
2575 * Notably:
2576 *
2577 * double_rq_lock(0,3); will take core-0, core-1 lock
2578 * double_rq_lock(1,2); will take core-1, core-0 lock
2579 *
2580 * when only cpu-id is considered.
2581 */
2582 if (rq1->core->cpu < rq2->core->cpu)
2583 return true;
2584 if (rq1->core->cpu > rq2->core->cpu)
2585 return false;
2586
2587 /*
2588 * __sched_core_flip() relies on SMT having cpu-id lock order.
2589 */
2590#endif
2591 return rq1->cpu < rq2->cpu;
2592}
2593
2594extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
2595
2596#ifdef CONFIG_PREEMPTION
2597
2598/*
2599 * fair double_lock_balance: Safely acquires both rq->locks in a fair
2600 * way at the expense of forcing extra atomic operations in all
2601 * invocations. This assures that the double_lock is acquired using the
2602 * same underlying policy as the spinlock_t on this architecture, which
2603 * reduces latency compared to the unfair variant below. However, it
2604 * also adds more overhead and therefore may reduce throughput.
2605 */
2606static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2607 __releases(this_rq->lock)
2608 __acquires(busiest->lock)
2609 __acquires(this_rq->lock)
2610{
2611 raw_spin_rq_unlock(this_rq);
2612 double_rq_lock(this_rq, busiest);
2613
2614 return 1;
2615}
2616
2617#else
2618/*
2619 * Unfair double_lock_balance: Optimizes throughput at the expense of
2620 * latency by eliminating extra atomic operations when the locks are
2621 * already in proper order on entry. This favors lower CPU-ids and will
2622 * grant the double lock to lower CPUs over higher ids under contention,
2623 * regardless of entry order into the function.
2624 */
2625static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2626 __releases(this_rq->lock)
2627 __acquires(busiest->lock)
2628 __acquires(this_rq->lock)
2629{
2630 if (__rq_lockp(this_rq) == __rq_lockp(busiest) ||
2631 likely(raw_spin_rq_trylock(busiest))) {
2632 double_rq_clock_clear_update(this_rq, busiest);
2633 return 0;
2634 }
2635
2636 if (rq_order_less(this_rq, busiest)) {
2637 raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);
2638 double_rq_clock_clear_update(this_rq, busiest);
2639 return 0;
2640 }
2641
2642 raw_spin_rq_unlock(this_rq);
2643 double_rq_lock(this_rq, busiest);
2644
2645 return 1;
2646}
2647
2648#endif /* CONFIG_PREEMPTION */
2649
2650/*
2651 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
2652 */
2653static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
2654{
2655 lockdep_assert_irqs_disabled();
2656
2657 return _double_lock_balance(this_rq, busiest);
2658}
2659
2660static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
2661 __releases(busiest->lock)
2662{
2663 if (__rq_lockp(this_rq) != __rq_lockp(busiest))
2664 raw_spin_rq_unlock(busiest);
2665 lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
2666}
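
/*
 * Editor's note: a minimal usage sketch (not built) for the helpers above,
 * loosely mirroring the RT/DL push-pull paths. The caller already holds
 * this_rq's lock; when double_lock_balance() returns 1 the lock was dropped
 * and retaken, so any previously observed task/rq state must be revalidated:
 *
 *	if (double_lock_balance(this_rq, busiest))
 *		...re-check that the candidate task is still pushable...
 *
 *	...migrate tasks with both rq locks held...
 *
 *	double_unlock_balance(this_rq, busiest);
 */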
2667
2668static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
2669{
2670 if (l1 > l2)
2671 swap(l1, l2);
2672
2673 spin_lock(l1);
2674 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2675}
2676
2677static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
2678{
2679 if (l1 > l2)
2680 swap(l1, l2);
2681
2682 spin_lock_irq(l1);
2683 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2684}
2685
2686static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
2687{
2688 if (l1 > l2)
2689 swap(l1, l2);
2690
2691 raw_spin_lock(l1);
2692 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2693}
2694
2695/*
2696 * double_rq_unlock - safely unlock two runqueues
2697 *
 * Note this does not restore interrupts like task_rq_unlock;
 * you need to do so manually after calling.
2700 */
2701static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2702 __releases(rq1->lock)
2703 __releases(rq2->lock)
2704{
2705 if (__rq_lockp(rq1) != __rq_lockp(rq2))
2706 raw_spin_rq_unlock(rq2);
2707 else
2708 __release(rq2->lock);
2709 raw_spin_rq_unlock(rq1);
2710}
2711
2712extern void set_rq_online (struct rq *rq);
2713extern void set_rq_offline(struct rq *rq);
2714extern bool sched_smp_initialized;
2715
2716#else /* CONFIG_SMP */
2717
2718/*
2719 * double_rq_lock - safely lock two runqueues
2720 *
 * Note this does not disable interrupts like task_rq_lock;
 * you need to do so manually before calling.
2723 */
2724static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2725 __acquires(rq1->lock)
2726 __acquires(rq2->lock)
2727{
2728 WARN_ON_ONCE(!irqs_disabled());
2729 WARN_ON_ONCE(rq1 != rq2);
2730 raw_spin_rq_lock(rq1);
2731 __acquire(rq2->lock); /* Fake it out ;) */
2732 double_rq_clock_clear_update(rq1, rq2);
2733}
2734
2735/*
2736 * double_rq_unlock - safely unlock two runqueues
2737 *
 * Note this does not restore interrupts like task_rq_unlock;
 * you need to do so manually after calling.
2740 */
2741static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2742 __releases(rq1->lock)
2743 __releases(rq2->lock)
2744{
2745 WARN_ON_ONCE(rq1 != rq2);
2746 raw_spin_rq_unlock(rq1);
2747 __release(rq2->lock);
2748}
2749
2750#endif
2751
2752extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
2753extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
2754
2755#ifdef CONFIG_SCHED_DEBUG
2756extern bool sched_debug_verbose;
2757
2758extern void print_cfs_stats(struct seq_file *m, int cpu);
2759extern void print_rt_stats(struct seq_file *m, int cpu);
2760extern void print_dl_stats(struct seq_file *m, int cpu);
2761extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
2762extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2763extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
2764
2765extern void resched_latency_warn(int cpu, u64 latency);
2766#ifdef CONFIG_NUMA_BALANCING
2767extern void
2768show_numa_stats(struct task_struct *p, struct seq_file *m);
2769extern void
2770print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
2771 unsigned long tpf, unsigned long gsf, unsigned long gpf);
2772#endif /* CONFIG_NUMA_BALANCING */
2773#else
2774static inline void resched_latency_warn(int cpu, u64 latency) {}
2775#endif /* CONFIG_SCHED_DEBUG */
2776
2777extern void init_cfs_rq(struct cfs_rq *cfs_rq);
2778extern void init_rt_rq(struct rt_rq *rt_rq);
2779extern void init_dl_rq(struct dl_rq *dl_rq);
2780
2781extern void cfs_bandwidth_usage_inc(void);
2782extern void cfs_bandwidth_usage_dec(void);
2783
2784#ifdef CONFIG_NO_HZ_COMMON
2785#define NOHZ_BALANCE_KICK_BIT 0
2786#define NOHZ_STATS_KICK_BIT 1
2787#define NOHZ_NEWILB_KICK_BIT 2
2788#define NOHZ_NEXT_KICK_BIT 3
2789
2790/* Run rebalance_domains() */
2791#define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT)
2792/* Update blocked load */
2793#define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT)
2794/* Update blocked load when entering idle */
2795#define NOHZ_NEWILB_KICK BIT(NOHZ_NEWILB_KICK_BIT)
2796/* Update nohz.next_balance */
2797#define NOHZ_NEXT_KICK BIT(NOHZ_NEXT_KICK_BIT)
2798
2799#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)
2800
2801#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
2802
2803extern void nohz_balance_exit_idle(struct rq *rq);
2804#else
2805static inline void nohz_balance_exit_idle(struct rq *rq) { }
2806#endif
2807
2808#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
2809extern void nohz_run_idle_balance(int cpu);
2810#else
2811static inline void nohz_run_idle_balance(int cpu) { }
2812#endif
2813
2814#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2815struct irqtime {
2816 u64 total;
2817 u64 tick_delta;
2818 u64 irq_start_time;
2819 struct u64_stats_sync sync;
2820};
2821
2822DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
2823
2824/*
2825 * Returns the irqtime minus the softirq time computed by ksoftirqd.
 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
 * subtracted from it and would never move forward.
2828 */
2829static inline u64 irq_time_read(int cpu)
2830{
2831 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
2832 unsigned int seq;
2833 u64 total;
2834
2835 do {
2836 seq = __u64_stats_fetch_begin(&irqtime->sync);
2837 total = irqtime->total;
2838 } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
2839
2840 return total;
2841}
2842#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
2843
2844#ifdef CONFIG_CPU_FREQ
2845DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
2846
2847/**
2848 * cpufreq_update_util - Take a note about CPU utilization changes.
2849 * @rq: Runqueue to carry out the update for.
2850 * @flags: Update reason flags.
2851 *
2852 * This function is called by the scheduler on the CPU whose utilization is
2853 * being updated.
2854 *
2855 * It can only be called from RCU-sched read-side critical sections.
2856 *
2857 * The way cpufreq is currently arranged requires it to evaluate the CPU
2858 * performance state (frequency/voltage) on a regular basis to prevent it from
2859 * being stuck in a completely inadequate performance level for too long.
2860 * That is not guaranteed to happen if the updates are only triggered from CFS
2861 * and DL, though, because they may not be coming in if only RT tasks are
2862 * active all the time (or there are RT tasks only).
2863 *
2864 * As a workaround for that issue, this function is called periodically by the
2865 * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
2866 * but that really is a band-aid. Going forward it should be replaced with
2867 * solutions targeted more specifically at RT tasks.
2868 */
2869static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
2870{
2871 struct update_util_data *data;
2872
2873 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
2874 cpu_of(rq)));
2875 if (data)
2876 data->func(data, rq_clock(rq), flags);
2877}
2878#else
2879static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
2880#endif /* CONFIG_CPU_FREQ */
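
/*
 * Editor's note: an illustrative sketch (not built) of how the scheduler
 * classes drive the hook above; the flag comes from <linux/sched/cpufreq.h>
 * and the real call sites live in fair.c, rt.c and deadline.c:
 *
 *	if (p->in_iowait)
 *		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 *	else
 *		cpufreq_update_util(rq, 0);
 */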
2881
2882#ifdef arch_scale_freq_capacity
2883# ifndef arch_scale_freq_invariant
2884# define arch_scale_freq_invariant() true
2885# endif
2886#else
2887# define arch_scale_freq_invariant() false
2888#endif
2889
2890#ifdef CONFIG_SMP
2891static inline unsigned long capacity_orig_of(int cpu)
2892{
2893 return cpu_rq(cpu)->cpu_capacity_orig;
2894}
2895
2896/*
2897 * Returns inverted capacity if the CPU is in capacity inversion state.
2898 * 0 otherwise.
2899 *
 * Capacity inversion detection only considers thermal impact where actual
 * performance points (OPPs) get dropped.
2902 *
2903 * Capacity inversion state happens when another performance domain that has
2904 * equal or lower capacity_orig_of() becomes effectively larger than the perf
2905 * domain this CPU belongs to due to thermal pressure throttling it hard.
2906 *
2907 * See comment in update_cpu_capacity().
2908 */
2909static inline unsigned long cpu_in_capacity_inversion(int cpu)
2910{
2911 return cpu_rq(cpu)->cpu_capacity_inverted;
2912}
2913
2914/**
2915 * enum cpu_util_type - CPU utilization type
2916 * @FREQUENCY_UTIL: Utilization used to select frequency
2917 * @ENERGY_UTIL: Utilization used during energy calculation
2918 *
2919 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
2920 * need to be aggregated differently depending on the usage made of them. This
2921 * enum is used within effective_cpu_util() to differentiate the types of
2922 * utilization expected by the callers, and adjust the aggregation accordingly.
2923 */
2924enum cpu_util_type {
2925 FREQUENCY_UTIL,
2926 ENERGY_UTIL,
2927};
2928
2929unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
2930 enum cpu_util_type type,
2931 struct task_struct *p);
2932
2933/*
2934 * Verify the fitness of task @p to run on @cpu taking into account the
 * CPU's original capacity and the runtime/deadline ratio of the task.
 *
 * The function will return true if the original capacity of @cpu is
 * greater than or equal to the task's deadline density right shifted by
2939 * (BW_SHIFT - SCHED_CAPACITY_SHIFT) and false otherwise.
2940 */
2941static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
2942{
2943 unsigned long cap = arch_scale_cpu_capacity(cpu);
2944
2945 return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT);
2946}
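
/*
 * Editor's note: a worked example of the check above, assuming dl_density is
 * set up as (dl_runtime << BW_SHIFT) / dl_deadline. With BW_SHIFT = 20 and
 * SCHED_CAPACITY_SHIFT = 10, a task with dl_runtime = 5ms and
 * dl_deadline = 10ms has dl_density ~= 0.5 * 2^20 = 524288, so the test reads
 * cap >= 524288 >> 10 = 512: the task fits any CPU whose original capacity is
 * at least half of SCHED_CAPACITY_SCALE (1024).
 */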
2947
2948static inline unsigned long cpu_bw_dl(struct rq *rq)
2949{
2950 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
2951}
2952
2953static inline unsigned long cpu_util_dl(struct rq *rq)
2954{
2955 return READ_ONCE(rq->avg_dl.util_avg);
2956}
2957
2958/**
2959 * cpu_util_cfs() - Estimates the amount of CPU capacity used by CFS tasks.
2960 * @cpu: the CPU to get the utilization for.
2961 *
2962 * The unit of the return value must be the same as the one of CPU capacity
2963 * so that CPU utilization can be compared with CPU capacity.
2964 *
2965 * CPU utilization is the sum of running time of runnable tasks plus the
2966 * recent utilization of currently non-runnable tasks on that CPU.
2967 * It represents the amount of CPU capacity currently used by CFS tasks in
2968 * the range [0..max CPU capacity] with max CPU capacity being the CPU
2969 * capacity at f_max.
2970 *
2971 * The estimated CPU utilization is defined as the maximum between CPU
2972 * utilization and sum of the estimated utilization of the currently
2973 * runnable tasks on that CPU. It preserves a utilization "snapshot" of
2974 * previously-executed tasks, which helps better deduce how busy a CPU will
2975 * be when a long-sleeping task wakes up. The contribution to CPU utilization
2976 * of such a task would be significantly decayed at this point of time.
2977 *
2978 * CPU utilization can be higher than the current CPU capacity
2979 * (f_curr/f_max * max CPU capacity) or even the max CPU capacity because
2980 * of rounding errors as well as task migrations or wakeups of new tasks.
2981 * CPU utilization has to be capped to fit into the [0..max CPU capacity]
2982 * range. Otherwise a group of CPUs (CPU0 util = 121% + CPU1 util = 80%)
2983 * could be seen as over-utilized even though CPU1 has 20% of spare CPU
2984 * capacity. CPU utilization is allowed to overshoot current CPU capacity
2985 * though since this is useful for predicting the CPU capacity required
2986 * after task migrations (scheduler-driven DVFS).
2987 *
2988 * Return: (Estimated) utilization for the specified CPU.
2989 */
2990static inline unsigned long cpu_util_cfs(int cpu)
2991{
2992 struct cfs_rq *cfs_rq;
2993 unsigned long util;
2994
2995 cfs_rq = &cpu_rq(cpu)->cfs;
2996 util = READ_ONCE(cfs_rq->avg.util_avg);
2997
2998 if (sched_feat(UTIL_EST)) {
2999 util = max_t(unsigned long, util,
3000 READ_ONCE(cfs_rq->avg.util_est.enqueued));
3001 }
3002
3003 return min(util, capacity_orig_of(cpu));
3004}
3005
3006static inline unsigned long cpu_util_rt(struct rq *rq)
3007{
3008 return READ_ONCE(rq->avg_rt.util_avg);
3009}
3010#endif
3011
3012#ifdef CONFIG_UCLAMP_TASK
3013unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
3014
3015static inline unsigned long uclamp_rq_get(struct rq *rq,
3016 enum uclamp_id clamp_id)
3017{
3018 return READ_ONCE(rq->uclamp[clamp_id].value);
3019}
3020
3021static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
3022 unsigned int value)
3023{
3024 WRITE_ONCE(rq->uclamp[clamp_id].value, value);
3025}
3026
3027static inline bool uclamp_rq_is_idle(struct rq *rq)
3028{
3029 return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
3030}
3031
3032/**
3033 * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
3034 * @rq: The rq to clamp against. Must not be NULL.
3035 * @util: The util value to clamp.
3036 * @p: The task to clamp against. Can be NULL if you want to clamp
3037 * against @rq only.
3038 *
3039 * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
3040 *
3041 * If sched_uclamp_used static key is disabled, then just return the util
3042 * without any clamping since uclamp aggregation at the rq level in the fast
3043 * path is disabled, rendering this operation a NOP.
3044 *
3045 * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
3046 * will return the correct effective uclamp value of the task even if the
3047 * static key is disabled.
3048 */
3049static __always_inline
3050unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
3051 struct task_struct *p)
3052{
3053 unsigned long min_util = 0;
3054 unsigned long max_util = 0;
3055
3056 if (!static_branch_likely(&sched_uclamp_used))
3057 return util;
3058
3059 if (p) {
3060 min_util = uclamp_eff_value(p, UCLAMP_MIN);
3061 max_util = uclamp_eff_value(p, UCLAMP_MAX);
3062
3063 /*
3064 * Ignore last runnable task's max clamp, as this task will
3065 * reset it. Similarly, no need to read the rq's min clamp.
3066 */
3067 if (uclamp_rq_is_idle(rq))
3068 goto out;
3069 }
3070
3071 min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN));
3072 max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX));
3073out:
3074 /*
3075 * Since CPU's {min,max}_util clamps are MAX aggregated considering
3076 * RUNNABLE tasks with _different_ clamps, we can end up with an
3077 * inversion. Fix it now when the clamps are applied.
3078 */
3079 if (unlikely(min_util >= max_util))
3080 return min_util;
3081
3082 return clamp(util, min_util, max_util);
3083}
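
/*
 * Editor's note: an illustrative sketch (not built) of the typical caller,
 * loosely mirroring how effective_cpu_util() builds a frequency-selection
 * signal; the variable names are only for illustration:
 *
 *	unsigned long util = cpu_util_cfs(cpu) + cpu_util_rt(rq);
 *
 *	util = uclamp_rq_util_with(rq, util, p);	(p may be NULL)
 *	util = min(util, capacity_orig_of(cpu));
 */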
3084
3085/* Is the rq being capped/throttled by uclamp_max? */
3086static inline bool uclamp_rq_is_capped(struct rq *rq)
3087{
3088 unsigned long rq_util;
3089 unsigned long max_util;
3090
3091 if (!static_branch_likely(&sched_uclamp_used))
3092 return false;
3093
3094 rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);
3095 max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
3096
3097 return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util;
3098}
3099
3100/*
3101 * When uclamp is compiled in, the aggregation at rq level is 'turned off'
3102 * by default in the fast path and only gets turned on once userspace performs
3103 * an operation that requires it.
3104 *
3105 * Returns true if userspace opted-in to use uclamp and aggregation at rq level
3106 * hence is active.
3107 */
3108static inline bool uclamp_is_used(void)
3109{
3110 return static_branch_likely(&sched_uclamp_used);
3111}
3112#else /* CONFIG_UCLAMP_TASK */
3113static inline unsigned long uclamp_eff_value(struct task_struct *p,
3114 enum uclamp_id clamp_id)
3115{
3116 if (clamp_id == UCLAMP_MIN)
3117 return 0;
3118
3119 return SCHED_CAPACITY_SCALE;
3120}
3121
3122static inline
3123unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
3124 struct task_struct *p)
3125{
3126 return util;
3127}
3128
3129static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
3130
3131static inline bool uclamp_is_used(void)
3132{
3133 return false;
3134}
3135
3136static inline unsigned long uclamp_rq_get(struct rq *rq,
3137 enum uclamp_id clamp_id)
3138{
3139 if (clamp_id == UCLAMP_MIN)
3140 return 0;
3141
3142 return SCHED_CAPACITY_SCALE;
3143}
3144
3145static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
3146 unsigned int value)
3147{
3148}
3149
3150static inline bool uclamp_rq_is_idle(struct rq *rq)
3151{
3152 return false;
3153}
3154#endif /* CONFIG_UCLAMP_TASK */
3155
3156#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
3157static inline unsigned long cpu_util_irq(struct rq *rq)
3158{
3159 return rq->avg_irq.util_avg;
3160}
3161
3162static inline
3163unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
3164{
3165 util *= (max - irq);
3166 util /= max;
3167
3168 return util;
3169
3170}
3171#else
3172static inline unsigned long cpu_util_irq(struct rq *rq)
3173{
3174 return 0;
3175}
3176
3177static inline
3178unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
3179{
3180 return util;
3181}
3182#endif
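
/*
 * Editor's note: a worked example of scale_irq_capacity(). With
 * max = SCHED_CAPACITY_SCALE = 1024 and irq = 256, the remaining fraction is
 * (1024 - 256) / 1024 = 0.75, so a utilization of util = 400 scales to
 * 400 * 768 / 1024 = 300; the caller (see effective_cpu_util()) then adds the
 * IRQ utilization itself back on top.
 */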
3183
3184#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
3185
3186#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
3187
3188DECLARE_STATIC_KEY_FALSE(sched_energy_present);
3189
3190static inline bool sched_energy_enabled(void)
3191{
3192 return static_branch_unlikely(&sched_energy_present);
3193}
3194
3195#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
3196
3197#define perf_domain_span(pd) NULL
3198static inline bool sched_energy_enabled(void) { return false; }
3199
3200#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
3201
3202#ifdef CONFIG_MEMBARRIER
3203/*
3204 * The scheduler provides memory barriers required by membarrier between:
3205 * - prior user-space memory accesses and store to rq->membarrier_state,
3206 * - store to rq->membarrier_state and following user-space memory accesses.
3207 * In the same way it provides those guarantees around store to rq->curr.
3208 */
3209static inline void membarrier_switch_mm(struct rq *rq,
3210 struct mm_struct *prev_mm,
3211 struct mm_struct *next_mm)
3212{
3213 int membarrier_state;
3214
3215 if (prev_mm == next_mm)
3216 return;
3217
3218 membarrier_state = atomic_read(&next_mm->membarrier_state);
3219 if (READ_ONCE(rq->membarrier_state) == membarrier_state)
3220 return;
3221
3222 WRITE_ONCE(rq->membarrier_state, membarrier_state);
3223}
3224#else
3225static inline void membarrier_switch_mm(struct rq *rq,
3226 struct mm_struct *prev_mm,
3227 struct mm_struct *next_mm)
3228{
3229}
3230#endif
3231
3232#ifdef CONFIG_SMP
3233static inline bool is_per_cpu_kthread(struct task_struct *p)
3234{
3235 if (!(p->flags & PF_KTHREAD))
3236 return false;
3237
3238 if (p->nr_cpus_allowed != 1)
3239 return false;
3240
3241 return true;
3242}
3243#endif
3244
3245extern void swake_up_all_locked(struct swait_queue_head *q);
3246extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
3247
3248#ifdef CONFIG_PREEMPT_DYNAMIC
3249extern int preempt_dynamic_mode;
3250extern int sched_dynamic_mode(const char *str);
3251extern void sched_dynamic_update(int mode);
3252#endif
3253
3254static inline void update_current_exec_runtime(struct task_struct *curr,
3255 u64 now, u64 delta_exec)
3256{
3257 curr->se.sum_exec_runtime += delta_exec;
3258 account_group_exec_runtime(curr, delta_exec);
3259
3260 curr->se.exec_start = now;
3261 cgroup_account_cputime(curr, delta_exec);
3262}
3263
3264#endif /* _KERNEL_SCHED_SCHED_H */
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Scheduler internal types and methods:
4 */
5#include <linux/sched.h>
6
7#include <linux/sched/autogroup.h>
8#include <linux/sched/clock.h>
9#include <linux/sched/coredump.h>
10#include <linux/sched/cpufreq.h>
11#include <linux/sched/cputime.h>
12#include <linux/sched/deadline.h>
13#include <linux/sched/debug.h>
14#include <linux/sched/hotplug.h>
15#include <linux/sched/idle.h>
16#include <linux/sched/init.h>
17#include <linux/sched/isolation.h>
18#include <linux/sched/jobctl.h>
19#include <linux/sched/loadavg.h>
20#include <linux/sched/mm.h>
21#include <linux/sched/nohz.h>
22#include <linux/sched/numa_balancing.h>
23#include <linux/sched/prio.h>
24#include <linux/sched/rt.h>
25#include <linux/sched/signal.h>
26#include <linux/sched/smt.h>
27#include <linux/sched/stat.h>
28#include <linux/sched/sysctl.h>
29#include <linux/sched/task.h>
30#include <linux/sched/task_stack.h>
31#include <linux/sched/topology.h>
32#include <linux/sched/user.h>
33#include <linux/sched/wake_q.h>
34#include <linux/sched/xacct.h>
35
36#include <uapi/linux/sched/types.h>
37
38#include <linux/binfmts.h>
39#include <linux/blkdev.h>
40#include <linux/compat.h>
41#include <linux/context_tracking.h>
42#include <linux/cpufreq.h>
43#include <linux/cpuidle.h>
44#include <linux/cpuset.h>
45#include <linux/ctype.h>
46#include <linux/debugfs.h>
47#include <linux/delayacct.h>
48#include <linux/energy_model.h>
49#include <linux/init_task.h>
50#include <linux/kprobes.h>
51#include <linux/kthread.h>
52#include <linux/membarrier.h>
53#include <linux/migrate.h>
54#include <linux/mmu_context.h>
55#include <linux/nmi.h>
56#include <linux/proc_fs.h>
57#include <linux/prefetch.h>
58#include <linux/profile.h>
59#include <linux/psi.h>
60#include <linux/rcupdate_wait.h>
61#include <linux/security.h>
62#include <linux/stop_machine.h>
63#include <linux/suspend.h>
64#include <linux/swait.h>
65#include <linux/syscalls.h>
66#include <linux/task_work.h>
67#include <linux/tsacct_kern.h>
68
69#include <asm/tlb.h>
70
71#ifdef CONFIG_PARAVIRT
72# include <asm/paravirt.h>
73#endif
74
75#include "cpupri.h"
76#include "cpudeadline.h"
77
78#ifdef CONFIG_SCHED_DEBUG
79# define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
80#else
81# define SCHED_WARN_ON(x) ({ (void)(x), 0; })
82#endif
83
84struct rq;
85struct cpuidle_state;
86
87/* task_struct::on_rq states: */
88#define TASK_ON_RQ_QUEUED 1
89#define TASK_ON_RQ_MIGRATING 2
90
91extern __read_mostly int scheduler_running;
92
93extern unsigned long calc_load_update;
94extern atomic_long_t calc_load_tasks;
95
96extern void calc_global_load_tick(struct rq *this_rq);
97extern long calc_load_fold_active(struct rq *this_rq, long adjust);
98
99/*
100 * Helpers for converting nanosecond timing to jiffy resolution
101 */
102#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
103
104/*
105 * Increase resolution of nice-level calculations for 64-bit architectures.
106 * The extra resolution improves shares distribution and load balancing of
107 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
108 * hierarchies, especially on larger systems. This is not a user-visible change
109 * and does not change the user-interface for setting shares/weights.
110 *
111 * We increase resolution only if we have enough bits to allow this increased
112 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
113 * are pretty high and the returns do not justify the increased costs.
114 *
115 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
116 * increase coverage and consistency always enable it on 64-bit platforms.
117 */
118#ifdef CONFIG_64BIT
119# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
120# define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
121# define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT)
122#else
123# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
124# define scale_load(w) (w)
125# define scale_load_down(w) (w)
126#endif
127
128/*
129 * Task weight (visible to users) and its load (invisible to users) have
130 * independent resolution, but they should be well calibrated. We use
131 * scale_load() and scale_load_down(w) to convert between them. The
132 * following must be true:
133 *
134 * scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
135 *
136 */
137#define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT)
138
139/*
140 * Single value that decides SCHED_DEADLINE internal math precision.
141 * 10 -> just above 1us
142 * 9 -> just above 0.5us
143 */
144#define DL_SCALE 10
145
146/*
147 * Single value that denotes runtime == period, ie unlimited time.
148 */
149#define RUNTIME_INF ((u64)~0ULL)
150
151static inline int idle_policy(int policy)
152{
153 return policy == SCHED_IDLE;
154}
155static inline int fair_policy(int policy)
156{
157 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
158}
159
160static inline int rt_policy(int policy)
161{
162 return policy == SCHED_FIFO || policy == SCHED_RR;
163}
164
165static inline int dl_policy(int policy)
166{
167 return policy == SCHED_DEADLINE;
168}
169static inline bool valid_policy(int policy)
170{
171 return idle_policy(policy) || fair_policy(policy) ||
172 rt_policy(policy) || dl_policy(policy);
173}
174
175static inline int task_has_idle_policy(struct task_struct *p)
176{
177 return idle_policy(p->policy);
178}
179
180static inline int task_has_rt_policy(struct task_struct *p)
181{
182 return rt_policy(p->policy);
183}
184
185static inline int task_has_dl_policy(struct task_struct *p)
186{
187 return dl_policy(p->policy);
188}
189
190#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
191
192/*
193 * !! For sched_setattr_nocheck() (kernel) only !!
194 *
195 * This is actually gross. :(
196 *
197 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
198 * tasks, but still be able to sleep. We need this on platforms that cannot
199 * atomically change clock frequency. Remove once fast switching will be
200 * available on such platforms.
201 *
202 * SUGOV stands for SchedUtil GOVernor.
203 */
204#define SCHED_FLAG_SUGOV 0x10000000
205
206static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
207{
208#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
209 return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
210#else
211 return false;
212#endif
213}
214
215/*
216 * Tells if entity @a should preempt entity @b.
217 */
218static inline bool
219dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
220{
221 return dl_entity_is_special(a) ||
222 dl_time_before(a->deadline, b->deadline);
223}
224
225/*
226 * This is the priority-queue data structure of the RT scheduling class:
227 */
228struct rt_prio_array {
229 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
230 struct list_head queue[MAX_RT_PRIO];
231};
232
233struct rt_bandwidth {
234 /* nests inside the rq lock: */
235 raw_spinlock_t rt_runtime_lock;
236 ktime_t rt_period;
237 u64 rt_runtime;
238 struct hrtimer rt_period_timer;
239 unsigned int rt_period_active;
240};
241
242void __dl_clear_params(struct task_struct *p);
243
244/*
245 * To keep the bandwidth of -deadline tasks and groups under control
246 * we need some place where:
247 * - store the maximum -deadline bandwidth of the system (the group);
248 * - cache the fraction of that bandwidth that is currently allocated.
249 *
250 * This is all done in the data structure below. It is similar to the
251 * one used for RT-throttling (rt_bandwidth), with the main difference
252 * that, since here we are only interested in admission control, we
253 * do not decrease any runtime while the group "executes", neither we
254 * need a timer to replenish it.
255 *
256 * With respect to SMP, the bandwidth is given on a per-CPU basis,
257 * meaning that:
258 * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
259 * - dl_total_bw array contains, in the i-eth element, the currently
260 * allocated bandwidth on the i-eth CPU.
261 * Moreover, groups consume bandwidth on each CPU, while tasks only
262 * consume bandwidth on the CPU they're running on.
263 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
264 * that will be shown the next time the proc or cgroup controls will
265 * be red. It on its turn can be changed by writing on its own
266 * control.
267 */
268struct dl_bandwidth {
269 raw_spinlock_t dl_runtime_lock;
270 u64 dl_runtime;
271 u64 dl_period;
272};
273
274static inline int dl_bandwidth_enabled(void)
275{
276 return sysctl_sched_rt_runtime >= 0;
277}
278
279struct dl_bw {
280 raw_spinlock_t lock;
281 u64 bw;
282 u64 total_bw;
283};
284
285static inline void __dl_update(struct dl_bw *dl_b, s64 bw);
286
287static inline
288void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
289{
290 dl_b->total_bw -= tsk_bw;
291 __dl_update(dl_b, (s32)tsk_bw / cpus);
292}
293
294static inline
295void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
296{
297 dl_b->total_bw += tsk_bw;
298 __dl_update(dl_b, -((s32)tsk_bw / cpus));
299}
300
301static inline
302bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
303{
304 return dl_b->bw != -1 &&
305 dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
306}
307
308extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
309extern void init_dl_bw(struct dl_bw *dl_b);
310extern int sched_dl_global_validate(void);
311extern void sched_dl_do_global(void);
312extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
313extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
314extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
315extern bool __checkparam_dl(const struct sched_attr *attr);
316extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
317extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
318extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
319extern bool dl_cpu_busy(unsigned int cpu);
320
321#ifdef CONFIG_CGROUP_SCHED
322
323#include <linux/cgroup.h>
324#include <linux/psi.h>
325
326struct cfs_rq;
327struct rt_rq;
328
329extern struct list_head task_groups;
330
331struct cfs_bandwidth {
332#ifdef CONFIG_CFS_BANDWIDTH
333 raw_spinlock_t lock;
334 ktime_t period;
335 u64 quota;
336 u64 runtime;
337 s64 hierarchical_quota;
338
339 u8 idle;
340 u8 period_active;
341 u8 distribute_running;
342 u8 slack_started;
343 struct hrtimer period_timer;
344 struct hrtimer slack_timer;
345 struct list_head throttled_cfs_rq;
346
347 /* Statistics: */
348 int nr_periods;
349 int nr_throttled;
350 u64 throttled_time;
351#endif
352};
353
354/* Task group related information */
355struct task_group {
356 struct cgroup_subsys_state css;
357
358#ifdef CONFIG_FAIR_GROUP_SCHED
359 /* schedulable entities of this group on each CPU */
360 struct sched_entity **se;
361 /* runqueue "owned" by this group on each CPU */
362 struct cfs_rq **cfs_rq;
363 unsigned long shares;
364
365#ifdef CONFIG_SMP
366 /*
367 * load_avg can be heavily contended at clock tick time, so put
368 * it in its own cacheline separated from the fields above which
369 * will also be accessed at each tick.
370 */
371 atomic_long_t load_avg ____cacheline_aligned;
372#endif
373#endif
374
375#ifdef CONFIG_RT_GROUP_SCHED
376 struct sched_rt_entity **rt_se;
377 struct rt_rq **rt_rq;
378
379 struct rt_bandwidth rt_bandwidth;
380#endif
381
382 struct rcu_head rcu;
383 struct list_head list;
384
385 struct task_group *parent;
386 struct list_head siblings;
387 struct list_head children;
388
389#ifdef CONFIG_SCHED_AUTOGROUP
390 struct autogroup *autogroup;
391#endif
392
393 struct cfs_bandwidth cfs_bandwidth;
394
395#ifdef CONFIG_UCLAMP_TASK_GROUP
396 /* The two decimal precision [%] value requested from user-space */
397 unsigned int uclamp_pct[UCLAMP_CNT];
398 /* Clamp values requested for a task group */
399 struct uclamp_se uclamp_req[UCLAMP_CNT];
400 /* Effective clamp values used for a task group */
401 struct uclamp_se uclamp[UCLAMP_CNT];
402#endif
403
404};
405
406#ifdef CONFIG_FAIR_GROUP_SCHED
407#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
408
409/*
410 * A weight of 0 or 1 can cause arithmetics problems.
411 * A weight of a cfs_rq is the sum of weights of which entities
412 * are queued on this cfs_rq, so a weight of a entity should not be
413 * too large, so as the shares value of a task group.
414 * (The default weight is 1024 - so there's no practical
415 * limitation from this.)
416 */
417#define MIN_SHARES (1UL << 1)
418#define MAX_SHARES (1UL << 18)
419#endif
420
421typedef int (*tg_visitor)(struct task_group *, void *);
422
423extern int walk_tg_tree_from(struct task_group *from,
424 tg_visitor down, tg_visitor up, void *data);
425
426/*
427 * Iterate the full tree, calling @down when first entering a node and @up when
428 * leaving it for the final time.
429 *
430 * Caller must hold rcu_lock or sufficient equivalent.
431 */
432static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
433{
434 return walk_tg_tree_from(&root_task_group, down, up, data);
435}
436
437extern int tg_nop(struct task_group *tg, void *data);
438
439extern void free_fair_sched_group(struct task_group *tg);
440extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
441extern void online_fair_sched_group(struct task_group *tg);
442extern void unregister_fair_sched_group(struct task_group *tg);
443extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
444 struct sched_entity *se, int cpu,
445 struct sched_entity *parent);
446extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
447
448extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
449extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
450extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
451
452extern void free_rt_sched_group(struct task_group *tg);
453extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
454extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
455 struct sched_rt_entity *rt_se, int cpu,
456 struct sched_rt_entity *parent);
457extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
458extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
459extern long sched_group_rt_runtime(struct task_group *tg);
460extern long sched_group_rt_period(struct task_group *tg);
461extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
462
463extern struct task_group *sched_create_group(struct task_group *parent);
464extern void sched_online_group(struct task_group *tg,
465 struct task_group *parent);
466extern void sched_destroy_group(struct task_group *tg);
467extern void sched_offline_group(struct task_group *tg);
468
469extern void sched_move_task(struct task_struct *tsk);
470
471#ifdef CONFIG_FAIR_GROUP_SCHED
472extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
473
474#ifdef CONFIG_SMP
475extern void set_task_rq_fair(struct sched_entity *se,
476 struct cfs_rq *prev, struct cfs_rq *next);
477#else /* !CONFIG_SMP */
478static inline void set_task_rq_fair(struct sched_entity *se,
479 struct cfs_rq *prev, struct cfs_rq *next) { }
480#endif /* CONFIG_SMP */
481#endif /* CONFIG_FAIR_GROUP_SCHED */
482
483#else /* CONFIG_CGROUP_SCHED */
484
485struct cfs_bandwidth { };
486
487#endif /* CONFIG_CGROUP_SCHED */
488
489/* CFS-related fields in a runqueue */
490struct cfs_rq {
491 struct load_weight load;
492 unsigned long runnable_weight;
493 unsigned int nr_running;
494 unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */
495 unsigned int idle_h_nr_running; /* SCHED_IDLE */
496
497 u64 exec_clock;
498 u64 min_vruntime;
499#ifndef CONFIG_64BIT
500 u64 min_vruntime_copy;
501#endif
502
503 struct rb_root_cached tasks_timeline;
504
505 /*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when no entity is currently
	 * running).
508 */
509 struct sched_entity *curr;
510 struct sched_entity *next;
511 struct sched_entity *last;
512 struct sched_entity *skip;
513
514#ifdef CONFIG_SCHED_DEBUG
515 unsigned int nr_spread_over;
516#endif
517
518#ifdef CONFIG_SMP
519 /*
520 * CFS load tracking
521 */
522 struct sched_avg avg;
523#ifndef CONFIG_64BIT
524 u64 load_last_update_time_copy;
525#endif
526 struct {
527 raw_spinlock_t lock ____cacheline_aligned;
528 int nr;
529 unsigned long load_avg;
530 unsigned long util_avg;
531 unsigned long runnable_sum;
532 } removed;
533
534#ifdef CONFIG_FAIR_GROUP_SCHED
535 unsigned long tg_load_avg_contrib;
536 long propagate;
537 long prop_runnable_sum;
538
539 /*
540 * h_load = weight * f(tg)
541 *
542 * Where f(tg) is the recursive weight fraction assigned to
543 * this group.
544 */
545 unsigned long h_load;
546 u64 last_h_load_update;
547 struct sched_entity *h_load_next;
548#endif /* CONFIG_FAIR_GROUP_SCHED */
549#endif /* CONFIG_SMP */
550
551#ifdef CONFIG_FAIR_GROUP_SCHED
552 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */
553
554 /*
	 * Leaf cfs_rqs are those that hold tasks (the lowest schedulable
	 * entity in a hierarchy). Non-leaf cfs_rqs hold other, higher-level
	 * schedulable entities (like users, containers etc.).
558 *
559 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
560 * This list is used during load balance.
561 */
562 int on_list;
563 struct list_head leaf_cfs_rq_list;
564 struct task_group *tg; /* group that "owns" this runqueue */
565
566#ifdef CONFIG_CFS_BANDWIDTH
567 int runtime_enabled;
568 s64 runtime_remaining;
569
570 u64 throttled_clock;
571 u64 throttled_clock_task;
572 u64 throttled_clock_task_time;
573 int throttled;
574 int throttle_count;
575 struct list_head throttled_list;
576#endif /* CONFIG_CFS_BANDWIDTH */
577#endif /* CONFIG_FAIR_GROUP_SCHED */
578};
579
580static inline int rt_bandwidth_enabled(void)
581{
582 return sysctl_sched_rt_runtime >= 0;
583}
584
585/* RT IPI pull logic requires IRQ_WORK */
586#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
587# define HAVE_RT_PUSH_IPI
588#endif
589
590/* Real-Time classes' related field in a runqueue: */
591struct rt_rq {
592 struct rt_prio_array active;
593 unsigned int rt_nr_running;
594 unsigned int rr_nr_running;
595#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
596 struct {
597 int curr; /* highest queued rt task prio */
598#ifdef CONFIG_SMP
599 int next; /* next highest */
600#endif
601 } highest_prio;
602#endif
603#ifdef CONFIG_SMP
604 unsigned long rt_nr_migratory;
605 unsigned long rt_nr_total;
606 int overloaded;
607 struct plist_head pushable_tasks;
608
609#endif /* CONFIG_SMP */
610 int rt_queued;
611
612 int rt_throttled;
613 u64 rt_time;
614 u64 rt_runtime;
615 /* Nests inside the rq lock: */
616 raw_spinlock_t rt_runtime_lock;
617
618#ifdef CONFIG_RT_GROUP_SCHED
619 unsigned long rt_nr_boosted;
620
621 struct rq *rq;
622 struct task_group *tg;
623#endif
624};
625
626static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
627{
628 return rt_rq->rt_queued && rt_rq->rt_nr_running;
629}
630
631/* Deadline class' related fields in a runqueue */
632struct dl_rq {
633 /* runqueue is an rbtree, ordered by deadline */
634 struct rb_root_cached root;
635
636 unsigned long dl_nr_running;
637
638#ifdef CONFIG_SMP
639 /*
640 * Deadline values of the currently executing and the
641 * earliest ready task on this rq. Caching these facilitates
642 * the decision whether or not a ready but not running task
643 * should migrate somewhere else.
644 */
645 struct {
646 u64 curr;
647 u64 next;
648 } earliest_dl;
649
650 unsigned long dl_nr_migratory;
651 int overloaded;
652
653 /*
654 * Tasks on this rq that can be pushed away. They are kept in
655 * an rb-tree, ordered by tasks' deadlines, with caching
656 * of the leftmost (earliest deadline) element.
657 */
658 struct rb_root_cached pushable_dl_tasks_root;
659#else
660 struct dl_bw dl_bw;
661#endif
662 /*
663 * "Active utilization" for this runqueue: increased when a
664 * task wakes up (becomes TASK_RUNNING) and decreased when a
665 * task blocks
666 */
667 u64 running_bw;
668
669 /*
670 * Utilization of the tasks "assigned" to this runqueue (including
671 * the tasks that are in runqueue and the tasks that executed on this
672 * CPU and blocked). Increased when a task moves to this runqueue, and
673 * decreased when the task moves away (migrates, changes scheduling
674 * policy, or terminates).
675 * This is needed to compute the "inactive utilization" for the
676 * runqueue (inactive utilization = this_bw - running_bw).
677 */
678 u64 this_bw;
679 u64 extra_bw;
680
681 /*
682 * Inverse of the fraction of CPU utilization that can be reclaimed
683 * by the GRUB algorithm.
684 */
685 u64 bw_ratio;
686};
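
/*
 * Worked example (illustrative): a deadline task with a 25% bandwidth that
 * is currently blocked on this rq contributes to this_bw but not to
 * running_bw, so the "inactive utilization" this_bw - running_bw accounts
 * for the capacity it may reclaim once it wakes up again.
 */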
687
688#ifdef CONFIG_FAIR_GROUP_SCHED
689/* An entity is a task if it doesn't "own" a runqueue */
690#define entity_is_task(se) (!se->my_q)
691#else
692#define entity_is_task(se) 1
693#endif
694
695#ifdef CONFIG_SMP
696/*
697 * XXX we want to get rid of these helpers and use the full load resolution.
698 */
699static inline long se_weight(struct sched_entity *se)
700{
701 return scale_load_down(se->load.weight);
702}
703
704static inline long se_runnable(struct sched_entity *se)
705{
706 return scale_load_down(se->runnable_weight);
707}
708
709static inline bool sched_asym_prefer(int a, int b)
710{
711 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
712}
713
714struct perf_domain {
715 struct em_perf_domain *em_pd;
716 struct perf_domain *next;
717 struct rcu_head rcu;
718};
719
720/* Scheduling group status flags */
721#define SG_OVERLOAD 0x1 /* More than one runnable task on a CPU. */
722#define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */
723
724/*
725 * We add the notion of a root-domain which will be used to define per-domain
726 * variables. Each exclusive cpuset essentially defines an island domain by
727 * fully partitioning the member CPUs from any other cpuset. Whenever a new
728 * exclusive cpuset is created, we also create and attach a new root-domain
729 * object.
 */
732struct root_domain {
733 atomic_t refcount;
734 atomic_t rto_count;
735 struct rcu_head rcu;
736 cpumask_var_t span;
737 cpumask_var_t online;
738
739 /*
	 * Indicate pullable load on at least one CPU, e.g.:
741 * - More than one runnable task
742 * - Running task is misfit
743 */
744 int overload;
745
746 /* Indicate one or more cpus over-utilized (tipping point) */
747 int overutilized;
748
749 /*
750 * The bit corresponding to a CPU gets set here if such CPU has more
751 * than one runnable -deadline task (as it is below for RT tasks).
752 */
753 cpumask_var_t dlo_mask;
754 atomic_t dlo_count;
755 struct dl_bw dl_bw;
756 struct cpudl cpudl;
757
758#ifdef HAVE_RT_PUSH_IPI
759 /*
760 * For IPI pull requests, loop across the rto_mask.
761 */
762 struct irq_work rto_push_work;
763 raw_spinlock_t rto_lock;
764 /* These are only updated and read within rto_lock */
765 int rto_loop;
766 int rto_cpu;
767 /* These atomics are updated outside of a lock */
768 atomic_t rto_loop_next;
769 atomic_t rto_loop_start;
770#endif
771 /*
772 * The "RT overload" flag: it gets set if a CPU has more than
773 * one runnable RT task.
774 */
775 cpumask_var_t rto_mask;
776 struct cpupri cpupri;
777
778 unsigned long max_cpu_capacity;
779
780 /*
781 * NULL-terminated list of performance domains intersecting with the
782 * CPUs of the rd. Protected by RCU.
783 */
784 struct perf_domain __rcu *pd;
785};
786
787extern void init_defrootdomain(void);
788extern int sched_init_domains(const struct cpumask *cpu_map);
789extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
790extern void sched_get_rd(struct root_domain *rd);
791extern void sched_put_rd(struct root_domain *rd);
792
793#ifdef HAVE_RT_PUSH_IPI
794extern void rto_push_irq_work_func(struct irq_work *work);
795#endif
796#endif /* CONFIG_SMP */
797
798#ifdef CONFIG_UCLAMP_TASK
799/*
800 * struct uclamp_bucket - Utilization clamp bucket
801 * @value: utilization clamp value for tasks on this clamp bucket
802 * @tasks: number of RUNNABLE tasks on this clamp bucket
803 *
804 * Keep track of how many tasks are RUNNABLE for a given utilization
805 * clamp value.
806 */
807struct uclamp_bucket {
808 unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
809 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
810};
811
812/*
813 * struct uclamp_rq - rq's utilization clamp
814 * @value: currently active clamp values for a rq
815 * @bucket: utilization clamp buckets affecting a rq
816 *
817 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
818 * A clamp value is affecting a rq when there is at least one task RUNNABLE
819 * (or actually running) with that value.
820 *
821 * There are up to UCLAMP_CNT possible different clamp values, currently there
822 * are only two: minimum utilization and maximum utilization.
823 *
824 * All utilization clamping values are MAX aggregated, since:
825 * - for util_min: we want to run the CPU at least at the max of the minimum
826 * utilization required by its currently RUNNABLE tasks.
827 * - for util_max: we want to allow the CPU to run up to the max of the
828 * maximum utilization allowed by its currently RUNNABLE tasks.
829 *
830 * Since on each system we expect only a limited number of different
831 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track
832 * the metrics required to compute all the per-rq utilization clamp values.
833 */
834struct uclamp_rq {
835 unsigned int value;
836 struct uclamp_bucket bucket[UCLAMP_BUCKETS];
837};
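
/*
 * Worked example for the rq clamps above (illustrative): with two RUNNABLE
 * tasks on a rq, one with util_min = 200 and one with util_min = 512, the
 * rq's UCLAMP_MIN value is max(200, 512) = 512 for as long as either task is
 * enqueued. The buckets only count how many RUNNABLE tasks contribute each
 * clamp value, so the aggregate can be recomputed cheaply on enqueue/dequeue.
 */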
838#endif /* CONFIG_UCLAMP_TASK */
839
840/*
841 * This is the main, per-CPU runqueue data structure.
842 *
 * Locking rule: code that wants to lock multiple runqueues (such as
 * the load balancing or thread migration code) must acquire the locks
 * in ascending &runqueue order.
846 */
847struct rq {
848 /* runqueue lock: */
849 raw_spinlock_t lock;
850
851 /*
852 * nr_running and cpu_load should be in the same cacheline because
853 * remote CPUs use both these fields when doing load calculation.
854 */
855 unsigned int nr_running;
856#ifdef CONFIG_NUMA_BALANCING
857 unsigned int nr_numa_running;
858 unsigned int nr_preferred_running;
859 unsigned int numa_migrate_on;
860#endif
861#ifdef CONFIG_NO_HZ_COMMON
862#ifdef CONFIG_SMP
863 unsigned long last_load_update_tick;
864 unsigned long last_blocked_load_update_tick;
865 unsigned int has_blocked_load;
866#endif /* CONFIG_SMP */
867 unsigned int nohz_tick_stopped;
868 atomic_t nohz_flags;
869#endif /* CONFIG_NO_HZ_COMMON */
870
871 unsigned long nr_load_updates;
872 u64 nr_switches;
873
874#ifdef CONFIG_UCLAMP_TASK
875 /* Utilization clamp values based on CPU's RUNNABLE tasks */
876 struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned;
877 unsigned int uclamp_flags;
878#define UCLAMP_FLAG_IDLE 0x01
879#endif
880
881 struct cfs_rq cfs;
882 struct rt_rq rt;
883 struct dl_rq dl;
884
885#ifdef CONFIG_FAIR_GROUP_SCHED
886 /* list of leaf cfs_rq on this CPU: */
887 struct list_head leaf_cfs_rq_list;
888 struct list_head *tmp_alone_branch;
889#endif /* CONFIG_FAIR_GROUP_SCHED */
890
891 /*
892 * This is part of a global counter where only the total sum
893 * over all CPUs matters. A task can increase this counter on
894 * one CPU and if it got migrated afterwards it may decrease
895 * it on another CPU. Always updated under the runqueue lock:
896 */
897 unsigned long nr_uninterruptible;
898
899 struct task_struct *curr;
900 struct task_struct *idle;
901 struct task_struct *stop;
902 unsigned long next_balance;
903 struct mm_struct *prev_mm;
904
905 unsigned int clock_update_flags;
906 u64 clock;
907 /* Ensure that all clocks are in the same cache line */
908 u64 clock_task ____cacheline_aligned;
909 u64 clock_pelt;
910 unsigned long lost_idle_time;
911
912 atomic_t nr_iowait;
913
914#ifdef CONFIG_MEMBARRIER
915 int membarrier_state;
916#endif
917
918#ifdef CONFIG_SMP
919 struct root_domain *rd;
920 struct sched_domain __rcu *sd;
921
922 unsigned long cpu_capacity;
923 unsigned long cpu_capacity_orig;
924
925 struct callback_head *balance_callback;
926
927 unsigned char idle_balance;
928
929 unsigned long misfit_task_load;
930
931 /* For active balancing */
932 int active_balance;
933 int push_cpu;
934 struct cpu_stop_work active_balance_work;
935
936 /* CPU of this runqueue: */
937 int cpu;
938 int online;
939
940 struct list_head cfs_tasks;
941
942 struct sched_avg avg_rt;
943 struct sched_avg avg_dl;
944#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
945 struct sched_avg avg_irq;
946#endif
947 u64 idle_stamp;
948 u64 avg_idle;
949
950 /* This is used to determine avg_idle's max value */
951 u64 max_idle_balance_cost;
952#endif
953
954#ifdef CONFIG_IRQ_TIME_ACCOUNTING
955 u64 prev_irq_time;
956#endif
957#ifdef CONFIG_PARAVIRT
958 u64 prev_steal_time;
959#endif
960#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
961 u64 prev_steal_time_rq;
962#endif
963
964 /* calc_load related fields */
965 unsigned long calc_load_update;
966 long calc_load_active;
967
968#ifdef CONFIG_SCHED_HRTICK
969#ifdef CONFIG_SMP
970 int hrtick_csd_pending;
971 call_single_data_t hrtick_csd;
972#endif
973 struct hrtimer hrtick_timer;
974#endif
975
976#ifdef CONFIG_SCHEDSTATS
977 /* latency stats */
978 struct sched_info rq_sched_info;
979 unsigned long long rq_cpu_time;
980 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
981
982 /* sys_sched_yield() stats */
983 unsigned int yld_count;
984
985 /* schedule() stats */
986 unsigned int sched_count;
987 unsigned int sched_goidle;
988
989 /* try_to_wake_up() stats */
990 unsigned int ttwu_count;
991 unsigned int ttwu_local;
992#endif
993
994#ifdef CONFIG_SMP
995 struct llist_head wake_list;
996#endif
997
998#ifdef CONFIG_CPU_IDLE
999 /* Must be inspected within a rcu lock section */
1000 struct cpuidle_state *idle_state;
1001#endif
1002};
1003
1004#ifdef CONFIG_FAIR_GROUP_SCHED
1005
1006/* CPU runqueue to which this cfs_rq is attached */
1007static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1008{
1009 return cfs_rq->rq;
1010}
1011
1012#else
1013
1014static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1015{
1016 return container_of(cfs_rq, struct rq, cfs);
1017}
1018#endif
1019
1020static inline int cpu_of(struct rq *rq)
1021{
1022#ifdef CONFIG_SMP
1023 return rq->cpu;
1024#else
1025 return 0;
1026#endif
1027}
1028
1029
1030#ifdef CONFIG_SCHED_SMT
1031extern void __update_idle_core(struct rq *rq);
1032
1033static inline void update_idle_core(struct rq *rq)
1034{
1035 if (static_branch_unlikely(&sched_smt_present))
1036 __update_idle_core(rq);
1037}
1038
1039#else
1040static inline void update_idle_core(struct rq *rq) { }
1041#endif
1042
1043DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1044
1045#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
1046#define this_rq() this_cpu_ptr(&runqueues)
1047#define task_rq(p) cpu_rq(task_cpu(p))
1048#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1049#define raw_rq() raw_cpu_ptr(&runqueues)
1050
1051extern void update_rq_clock(struct rq *rq);
1052
1053static inline u64 __rq_clock_broken(struct rq *rq)
1054{
1055 return READ_ONCE(rq->clock);
1056}
1057
1058/*
1059 * rq::clock_update_flags bits
1060 *
1061 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
1062 * call to __schedule(). This is an optimisation to avoid
1063 * neighbouring rq clock updates.
1064 *
1065 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
1066 * in effect and calls to update_rq_clock() are being ignored.
1067 *
1068 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
1069 * made to update_rq_clock() since the last time rq::lock was pinned.
1070 *
1071 * If inside of __schedule(), clock_update_flags will have been
1072 * shifted left (a left shift is a cheap operation for the fast path
1073 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
1074 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
1078 * one position though, because the next rq_unpin_lock() will shift it
1079 * back.
1080 */
1081#define RQCF_REQ_SKIP 0x01
1082#define RQCF_ACT_SKIP 0x02
1083#define RQCF_UPDATED 0x04
1084
1085static inline void assert_clock_updated(struct rq *rq)
1086{
1087 /*
1088 * The only reason for not seeing a clock update since the
1089 * last rq_pin_lock() is if we're currently skipping updates.
1090 */
1091 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
1092}
1093
1094static inline u64 rq_clock(struct rq *rq)
1095{
1096 lockdep_assert_held(&rq->lock);
1097 assert_clock_updated(rq);
1098
1099 return rq->clock;
1100}
1101
1102static inline u64 rq_clock_task(struct rq *rq)
1103{
1104 lockdep_assert_held(&rq->lock);
1105 assert_clock_updated(rq);
1106
1107 return rq->clock_task;
1108}
1109
1110static inline void rq_clock_skip_update(struct rq *rq)
1111{
1112 lockdep_assert_held(&rq->lock);
1113 rq->clock_update_flags |= RQCF_REQ_SKIP;
1114}
1115
1116/*
1117 * See rt task throttling, which is the only time a skip
1118 * request is cancelled.
1119 */
1120static inline void rq_clock_cancel_skipupdate(struct rq *rq)
1121{
1122 lockdep_assert_held(&rq->lock);
1123 rq->clock_update_flags &= ~RQCF_REQ_SKIP;
1124}
1125
1126struct rq_flags {
1127 unsigned long flags;
1128 struct pin_cookie cookie;
1129#ifdef CONFIG_SCHED_DEBUG
1130 /*
1131 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
1132 * current pin context is stashed here in case it needs to be
1133 * restored in rq_repin_lock().
1134 */
1135 unsigned int clock_update_flags;
1136#endif
1137};
1138
1139static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
1140{
1141 rf->cookie = lockdep_pin_lock(&rq->lock);
1142
1143#ifdef CONFIG_SCHED_DEBUG
1144 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
1145 rf->clock_update_flags = 0;
1146#endif
1147}
1148
1149static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
1150{
1151#ifdef CONFIG_SCHED_DEBUG
1152 if (rq->clock_update_flags > RQCF_ACT_SKIP)
1153 rf->clock_update_flags = RQCF_UPDATED;
1154#endif
1155
1156 lockdep_unpin_lock(&rq->lock, rf->cookie);
1157}
1158
1159static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
1160{
1161 lockdep_repin_lock(&rq->lock, rf->cookie);
1162
1163#ifdef CONFIG_SCHED_DEBUG
1164 /*
1165 * Restore the value we stashed in @rf for this pin context.
1166 */
1167 rq->clock_update_flags |= rf->clock_update_flags;
1168#endif
1169}
1170
1171struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1172 __acquires(rq->lock);
1173
1174struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1175 __acquires(p->pi_lock)
1176 __acquires(rq->lock);
1177
1178static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1179 __releases(rq->lock)
1180{
1181 rq_unpin_lock(rq, rf);
1182 raw_spin_unlock(&rq->lock);
1183}
1184
1185static inline void
1186task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1187 __releases(rq->lock)
1188 __releases(p->pi_lock)
1189{
1190 rq_unpin_lock(rq, rf);
1191 raw_spin_unlock(&rq->lock);
1192 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
1193}
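
/*
 * Typical usage sketch (illustrative):
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	... p->pi_lock and rq->lock are held here, p cannot migrate ...
 *	task_rq_unlock(rq, p, &rf);
 *
 * The rq_lock*()/rq_unlock*() helpers below follow the same pattern for a
 * runqueue that is already known, without taking p->pi_lock.
 */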
1194
1195static inline void
1196rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
1197 __acquires(rq->lock)
1198{
1199 raw_spin_lock_irqsave(&rq->lock, rf->flags);
1200 rq_pin_lock(rq, rf);
1201}
1202
1203static inline void
1204rq_lock_irq(struct rq *rq, struct rq_flags *rf)
1205 __acquires(rq->lock)
1206{
1207 raw_spin_lock_irq(&rq->lock);
1208 rq_pin_lock(rq, rf);
1209}
1210
1211static inline void
1212rq_lock(struct rq *rq, struct rq_flags *rf)
1213 __acquires(rq->lock)
1214{
1215 raw_spin_lock(&rq->lock);
1216 rq_pin_lock(rq, rf);
1217}
1218
1219static inline void
1220rq_relock(struct rq *rq, struct rq_flags *rf)
1221 __acquires(rq->lock)
1222{
1223 raw_spin_lock(&rq->lock);
1224 rq_repin_lock(rq, rf);
1225}
1226
1227static inline void
1228rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
1229 __releases(rq->lock)
1230{
1231 rq_unpin_lock(rq, rf);
1232 raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
1233}
1234
1235static inline void
1236rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
1237 __releases(rq->lock)
1238{
1239 rq_unpin_lock(rq, rf);
1240 raw_spin_unlock_irq(&rq->lock);
1241}
1242
1243static inline void
1244rq_unlock(struct rq *rq, struct rq_flags *rf)
1245 __releases(rq->lock)
1246{
1247 rq_unpin_lock(rq, rf);
1248 raw_spin_unlock(&rq->lock);
1249}
1250
1251static inline struct rq *
1252this_rq_lock_irq(struct rq_flags *rf)
1253 __acquires(rq->lock)
1254{
1255 struct rq *rq;
1256
1257 local_irq_disable();
1258 rq = this_rq();
1259 rq_lock(rq, rf);
1260 return rq;
1261}
1262
1263#ifdef CONFIG_NUMA
1264enum numa_topology_type {
1265 NUMA_DIRECT,
1266 NUMA_GLUELESS_MESH,
1267 NUMA_BACKPLANE,
1268};
1269extern enum numa_topology_type sched_numa_topology_type;
1270extern int sched_max_numa_distance;
1271extern bool find_numa_distance(int distance);
1272extern void sched_init_numa(void);
1273extern void sched_domains_numa_masks_set(unsigned int cpu);
1274extern void sched_domains_numa_masks_clear(unsigned int cpu);
1275extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
1276#else
1277static inline void sched_init_numa(void) { }
1278static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
1279static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
1280static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
1281{
1282 return nr_cpu_ids;
1283}
1284#endif
1285
1286#ifdef CONFIG_NUMA_BALANCING
1287/* The regions in numa_faults array from task_struct */
1288enum numa_faults_stats {
1289 NUMA_MEM = 0,
1290 NUMA_CPU,
1291 NUMA_MEMBUF,
1292 NUMA_CPUBUF
1293};
1294extern void sched_setnuma(struct task_struct *p, int node);
1295extern int migrate_task_to(struct task_struct *p, int cpu);
1296extern int migrate_swap(struct task_struct *p, struct task_struct *t,
1297 int cpu, int scpu);
1298extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
1299#else
1300static inline void
1301init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
1302{
1303}
1304#endif /* CONFIG_NUMA_BALANCING */
1305
1306#ifdef CONFIG_SMP
1307
1308static inline void
1309queue_balance_callback(struct rq *rq,
1310 struct callback_head *head,
1311 void (*func)(struct rq *rq))
1312{
1313 lockdep_assert_held(&rq->lock);
1314
1315 if (unlikely(head->next))
1316 return;
1317
1318 head->func = (void (*)(struct callback_head *))func;
1319 head->next = rq->balance_callback;
1320 rq->balance_callback = head;
1321}
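
/*
 * Illustrative sketch: a scheduling class queues a callback while holding
 * rq->lock, and the callback is invoked later, once the lock has been
 * released, e.g. (with placeholder names 'my_push_head'/'my_push_fn'):
 *
 *	static DEFINE_PER_CPU(struct callback_head, my_push_head);
 *
 *	queue_balance_callback(rq, &per_cpu(my_push_head, rq->cpu), my_push_fn);
 *
 * The RT and deadline classes use this mechanism for their push/pull
 * operations.
 */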
1322
1323extern void sched_ttwu_pending(void);
1324
1325#define rcu_dereference_check_sched_domain(p) \
1326 rcu_dereference_check((p), \
1327 lockdep_is_held(&sched_domains_mutex))
1328
1329/*
1330 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1331 * See destroy_sched_domains: call_rcu for details.
1332 *
1333 * The domain tree of any CPU may only be accessed from within
1334 * preempt-disabled sections.
1335 */
1336#define for_each_domain(cpu, __sd) \
1337 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
1338 __sd; __sd = __sd->parent)
1339
1340#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
1341
1342/**
1343 * highest_flag_domain - Return highest sched_domain containing flag.
1344 * @cpu: The CPU whose highest level of sched domain is to
1345 * be returned.
1346 * @flag: The flag to check for the highest sched_domain
1347 * for the given CPU.
1348 *
1349 * Returns the highest sched_domain of a CPU which contains the given flag.
1350 */
1351static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
1352{
1353 struct sched_domain *sd, *hsd = NULL;
1354
1355 for_each_domain(cpu, sd) {
1356 if (!(sd->flags & flag))
1357 break;
1358 hsd = sd;
1359 }
1360
1361 return hsd;
1362}
1363
1364static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
1365{
1366 struct sched_domain *sd;
1367
1368 for_each_domain(cpu, sd) {
1369 if (sd->flags & flag)
1370 break;
1371 }
1372
1373 return sd;
1374}
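
/*
 * Illustrative sketch of how these helpers are called, e.g. to pick the
 * widest domain of a CPU whose members still share a last-level cache, or
 * the narrowest one with a given flag set:
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *	sd = lowest_flag_domain(cpu, SD_ASYM_PACKING);
 *
 * Both helpers rely on for_each_domain() and therefore must be called from
 * a preempt-disabled (RCU read-side) section.
 */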
1375
1376DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
1377DECLARE_PER_CPU(int, sd_llc_size);
1378DECLARE_PER_CPU(int, sd_llc_id);
1379DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
1380DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
1381DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
1382DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
1383extern struct static_key_false sched_asym_cpucapacity;
1384
1385struct sched_group_capacity {
1386 atomic_t ref;
1387 /*
1388 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
1389 * for a single CPU.
1390 */
1391 unsigned long capacity;
1392 unsigned long min_capacity; /* Min per-CPU capacity in group */
1393 unsigned long max_capacity; /* Max per-CPU capacity in group */
1394 unsigned long next_update;
1395 int imbalance; /* XXX unrelated to capacity but shared group state */
1396
1397#ifdef CONFIG_SCHED_DEBUG
1398 int id;
1399#endif
1400
1401 unsigned long cpumask[0]; /* Balance mask */
1402};
1403
1404struct sched_group {
1405 struct sched_group *next; /* Must be a circular list */
1406 atomic_t ref;
1407
1408 unsigned int group_weight;
1409 struct sched_group_capacity *sgc;
1410 int asym_prefer_cpu; /* CPU of highest priority in group */
1411
1412 /*
1413 * The CPUs this group covers.
1414 *
1415 * NOTE: this field is variable length. (Allocated dynamically
1416 * by attaching extra space to the end of the structure,
1417 * depending on how many CPUs the kernel has booted up with)
1418 */
1419 unsigned long cpumask[0];
1420};
1421
1422static inline struct cpumask *sched_group_span(struct sched_group *sg)
1423{
1424 return to_cpumask(sg->cpumask);
1425}
1426
1427/*
1428 * See build_balance_mask().
1429 */
1430static inline struct cpumask *group_balance_mask(struct sched_group *sg)
1431{
1432 return to_cpumask(sg->sgc->cpumask);
1433}
1434
1435/**
1436 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
1437 * @group: The group whose first CPU is to be returned.
1438 */
1439static inline unsigned int group_first_cpu(struct sched_group *group)
1440{
1441 return cpumask_first(sched_group_span(group));
1442}
1443
1444extern int group_balance_cpu(struct sched_group *sg);
1445
1446#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
1447void register_sched_domain_sysctl(void);
1448void dirty_sched_domain_sysctl(int cpu);
1449void unregister_sched_domain_sysctl(void);
1450#else
1451static inline void register_sched_domain_sysctl(void)
1452{
1453}
1454static inline void dirty_sched_domain_sysctl(int cpu)
1455{
1456}
1457static inline void unregister_sched_domain_sysctl(void)
1458{
1459}
1460#endif
1461
1462extern int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
1463
1464#else
1465
1466static inline void sched_ttwu_pending(void) { }
1467
1468static inline int newidle_balance(struct rq *this_rq, struct rq_flags *rf) { return 0; }
1469
1470#endif /* CONFIG_SMP */
1471
1472#include "stats.h"
1473#include "autogroup.h"
1474
1475#ifdef CONFIG_CGROUP_SCHED
1476
1477/*
 * Return the group to which this task belongs.
1479 *
1480 * We cannot use task_css() and friends because the cgroup subsystem
1481 * changes that value before the cgroup_subsys::attach() method is called,
1482 * therefore we cannot pin it and might observe the wrong value.
1483 *
1484 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
1485 * core changes this before calling sched_move_task().
1486 *
1487 * Instead we use a 'copy' which is updated from sched_move_task() while
1488 * holding both task_struct::pi_lock and rq::lock.
1489 */
1490static inline struct task_group *task_group(struct task_struct *p)
1491{
1492 return p->sched_task_group;
1493}
1494
1495/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
1496static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
1497{
1498#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
1499 struct task_group *tg = task_group(p);
1500#endif
1501
1502#ifdef CONFIG_FAIR_GROUP_SCHED
1503 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
1504 p->se.cfs_rq = tg->cfs_rq[cpu];
1505 p->se.parent = tg->se[cpu];
1506#endif
1507
1508#ifdef CONFIG_RT_GROUP_SCHED
1509 p->rt.rt_rq = tg->rt_rq[cpu];
1510 p->rt.parent = tg->rt_se[cpu];
1511#endif
1512}
1513
1514#else /* CONFIG_CGROUP_SCHED */
1515
1516static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
1517static inline struct task_group *task_group(struct task_struct *p)
1518{
1519 return NULL;
1520}
1521
1522#endif /* CONFIG_CGROUP_SCHED */
1523
1524static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1525{
1526 set_task_rq(p, cpu);
1527#ifdef CONFIG_SMP
1528 /*
1529 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1530 * successfully executed on another CPU. We must ensure that updates of
1531 * per-task data have been completed by this moment.
1532 */
1533 smp_wmb();
1534#ifdef CONFIG_THREAD_INFO_IN_TASK
1535 WRITE_ONCE(p->cpu, cpu);
1536#else
1537 WRITE_ONCE(task_thread_info(p)->cpu, cpu);
1538#endif
1539 p->wake_cpu = cpu;
1540#endif
1541}
1542
1543/*
1544 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
1545 */
1546#ifdef CONFIG_SCHED_DEBUG
1547# include <linux/static_key.h>
1548# define const_debug __read_mostly
1549#else
1550# define const_debug const
1551#endif
1552
1553#define SCHED_FEAT(name, enabled) \
1554 __SCHED_FEAT_##name ,
1555
1556enum {
1557#include "features.h"
1558 __SCHED_FEAT_NR,
1559};
1560
1561#undef SCHED_FEAT
1562
1563#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
1564
1565/*
 * To support run-time toggling of sched features, all the translation units
 * (except core.c) reference the sysctl_sched_features defined in core.c.
1568 */
1569extern const_debug unsigned int sysctl_sched_features;
1570
1571#define SCHED_FEAT(name, enabled) \
1572static __always_inline bool static_branch_##name(struct static_key *key) \
1573{ \
1574 return static_key_##enabled(key); \
1575}
1576
1577#include "features.h"
1578#undef SCHED_FEAT
1579
1580extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1581#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1582
1583#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */
1584
1585/*
 * Each translation unit has its own copy of sysctl_sched_features to allow
 * constant propagation at compile time and compiler optimization based on
 * the features' default values.
1589 */
1590#define SCHED_FEAT(name, enabled) \
1591 (1UL << __SCHED_FEAT_##name) * enabled |
1592static const_debug __maybe_unused unsigned int sysctl_sched_features =
1593#include "features.h"
1594 0;
1595#undef SCHED_FEAT
1596
1597#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1598
1599#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */
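
/*
 * Either way, a feature test reads as, e.g.:
 *
 *	if (sched_feat(UTIL_EST))
 *		...
 *
 * With SCHED_DEBUG && JUMP_LABEL this compiles down to a static branch;
 * otherwise it is a mask test against sysctl_sched_features which the
 * compiler can usually resolve at build time.
 */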
1600
1601extern struct static_key_false sched_numa_balancing;
1602extern struct static_key_false sched_schedstats;
1603
1604static inline u64 global_rt_period(void)
1605{
1606 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1607}
1608
1609static inline u64 global_rt_runtime(void)
1610{
1611 if (sysctl_sched_rt_runtime < 0)
1612 return RUNTIME_INF;
1613
1614 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
1615}
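
/*
 * Worked example (assuming the default sysctl values of a 1000000us period
 * and a 950000us runtime): global_rt_period() returns 1e9 ns and
 * global_rt_runtime() returns 9.5e8 ns, i.e. RT tasks may consume at most
 * 95% of each second before being throttled.
 */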
1616
1617static inline int task_current(struct rq *rq, struct task_struct *p)
1618{
1619 return rq->curr == p;
1620}
1621
1622static inline int task_running(struct rq *rq, struct task_struct *p)
1623{
1624#ifdef CONFIG_SMP
1625 return p->on_cpu;
1626#else
1627 return task_current(rq, p);
1628#endif
1629}
1630
1631static inline int task_on_rq_queued(struct task_struct *p)
1632{
1633 return p->on_rq == TASK_ON_RQ_QUEUED;
1634}
1635
1636static inline int task_on_rq_migrating(struct task_struct *p)
1637{
1638 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
1639}
1640
1641/*
1642 * wake flags
1643 */
1644#define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */
1645#define WF_FORK 0x02 /* Child wakeup after fork */
1646#define WF_MIGRATED 0x4 /* Internal use, task got migrated */
1647
1648/*
1649 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1650 * of tasks with abnormal "nice" values across CPUs the contribution that
1651 * each task makes to its run queue's load is weighted according to its
1652 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1653 * scaled version of the new time slice allocation that they receive on time
1654 * slice expiry etc.
1655 */
1656
1657#define WEIGHT_IDLEPRIO 3
1658#define WMULT_IDLEPRIO 1431655765
1659
1660extern const int sched_prio_to_weight[40];
1661extern const u32 sched_prio_to_wmult[40];
1662
1663/*
1664 * {de,en}queue flags:
1665 *
1666 * DEQUEUE_SLEEP - task is no longer runnable
1667 * ENQUEUE_WAKEUP - task just became runnable
1668 *
1669 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
1670 * are in a known state which allows modification. Such pairs
1671 * should preserve as much state as possible.
1672 *
1673 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
1674 * in the runqueue.
1675 *
1676 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified)
1677 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
1678 * ENQUEUE_MIGRATED - the task was migrated during wakeup
1679 *
1680 */
1681
1682#define DEQUEUE_SLEEP 0x01
1683#define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */
1684#define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */
1685#define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */
1686
1687#define ENQUEUE_WAKEUP 0x01
1688#define ENQUEUE_RESTORE 0x02
1689#define ENQUEUE_MOVE 0x04
1690#define ENQUEUE_NOCLOCK 0x08
1691
1692#define ENQUEUE_HEAD 0x10
1693#define ENQUEUE_REPLENISH 0x20
1694#ifdef CONFIG_SMP
1695#define ENQUEUE_MIGRATED 0x40
1696#else
1697#define ENQUEUE_MIGRATED 0x00
1698#endif
1699
1700#define RETRY_TASK ((void *)-1UL)
1701
1702struct sched_class {
1703 const struct sched_class *next;
1704
1705#ifdef CONFIG_UCLAMP_TASK
1706 int uclamp_enabled;
1707#endif
1708
1709 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1710 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1711 void (*yield_task) (struct rq *rq);
1712 bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);
1713
1714 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
1715
1716 /*
1717 * Both @prev and @rf are optional and may be NULL, in which case the
1718 * caller must already have invoked put_prev_task(rq, prev, rf).
1719 *
1720 * Otherwise it is the responsibility of the pick_next_task() to call
1721 * put_prev_task() on the @prev task or something equivalent, IFF it
1722 * returns a next task.
1723 *
1724 * In that case (@rf != NULL) it may return RETRY_TASK when it finds a
1725 * higher prio class has runnable tasks.
1726 */
1727 struct task_struct * (*pick_next_task)(struct rq *rq,
1728 struct task_struct *prev,
1729 struct rq_flags *rf);
1730 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
1731 void (*set_next_task)(struct rq *rq, struct task_struct *p);
1732
1733#ifdef CONFIG_SMP
1734 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
1735 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1736 void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
1737
1738 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
1739
1740 void (*set_cpus_allowed)(struct task_struct *p,
1741 const struct cpumask *newmask);
1742
1743 void (*rq_online)(struct rq *rq);
1744 void (*rq_offline)(struct rq *rq);
1745#endif
1746
1747 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
1748 void (*task_fork)(struct task_struct *p);
1749 void (*task_dead)(struct task_struct *p);
1750
1751 /*
1752 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
1754 * rq->lock. They are however serialized by p->pi_lock.
1755 */
1756 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
1757 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1758 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1759 int oldprio);
1760
1761 unsigned int (*get_rr_interval)(struct rq *rq,
1762 struct task_struct *task);
1763
1764 void (*update_curr)(struct rq *rq);
1765
1766#define TASK_SET_GROUP 0
1767#define TASK_MOVE_GROUP 1
1768
1769#ifdef CONFIG_FAIR_GROUP_SCHED
1770 void (*task_change_group)(struct task_struct *p, int type);
1771#endif
1772};
1773
1774static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1775{
1776 WARN_ON_ONCE(rq->curr != prev);
1777 prev->sched_class->put_prev_task(rq, prev);
1778}
1779
1780static inline void set_next_task(struct rq *rq, struct task_struct *next)
1781{
1782 WARN_ON_ONCE(rq->curr != next);
1783 next->sched_class->set_next_task(rq, next);
1784}
1785
1786#ifdef CONFIG_SMP
1787#define sched_class_highest (&stop_sched_class)
1788#else
1789#define sched_class_highest (&dl_sched_class)
1790#endif
1791
1792#define for_class_range(class, _from, _to) \
1793 for (class = (_from); class != (_to); class = class->next)
1794
1795#define for_each_class(class) \
1796 for_class_range(class, sched_class_highest, NULL)
1797
1798extern const struct sched_class stop_sched_class;
1799extern const struct sched_class dl_sched_class;
1800extern const struct sched_class rt_sched_class;
1801extern const struct sched_class fair_sched_class;
1802extern const struct sched_class idle_sched_class;
1803
1804static inline bool sched_stop_runnable(struct rq *rq)
1805{
1806 return rq->stop && task_on_rq_queued(rq->stop);
1807}
1808
1809static inline bool sched_dl_runnable(struct rq *rq)
1810{
1811 return rq->dl.dl_nr_running > 0;
1812}
1813
1814static inline bool sched_rt_runnable(struct rq *rq)
1815{
1816 return rq->rt.rt_queued > 0;
1817}
1818
1819static inline bool sched_fair_runnable(struct rq *rq)
1820{
1821 return rq->cfs.nr_running > 0;
1822}
1823
1824#ifdef CONFIG_SMP
1825
1826extern void update_group_capacity(struct sched_domain *sd, int cpu);
1827
1828extern void trigger_load_balance(struct rq *rq);
1829
1830extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1831
1832#endif
1833
1834#ifdef CONFIG_CPU_IDLE
1835static inline void idle_set_state(struct rq *rq,
1836 struct cpuidle_state *idle_state)
1837{
1838 rq->idle_state = idle_state;
1839}
1840
1841static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1842{
1843 SCHED_WARN_ON(!rcu_read_lock_held());
1844
1845 return rq->idle_state;
1846}
1847#else
1848static inline void idle_set_state(struct rq *rq,
1849 struct cpuidle_state *idle_state)
1850{
1851}
1852
1853static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1854{
1855 return NULL;
1856}
1857#endif
1858
1859extern void schedule_idle(void);
1860
1861extern void sysrq_sched_debug_show(void);
1862extern void sched_init_granularity(void);
1863extern void update_max_interval(void);
1864
1865extern void init_sched_dl_class(void);
1866extern void init_sched_rt_class(void);
1867extern void init_sched_fair_class(void);
1868
1869extern void reweight_task(struct task_struct *p, int prio);
1870
1871extern void resched_curr(struct rq *rq);
1872extern void resched_cpu(int cpu);
1873
1874extern struct rt_bandwidth def_rt_bandwidth;
1875extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1876
1877extern struct dl_bandwidth def_dl_bandwidth;
1878extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1879extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1880extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
1881extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
1882
1883#define BW_SHIFT 20
1884#define BW_UNIT (1 << BW_SHIFT)
1885#define RATIO_SHIFT 8
1886unsigned long to_ratio(u64 period, u64 runtime);
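
/*
 * Bandwidth values are kept in BW_SHIFT fixed point: to_ratio(period, runtime)
 * returns (runtime << BW_SHIFT) / period. Worked example (illustrative): a
 * deadline task with runtime = 10ms and period = 100ms has a bandwidth of
 * about 0.1 * BW_UNIT, i.e. 104857, or 10% of one CPU.
 */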
1887
1888extern void init_entity_runnable_average(struct sched_entity *se);
1889extern void post_init_entity_util_avg(struct task_struct *p);
1890
1891#ifdef CONFIG_NO_HZ_FULL
1892extern bool sched_can_stop_tick(struct rq *rq);
1893extern int __init sched_tick_offload_init(void);
1894
1895/*
 * The tick may be needed by tasks in the runqueue depending on their policy
 * and requirements. If the tick is needed, send the target CPU an IPI to
 * kick it out of nohz mode if necessary.
1899 */
1900static inline void sched_update_tick_dependency(struct rq *rq)
1901{
1902 int cpu;
1903
1904 if (!tick_nohz_full_enabled())
1905 return;
1906
1907 cpu = cpu_of(rq);
1908
1909 if (!tick_nohz_full_cpu(cpu))
1910 return;
1911
1912 if (sched_can_stop_tick(rq))
1913 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
1914 else
1915 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
1916}
1917#else
1918static inline int sched_tick_offload_init(void) { return 0; }
1919static inline void sched_update_tick_dependency(struct rq *rq) { }
1920#endif
1921
1922static inline void add_nr_running(struct rq *rq, unsigned count)
1923{
1924 unsigned prev_nr = rq->nr_running;
1925
1926 rq->nr_running = prev_nr + count;
1927
1928#ifdef CONFIG_SMP
1929 if (prev_nr < 2 && rq->nr_running >= 2) {
1930 if (!READ_ONCE(rq->rd->overload))
1931 WRITE_ONCE(rq->rd->overload, 1);
1932 }
1933#endif
1934
1935 sched_update_tick_dependency(rq);
1936}
1937
1938static inline void sub_nr_running(struct rq *rq, unsigned count)
1939{
1940 rq->nr_running -= count;
1941 /* Check if we still need preemption */
1942 sched_update_tick_dependency(rq);
1943}
1944
1945extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1946extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1947
1948extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1949
1950extern const_debug unsigned int sysctl_sched_nr_migrate;
1951extern const_debug unsigned int sysctl_sched_migration_cost;
1952
1953#ifdef CONFIG_SCHED_HRTICK
1954
1955/*
1956 * Use hrtick when:
1957 * - enabled by features
1958 * - hrtimer is actually high res
1959 */
1960static inline int hrtick_enabled(struct rq *rq)
1961{
1962 if (!sched_feat(HRTICK))
1963 return 0;
1964 if (!cpu_active(cpu_of(rq)))
1965 return 0;
1966 return hrtimer_is_hres_active(&rq->hrtick_timer);
1967}
1968
1969void hrtick_start(struct rq *rq, u64 delay);
1970
1971#else
1972
1973static inline int hrtick_enabled(struct rq *rq)
1974{
1975 return 0;
1976}
1977
1978#endif /* CONFIG_SCHED_HRTICK */
1979
1980#ifndef arch_scale_freq_capacity
1981static __always_inline
1982unsigned long arch_scale_freq_capacity(int cpu)
1983{
1984 return SCHED_CAPACITY_SCALE;
1985}
1986#endif
1987
1988#ifdef CONFIG_SMP
1989#ifdef CONFIG_PREEMPTION
1990
1991static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1992
1993/*
1994 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1995 * way at the expense of forcing extra atomic operations in all
1996 * invocations. This assures that the double_lock is acquired using the
1997 * same underlying policy as the spinlock_t on this architecture, which
1998 * reduces latency compared to the unfair variant below. However, it
1999 * also adds more overhead and therefore may reduce throughput.
2000 */
2001static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2002 __releases(this_rq->lock)
2003 __acquires(busiest->lock)
2004 __acquires(this_rq->lock)
2005{
2006 raw_spin_unlock(&this_rq->lock);
2007 double_rq_lock(this_rq, busiest);
2008
2009 return 1;
2010}
2011
2012#else
2013/*
2014 * Unfair double_lock_balance: Optimizes throughput at the expense of
2015 * latency by eliminating extra atomic operations when the locks are
2016 * already in proper order on entry. This favors lower CPU-ids and will
2017 * grant the double lock to lower CPUs over higher ids under contention,
2018 * regardless of entry order into the function.
2019 */
2020static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2021 __releases(this_rq->lock)
2022 __acquires(busiest->lock)
2023 __acquires(this_rq->lock)
2024{
2025 int ret = 0;
2026
2027 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
2028 if (busiest < this_rq) {
2029 raw_spin_unlock(&this_rq->lock);
2030 raw_spin_lock(&busiest->lock);
2031 raw_spin_lock_nested(&this_rq->lock,
2032 SINGLE_DEPTH_NESTING);
2033 ret = 1;
2034 } else
2035 raw_spin_lock_nested(&busiest->lock,
2036 SINGLE_DEPTH_NESTING);
2037 }
2038 return ret;
2039}
2040
2041#endif /* CONFIG_PREEMPTION */
2042
2043/*
2044 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
2045 */
2046static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
2047{
2048 if (unlikely(!irqs_disabled())) {
2049 /* printk() doesn't work well under rq->lock */
2050 raw_spin_unlock(&this_rq->lock);
2051 BUG_ON(1);
2052 }
2053
2054 return _double_lock_balance(this_rq, busiest);
2055}
2056
2057static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
2058 __releases(busiest->lock)
2059{
2060 raw_spin_unlock(&busiest->lock);
2061 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
2062}
2063
2064static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
2065{
2066 if (l1 > l2)
2067 swap(l1, l2);
2068
2069 spin_lock(l1);
2070 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2071}
2072
2073static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
2074{
2075 if (l1 > l2)
2076 swap(l1, l2);
2077
2078 spin_lock_irq(l1);
2079 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2080}
2081
2082static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
2083{
2084 if (l1 > l2)
2085 swap(l1, l2);
2086
2087 raw_spin_lock(l1);
2088 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2089}
2090
2091/*
2092 * double_rq_lock - safely lock two runqueues
2093 *
2094 * Note this does not disable interrupts like task_rq_lock,
2095 * you need to do so manually before calling.
2096 */
2097static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2098 __acquires(rq1->lock)
2099 __acquires(rq2->lock)
2100{
2101 BUG_ON(!irqs_disabled());
2102 if (rq1 == rq2) {
2103 raw_spin_lock(&rq1->lock);
2104 __acquire(rq2->lock); /* Fake it out ;) */
2105 } else {
2106 if (rq1 < rq2) {
2107 raw_spin_lock(&rq1->lock);
2108 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
2109 } else {
2110 raw_spin_lock(&rq2->lock);
2111 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
2112 }
2113 }
2114}
2115
2116/*
2117 * double_rq_unlock - safely unlock two runqueues
2118 *
2119 * Note this does not restore interrupts like task_rq_unlock,
2120 * you need to do so manually after calling.
2121 */
2122static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2123 __releases(rq1->lock)
2124 __releases(rq2->lock)
2125{
2126 raw_spin_unlock(&rq1->lock);
2127 if (rq1 != rq2)
2128 raw_spin_unlock(&rq2->lock);
2129 else
2130 __release(rq2->lock);
2131}
2132
2133extern void set_rq_online (struct rq *rq);
2134extern void set_rq_offline(struct rq *rq);
2135extern bool sched_smp_initialized;
2136
2137#else /* CONFIG_SMP */
2138
2139/*
2140 * double_rq_lock - safely lock two runqueues
2141 *
2142 * Note this does not disable interrupts like task_rq_lock,
2143 * you need to do so manually before calling.
2144 */
2145static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2146 __acquires(rq1->lock)
2147 __acquires(rq2->lock)
2148{
2149 BUG_ON(!irqs_disabled());
2150 BUG_ON(rq1 != rq2);
2151 raw_spin_lock(&rq1->lock);
2152 __acquire(rq2->lock); /* Fake it out ;) */
2153}
2154
2155/*
2156 * double_rq_unlock - safely unlock two runqueues
2157 *
2158 * Note this does not restore interrupts like task_rq_unlock,
2159 * you need to do so manually after calling.
2160 */
2161static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2162 __releases(rq1->lock)
2163 __releases(rq2->lock)
2164{
2165 BUG_ON(rq1 != rq2);
2166 raw_spin_unlock(&rq1->lock);
2167 __release(rq2->lock);
2168}
2169
2170#endif
2171
2172extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
2173extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
2174
2175#ifdef CONFIG_SCHED_DEBUG
2176extern bool sched_debug_enabled;
2177
2178extern void print_cfs_stats(struct seq_file *m, int cpu);
2179extern void print_rt_stats(struct seq_file *m, int cpu);
2180extern void print_dl_stats(struct seq_file *m, int cpu);
2181extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
2182extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2183extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
2184#ifdef CONFIG_NUMA_BALANCING
2185extern void
2186show_numa_stats(struct task_struct *p, struct seq_file *m);
2187extern void
2188print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
2189 unsigned long tpf, unsigned long gsf, unsigned long gpf);
2190#endif /* CONFIG_NUMA_BALANCING */
2191#endif /* CONFIG_SCHED_DEBUG */
2192
2193extern void init_cfs_rq(struct cfs_rq *cfs_rq);
2194extern void init_rt_rq(struct rt_rq *rt_rq);
2195extern void init_dl_rq(struct dl_rq *dl_rq);
2196
2197extern void cfs_bandwidth_usage_inc(void);
2198extern void cfs_bandwidth_usage_dec(void);
2199
2200#ifdef CONFIG_NO_HZ_COMMON
2201#define NOHZ_BALANCE_KICK_BIT 0
2202#define NOHZ_STATS_KICK_BIT 1
2203
2204#define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT)
2205#define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT)
2206
2207#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
2208
2209#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
2210
2211extern void nohz_balance_exit_idle(struct rq *rq);
2212#else
2213static inline void nohz_balance_exit_idle(struct rq *rq) { }
2214#endif
2215
2216
2217#ifdef CONFIG_SMP
2218static inline
2219void __dl_update(struct dl_bw *dl_b, s64 bw)
2220{
2221 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
2222 int i;
2223
2224 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2225 "sched RCU must be held");
2226 for_each_cpu_and(i, rd->span, cpu_active_mask) {
2227 struct rq *rq = cpu_rq(i);
2228
2229 rq->dl.extra_bw += bw;
2230 }
2231}
2232#else
2233static inline
2234void __dl_update(struct dl_bw *dl_b, s64 bw)
2235{
2236 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
2237
2238 dl->extra_bw += bw;
2239}
2240#endif
2241
2242
2243#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2244struct irqtime {
2245 u64 total;
2246 u64 tick_delta;
2247 u64 irq_start_time;
2248 struct u64_stats_sync sync;
2249};
2250
2251DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
2252
2253/*
2254 * Returns the irqtime minus the softirq time computed by ksoftirqd.
 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
 * subtracted from it and would never move forward.
2257 */
2258static inline u64 irq_time_read(int cpu)
2259{
2260 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
2261 unsigned int seq;
2262 u64 total;
2263
2264 do {
2265 seq = __u64_stats_fetch_begin(&irqtime->sync);
2266 total = irqtime->total;
2267 } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
2268
2269 return total;
2270}
2271#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
2272
2273#ifdef CONFIG_CPU_FREQ
2274DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
2275
2276/**
2277 * cpufreq_update_util - Take a note about CPU utilization changes.
2278 * @rq: Runqueue to carry out the update for.
2279 * @flags: Update reason flags.
2280 *
2281 * This function is called by the scheduler on the CPU whose utilization is
2282 * being updated.
2283 *
2284 * It can only be called from RCU-sched read-side critical sections.
2285 *
2286 * The way cpufreq is currently arranged requires it to evaluate the CPU
2287 * performance state (frequency/voltage) on a regular basis to prevent it from
2288 * being stuck in a completely inadequate performance level for too long.
2289 * That is not guaranteed to happen if the updates are only triggered from CFS
 * and DL, though, because they may not be coming in if only RT tasks are
 * active all the time.
2292 *
2293 * As a workaround for that issue, this function is called periodically by the
2294 * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
2295 * but that really is a band-aid. Going forward it should be replaced with
2296 * solutions targeted more specifically at RT tasks.
2297 */
2298static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
2299{
2300 struct update_util_data *data;
2301
2302 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
2303 cpu_of(rq)));
2304 if (data)
2305 data->func(data, rq_clock(rq), flags);
2306}
2307#else
2308static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
2309#endif /* CONFIG_CPU_FREQ */
2310
2311#ifdef CONFIG_UCLAMP_TASK
2312enum uclamp_id uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
2313
2314static __always_inline
2315unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
2316 struct task_struct *p)
2317{
2318 unsigned int min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
2319 unsigned int max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
2320
2321 if (p) {
2322 min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
2323 max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
2324 }
2325
2326 /*
2327 * Since CPU's {min,max}_util clamps are MAX aggregated considering
2328 * RUNNABLE tasks with _different_ clamps, we can end up with an
2329 * inversion. Fix it now when the clamps are applied.
2330 */
2331 if (unlikely(min_util >= max_util))
2332 return min_util;
2333
2334 return clamp(util, min_util, max_util);
2335}
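
/*
 * Worked example for uclamp_util_with() above (illustrative): with rq clamps
 * UCLAMP_MIN = 512 and UCLAMP_MAX = 800, a raw utilization of 300 is boosted
 * to 512 and one of 900 is capped to 800; values already inside [512, 800]
 * are returned unchanged. If aggregation produced min > max, the min clamp
 * wins, as per the inversion check above.
 */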
2336
2337static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
2338{
2339 return uclamp_util_with(rq, util, NULL);
2340}
2341#else /* CONFIG_UCLAMP_TASK */
2342static inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
2343 struct task_struct *p)
2344{
2345 return util;
2346}
2347static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
2348{
2349 return util;
2350}
2351#endif /* CONFIG_UCLAMP_TASK */
2352
2353#ifdef arch_scale_freq_capacity
2354# ifndef arch_scale_freq_invariant
2355# define arch_scale_freq_invariant() true
2356# endif
2357#else
2358# define arch_scale_freq_invariant() false
2359#endif
2360
2361#ifdef CONFIG_SMP
2362static inline unsigned long capacity_orig_of(int cpu)
2363{
2364 return cpu_rq(cpu)->cpu_capacity_orig;
2365}
2366#endif
2367
2368/**
2369 * enum schedutil_type - CPU utilization type
2370 * @FREQUENCY_UTIL: Utilization used to select frequency
2371 * @ENERGY_UTIL: Utilization used during energy calculation
2372 *
2373 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
2374 * need to be aggregated differently depending on the usage made of them. This
2375 * enum is used within schedutil_freq_util() to differentiate the types of
2376 * utilization expected by the callers, and adjust the aggregation accordingly.
2377 */
2378enum schedutil_type {
2379 FREQUENCY_UTIL,
2380 ENERGY_UTIL,
2381};
2382
2383#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
2384
2385unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
2386 unsigned long max, enum schedutil_type type,
2387 struct task_struct *p);
2388
2389static inline unsigned long cpu_bw_dl(struct rq *rq)
2390{
2391 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
2392}
2393
2394static inline unsigned long cpu_util_dl(struct rq *rq)
2395{
2396 return READ_ONCE(rq->avg_dl.util_avg);
2397}
2398
2399static inline unsigned long cpu_util_cfs(struct rq *rq)
2400{
2401 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
2402
2403 if (sched_feat(UTIL_EST)) {
2404 util = max_t(unsigned long, util,
2405 READ_ONCE(rq->cfs.avg.util_est.enqueued));
2406 }
2407
2408 return util;
2409}
2410
2411static inline unsigned long cpu_util_rt(struct rq *rq)
2412{
2413 return READ_ONCE(rq->avg_rt.util_avg);
2414}
2415#else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
2416static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
2417 unsigned long max, enum schedutil_type type,
2418 struct task_struct *p)
2419{
2420 return 0;
2421}
2422#endif /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
2423
2424#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
2425static inline unsigned long cpu_util_irq(struct rq *rq)
2426{
2427 return rq->avg_irq.util_avg;
2428}
2429
2430static inline
2431unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
2432{
2433 util *= (max - irq);
2434 util /= max;
2435
	return util;
2438}
2439#else
2440static inline unsigned long cpu_util_irq(struct rq *rq)
2441{
2442 return 0;
2443}
2444
2445static inline
2446unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
2447{
2448 return util;
2449}
2450#endif
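
/*
 * Worked example for scale_irq_capacity() above (illustrative): with
 * max = 1024 (SCHED_CAPACITY_SCALE) and irq = 256, the remaining fraction is
 * (1024 - 256) / 1024 = 0.75, so a utilization of 400 is scaled down to 300:
 * time spent in IRQ (or stolen) is not available to the rq's tasks.
 */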
2451
2452#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
2453
2454#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
2455
2456DECLARE_STATIC_KEY_FALSE(sched_energy_present);
2457
2458static inline bool sched_energy_enabled(void)
2459{
2460 return static_branch_unlikely(&sched_energy_present);
2461}
2462
2463#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
2464
2465#define perf_domain_span(pd) NULL
2466static inline bool sched_energy_enabled(void) { return false; }
2467
2468#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
2469
2470#ifdef CONFIG_MEMBARRIER
2471/*
2472 * The scheduler provides memory barriers required by membarrier between:
2473 * - prior user-space memory accesses and store to rq->membarrier_state,
2474 * - store to rq->membarrier_state and following user-space memory accesses.
2475 * In the same way it provides those guarantees around store to rq->curr.
2476 */
2477static inline void membarrier_switch_mm(struct rq *rq,
2478 struct mm_struct *prev_mm,
2479 struct mm_struct *next_mm)
2480{
2481 int membarrier_state;
2482
2483 if (prev_mm == next_mm)
2484 return;
2485
2486 membarrier_state = atomic_read(&next_mm->membarrier_state);
2487 if (READ_ONCE(rq->membarrier_state) == membarrier_state)
2488 return;
2489
2490 WRITE_ONCE(rq->membarrier_state, membarrier_state);
2491}
2492#else
2493static inline void membarrier_switch_mm(struct rq *rq,
2494 struct mm_struct *prev_mm,
2495 struct mm_struct *next_mm)
2496{
2497}
2498#endif