   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 * Scheduler internal types and methods:
   4 */
   5#include <linux/sched.h>
   6
   7#include <linux/sched/autogroup.h>
   8#include <linux/sched/clock.h>
   9#include <linux/sched/coredump.h>
  10#include <linux/sched/cpufreq.h>
  11#include <linux/sched/cputime.h>
  12#include <linux/sched/deadline.h>
  13#include <linux/sched/debug.h>
  14#include <linux/sched/hotplug.h>
  15#include <linux/sched/idle.h>
  16#include <linux/sched/init.h>
  17#include <linux/sched/isolation.h>
  18#include <linux/sched/jobctl.h>
  19#include <linux/sched/loadavg.h>
  20#include <linux/sched/mm.h>
  21#include <linux/sched/nohz.h>
  22#include <linux/sched/numa_balancing.h>
  23#include <linux/sched/prio.h>
  24#include <linux/sched/rt.h>
  25#include <linux/sched/signal.h>
  26#include <linux/sched/stat.h>
  27#include <linux/sched/sysctl.h>
  28#include <linux/sched/task.h>
  29#include <linux/sched/task_stack.h>
  30#include <linux/sched/topology.h>
  31#include <linux/sched/user.h>
  32#include <linux/sched/wake_q.h>
  33#include <linux/sched/xacct.h>
  34
  35#include <uapi/linux/sched/types.h>
  36
  37#include <linux/binfmts.h>
  38#include <linux/blkdev.h>
  39#include <linux/compat.h>
  40#include <linux/context_tracking.h>
  41#include <linux/cpufreq.h>
  42#include <linux/cpuidle.h>
  43#include <linux/cpuset.h>
  44#include <linux/ctype.h>
  45#include <linux/debugfs.h>
  46#include <linux/delayacct.h>
  47#include <linux/init_task.h>
  48#include <linux/kprobes.h>
  49#include <linux/kthread.h>
  50#include <linux/membarrier.h>
  51#include <linux/migrate.h>
  52#include <linux/mmu_context.h>
  53#include <linux/nmi.h>
  54#include <linux/proc_fs.h>
  55#include <linux/prefetch.h>
  56#include <linux/profile.h>
  57#include <linux/rcupdate_wait.h>
  58#include <linux/security.h>
  59#include <linux/stackprotector.h>
  60#include <linux/stop_machine.h>
  61#include <linux/suspend.h>
  62#include <linux/swait.h>
  63#include <linux/syscalls.h>
  64#include <linux/task_work.h>
  65#include <linux/tsacct_kern.h>
  66
  67#include <asm/tlb.h>
  68
  69#ifdef CONFIG_PARAVIRT
  70# include <asm/paravirt.h>
  71#endif
  72
  73#include "cpupri.h"
  74#include "cpudeadline.h"
  75
  76#ifdef CONFIG_SCHED_DEBUG
  77# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
  78#else
  79# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
  80#endif
  81
  82struct rq;
  83struct cpuidle_state;
  84
  85/* task_struct::on_rq states: */
  86#define TASK_ON_RQ_QUEUED	1
  87#define TASK_ON_RQ_MIGRATING	2
  88
  89extern __read_mostly int scheduler_running;
  90
  91extern unsigned long calc_load_update;
  92extern atomic_long_t calc_load_tasks;
  93
  94extern void calc_global_load_tick(struct rq *this_rq);
  95extern long calc_load_fold_active(struct rq *this_rq, long adjust);
  96
  97#ifdef CONFIG_SMP
  98extern void cpu_load_update_active(struct rq *this_rq);
  99#else
 100static inline void cpu_load_update_active(struct rq *this_rq) { }
 101#endif
 102
 103/*
 104 * Helpers for converting nanosecond timing to jiffy resolution
 105 */
 106#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
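/*
 * For illustration (assuming HZ == 1000): NSEC_PER_SEC / HZ is then
 * 1,000,000, so NS_TO_JIFFIES(5000000) == 5, i.e. 5ms worth of nanosecond
 * timing maps to 5 jiffies.
 */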
 107
 108/*
 109 * Increase resolution of nice-level calculations for 64-bit architectures.
 110 * The extra resolution improves shares distribution and load balancing of
 111 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 112 * hierarchies, especially on larger systems. This is not a user-visible change
 113 * and does not change the user-interface for setting shares/weights.
 114 *
 115 * We increase resolution only if we have enough bits to allow this increased
 116 * resolution (i.e. 64-bit). The costs of increasing resolution on 32-bit
 117 * are pretty high and the returns do not justify the increased costs.
 118 *
 119 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 120 * increase coverage and consistency always enable it on 64-bit platforms.
 121 */
 122#ifdef CONFIG_64BIT
 123# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
 124# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
 125# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
 126#else
 127# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
 128# define scale_load(w)		(w)
 129# define scale_load_down(w)	(w)
 130#endif
 131
 132/*
 133 * Task weight (visible to users) and its load (invisible to users) have
 134 * independent resolution, but they should be well calibrated. We use
 135 * scale_load() and scale_load_down() to convert between them. The
 136 * following must be true:
 137 *
 138 *  scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 139 *
 140 */
 141#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
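/*
 * For illustration (assuming SCHED_FIXEDPOINT_SHIFT == 10 and a nice-0
 * weight of 1024): on 64-bit, NICE_0_LOAD_SHIFT == 20, so NICE_0_LOAD ==
 * 1 << 20 == 1048576 == scale_load(1024); on 32-bit, scale_load() is the
 * identity and NICE_0_LOAD == 1024, so the invariant above holds either way.
 */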
 142
 143/*
 144 * Single value that decides SCHED_DEADLINE internal math precision.
 145 * 10 -> just above 1us
 146 * 9  -> just above 0.5us
 147 */
 148#define DL_SCALE		10
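/*
 * For reference, the arithmetic behind the comment above: 1 << 10 == 1024ns,
 * just above 1us, while 1 << 9 == 512ns, just above 0.5us.
 */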
 149
 150/*
 151 * Single value that denotes runtime == period, i.e. unlimited time.
 152 */
 153#define RUNTIME_INF		((u64)~0ULL)
 154
 155static inline int idle_policy(int policy)
 156{
 157	return policy == SCHED_IDLE;
 158}
 159static inline int fair_policy(int policy)
 160{
 161	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
 162}
 163
 164static inline int rt_policy(int policy)
 165{
 166	return policy == SCHED_FIFO || policy == SCHED_RR;
 167}
 168
 169static inline int dl_policy(int policy)
 170{
 171	return policy == SCHED_DEADLINE;
 172}
 173static inline bool valid_policy(int policy)
 174{
 175	return idle_policy(policy) || fair_policy(policy) ||
 176		rt_policy(policy) || dl_policy(policy);
 177}
 178
 179static inline int task_has_rt_policy(struct task_struct *p)
 180{
 181	return rt_policy(p->policy);
 182}
 183
 184static inline int task_has_dl_policy(struct task_struct *p)
 185{
 186	return dl_policy(p->policy);
 187}
 188
 189#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
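/*
 * For example (with SCHED_CAPACITY_SHIFT == 10, i.e. a full scale of 1024):
 * cap_scale(1000, 512) == (1000 * 512) >> 10 == 500, i.e. the value is
 * halved when the capacity factor is half of the full scale.
 */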
 190
 191/*
 192 * !! For sched_setattr_nocheck() (kernel) only !!
 193 *
 194 * This is actually gross. :(
 195 *
 196 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 197 * tasks, but still be able to sleep. We need this on platforms that cannot
 198 * atomically change clock frequency. Remove once fast switching is
 199 * available on such platforms.
 200 *
 201 * SUGOV stands for SchedUtil GOVernor.
 202 */
 203#define SCHED_FLAG_SUGOV	0x10000000
 204
 205static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
 206{
 207#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
 208	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
 209#else
 210	return false;
 211#endif
 212}
 213
 214/*
 215 * Tells if entity @a should preempt entity @b.
 216 */
 217static inline bool
 218dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
 219{
 220	return dl_entity_is_special(a) ||
 221	       dl_time_before(a->deadline, b->deadline);
 222}
 223
 224/*
 225 * This is the priority-queue data structure of the RT scheduling class:
 226 */
 227struct rt_prio_array {
 228	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
 229	struct list_head queue[MAX_RT_PRIO];
 230};
 231
 232struct rt_bandwidth {
 233	/* nests inside the rq lock: */
 234	raw_spinlock_t		rt_runtime_lock;
 235	ktime_t			rt_period;
 236	u64			rt_runtime;
 237	struct hrtimer		rt_period_timer;
 238	unsigned int		rt_period_active;
 239};
 240
 241void __dl_clear_params(struct task_struct *p);
 242
 243/*
 244 * To keep the bandwidth of -deadline tasks and groups under control
 245 * we need some place where we can:
 246 *  - store the maximum -deadline bandwidth of the system (the group);
 247 *  - cache the fraction of that bandwidth that is currently allocated.
 248 *
 249 * This is all done in the data structure below. It is similar to the
 250 * one used for RT-throttling (rt_bandwidth), with the main difference
 251 * that, since here we are only interested in admission control, we
 252 * do not decrease any runtime while the group "executes", nor do we
 253 * need a timer to replenish it.
 254 *
 255 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 256 * meaning that:
 257 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 258 *  - the dl_total_bw array contains, in the i-th element, the currently
 259 *    allocated bandwidth on the i-th CPU.
 260 * Moreover, groups consume bandwidth on each CPU, while tasks only
 261 * consume bandwidth on the CPU they're running on.
 262 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 263 * that will be shown the next time the proc or cgroup controls are
 264 * read. It can in turn be changed by writing to its own
 265 * control.
 266 */
 267struct dl_bandwidth {
 268	raw_spinlock_t		dl_runtime_lock;
 269	u64			dl_runtime;
 270	u64			dl_period;
 271};
 272
 273static inline int dl_bandwidth_enabled(void)
 274{
 275	return sysctl_sched_rt_runtime >= 0;
 276}
 277
 278struct dl_bw {
 279	raw_spinlock_t		lock;
 280	u64			bw;
 281	u64			total_bw;
 282};
 283
 284static inline void __dl_update(struct dl_bw *dl_b, s64 bw);
 285
 286static inline
 287void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
 288{
 289	dl_b->total_bw -= tsk_bw;
 290	__dl_update(dl_b, (s32)tsk_bw / cpus);
 291}
 292
 293static inline
 294void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
 295{
 296	dl_b->total_bw += tsk_bw;
 297	__dl_update(dl_b, -((s32)tsk_bw / cpus));
 298}
 299
 300static inline
 301bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
 302{
 303	return dl_b->bw != -1 &&
 304	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
 305}
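/*
 * A sketch of the check above: bandwidths are BW_UNIT fixed-point fractions
 * (see BW_SHIFT further down), so with e.g. dl_b->bw corresponding to 95%
 * of a CPU and cpus == 4, a change is rejected once the requested total
 * (total_bw - old_bw + new_bw) would exceed 95% of 4 CPUs.
 */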
 306
 307extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
 308extern void init_dl_bw(struct dl_bw *dl_b);
 309extern int  sched_dl_global_validate(void);
 310extern void sched_dl_do_global(void);
 311extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
 312extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
 313extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
 314extern bool __checkparam_dl(const struct sched_attr *attr);
 315extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
 316extern int  dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
 317extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
 318extern bool dl_cpu_busy(unsigned int cpu);
 319
 320#ifdef CONFIG_CGROUP_SCHED
 321
 322#include <linux/cgroup.h>
 323
 324struct cfs_rq;
 325struct rt_rq;
 326
 327extern struct list_head task_groups;
 328
 329struct cfs_bandwidth {
 330#ifdef CONFIG_CFS_BANDWIDTH
 331	raw_spinlock_t		lock;
 332	ktime_t			period;
 333	u64			quota;
 334	u64			runtime;
 335	s64			hierarchical_quota;
 336	u64			runtime_expires;
 337
 338	int			idle;
 339	int			period_active;
 340	struct hrtimer		period_timer;
 341	struct hrtimer		slack_timer;
 342	struct list_head	throttled_cfs_rq;
 343
 344	/* Statistics: */
 345	int			nr_periods;
 346	int			nr_throttled;
 347	u64			throttled_time;
 348#endif
 349};
 350
 351/* Task group related information */
 352struct task_group {
 353	struct cgroup_subsys_state css;
 354
 355#ifdef CONFIG_FAIR_GROUP_SCHED
 356	/* schedulable entities of this group on each CPU */
 357	struct sched_entity	**se;
 358	/* runqueue "owned" by this group on each CPU */
 359	struct cfs_rq		**cfs_rq;
 360	unsigned long		shares;
 361
 362#ifdef	CONFIG_SMP
 363	/*
 364	 * load_avg can be heavily contended at clock tick time, so put
 365	 * it in its own cacheline separated from the fields above which
 366	 * will also be accessed at each tick.
 367	 */
 368	atomic_long_t		load_avg ____cacheline_aligned;
 369#endif
 370#endif
 371
 372#ifdef CONFIG_RT_GROUP_SCHED
 373	struct sched_rt_entity	**rt_se;
 374	struct rt_rq		**rt_rq;
 375
 376	struct rt_bandwidth	rt_bandwidth;
 377#endif
 378
 379	struct rcu_head		rcu;
 380	struct list_head	list;
 381
 382	struct task_group	*parent;
 383	struct list_head	siblings;
 384	struct list_head	children;
 385
 386#ifdef CONFIG_SCHED_AUTOGROUP
 387	struct autogroup	*autogroup;
 388#endif
 389
 390	struct cfs_bandwidth	cfs_bandwidth;
 391};
 392
 393#ifdef CONFIG_FAIR_GROUP_SCHED
 394#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
 395
 396/*
 397 * A weight of 0 or 1 can cause arithmetic problems.
 398 * The weight of a cfs_rq is the sum of the weights of the entities
 399 * queued on that cfs_rq, so the weight of an entity should not be
 400 * too large, and neither should the shares value of a task group.
 401 * (The default weight is 1024 - so there's no practical
 402 *  limitation from this.)
 403 */
 404#define MIN_SHARES		(1UL <<  1)
 405#define MAX_SHARES		(1UL << 18)
 406#endif
 407
 408typedef int (*tg_visitor)(struct task_group *, void *);
 409
 410extern int walk_tg_tree_from(struct task_group *from,
 411			     tg_visitor down, tg_visitor up, void *data);
 412
 413/*
 414 * Iterate the full tree, calling @down when first entering a node and @up when
 415 * leaving it for the final time.
 416 *
 417 * Caller must hold rcu_lock or sufficient equivalent.
 418 */
 419static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
 420{
 421	return walk_tg_tree_from(&root_task_group, down, up, data);
 422}
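/*
 * A minimal usage sketch (tg_dump is a hypothetical visitor callback),
 * walking the whole hierarchy with a down-visitor only and using tg_nop
 * for the up pass:
 *
 *	rcu_read_lock();
 *	walk_tg_tree(tg_dump, tg_nop, NULL);
 *	rcu_read_unlock();
 */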
 423
 424extern int tg_nop(struct task_group *tg, void *data);
 425
 426extern void free_fair_sched_group(struct task_group *tg);
 427extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
 428extern void online_fair_sched_group(struct task_group *tg);
 429extern void unregister_fair_sched_group(struct task_group *tg);
 430extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 431			struct sched_entity *se, int cpu,
 432			struct sched_entity *parent);
 433extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 434
 435extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
 436extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 437extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 438
 439extern void free_rt_sched_group(struct task_group *tg);
 440extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
 441extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 442		struct sched_rt_entity *rt_se, int cpu,
 443		struct sched_rt_entity *parent);
 444extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
 445extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
 446extern long sched_group_rt_runtime(struct task_group *tg);
 447extern long sched_group_rt_period(struct task_group *tg);
 448extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 449
 450extern struct task_group *sched_create_group(struct task_group *parent);
 451extern void sched_online_group(struct task_group *tg,
 452			       struct task_group *parent);
 453extern void sched_destroy_group(struct task_group *tg);
 454extern void sched_offline_group(struct task_group *tg);
 455
 456extern void sched_move_task(struct task_struct *tsk);
 457
 458#ifdef CONFIG_FAIR_GROUP_SCHED
 459extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 460
 461#ifdef CONFIG_SMP
 462extern void set_task_rq_fair(struct sched_entity *se,
 463			     struct cfs_rq *prev, struct cfs_rq *next);
 464#else /* !CONFIG_SMP */
 465static inline void set_task_rq_fair(struct sched_entity *se,
 466			     struct cfs_rq *prev, struct cfs_rq *next) { }
 467#endif /* CONFIG_SMP */
 468#endif /* CONFIG_FAIR_GROUP_SCHED */
 469
 470#else /* CONFIG_CGROUP_SCHED */
 471
 472struct cfs_bandwidth { };
 473
 474#endif	/* CONFIG_CGROUP_SCHED */
 475
 476/* CFS-related fields in a runqueue */
 477struct cfs_rq {
 478	struct load_weight	load;
 479	unsigned long		runnable_weight;
 480	unsigned int		nr_running;
 481	unsigned int		h_nr_running;
 482
 483	u64			exec_clock;
 484	u64			min_vruntime;
 485#ifndef CONFIG_64BIT
 486	u64			min_vruntime_copy;
 487#endif
 488
 489	struct rb_root_cached	tasks_timeline;
 490
 491	/*
 492	 * 'curr' points to the currently running entity on this cfs_rq.
 493	 * It is set to NULL otherwise (i.e. when none are currently running).
 494	 */
 495	struct sched_entity	*curr;
 496	struct sched_entity	*next;
 497	struct sched_entity	*last;
 498	struct sched_entity	*skip;
 499
 500#ifdef	CONFIG_SCHED_DEBUG
 501	unsigned int		nr_spread_over;
 502#endif
 503
 504#ifdef CONFIG_SMP
 505	/*
 506	 * CFS load tracking
 507	 */
 508	struct sched_avg	avg;
 509#ifndef CONFIG_64BIT
 510	u64			load_last_update_time_copy;
 511#endif
 512	struct {
 513		raw_spinlock_t	lock ____cacheline_aligned;
 514		int		nr;
 515		unsigned long	load_avg;
 516		unsigned long	util_avg;
 517		unsigned long	runnable_sum;
 518	} removed;
 519
 520#ifdef CONFIG_FAIR_GROUP_SCHED
 521	unsigned long		tg_load_avg_contrib;
 522	long			propagate;
 523	long			prop_runnable_sum;
 524
 525	/*
 526	 *   h_load = weight * f(tg)
 527	 *
 528	 * Where f(tg) is the recursive weight fraction assigned to
 529	 * this group.
 530	 */
 531	unsigned long		h_load;
 532	u64			last_h_load_update;
 533	struct sched_entity	*h_load_next;
 534#endif /* CONFIG_FAIR_GROUP_SCHED */
 535#endif /* CONFIG_SMP */
 536
 537#ifdef CONFIG_FAIR_GROUP_SCHED
 538	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */
 539
 540	/*
 541	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
 542	 * a hierarchy). Non-leaf cfs_rqs hold other, higher-level schedulable entities
 543	 * (like users, containers etc.)
 544	 *
 545	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
 546	 * This list is used during load balance.
 547	 */
 548	int			on_list;
 549	struct list_head	leaf_cfs_rq_list;
 550	struct task_group	*tg;	/* group that "owns" this runqueue */
 551
 552#ifdef CONFIG_CFS_BANDWIDTH
 553	int			runtime_enabled;
 554	u64			runtime_expires;
 555	s64			runtime_remaining;
 556
 557	u64			throttled_clock;
 558	u64			throttled_clock_task;
 559	u64			throttled_clock_task_time;
 560	int			throttled;
 561	int			throttle_count;
 562	struct list_head	throttled_list;
 563#endif /* CONFIG_CFS_BANDWIDTH */
 564#endif /* CONFIG_FAIR_GROUP_SCHED */
 565};
 566
 567static inline int rt_bandwidth_enabled(void)
 568{
 569	return sysctl_sched_rt_runtime >= 0;
 570}
 571
 572/* RT IPI pull logic requires IRQ_WORK */
 573#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
 574# define HAVE_RT_PUSH_IPI
 575#endif
 576
 577/* Real-Time classes' related field in a runqueue: */
 578struct rt_rq {
 579	struct rt_prio_array	active;
 580	unsigned int		rt_nr_running;
 581	unsigned int		rr_nr_running;
 582#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 583	struct {
 584		int		curr; /* highest queued rt task prio */
 585#ifdef CONFIG_SMP
 586		int		next; /* next highest */
 587#endif
 588	} highest_prio;
 589#endif
 590#ifdef CONFIG_SMP
 591	unsigned long		rt_nr_migratory;
 592	unsigned long		rt_nr_total;
 593	int			overloaded;
 594	struct plist_head	pushable_tasks;
 595#endif /* CONFIG_SMP */
 596	int			rt_queued;
 597
 598	int			rt_throttled;
 599	u64			rt_time;
 600	u64			rt_runtime;
 601	/* Nests inside the rq lock: */
 602	raw_spinlock_t		rt_runtime_lock;
 603
 604#ifdef CONFIG_RT_GROUP_SCHED
 605	unsigned long		rt_nr_boosted;
 606
 607	struct rq		*rq;
 608	struct task_group	*tg;
 609#endif
 610};
 611
 612/* Deadline class' related fields in a runqueue */
 613struct dl_rq {
 614	/* runqueue is an rbtree, ordered by deadline */
 615	struct rb_root_cached	root;
 616
 617	unsigned long		dl_nr_running;
 618
 619#ifdef CONFIG_SMP
 620	/*
 621	 * Deadline values of the currently executing and the
 622	 * earliest ready task on this rq. Caching these facilitates
 623	 * the decision whether or not a ready but not running task
 624	 * should migrate somewhere else.
 625	 */
 626	struct {
 627		u64		curr;
 628		u64		next;
 629	} earliest_dl;
 630
 631	unsigned long		dl_nr_migratory;
 632	int			overloaded;
 633
 634	/*
 635	 * Tasks on this rq that can be pushed away. They are kept in
 636	 * an rb-tree, ordered by tasks' deadlines, with caching
 637	 * of the leftmost (earliest deadline) element.
 638	 */
 639	struct rb_root_cached	pushable_dl_tasks_root;
 640#else
 641	struct dl_bw		dl_bw;
 642#endif
 643	/*
 644	 * "Active utilization" for this runqueue: increased when a
 645	 * task wakes up (becomes TASK_RUNNING) and decreased when a
 646	 * task blocks
 647	 */
 648	u64			running_bw;
 649
 650	/*
 651	 * Utilization of the tasks "assigned" to this runqueue (including
 652	 * the tasks that are in the runqueue and the tasks that executed on this
 653	 * CPU and blocked). Increased when a task moves to this runqueue, and
 654	 * decreased when the task moves away (migrates, changes scheduling
 655	 * policy, or terminates).
 656	 * This is needed to compute the "inactive utilization" for the
 657	 * runqueue (inactive utilization = this_bw - running_bw).
 658	 */
 659	u64			this_bw;
 660	u64			extra_bw;
 661
 662	/*
 663	 * Inverse of the fraction of CPU utilization that can be reclaimed
 664	 * by the GRUB algorithm.
 665	 */
 666	u64			bw_ratio;
 667};
 668
 669#ifdef CONFIG_SMP
 670
 671static inline bool sched_asym_prefer(int a, int b)
 672{
 673	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
 674}
 675
 676/*
 677 * We add the notion of a root-domain which will be used to define per-domain
 678 * variables. Each exclusive cpuset essentially defines an island domain by
 679 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 680 * exclusive cpuset is created, we also create and attach a new root-domain
 681 * object.
 682 *
 683 */
 684struct root_domain {
 685	atomic_t		refcount;
 686	atomic_t		rto_count;
 687	struct rcu_head		rcu;
 688	cpumask_var_t		span;
 689	cpumask_var_t		online;
 690
 691	/* Indicate more than one runnable task for any CPU */
 692	bool			overload;
 693
 694	/*
 695	 * The bit corresponding to a CPU gets set here if such CPU has more
 696	 * than one runnable -deadline task (as it is below for RT tasks).
 697	 */
 698	cpumask_var_t		dlo_mask;
 699	atomic_t		dlo_count;
 700	struct dl_bw		dl_bw;
 701	struct cpudl		cpudl;
 702
 703#ifdef HAVE_RT_PUSH_IPI
 704	/*
 705	 * For IPI pull requests, loop across the rto_mask.
 706	 */
 707	struct irq_work		rto_push_work;
 708	raw_spinlock_t		rto_lock;
 709	/* These are only updated and read within rto_lock */
 710	int			rto_loop;
 711	int			rto_cpu;
 712	/* These atomics are updated outside of a lock */
 713	atomic_t		rto_loop_next;
 714	atomic_t		rto_loop_start;
 715#endif
 716	/*
 717	 * The "RT overload" flag: it gets set if a CPU has more than
 718	 * one runnable RT task.
 719	 */
 720	cpumask_var_t		rto_mask;
 721	struct cpupri		cpupri;
 722
 723	unsigned long		max_cpu_capacity;
 724};
 725
 726extern struct root_domain def_root_domain;
 727extern struct mutex sched_domains_mutex;
 728
 729extern void init_defrootdomain(void);
 730extern int sched_init_domains(const struct cpumask *cpu_map);
 731extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
 732extern void sched_get_rd(struct root_domain *rd);
 733extern void sched_put_rd(struct root_domain *rd);
 734
 735#ifdef HAVE_RT_PUSH_IPI
 736extern void rto_push_irq_work_func(struct irq_work *work);
 737#endif
 738#endif /* CONFIG_SMP */
 739
 740/*
 741 * This is the main, per-CPU runqueue data structure.
 742 *
 743 * Locking rule: code that wants to lock multiple runqueues (such as
 744 * the load balancing or the thread migration code) must acquire the
 745 * locks in ascending &runqueue order.
 746 */
 747struct rq {
 748	/* runqueue lock: */
 749	raw_spinlock_t		lock;
 750
 751	/*
 752	 * nr_running and cpu_load should be in the same cacheline because
 753	 * remote CPUs use both these fields when doing load calculation.
 754	 */
 755	unsigned int		nr_running;
 756#ifdef CONFIG_NUMA_BALANCING
 757	unsigned int		nr_numa_running;
 758	unsigned int		nr_preferred_running;
 759#endif
 760	#define CPU_LOAD_IDX_MAX 5
 761	unsigned long		cpu_load[CPU_LOAD_IDX_MAX];
 762#ifdef CONFIG_NO_HZ_COMMON
 763#ifdef CONFIG_SMP
 764	unsigned long		last_load_update_tick;
 765	unsigned long		last_blocked_load_update_tick;
 766	unsigned int		has_blocked_load;
 767#endif /* CONFIG_SMP */
 768	unsigned int		nohz_tick_stopped;
 769	atomic_t nohz_flags;
 770#endif /* CONFIG_NO_HZ_COMMON */
 771
 772	/* capture load from *all* tasks on this CPU: */
 773	struct load_weight	load;
 774	unsigned long		nr_load_updates;
 775	u64			nr_switches;
 776
 777	struct cfs_rq		cfs;
 778	struct rt_rq		rt;
 779	struct dl_rq		dl;
 780
 781#ifdef CONFIG_FAIR_GROUP_SCHED
 782	/* list of leaf cfs_rq on this CPU: */
 783	struct list_head	leaf_cfs_rq_list;
 784	struct list_head	*tmp_alone_branch;
 785#endif /* CONFIG_FAIR_GROUP_SCHED */
 786
 787	/*
 788	 * This is part of a global counter where only the total sum
 789	 * over all CPUs matters. A task can increase this counter on
 790	 * one CPU and if it got migrated afterwards it may decrease
 791	 * it on another CPU. Always updated under the runqueue lock:
 792	 */
 793	unsigned long		nr_uninterruptible;
 794
 795	struct task_struct	*curr;
 796	struct task_struct	*idle;
 797	struct task_struct	*stop;
 798	unsigned long		next_balance;
 799	struct mm_struct	*prev_mm;
 800
 801	unsigned int		clock_update_flags;
 802	u64			clock;
 803	u64			clock_task;
 804
 805	atomic_t		nr_iowait;
 806
 807#ifdef CONFIG_SMP
 808	struct root_domain	*rd;
 809	struct sched_domain	*sd;
 810
 811	unsigned long		cpu_capacity;
 812	unsigned long		cpu_capacity_orig;
 813
 814	struct callback_head	*balance_callback;
 815
 816	unsigned char		idle_balance;
 817
 818	/* For active balancing */
 819	int			active_balance;
 820	int			push_cpu;
 821	struct cpu_stop_work	active_balance_work;
 822
 823	/* CPU of this runqueue: */
 824	int			cpu;
 825	int			online;
 826
 827	struct list_head cfs_tasks;
 828
 829	u64			rt_avg;
 830	u64			age_stamp;
 831	u64			idle_stamp;
 832	u64			avg_idle;
 833
 834	/* This is used to determine avg_idle's max value */
 835	u64			max_idle_balance_cost;
 836#endif
 837
 838#ifdef CONFIG_IRQ_TIME_ACCOUNTING
 839	u64			prev_irq_time;
 840#endif
 841#ifdef CONFIG_PARAVIRT
 842	u64			prev_steal_time;
 843#endif
 844#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
 845	u64			prev_steal_time_rq;
 846#endif
 847
 848	/* calc_load related fields */
 849	unsigned long		calc_load_update;
 850	long			calc_load_active;
 851
 852#ifdef CONFIG_SCHED_HRTICK
 853#ifdef CONFIG_SMP
 854	int			hrtick_csd_pending;
 855	call_single_data_t	hrtick_csd;
 856#endif
 857	struct hrtimer		hrtick_timer;
 858#endif
 859
 860#ifdef CONFIG_SCHEDSTATS
 861	/* latency stats */
 862	struct sched_info	rq_sched_info;
 863	unsigned long long	rq_cpu_time;
 864	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 865
 866	/* sys_sched_yield() stats */
 867	unsigned int		yld_count;
 868
 869	/* schedule() stats */
 870	unsigned int		sched_count;
 871	unsigned int		sched_goidle;
 872
 873	/* try_to_wake_up() stats */
 874	unsigned int		ttwu_count;
 875	unsigned int		ttwu_local;
 876#endif
 877
 878#ifdef CONFIG_SMP
 879	struct llist_head	wake_list;
 880#endif
 881
 882#ifdef CONFIG_CPU_IDLE
  883	/* Must be inspected within an RCU lock section */
 884	struct cpuidle_state	*idle_state;
 885#endif
 886};
 887
 888static inline int cpu_of(struct rq *rq)
 889{
 890#ifdef CONFIG_SMP
 891	return rq->cpu;
 892#else
 893	return 0;
 894#endif
 895}
 896
 897
 898#ifdef CONFIG_SCHED_SMT
 899
 900extern struct static_key_false sched_smt_present;
 901
 902extern void __update_idle_core(struct rq *rq);
 903
 904static inline void update_idle_core(struct rq *rq)
 905{
 906	if (static_branch_unlikely(&sched_smt_present))
 907		__update_idle_core(rq);
 908}
 909
 910#else
 911static inline void update_idle_core(struct rq *rq) { }
 912#endif
 913
 914DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 915
 916#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
 917#define this_rq()		this_cpu_ptr(&runqueues)
 918#define task_rq(p)		cpu_rq(task_cpu(p))
 919#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 920#define raw_rq()		raw_cpu_ptr(&runqueues)
 921
 922static inline u64 __rq_clock_broken(struct rq *rq)
 923{
 924	return READ_ONCE(rq->clock);
 925}
 926
 927/*
 928 * rq::clock_update_flags bits
 929 *
 930 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 931 *  call to __schedule(). This is an optimisation to avoid
 932 *  neighbouring rq clock updates.
 933 *
 934 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 935 *  in effect and calls to update_rq_clock() are being ignored.
 936 *
 937 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 938 *  made to update_rq_clock() since the last time rq::lock was pinned.
 939 *
 940 * If inside of __schedule(), clock_update_flags will have been
 941 * shifted left (a left shift is a cheap operation for the fast path
 942 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 943 *
 944 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 945 *
 946 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 947 * one position though, because the next rq_unpin_lock() will shift it
 948 * back.
 949 */
 950#define RQCF_REQ_SKIP		0x01
 951#define RQCF_ACT_SKIP		0x02
 952#define RQCF_UPDATED		0x04
 953
 954static inline void assert_clock_updated(struct rq *rq)
 955{
 956	/*
 957	 * The only reason for not seeing a clock update since the
 958	 * last rq_pin_lock() is if we're currently skipping updates.
 959	 */
 960	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
 961}
 962
 963static inline u64 rq_clock(struct rq *rq)
 964{
 965	lockdep_assert_held(&rq->lock);
 966	assert_clock_updated(rq);
 967
 968	return rq->clock;
 969}
 970
 971static inline u64 rq_clock_task(struct rq *rq)
 972{
 973	lockdep_assert_held(&rq->lock);
 974	assert_clock_updated(rq);
 975
 976	return rq->clock_task;
 977}
 978
 979static inline void rq_clock_skip_update(struct rq *rq)
 980{
 981	lockdep_assert_held(&rq->lock);
 982	rq->clock_update_flags |= RQCF_REQ_SKIP;
 983}
 984
 985/*
 986 * See rt task throttling, which is the only time a skip
 987 * request is cancelled.
 988 */
 989static inline void rq_clock_cancel_skipupdate(struct rq *rq)
 990{
 991	lockdep_assert_held(&rq->lock);
 992	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
 993}
 994
 995struct rq_flags {
 996	unsigned long flags;
 997	struct pin_cookie cookie;
 998#ifdef CONFIG_SCHED_DEBUG
 999	/*
1000	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
1001	 * current pin context is stashed here in case it needs to be
1002	 * restored in rq_repin_lock().
1003	 */
1004	unsigned int clock_update_flags;
1005#endif
1006};
1007
1008static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
1009{
1010	rf->cookie = lockdep_pin_lock(&rq->lock);
1011
1012#ifdef CONFIG_SCHED_DEBUG
1013	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
1014	rf->clock_update_flags = 0;
1015#endif
1016}
1017
1018static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
1019{
1020#ifdef CONFIG_SCHED_DEBUG
1021	if (rq->clock_update_flags > RQCF_ACT_SKIP)
1022		rf->clock_update_flags = RQCF_UPDATED;
1023#endif
1024
1025	lockdep_unpin_lock(&rq->lock, rf->cookie);
1026}
1027
1028static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
1029{
1030	lockdep_repin_lock(&rq->lock, rf->cookie);
1031
1032#ifdef CONFIG_SCHED_DEBUG
1033	/*
1034	 * Restore the value we stashed in @rf for this pin context.
1035	 */
1036	rq->clock_update_flags |= rf->clock_update_flags;
1037#endif
1038}
1039
1040#ifdef CONFIG_NUMA
1041enum numa_topology_type {
1042	NUMA_DIRECT,
1043	NUMA_GLUELESS_MESH,
1044	NUMA_BACKPLANE,
1045};
1046extern enum numa_topology_type sched_numa_topology_type;
1047extern int sched_max_numa_distance;
1048extern bool find_numa_distance(int distance);
1049#endif
1050
1051#ifdef CONFIG_NUMA
1052extern void sched_init_numa(void);
1053extern void sched_domains_numa_masks_set(unsigned int cpu);
1054extern void sched_domains_numa_masks_clear(unsigned int cpu);
1055#else
1056static inline void sched_init_numa(void) { }
1057static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
1058static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
1059#endif
1060
1061#ifdef CONFIG_NUMA_BALANCING
1062/* The regions in numa_faults array from task_struct */
1063enum numa_faults_stats {
1064	NUMA_MEM = 0,
1065	NUMA_CPU,
1066	NUMA_MEMBUF,
1067	NUMA_CPUBUF
1068};
1069extern void sched_setnuma(struct task_struct *p, int node);
1070extern int migrate_task_to(struct task_struct *p, int cpu);
1071extern int migrate_swap(struct task_struct *, struct task_struct *);
1072#endif /* CONFIG_NUMA_BALANCING */
1073
1074#ifdef CONFIG_SMP
1075
1076static inline void
1077queue_balance_callback(struct rq *rq,
1078		       struct callback_head *head,
1079		       void (*func)(struct rq *rq))
1080{
1081	lockdep_assert_held(&rq->lock);
1082
1083	if (unlikely(head->next))
1084		return;
1085
1086	head->func = (void (*)(struct callback_head *))func;
1087	head->next = rq->balance_callback;
1088	rq->balance_callback = head;
1089}
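/*
 * Usage sketch: a scheduling class queues work that must run after the
 * rq lock is released, e.g. (with hypothetical names):
 *
 *	queue_balance_callback(rq, &per_cpu(my_balance_head, rq->cpu),
 *			       my_balance_func);
 *
 * The callback is then invoked once it is safe to take other runqueues' locks.
 */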
1090
1091extern void sched_ttwu_pending(void);
1092
1093#define rcu_dereference_check_sched_domain(p) \
1094	rcu_dereference_check((p), \
1095			      lockdep_is_held(&sched_domains_mutex))
1096
1097/*
1098 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1099 * See detach_destroy_domains: synchronize_sched for details.
1100 *
1101 * The domain tree of any CPU may only be accessed from within
1102 * preempt-disabled sections.
1103 */
1104#define for_each_domain(cpu, __sd) \
1105	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
1106			__sd; __sd = __sd->parent)
1107
1108#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
1109
1110/**
1111 * highest_flag_domain - Return highest sched_domain containing flag.
1112 * @cpu:	The CPU whose highest level of sched domain is to
1113 *		be returned.
1114 * @flag:	The flag to check for the highest sched_domain
1115 *		for the given CPU.
1116 *
1117 * Returns the highest sched_domain of a CPU which contains the given flag.
1118 */
1119static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
1120{
1121	struct sched_domain *sd, *hsd = NULL;
1122
1123	for_each_domain(cpu, sd) {
1124		if (!(sd->flags & flag))
1125			break;
1126		hsd = sd;
1127	}
1128
1129	return hsd;
1130}
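/*
 * Usage sketch: this is how the per-CPU sd_llc pointer declared below is
 * found, i.e. roughly highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES),
 * the widest domain whose CPUs still share a last-level cache.
 */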
1131
1132static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
1133{
1134	struct sched_domain *sd;
1135
1136	for_each_domain(cpu, sd) {
1137		if (sd->flags & flag)
1138			break;
1139	}
1140
1141	return sd;
1142}
1143
1144DECLARE_PER_CPU(struct sched_domain *, sd_llc);
1145DECLARE_PER_CPU(int, sd_llc_size);
1146DECLARE_PER_CPU(int, sd_llc_id);
1147DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
1148DECLARE_PER_CPU(struct sched_domain *, sd_numa);
1149DECLARE_PER_CPU(struct sched_domain *, sd_asym);
1150
1151struct sched_group_capacity {
1152	atomic_t		ref;
1153	/*
1154	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
1155	 * for a single CPU.
1156	 */
1157	unsigned long		capacity;
1158	unsigned long		min_capacity;		/* Min per-CPU capacity in group */
1159	unsigned long		next_update;
1160	int			imbalance;		/* XXX unrelated to capacity but shared group state */
1161
1162#ifdef CONFIG_SCHED_DEBUG
1163	int			id;
1164#endif
1165
1166	unsigned long		cpumask[0];		/* Balance mask */
1167};
1168
1169struct sched_group {
1170	struct sched_group	*next;			/* Must be a circular list */
1171	atomic_t		ref;
1172
1173	unsigned int		group_weight;
1174	struct sched_group_capacity *sgc;
1175	int			asym_prefer_cpu;	/* CPU of highest priority in group */
1176
1177	/*
1178	 * The CPUs this group covers.
1179	 *
1180	 * NOTE: this field is variable length. (Allocated dynamically
1181	 * by attaching extra space to the end of the structure,
1182	 * depending on how many CPUs the kernel has booted up with)
1183	 */
1184	unsigned long		cpumask[0];
1185};
1186
1187static inline struct cpumask *sched_group_span(struct sched_group *sg)
1188{
1189	return to_cpumask(sg->cpumask);
1190}
1191
1192/*
1193 * See build_balance_mask().
1194 */
1195static inline struct cpumask *group_balance_mask(struct sched_group *sg)
1196{
1197	return to_cpumask(sg->sgc->cpumask);
1198}
1199
1200/**
1201 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
1202 * @group: The group whose first CPU is to be returned.
1203 */
1204static inline unsigned int group_first_cpu(struct sched_group *group)
1205{
1206	return cpumask_first(sched_group_span(group));
1207}
1208
1209extern int group_balance_cpu(struct sched_group *sg);
1210
1211#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
1212void register_sched_domain_sysctl(void);
1213void dirty_sched_domain_sysctl(int cpu);
1214void unregister_sched_domain_sysctl(void);
1215#else
1216static inline void register_sched_domain_sysctl(void)
1217{
1218}
1219static inline void dirty_sched_domain_sysctl(int cpu)
1220{
1221}
1222static inline void unregister_sched_domain_sysctl(void)
1223{
1224}
1225#endif
1226
1227#else
1228
1229static inline void sched_ttwu_pending(void) { }
1230
1231#endif /* CONFIG_SMP */
1232
1233#include "stats.h"
1234#include "autogroup.h"
1235
1236#ifdef CONFIG_CGROUP_SCHED
1237
1238/*
1239 * Return the group to which this task belongs.
1240 *
1241 * We cannot use task_css() and friends because the cgroup subsystem
1242 * changes that value before the cgroup_subsys::attach() method is called,
1243 * therefore we cannot pin it and might observe the wrong value.
1244 *
1245 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
1246 * core changes this before calling sched_move_task().
1247 *
1248 * Instead we use a 'copy' which is updated from sched_move_task() while
1249 * holding both task_struct::pi_lock and rq::lock.
1250 */
1251static inline struct task_group *task_group(struct task_struct *p)
1252{
1253	return p->sched_task_group;
1254}
1255
1256/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
1257static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
1258{
1259#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
1260	struct task_group *tg = task_group(p);
1261#endif
1262
1263#ifdef CONFIG_FAIR_GROUP_SCHED
1264	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
1265	p->se.cfs_rq = tg->cfs_rq[cpu];
1266	p->se.parent = tg->se[cpu];
1267#endif
1268
1269#ifdef CONFIG_RT_GROUP_SCHED
1270	p->rt.rt_rq  = tg->rt_rq[cpu];
1271	p->rt.parent = tg->rt_se[cpu];
1272#endif
1273}
1274
1275#else /* CONFIG_CGROUP_SCHED */
1276
1277static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
1278static inline struct task_group *task_group(struct task_struct *p)
1279{
1280	return NULL;
1281}
1282
1283#endif /* CONFIG_CGROUP_SCHED */
1284
1285static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1286{
1287	set_task_rq(p, cpu);
1288#ifdef CONFIG_SMP
1289	/*
1290	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1291	 * successfully executed on another CPU. We must ensure that updates of
1292	 * per-task data have been completed by this moment.
1293	 */
1294	smp_wmb();
1295#ifdef CONFIG_THREAD_INFO_IN_TASK
1296	p->cpu = cpu;
1297#else
1298	task_thread_info(p)->cpu = cpu;
1299#endif
1300	p->wake_cpu = cpu;
1301#endif
1302}
1303
1304/*
1305 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
1306 */
1307#ifdef CONFIG_SCHED_DEBUG
1308# include <linux/static_key.h>
1309# define const_debug __read_mostly
1310#else
1311# define const_debug const
1312#endif
1313
1314#define SCHED_FEAT(name, enabled)	\
1315	__SCHED_FEAT_##name ,
1316
1317enum {
1318#include "features.h"
1319	__SCHED_FEAT_NR,
1320};
1321
1322#undef SCHED_FEAT
1323
1324#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
1325
1326/*
1327 * To support run-time toggling of sched features, all the translation units
1328 * (except core.c) reference the sysctl_sched_features defined in core.c.
1329 */
1330extern const_debug unsigned int sysctl_sched_features;
1331
1332#define SCHED_FEAT(name, enabled)					\
1333static __always_inline bool static_branch_##name(struct static_key *key) \
1334{									\
1335	return static_key_##enabled(key);				\
1336}
1337
1338#include "features.h"
1339#undef SCHED_FEAT
1340
1341extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1342#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1343
1344#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
1345
1346/*
1347 * Each translation unit has its own copy of sysctl_sched_features to allow
1348 * constant propagation at compile time and compiler optimization based on
1349 * the features' defaults.
1350 */
1351#define SCHED_FEAT(name, enabled)	\
1352	(1UL << __SCHED_FEAT_##name) * enabled |
1353static const_debug __maybe_unused unsigned int sysctl_sched_features =
1354#include "features.h"
1355	0;
1356#undef SCHED_FEAT
1357
1358#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
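/*
 * As a sketch of the expansion above: an entry such as
 * SCHED_FEAT(HRTICK, false) in features.h contributes
 * (1UL << __SCHED_FEAT_HRTICK) * false to the initializer, so disabled
 * features add 0 and enabled ones set their bit in sysctl_sched_features.
 */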
1359
1360#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
1361
1362extern struct static_key_false sched_numa_balancing;
1363extern struct static_key_false sched_schedstats;
1364
1365static inline u64 global_rt_period(void)
1366{
1367	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1368}
1369
1370static inline u64 global_rt_runtime(void)
1371{
1372	if (sysctl_sched_rt_runtime < 0)
1373		return RUNTIME_INF;
1374
1375	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
1376}
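/*
 * With the default sysctl values (sched_rt_period_us == 1000000,
 * sched_rt_runtime_us == 950000) these return 1s and 0.95s in nanoseconds,
 * i.e. RT/DL tasks may consume at most 95% of each period.
 */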
1377
1378static inline int task_current(struct rq *rq, struct task_struct *p)
1379{
1380	return rq->curr == p;
1381}
1382
1383static inline int task_running(struct rq *rq, struct task_struct *p)
1384{
1385#ifdef CONFIG_SMP
1386	return p->on_cpu;
1387#else
1388	return task_current(rq, p);
1389#endif
1390}
1391
1392static inline int task_on_rq_queued(struct task_struct *p)
1393{
1394	return p->on_rq == TASK_ON_RQ_QUEUED;
1395}
1396
1397static inline int task_on_rq_migrating(struct task_struct *p)
1398{
1399	return p->on_rq == TASK_ON_RQ_MIGRATING;
1400}
1401
1402/*
1403 * wake flags
1404 */
1405#define WF_SYNC			0x01		/* Waker goes to sleep after wakeup */
1406#define WF_FORK			0x02		/* Child wakeup after fork */
1407#define WF_MIGRATED		0x4		/* Internal use, task got migrated */
1408
1409/*
1410 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1411 * of tasks with abnormal "nice" values across CPUs, the contribution that
1412 * each task makes to its run queue's load is weighted according to its
1413 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1414 * scaled version of the new time slice allocation that they receive on time
1415 * slice expiry etc.
1416 */
1417
1418#define WEIGHT_IDLEPRIO		3
1419#define WMULT_IDLEPRIO		1431655765
1420
1421extern const int		sched_prio_to_weight[40];
1422extern const u32		sched_prio_to_wmult[40];
1423
1424/*
1425 * {de,en}queue flags:
1426 *
1427 * DEQUEUE_SLEEP  - task is no longer runnable
1428 * ENQUEUE_WAKEUP - task just became runnable
1429 *
1430 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
1431 *                are in a known state which allows modification. Such pairs
1432 *                should preserve as much state as possible.
1433 *
1434 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
1435 *        in the runqueue.
1436 *
1437 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
1438 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
1439 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
1440 *
1441 */
1442
1443#define DEQUEUE_SLEEP		0x01
1444#define DEQUEUE_SAVE		0x02 /* Matches ENQUEUE_RESTORE */
1445#define DEQUEUE_MOVE		0x04 /* Matches ENQUEUE_MOVE */
1446#define DEQUEUE_NOCLOCK		0x08 /* Matches ENQUEUE_NOCLOCK */
1447
1448#define ENQUEUE_WAKEUP		0x01
1449#define ENQUEUE_RESTORE		0x02
1450#define ENQUEUE_MOVE		0x04
1451#define ENQUEUE_NOCLOCK		0x08
1452
1453#define ENQUEUE_HEAD		0x10
1454#define ENQUEUE_REPLENISH	0x20
1455#ifdef CONFIG_SMP
1456#define ENQUEUE_MIGRATED	0x40
1457#else
1458#define ENQUEUE_MIGRATED	0x00
1459#endif
1460
1461#define RETRY_TASK		((void *)-1UL)
1462
1463struct sched_class {
1464	const struct sched_class *next;
1465
1466	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1467	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1468	void (*yield_task)   (struct rq *rq);
1469	bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);
1470
1471	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
1472
1473	/*
1474	 * The pick_next_task() method that returns the next task is
1475	 * responsible for calling put_prev_task() on the @prev task, or
1476	 * something equivalent.
1477	 *
1478	 * May return RETRY_TASK when it finds a higher prio class has runnable
1479	 * tasks.
1480	 */
1481	struct task_struct * (*pick_next_task)(struct rq *rq,
1482					       struct task_struct *prev,
1483					       struct rq_flags *rf);
1484	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
1485
1486#ifdef CONFIG_SMP
1487	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1488	void (*migrate_task_rq)(struct task_struct *p);
1489
1490	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
1491
1492	void (*set_cpus_allowed)(struct task_struct *p,
1493				 const struct cpumask *newmask);
1494
1495	void (*rq_online)(struct rq *rq);
1496	void (*rq_offline)(struct rq *rq);
1497#endif
1498
1499	void (*set_curr_task)(struct rq *rq);
1500	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
1501	void (*task_fork)(struct task_struct *p);
1502	void (*task_dead)(struct task_struct *p);
1503
1504	/*
1505	 * The switched_from() call is allowed to drop rq->lock, therefore we
1506	 * cannot assume the switched_from/switched_to pair is serialized by
1507	 * rq->lock. They are, however, serialized by p->pi_lock.
1508	 */
1509	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
1510	void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
1511	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1512			      int oldprio);
1513
1514	unsigned int (*get_rr_interval)(struct rq *rq,
1515					struct task_struct *task);
1516
1517	void (*update_curr)(struct rq *rq);
1518
1519#define TASK_SET_GROUP		0
1520#define TASK_MOVE_GROUP		1
1521
1522#ifdef CONFIG_FAIR_GROUP_SCHED
1523	void (*task_change_group)(struct task_struct *p, int type);
1524#endif
1525};
1526
1527static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1528{
1529	prev->sched_class->put_prev_task(rq, prev);
1530}
1531
1532static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
1533{
1534	curr->sched_class->set_curr_task(rq);
1535}
1536
1537#ifdef CONFIG_SMP
1538#define sched_class_highest (&stop_sched_class)
1539#else
1540#define sched_class_highest (&dl_sched_class)
1541#endif
1542#define for_each_class(class) \
1543   for (class = sched_class_highest; class; class = class->next)
1544
1545extern const struct sched_class stop_sched_class;
1546extern const struct sched_class dl_sched_class;
1547extern const struct sched_class rt_sched_class;
1548extern const struct sched_class fair_sched_class;
1549extern const struct sched_class idle_sched_class;
1550
1551
1552#ifdef CONFIG_SMP
1553
1554extern void update_group_capacity(struct sched_domain *sd, int cpu);
1555
1556extern void trigger_load_balance(struct rq *rq);
1557
1558extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1559
1560#endif
1561
1562#ifdef CONFIG_CPU_IDLE
1563static inline void idle_set_state(struct rq *rq,
1564				  struct cpuidle_state *idle_state)
1565{
1566	rq->idle_state = idle_state;
1567}
1568
1569static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1570{
1571	SCHED_WARN_ON(!rcu_read_lock_held());
1572
1573	return rq->idle_state;
1574}
1575#else
1576static inline void idle_set_state(struct rq *rq,
1577				  struct cpuidle_state *idle_state)
1578{
1579}
1580
1581static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1582{
1583	return NULL;
1584}
1585#endif
1586
1587extern void schedule_idle(void);
1588
1589extern void sysrq_sched_debug_show(void);
1590extern void sched_init_granularity(void);
1591extern void update_max_interval(void);
1592
1593extern void init_sched_dl_class(void);
1594extern void init_sched_rt_class(void);
1595extern void init_sched_fair_class(void);
1596
1597extern void reweight_task(struct task_struct *p, int prio);
1598
1599extern void resched_curr(struct rq *rq);
1600extern void resched_cpu(int cpu);
1601
1602extern struct rt_bandwidth def_rt_bandwidth;
1603extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1604
1605extern struct dl_bandwidth def_dl_bandwidth;
1606extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1607extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1608extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
1609extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
1610
1611#define BW_SHIFT		20
1612#define BW_UNIT			(1 << BW_SHIFT)
1613#define RATIO_SHIFT		8
1614unsigned long to_ratio(u64 period, u64 runtime);
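/*
 * to_ratio(period, runtime) returns runtime/period as a BW_UNIT fixed-point
 * fraction, i.e. roughly (runtime << BW_SHIFT) / period; for example a
 * reservation of 25ms every 100ms maps to BW_UNIT / 4 == 262144.
 */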
1615
1616extern void init_entity_runnable_average(struct sched_entity *se);
1617extern void post_init_entity_util_avg(struct sched_entity *se);
1618
1619#ifdef CONFIG_NO_HZ_FULL
1620extern bool sched_can_stop_tick(struct rq *rq);
1621extern int __init sched_tick_offload_init(void);
1622
1623/*
1624 * The tick may be needed by tasks in the runqueue depending on their policy and
1625 * requirements. If the tick is needed, let's send the target an IPI to kick it
1626 * out of nohz mode if necessary.
1627 */
1628static inline void sched_update_tick_dependency(struct rq *rq)
1629{
1630	int cpu;
1631
1632	if (!tick_nohz_full_enabled())
1633		return;
1634
1635	cpu = cpu_of(rq);
1636
1637	if (!tick_nohz_full_cpu(cpu))
1638		return;
1639
1640	if (sched_can_stop_tick(rq))
1641		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
1642	else
1643		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
1644}
1645#else
1646static inline int sched_tick_offload_init(void) { return 0; }
1647static inline void sched_update_tick_dependency(struct rq *rq) { }
1648#endif
1649
1650static inline void add_nr_running(struct rq *rq, unsigned count)
1651{
1652	unsigned prev_nr = rq->nr_running;
1653
1654	rq->nr_running = prev_nr + count;
1655
1656	if (prev_nr < 2 && rq->nr_running >= 2) {
1657#ifdef CONFIG_SMP
1658		if (!rq->rd->overload)
1659			rq->rd->overload = true;
1660#endif
1661	}
1662
1663	sched_update_tick_dependency(rq);
1664}
1665
1666static inline void sub_nr_running(struct rq *rq, unsigned count)
1667{
1668	rq->nr_running -= count;
1669	/* Check if we still need preemption */
1670	sched_update_tick_dependency(rq);
1671}
1672
1673extern void update_rq_clock(struct rq *rq);
1674
1675extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1676extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1677
1678extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1679
1680extern const_debug unsigned int sysctl_sched_time_avg;
1681extern const_debug unsigned int sysctl_sched_nr_migrate;
1682extern const_debug unsigned int sysctl_sched_migration_cost;
1683
1684static inline u64 sched_avg_period(void)
1685{
1686	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1687}
1688
1689#ifdef CONFIG_SCHED_HRTICK
1690
1691/*
1692 * Use hrtick when:
1693 *  - enabled by features
1694 *  - hrtimer is actually high res
1695 */
1696static inline int hrtick_enabled(struct rq *rq)
1697{
1698	if (!sched_feat(HRTICK))
1699		return 0;
1700	if (!cpu_active(cpu_of(rq)))
1701		return 0;
1702	return hrtimer_is_hres_active(&rq->hrtick_timer);
1703}
1704
1705void hrtick_start(struct rq *rq, u64 delay);
1706
1707#else
1708
1709static inline int hrtick_enabled(struct rq *rq)
1710{
1711	return 0;
1712}
1713
1714#endif /* CONFIG_SCHED_HRTICK */
1715
1716#ifndef arch_scale_freq_capacity
1717static __always_inline
1718unsigned long arch_scale_freq_capacity(int cpu)
1719{
1720	return SCHED_CAPACITY_SCALE;
1721}
1722#endif
1723
1724#ifdef CONFIG_SMP
1725extern void sched_avg_update(struct rq *rq);
1726
1727#ifndef arch_scale_cpu_capacity
1728static __always_inline
1729unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
1730{
1731	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
1732		return sd->smt_gain / sd->span_weight;
1733
1734	return SCHED_CAPACITY_SCALE;
1735}
1736#endif
1737
1738static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1739{
1740	rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
1741	sched_avg_update(rq);
1742}
1743#else
1744#ifndef arch_scale_cpu_capacity
1745static __always_inline
1746unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
1747{
1748	return SCHED_CAPACITY_SCALE;
1749}
1750#endif
1751static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1752static inline void sched_avg_update(struct rq *rq) { }
1753#endif
1754
1755struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1756	__acquires(rq->lock);
1757
1758struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1759	__acquires(p->pi_lock)
1760	__acquires(rq->lock);
1761
1762static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1763	__releases(rq->lock)
1764{
1765	rq_unpin_lock(rq, rf);
1766	raw_spin_unlock(&rq->lock);
1767}
1768
1769static inline void
1770task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1771	__releases(rq->lock)
1772	__releases(p->pi_lock)
1773{
1774	rq_unpin_lock(rq, rf);
1775	raw_spin_unlock(&rq->lock);
1776	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
1777}
1778
1779static inline void
1780rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
1781	__acquires(rq->lock)
1782{
1783	raw_spin_lock_irqsave(&rq->lock, rf->flags);
1784	rq_pin_lock(rq, rf);
1785}
1786
1787static inline void
1788rq_lock_irq(struct rq *rq, struct rq_flags *rf)
1789	__acquires(rq->lock)
1790{
1791	raw_spin_lock_irq(&rq->lock);
1792	rq_pin_lock(rq, rf);
1793}
1794
1795static inline void
1796rq_lock(struct rq *rq, struct rq_flags *rf)
1797	__acquires(rq->lock)
1798{
1799	raw_spin_lock(&rq->lock);
1800	rq_pin_lock(rq, rf);
1801}
1802
1803static inline void
1804rq_relock(struct rq *rq, struct rq_flags *rf)
1805	__acquires(rq->lock)
1806{
1807	raw_spin_lock(&rq->lock);
1808	rq_repin_lock(rq, rf);
1809}
1810
1811static inline void
1812rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
1813	__releases(rq->lock)
1814{
1815	rq_unpin_lock(rq, rf);
1816	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
1817}
1818
1819static inline void
1820rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
1821	__releases(rq->lock)
1822{
1823	rq_unpin_lock(rq, rf);
1824	raw_spin_unlock_irq(&rq->lock);
1825}
1826
1827static inline void
1828rq_unlock(struct rq *rq, struct rq_flags *rf)
1829	__releases(rq->lock)
1830{
1831	rq_unpin_lock(rq, rf);
1832	raw_spin_unlock(&rq->lock);
1833}
1834
1835#ifdef CONFIG_SMP
1836#ifdef CONFIG_PREEMPT
1837
1838static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1839
1840/*
1841 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1842 * way at the expense of forcing extra atomic operations in all
1843 * invocations.  This assures that the double_lock is acquired using the
1844 * same underlying policy as the spinlock_t on this architecture, which
1845 * reduces latency compared to the unfair variant below.  However, it
1846 * also adds more overhead and therefore may reduce throughput.
1847 */
1848static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1849	__releases(this_rq->lock)
1850	__acquires(busiest->lock)
1851	__acquires(this_rq->lock)
1852{
1853	raw_spin_unlock(&this_rq->lock);
1854	double_rq_lock(this_rq, busiest);
1855
1856	return 1;
1857}
1858
1859#else
1860/*
1861 * Unfair double_lock_balance: Optimizes throughput at the expense of
1862 * latency by eliminating extra atomic operations when the locks are
1863 * already in proper order on entry.  This favors lower CPU-ids and will
1864 * grant the double lock to lower CPUs over higher ids under contention,
1865 * regardless of entry order into the function.
1866 */
1867static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1868	__releases(this_rq->lock)
1869	__acquires(busiest->lock)
1870	__acquires(this_rq->lock)
1871{
1872	int ret = 0;
1873
1874	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1875		if (busiest < this_rq) {
1876			raw_spin_unlock(&this_rq->lock);
1877			raw_spin_lock(&busiest->lock);
1878			raw_spin_lock_nested(&this_rq->lock,
1879					      SINGLE_DEPTH_NESTING);
1880			ret = 1;
1881		} else
1882			raw_spin_lock_nested(&busiest->lock,
1883					      SINGLE_DEPTH_NESTING);
1884	}
1885	return ret;
1886}
1887
1888#endif /* CONFIG_PREEMPT */
1889
1890/*
1891 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1892 */
1893static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1894{
1895	if (unlikely(!irqs_disabled())) {
1896		/* printk() doesn't work well under rq->lock */
1897		raw_spin_unlock(&this_rq->lock);
1898		BUG_ON(1);
1899	}
1900
1901	return _double_lock_balance(this_rq, busiest);
1902}
1903
1904static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1905	__releases(busiest->lock)
1906{
1907	raw_spin_unlock(&busiest->lock);
1908	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1909}
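/*
 * Usage sketch (not part of this header), loosely modelled on the RT/DL
 * push paths: this_rq->lock is already held on entry, and a non-zero
 * return from double_lock_balance() means this_rq->lock was dropped and
 * re-acquired, so anything derived under it must be re-validated.
 * recheck_task() below is a hypothetical stand-in for that re-validation.
 *
 *	if (double_lock_balance(this_rq, busiest))
 *		recheck_task();		// lock was dropped: task may have moved
 *	... migrate tasks between the two runqueues ...
 *	double_unlock_balance(this_rq, busiest);
 */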
1910
1911static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
1912{
1913	if (l1 > l2)
1914		swap(l1, l2);
1915
1916	spin_lock(l1);
1917	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1918}
1919
1920static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
1921{
1922	if (l1 > l2)
1923		swap(l1, l2);
1924
1925	spin_lock_irq(l1);
1926	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1927}
1928
1929static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
1930{
1931	if (l1 > l2)
1932		swap(l1, l2);
1933
1934	raw_spin_lock(l1);
1935	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1936}
1937
1938/*
1939 * double_rq_lock - safely lock two runqueues
1940 *
1941 * Note this does not disable interrupts like task_rq_lock,
1942 * you need to do so manually before calling.
1943 */
1944static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1945	__acquires(rq1->lock)
1946	__acquires(rq2->lock)
1947{
1948	BUG_ON(!irqs_disabled());
1949	if (rq1 == rq2) {
1950		raw_spin_lock(&rq1->lock);
1951		__acquire(rq2->lock);	/* Fake it out ;) */
1952	} else {
1953		if (rq1 < rq2) {
1954			raw_spin_lock(&rq1->lock);
1955			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1956		} else {
1957			raw_spin_lock(&rq2->lock);
1958			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1959		}
1960	}
1961}
1962
1963/*
1964 * double_rq_unlock - safely unlock two runqueues
1965 *
1966 * Note this does not restore interrupts like task_rq_unlock,
1967 * you need to do so manually after calling.
1968 */
1969static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1970	__releases(rq1->lock)
1971	__releases(rq2->lock)
1972{
1973	raw_spin_unlock(&rq1->lock);
1974	if (rq1 != rq2)
1975		raw_spin_unlock(&rq2->lock);
1976	else
1977		__release(rq2->lock);
1978}
1979
1980extern void set_rq_online (struct rq *rq);
1981extern void set_rq_offline(struct rq *rq);
1982extern bool sched_smp_initialized;
1983
1984#else /* CONFIG_SMP */
1985
1986/*
1987 * double_rq_lock - safely lock two runqueues
1988 *
1989 * Note this does not disable interrupts like task_rq_lock,
1990 * you need to do so manually before calling.
1991 */
1992static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1993	__acquires(rq1->lock)
1994	__acquires(rq2->lock)
1995{
1996	BUG_ON(!irqs_disabled());
1997	BUG_ON(rq1 != rq2);
1998	raw_spin_lock(&rq1->lock);
1999	__acquire(rq2->lock);	/* Fake it out ;) */
2000}
2001
2002/*
2003 * double_rq_unlock - safely unlock two runqueues
2004 *
2005 * Note this does not restore interrupts like task_rq_unlock,
2006 * you need to do so manually after calling.
2007 */
2008static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2009	__releases(rq1->lock)
2010	__releases(rq2->lock)
2011{
2012	BUG_ON(rq1 != rq2);
2013	raw_spin_unlock(&rq1->lock);
2014	__release(rq2->lock);
2015}
2016
2017#endif
2018
2019extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
2020extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
2021
2022#ifdef	CONFIG_SCHED_DEBUG
2023extern bool sched_debug_enabled;
2024
2025extern void print_cfs_stats(struct seq_file *m, int cpu);
2026extern void print_rt_stats(struct seq_file *m, int cpu);
2027extern void print_dl_stats(struct seq_file *m, int cpu);
2028extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
2029extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2030extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
2031#ifdef CONFIG_NUMA_BALANCING
2032extern void
2033show_numa_stats(struct task_struct *p, struct seq_file *m);
2034extern void
2035print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
2036	unsigned long tpf, unsigned long gsf, unsigned long gpf);
2037#endif /* CONFIG_NUMA_BALANCING */
2038#endif /* CONFIG_SCHED_DEBUG */
2039
2040extern void init_cfs_rq(struct cfs_rq *cfs_rq);
2041extern void init_rt_rq(struct rt_rq *rt_rq);
2042extern void init_dl_rq(struct dl_rq *dl_rq);
2043
2044extern void cfs_bandwidth_usage_inc(void);
2045extern void cfs_bandwidth_usage_dec(void);
2046
2047#ifdef CONFIG_NO_HZ_COMMON
2048#define NOHZ_BALANCE_KICK_BIT	0
2049#define NOHZ_STATS_KICK_BIT	1
2050
2051#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
2052#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
2053
2054#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
2055
2056#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
2057
2058extern void nohz_balance_exit_idle(struct rq *rq);
2059#else
2060static inline void nohz_balance_exit_idle(struct rq *rq) { }
2061#endif
2062
2063
2064#ifdef CONFIG_SMP
2065static inline
2066void __dl_update(struct dl_bw *dl_b, s64 bw)
2067{
2068	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
2069	int i;
2070
2071	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2072			 "sched RCU must be held");
2073	for_each_cpu_and(i, rd->span, cpu_active_mask) {
2074		struct rq *rq = cpu_rq(i);
2075
2076		rq->dl.extra_bw += bw;
2077	}
2078}
2079#else
2080static inline
2081void __dl_update(struct dl_bw *dl_b, s64 bw)
2082{
2083	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
2084
2085	dl->extra_bw += bw;
2086}
2087#endif
2088
2089
2090#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2091struct irqtime {
2092	u64			total;
2093	u64			tick_delta;
2094	u64			irq_start_time;
2095	struct u64_stats_sync	sync;
2096};
2097
2098DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
2099
2100/*
2101 * Returns the irqtime minus the softirq time computed by ksoftirqd.
2102 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
2103 * subtracted from it and would never move forward.
2104 */
2105static inline u64 irq_time_read(int cpu)
2106{
2107	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
2108	unsigned int seq;
2109	u64 total;
2110
2111	do {
2112		seq = __u64_stats_fetch_begin(&irqtime->sync);
2113		total = irqtime->total;
2114	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
2115
2116	return total;
2117}
2118#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
2119
2120#ifdef CONFIG_CPU_FREQ
2121DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
2122
2123/**
2124 * cpufreq_update_util - Take a note about CPU utilization changes.
2125 * @rq: Runqueue to carry out the update for.
2126 * @flags: Update reason flags.
2127 *
2128 * This function is called by the scheduler on the CPU whose utilization is
2129 * being updated.
2130 *
2131 * It can only be called from RCU-sched read-side critical sections.
2132 *
2133 * The way cpufreq is currently arranged requires it to evaluate the CPU
2134 * performance state (frequency/voltage) on a regular basis to prevent it from
2135 * being stuck in a completely inadequate performance level for too long.
2136 * That is not guaranteed to happen if the updates are only triggered from CFS
2137 * and DL, though, because they may not be coming in if only RT tasks are
2138 * active all the time (or there are RT tasks only).
2139 *
2140 * As a workaround for that issue, this function is called periodically by the
2141 * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
2142 * but that really is a band-aid.  Going forward it should be replaced with
2143 * solutions targeted more specifically at RT tasks.
2144 */
2145static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
2146{
2147	struct update_util_data *data;
2148
2149	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
2150						  cpu_of(rq)));
2151	if (data)
2152		data->func(data, rq_clock(rq), flags);
2153}
2154#else
2155static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 
2156#endif /* CONFIG_CPU_FREQ */
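/*
 * Illustrative sketch (not part of this header): how a cpufreq governor
 * might plug into the cpufreq_update_util() hook above. The per-CPU data
 * and my_governor_update() are hypothetical names; the registration
 * helpers are the real interfaces from kernel/sched/cpufreq.c.
 *
 *	static DEFINE_PER_CPU(struct update_util_data, my_update_util);
 *
 *	static void my_governor_update(struct update_util_data *data,
 *				       u64 time, unsigned int flags)
 *	{
 *		// Runs from scheduler context (RCU-sched read side):
 *		// must not sleep, should only kick off a frequency change.
 *	}
 *
 *	// Governor start, for each CPU it manages:
 *	cpufreq_add_update_util_hook(cpu, &per_cpu(my_update_util, cpu),
 *				     my_governor_update);
 *	// Governor stop, then synchronize_sched() before freeing anything:
 *	cpufreq_remove_update_util_hook(cpu);
 */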
2157
2158#ifdef arch_scale_freq_capacity
2159# ifndef arch_scale_freq_invariant
2160#  define arch_scale_freq_invariant()	true
2161# endif
2162#else
2163# define arch_scale_freq_invariant()	false
2164#endif
2165
2166#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
2167static inline unsigned long cpu_util_dl(struct rq *rq)
2168{
2169	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
2170}
2171
2172static inline unsigned long cpu_util_cfs(struct rq *rq)
2173{
2174	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
2175
2176	if (sched_feat(UTIL_EST)) {
2177		util = max_t(unsigned long, util,
2178			     READ_ONCE(rq->cfs.avg.util_est.enqueued));
2179	}
2180
2181	return util;
2182}
2183#endif
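/*
 * Sketch of how the schedutil governor combines the two helpers above
 * (roughly what kernel/sched/cpufreq_schedutil.c does; the arithmetic is
 * illustrative rather than a verbatim copy):
 *
 *	util = min(capacity, cpu_util_cfs(rq) + cpu_util_dl(rq));
 *	next_freq = 1.25 * max_freq * util / capacity;
 *
 * so a CPU that looks ~50% utilized is asked for ~62% of its maximum
 * frequency, leaving headroom for the load to grow.
 */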
/* ===== v4.10.11 version of this header follows ===== */
   1
   2#include <linux/sched.h>
   3#include <linux/sched/sysctl.h>
   4#include <linux/sched/rt.h>
   5#include <linux/u64_stats_sync.h>
   6#include <linux/sched/deadline.h>
   7#include <linux/binfmts.h>
   8#include <linux/mutex.h>
   9#include <linux/spinlock.h>
  10#include <linux/stop_machine.h>
  11#include <linux/irq_work.h>
  12#include <linux/tick.h>
  13#include <linux/slab.h>
  14
  15#include "cpupri.h"
  16#include "cpudeadline.h"
  17#include "cpuacct.h"
  18
  19#ifdef CONFIG_SCHED_DEBUG
  20#define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
  21#else
  22#define SCHED_WARN_ON(x)	((void)(x))
  23#endif
  24
  25struct rq;
  26struct cpuidle_state;
  27
  28/* task_struct::on_rq states: */
  29#define TASK_ON_RQ_QUEUED	1
  30#define TASK_ON_RQ_MIGRATING	2
  31
  32extern __read_mostly int scheduler_running;
  33
  34extern unsigned long calc_load_update;
  35extern atomic_long_t calc_load_tasks;
  36
  37extern void calc_global_load_tick(struct rq *this_rq);
  38extern long calc_load_fold_active(struct rq *this_rq, long adjust);
  39
  40#ifdef CONFIG_SMP
  41extern void cpu_load_update_active(struct rq *this_rq);
  42#else
  43static inline void cpu_load_update_active(struct rq *this_rq) { }
  44#endif
  45
  46/*
  47 * Helpers for converting nanosecond timing to jiffy resolution
  48 */
  49#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
  50
  51/*
  52 * Increase resolution of nice-level calculations for 64-bit architectures.
  53 * The extra resolution improves shares distribution and load balancing of
  54 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
  55 * hierarchies, especially on larger systems. This is not a user-visible change
  56 * and does not change the user-interface for setting shares/weights.
  57 *
  58 * We increase resolution only if we have enough bits to allow this increased
  59 * resolution (i.e. 64bit). The costs for increasing resolution when 32bit are
  60 * pretty high and the returns do not justify the increased costs.
  61 *
  62 * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to
  63 * increase coverage and consistency always enable it on 64bit platforms.
  64 */
  65#ifdef CONFIG_64BIT
  66# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
  67# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
  68# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
  69#else
  70# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
  71# define scale_load(w)		(w)
  72# define scale_load_down(w)	(w)
  73#endif
  74
  75/*
  76 * Task weight (visible to users) and its load (invisible to users) have
  77 * independent resolution, but they should be well calibrated. We use
  78 * scale_load() and scale_load_down() to convert between them. The
  79 * following must be true:
  80 *
  81 *  scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
  82 *
  83 */
  84#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
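/*
 * Worked example on 64-bit: SCHED_FIXEDPOINT_SHIFT is 10, so
 * NICE_0_LOAD_SHIFT is 20 and NICE_0_LOAD == 1 << 20 == 1048576. The
 * user-visible nice-0 weight is sched_prio_to_weight[20] == 1024, and
 * scale_load(1024) == 1024 << 10 == NICE_0_LOAD, which is exactly the
 * invariant stated above.
 */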
  85
  86/*
  87 * Single value that decides SCHED_DEADLINE internal math precision.
  88 * 10 -> just above 1us
  89 * 9  -> just above 0.5us
  90 */
  91#define DL_SCALE (10)
  92
  93/*
  94 * These are the 'tuning knobs' of the scheduler:
  95 */
  96
  97/*
  98 * single value that denotes runtime == period, ie unlimited time.
  99 */
 100#define RUNTIME_INF	((u64)~0ULL)
 101
 102static inline int idle_policy(int policy)
 103{
 104	return policy == SCHED_IDLE;
 105}
 106static inline int fair_policy(int policy)
 107{
 108	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
 109}
 110
 111static inline int rt_policy(int policy)
 112{
 113	return policy == SCHED_FIFO || policy == SCHED_RR;
 114}
 115
 116static inline int dl_policy(int policy)
 117{
 118	return policy == SCHED_DEADLINE;
 119}
 120static inline bool valid_policy(int policy)
 121{
 122	return idle_policy(policy) || fair_policy(policy) ||
 123		rt_policy(policy) || dl_policy(policy);
 124}
 125
 126static inline int task_has_rt_policy(struct task_struct *p)
 127{
 128	return rt_policy(p->policy);
 129}
 130
 131static inline int task_has_dl_policy(struct task_struct *p)
 132{
 133	return dl_policy(p->policy);
 134}
 135
 136/*
 137 * Tells if entity @a should preempt entity @b.
 138 */
 139static inline bool
 140dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
 141{
 142	return dl_time_before(a->deadline, b->deadline);
 
 143}
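/*
 * E.g. with a->deadline == now + 10ms and b->deadline == now + 20ms this
 * returns true: classic earliest-deadline-first ordering.
 */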
 144
 145/*
 146 * This is the priority-queue data structure of the RT scheduling class:
 147 */
 148struct rt_prio_array {
 149	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
 150	struct list_head queue[MAX_RT_PRIO];
 151};
 152
 153struct rt_bandwidth {
 154	/* nests inside the rq lock: */
 155	raw_spinlock_t		rt_runtime_lock;
 156	ktime_t			rt_period;
 157	u64			rt_runtime;
 158	struct hrtimer		rt_period_timer;
 159	unsigned int		rt_period_active;
 160};
 161
 162void __dl_clear_params(struct task_struct *p);
 163
 164/*
 165 * To keep the bandwidth of -deadline tasks and groups under control
 166 * we need some place where:
 167 *  - store the maximum -deadline bandwidth of the system (the group);
 168 *  - cache the fraction of that bandwidth that is currently allocated.
 169 *
 170 * This is all done in the data structure below. It is similar to the
 171 * one used for RT-throttling (rt_bandwidth), with the main difference
 172 * that, since here we are only interested in admission control, we
 173 * do not decrease any runtime while the group "executes", neither we
 174 * need a timer to replenish it.
 175 *
 176 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 177 * meaning that:
 178 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 179 *  - dl_total_bw array contains, in the i-th element, the currently
 180 *    allocated bandwidth on the i-th CPU.
 181 * Moreover, groups consume bandwidth on each CPU, while tasks only
 182 * consume bandwidth on the CPU they're running on.
 183 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 184 * that will be shown the next time the proc or cgroup controls will
 185 * be read. It can in turn be changed by writing to its own
 186 * control.
 187 */
 188struct dl_bandwidth {
 189	raw_spinlock_t dl_runtime_lock;
 190	u64 dl_runtime;
 191	u64 dl_period;
 192};
 193
 194static inline int dl_bandwidth_enabled(void)
 195{
 196	return sysctl_sched_rt_runtime >= 0;
 197}
 198
 199extern struct dl_bw *dl_bw_of(int i);
 200
 201struct dl_bw {
 202	raw_spinlock_t lock;
 203	u64 bw, total_bw;
 
 204};
 205
 206static inline
 207void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
 208{
 209	dl_b->total_bw -= tsk_bw;
 
 210}
 211
 212static inline
 213void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
 214{
 215	dl_b->total_bw += tsk_bw;
 
 216}
 217
 218static inline
 219bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
 220{
 221	return dl_b->bw != -1 &&
 222	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
 223}
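/*
 * Worked example (a sketch of the admission-control path): a SCHED_DEADLINE
 * task asking for 10ms of runtime every 100ms contributes
 *
 *	new_bw = to_ratio(100 * NSEC_PER_MSEC, 10 * NSEC_PER_MSEC);
 *
 * i.e. roughly 10% of one CPU in fixed point. The setscheduler path only
 * admits it if __dl_overflow(dl_b, cpus, 0, new_bw) is false, and then
 * accounts the new reservation with __dl_add(dl_b, new_bw).
 */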
 224
 225extern struct mutex sched_domains_mutex;
 226
 227#ifdef CONFIG_CGROUP_SCHED
 228
 229#include <linux/cgroup.h>
 230
 231struct cfs_rq;
 232struct rt_rq;
 233
 234extern struct list_head task_groups;
 235
 236struct cfs_bandwidth {
 237#ifdef CONFIG_CFS_BANDWIDTH
 238	raw_spinlock_t lock;
 239	ktime_t period;
 240	u64 quota, runtime;
 241	s64 hierarchical_quota;
 242	u64 runtime_expires;
 243
 244	int idle, period_active;
 245	struct hrtimer period_timer, slack_timer;
 246	struct list_head throttled_cfs_rq;
 247
 248	/* statistics */
 249	int nr_periods, nr_throttled;
 250	u64 throttled_time;
 251#endif
 252};
 253
 254/* task group related information */
 255struct task_group {
 256	struct cgroup_subsys_state css;
 257
 258#ifdef CONFIG_FAIR_GROUP_SCHED
 259	/* schedulable entities of this group on each cpu */
 260	struct sched_entity **se;
 261	/* runqueue "owned" by this group on each cpu */
 262	struct cfs_rq **cfs_rq;
 263	unsigned long shares;
 264
 265#ifdef	CONFIG_SMP
 266	/*
 267	 * load_avg can be heavily contended at clock tick time, so put
 268	 * it in its own cacheline separated from the fields above which
 269	 * will also be accessed at each tick.
 270	 */
 271	atomic_long_t load_avg ____cacheline_aligned;
 272#endif
 273#endif
 274
 275#ifdef CONFIG_RT_GROUP_SCHED
 276	struct sched_rt_entity **rt_se;
 277	struct rt_rq **rt_rq;
 278
 279	struct rt_bandwidth rt_bandwidth;
 280#endif
 281
 282	struct rcu_head rcu;
 283	struct list_head list;
 284
 285	struct task_group *parent;
 286	struct list_head siblings;
 287	struct list_head children;
 288
 289#ifdef CONFIG_SCHED_AUTOGROUP
 290	struct autogroup *autogroup;
 291#endif
 292
 293	struct cfs_bandwidth cfs_bandwidth;
 294};
 295
 296#ifdef CONFIG_FAIR_GROUP_SCHED
 297#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
 298
 299/*
 300 * A weight of 0 or 1 can cause arithmetic problems.
 301 * The weight of a cfs_rq is the sum of the weights of the entities
 302 * queued on it, so the weight of an entity should not be too large,
 303 * and neither should the shares value of a task group.
 304 * (The default weight is 1024 - so there's no practical
 305 *  limitation from this.)
 306 */
 307#define MIN_SHARES	(1UL <<  1)
 308#define MAX_SHARES	(1UL << 18)
 309#endif
 310
 311typedef int (*tg_visitor)(struct task_group *, void *);
 312
 313extern int walk_tg_tree_from(struct task_group *from,
 314			     tg_visitor down, tg_visitor up, void *data);
 315
 316/*
 317 * Iterate the full tree, calling @down when first entering a node and @up when
 318 * leaving it for the final time.
 319 *
 320 * Caller must hold rcu_lock or sufficient equivalent.
 321 */
 322static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
 323{
 324	return walk_tg_tree_from(&root_task_group, down, up, data);
 325}
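/*
 * Illustrative sketch with a hypothetical visitor: count all task groups.
 * A visitor returns non-zero to abort the walk; tg_nop() below is the
 * stock no-op used when only one direction of the walk matters.
 *
 *	static int tg_count_one(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(tg_count_one, tg_nop, &nr);
 *	rcu_read_unlock();
 */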
 326
 327extern int tg_nop(struct task_group *tg, void *data);
 328
 329extern void free_fair_sched_group(struct task_group *tg);
 330extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
 331extern void online_fair_sched_group(struct task_group *tg);
 332extern void unregister_fair_sched_group(struct task_group *tg);
 333extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 334			struct sched_entity *se, int cpu,
 335			struct sched_entity *parent);
 336extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 337
 338extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
 339extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 340extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 341
 342extern void free_rt_sched_group(struct task_group *tg);
 343extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
 344extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 345		struct sched_rt_entity *rt_se, int cpu,
 346		struct sched_rt_entity *parent);
 347
 348extern struct task_group *sched_create_group(struct task_group *parent);
 349extern void sched_online_group(struct task_group *tg,
 350			       struct task_group *parent);
 351extern void sched_destroy_group(struct task_group *tg);
 352extern void sched_offline_group(struct task_group *tg);
 353
 354extern void sched_move_task(struct task_struct *tsk);
 355
 356#ifdef CONFIG_FAIR_GROUP_SCHED
 357extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 358
 359#ifdef CONFIG_SMP
 360extern void set_task_rq_fair(struct sched_entity *se,
 361			     struct cfs_rq *prev, struct cfs_rq *next);
 362#else /* !CONFIG_SMP */
 363static inline void set_task_rq_fair(struct sched_entity *se,
 364			     struct cfs_rq *prev, struct cfs_rq *next) { }
 365#endif /* CONFIG_SMP */
 366#endif /* CONFIG_FAIR_GROUP_SCHED */
 367
 368#else /* CONFIG_CGROUP_SCHED */
 369
 370struct cfs_bandwidth { };
 371
 372#endif	/* CONFIG_CGROUP_SCHED */
 373
 374/* CFS-related fields in a runqueue */
 375struct cfs_rq {
 376	struct load_weight load;
 377	unsigned int nr_running, h_nr_running;
 378
 379	u64 exec_clock;
 380	u64 min_vruntime;
 381#ifndef CONFIG_64BIT
 382	u64 min_vruntime_copy;
 383#endif
 384
 385	struct rb_root tasks_timeline;
 386	struct rb_node *rb_leftmost;
 387
 388	/*
 389	 * 'curr' points to currently running entity on this cfs_rq.
 390	 * It is set to NULL otherwise (i.e. when none are currently running).
 391	 */
 392	struct sched_entity *curr, *next, *last, *skip;
 393
 394#ifdef	CONFIG_SCHED_DEBUG
 395	unsigned int nr_spread_over;
 396#endif
 397
 398#ifdef CONFIG_SMP
 399	/*
 400	 * CFS load tracking
 401	 */
 402	struct sched_avg avg;
 403	u64 runnable_load_sum;
 404	unsigned long runnable_load_avg;
 405#ifdef CONFIG_FAIR_GROUP_SCHED
 406	unsigned long tg_load_avg_contrib;
 407	unsigned long propagate_avg;
 408#endif
 409	atomic_long_t removed_load_avg, removed_util_avg;
 410#ifndef CONFIG_64BIT
 411	u64 load_last_update_time_copy;
 412#endif
 413
 414#ifdef CONFIG_FAIR_GROUP_SCHED
 415	/*
 416	 *   h_load = weight * f(tg)
 417	 *
 418	 * Where f(tg) is the recursive weight fraction assigned to
 419	 * this group.
 420	 */
 421	unsigned long h_load;
 422	u64 last_h_load_update;
 423	struct sched_entity *h_load_next;
 424#endif /* CONFIG_FAIR_GROUP_SCHED */
 425#endif /* CONFIG_SMP */
 426
 427#ifdef CONFIG_FAIR_GROUP_SCHED
 428	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
 429
 430	/*
 431	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
 432	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
 433	 * (like users, containers etc.)
 434	 *
 435	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
 436	 * list is used during load balance.
 437	 */
 438	int on_list;
 439	struct list_head leaf_cfs_rq_list;
 440	struct task_group *tg;	/* group that "owns" this runqueue */
 441
 442#ifdef CONFIG_CFS_BANDWIDTH
 443	int runtime_enabled;
 444	u64 runtime_expires;
 445	s64 runtime_remaining;
 446
 447	u64 throttled_clock, throttled_clock_task;
 448	u64 throttled_clock_task_time;
 449	int throttled, throttle_count;
 450	struct list_head throttled_list;
 451#endif /* CONFIG_CFS_BANDWIDTH */
 452#endif /* CONFIG_FAIR_GROUP_SCHED */
 453};
 454
 455static inline int rt_bandwidth_enabled(void)
 456{
 457	return sysctl_sched_rt_runtime >= 0;
 458}
 459
 460/* RT IPI pull logic requires IRQ_WORK */
 461#ifdef CONFIG_IRQ_WORK
 462# define HAVE_RT_PUSH_IPI
 463#endif
 464
 465/* Real-Time classes' related field in a runqueue: */
 466struct rt_rq {
 467	struct rt_prio_array active;
 468	unsigned int rt_nr_running;
 469	unsigned int rr_nr_running;
 470#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 471	struct {
 472		int curr; /* highest queued rt task prio */
 473#ifdef CONFIG_SMP
 474		int next; /* next highest */
 475#endif
 476	} highest_prio;
 477#endif
 478#ifdef CONFIG_SMP
 479	unsigned long rt_nr_migratory;
 480	unsigned long rt_nr_total;
 481	int overloaded;
 482	struct plist_head pushable_tasks;
 483#ifdef HAVE_RT_PUSH_IPI
 484	int push_flags;
 485	int push_cpu;
 486	struct irq_work push_work;
 487	raw_spinlock_t push_lock;
 488#endif
 489#endif /* CONFIG_SMP */
 490	int rt_queued;
 491
 492	int rt_throttled;
 493	u64 rt_time;
 494	u64 rt_runtime;
 495	/* Nests inside the rq lock: */
 496	raw_spinlock_t rt_runtime_lock;
 497
 498#ifdef CONFIG_RT_GROUP_SCHED
 499	unsigned long rt_nr_boosted;
 500
 501	struct rq *rq;
 502	struct task_group *tg;
 503#endif
 504};
 505
 506/* Deadline class' related fields in a runqueue */
 507struct dl_rq {
 508	/* runqueue is an rbtree, ordered by deadline */
 509	struct rb_root rb_root;
 510	struct rb_node *rb_leftmost;
 511
 512	unsigned long dl_nr_running;
 513
 514#ifdef CONFIG_SMP
 515	/*
 516	 * Deadline values of the currently executing and the
 517	 * earliest ready task on this rq. Caching these facilitates
 518	 * the decision whether or not a ready but not running task
 519	 * should migrate somewhere else.
 520	 */
 521	struct {
 522		u64 curr;
 523		u64 next;
 524	} earliest_dl;
 525
 526	unsigned long dl_nr_migratory;
 527	int overloaded;
 528
 529	/*
 530	 * Tasks on this rq that can be pushed away. They are kept in
 531	 * an rb-tree, ordered by tasks' deadlines, with caching
 532	 * of the leftmost (earliest deadline) element.
 533	 */
 534	struct rb_root pushable_dl_tasks_root;
 535	struct rb_node *pushable_dl_tasks_leftmost;
 536#else
 537	struct dl_bw dl_bw;
 538#endif
 539};
 540
 541#ifdef CONFIG_SMP
 542
 543static inline bool sched_asym_prefer(int a, int b)
 544{
 545	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
 546}
 547
 548/*
 549 * We add the notion of a root-domain which will be used to define per-domain
 550 * variables. Each exclusive cpuset essentially defines an island domain by
 551 * fully partitioning the member cpus from any other cpuset. Whenever a new
 552 * exclusive cpuset is created, we also create and attach a new root-domain
 553 * object.
 554 *
 555 */
 556struct root_domain {
 557	atomic_t refcount;
 558	atomic_t rto_count;
 559	struct rcu_head rcu;
 560	cpumask_var_t span;
 561	cpumask_var_t online;
 562
 563	/* Indicate more than one runnable task for any CPU */
 564	bool overload;
 565
 566	/*
 567	 * The bit corresponding to a CPU gets set here if such CPU has more
 568	 * than one runnable -deadline task (as it is below for RT tasks).
 569	 */
 570	cpumask_var_t dlo_mask;
 571	atomic_t dlo_count;
 572	struct dl_bw dl_bw;
 573	struct cpudl cpudl;
 574
 575	/*
 576	 * The "RT overload" flag: it gets set if a CPU has more than
 577	 * one runnable RT task.
 578	 */
 579	cpumask_var_t rto_mask;
 580	struct cpupri cpupri;
 581
 582	unsigned long max_cpu_capacity;
 583};
 584
 585extern struct root_domain def_root_domain;
 
 586
 587#endif /* CONFIG_SMP */
 588
 589/*
 590 * This is the main, per-CPU runqueue data structure.
 591 *
 592 * Locking rule: code paths that need to lock multiple runqueues
 593 * (such as the load balancing or the thread migration code) must
 594 * acquire the locks in ascending &runqueue order.
 595 */
 596struct rq {
 597	/* runqueue lock: */
 598	raw_spinlock_t lock;
 599
 600	/*
 601	 * nr_running and cpu_load should be in the same cacheline because
 602	 * remote CPUs use both these fields when doing load calculation.
 603	 */
 604	unsigned int nr_running;
 605#ifdef CONFIG_NUMA_BALANCING
 606	unsigned int nr_numa_running;
 607	unsigned int nr_preferred_running;
 608#endif
 609	#define CPU_LOAD_IDX_MAX 5
 610	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 611#ifdef CONFIG_NO_HZ_COMMON
 612#ifdef CONFIG_SMP
 613	unsigned long last_load_update_tick;
 614#endif /* CONFIG_SMP */
 615	unsigned long nohz_flags;
 
 616#endif /* CONFIG_NO_HZ_COMMON */
 617#ifdef CONFIG_NO_HZ_FULL
 618	unsigned long last_sched_tick;
 619#endif
 620	/* capture load from *all* tasks on this cpu: */
 621	struct load_weight load;
 622	unsigned long nr_load_updates;
 623	u64 nr_switches;
 624
 625	struct cfs_rq cfs;
 626	struct rt_rq rt;
 627	struct dl_rq dl;
 628
 629#ifdef CONFIG_FAIR_GROUP_SCHED
 630	/* list of leaf cfs_rq on this cpu: */
 631	struct list_head leaf_cfs_rq_list;
 632	struct list_head *tmp_alone_branch;
 633#endif /* CONFIG_FAIR_GROUP_SCHED */
 634
 635	/*
 636	 * This is part of a global counter where only the total sum
 637	 * over all CPUs matters. A task can increase this counter on
 638	 * one CPU and if it got migrated afterwards it may decrease
 639	 * it on another CPU. Always updated under the runqueue lock:
 640	 */
 641	unsigned long nr_uninterruptible;
 642
 643	struct task_struct *curr, *idle, *stop;
 644	unsigned long next_balance;
 645	struct mm_struct *prev_mm;
 646
 647	unsigned int clock_skip_update;
 648	u64 clock;
 649	u64 clock_task;
 650
 651	atomic_t nr_iowait;
 652
 653#ifdef CONFIG_SMP
 654	struct root_domain *rd;
 655	struct sched_domain *sd;
 656
 657	unsigned long cpu_capacity;
 658	unsigned long cpu_capacity_orig;
 659
 660	struct callback_head *balance_callback;
 661
 662	unsigned char idle_balance;
 663	/* For active balancing */
 664	int active_balance;
 665	int push_cpu;
 666	struct cpu_stop_work active_balance_work;
 667	/* cpu of this runqueue: */
 668	int cpu;
 669	int online;
 
 670
 671	struct list_head cfs_tasks;
 672
 673	u64 rt_avg;
 674	u64 age_stamp;
 675	u64 idle_stamp;
 676	u64 avg_idle;
 677
 678	/* This is used to determine avg_idle's max value */
 679	u64 max_idle_balance_cost;
 680#endif
 681
 682#ifdef CONFIG_IRQ_TIME_ACCOUNTING
 683	u64 prev_irq_time;
 684#endif
 685#ifdef CONFIG_PARAVIRT
 686	u64 prev_steal_time;
 687#endif
 688#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
 689	u64 prev_steal_time_rq;
 690#endif
 691
 692	/* calc_load related fields */
 693	unsigned long calc_load_update;
 694	long calc_load_active;
 695
 696#ifdef CONFIG_SCHED_HRTICK
 697#ifdef CONFIG_SMP
 698	int hrtick_csd_pending;
 699	struct call_single_data hrtick_csd;
 700#endif
 701	struct hrtimer hrtick_timer;
 702#endif
 703
 704#ifdef CONFIG_SCHEDSTATS
 705	/* latency stats */
 706	struct sched_info rq_sched_info;
 707	unsigned long long rq_cpu_time;
 708	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 709
 710	/* sys_sched_yield() stats */
 711	unsigned int yld_count;
 712
 713	/* schedule() stats */
 714	unsigned int sched_count;
 715	unsigned int sched_goidle;
 716
 717	/* try_to_wake_up() stats */
 718	unsigned int ttwu_count;
 719	unsigned int ttwu_local;
 720#endif
 721
 722#ifdef CONFIG_SMP
 723	struct llist_head wake_list;
 724#endif
 725
 726#ifdef CONFIG_CPU_IDLE
 727	/* Must be inspected within a rcu lock section */
 728	struct cpuidle_state *idle_state;
 729#endif
 730};
 731
 732static inline int cpu_of(struct rq *rq)
 733{
 734#ifdef CONFIG_SMP
 735	return rq->cpu;
 736#else
 737	return 0;
 738#endif
 739}
 740
 741
 742#ifdef CONFIG_SCHED_SMT
 743
 744extern struct static_key_false sched_smt_present;
 745
 746extern void __update_idle_core(struct rq *rq);
 747
 748static inline void update_idle_core(struct rq *rq)
 749{
 750	if (static_branch_unlikely(&sched_smt_present))
 751		__update_idle_core(rq);
 752}
 753
 754#else
 755static inline void update_idle_core(struct rq *rq) { }
 756#endif
 757
 758DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 759
 760#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
 761#define this_rq()		this_cpu_ptr(&runqueues)
 762#define task_rq(p)		cpu_rq(task_cpu(p))
 763#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 764#define raw_rq()		raw_cpu_ptr(&runqueues)
 765
 766static inline u64 __rq_clock_broken(struct rq *rq)
 767{
 768	return READ_ONCE(rq->clock);
 769}
 770
 771static inline u64 rq_clock(struct rq *rq)
 772{
 773	lockdep_assert_held(&rq->lock);
 774	return rq->clock;
 775}
 776
 777static inline u64 rq_clock_task(struct rq *rq)
 778{
 779	lockdep_assert_held(&rq->lock);
 780	return rq->clock_task;
 781}
 782
 783#define RQCF_REQ_SKIP	0x01
 784#define RQCF_ACT_SKIP	0x02
 785
 786static inline void rq_clock_skip_update(struct rq *rq, bool skip)
 787{
 788	lockdep_assert_held(&rq->lock);
 789	if (skip)
 790		rq->clock_skip_update |= RQCF_REQ_SKIP;
 791	else
 792		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
 793}
 794
 795#ifdef CONFIG_NUMA
 796enum numa_topology_type {
 797	NUMA_DIRECT,
 798	NUMA_GLUELESS_MESH,
 799	NUMA_BACKPLANE,
 800};
 801extern enum numa_topology_type sched_numa_topology_type;
 802extern int sched_max_numa_distance;
 803extern bool find_numa_distance(int distance);
 804#endif
 805
 806#ifdef CONFIG_NUMA_BALANCING
 807/* The regions in numa_faults array from task_struct */
 808enum numa_faults_stats {
 809	NUMA_MEM = 0,
 810	NUMA_CPU,
 811	NUMA_MEMBUF,
 812	NUMA_CPUBUF
 813};
 814extern void sched_setnuma(struct task_struct *p, int node);
 815extern int migrate_task_to(struct task_struct *p, int cpu);
 816extern int migrate_swap(struct task_struct *, struct task_struct *);
 817#endif /* CONFIG_NUMA_BALANCING */
 818
 819#ifdef CONFIG_SMP
 820
 821static inline void
 822queue_balance_callback(struct rq *rq,
 823		       struct callback_head *head,
 824		       void (*func)(struct rq *rq))
 825{
 826	lockdep_assert_held(&rq->lock);
 827
 828	if (unlikely(head->next))
 829		return;
 830
 831	head->func = (void (*)(struct callback_head *))func;
 832	head->next = rq->balance_callback;
 833	rq->balance_callback = head;
 834}
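/*
 * Usage sketch, based on how the RT class queues its push work: the
 * callback head must outlive the callback, hence the per-CPU storage,
 * and the head->next test above is what makes re-queueing a no-op.
 *
 *	static DEFINE_PER_CPU(struct callback_head, rt_push_head);
 *
 *	// With rq->lock held; push_rt_tasks() then runs later from
 *	// balance_callback(), after the operation that queued it:
 *	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu),
 *			       push_rt_tasks);
 */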
 835
 836extern void sched_ttwu_pending(void);
 837
 838#define rcu_dereference_check_sched_domain(p) \
 839	rcu_dereference_check((p), \
 840			      lockdep_is_held(&sched_domains_mutex))
 841
 842/*
 843 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 844 * See detach_destroy_domains: synchronize_sched for details.
 845 *
 846 * The domain tree of any CPU may only be accessed from within
 847 * preempt-disabled sections.
 848 */
 849#define for_each_domain(cpu, __sd) \
 850	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
 851			__sd; __sd = __sd->parent)
 852
 853#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
 854
 855/**
 856 * highest_flag_domain - Return highest sched_domain containing flag.
 857 * @cpu:	The cpu whose highest level of sched domain is to
 858 *		be returned.
 859 * @flag:	The flag to check for the highest sched_domain
 860 *		for the given cpu.
 861 *
 862 * Returns the highest sched_domain of a cpu which contains the given flag.
 863 */
 864static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
 865{
 866	struct sched_domain *sd, *hsd = NULL;
 867
 868	for_each_domain(cpu, sd) {
 869		if (!(sd->flags & flag))
 870			break;
 871		hsd = sd;
 872	}
 873
 874	return hsd;
 875}
 876
 877static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
 878{
 879	struct sched_domain *sd;
 880
 881	for_each_domain(cpu, sd) {
 882		if (sd->flags & flag)
 883			break;
 884	}
 885
 886	return sd;
 887}
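/*
 * Example of how the domain-build code derives the per-CPU pointers below:
 * sd_llc is essentially
 *
 *	highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES)
 *
 * i.e. the widest domain whose CPUs still share a last-level cache, while
 * lowest_flag_domain() is used for flags such as SD_NUMA.
 */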
 888
 889DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 890DECLARE_PER_CPU(int, sd_llc_size);
 891DECLARE_PER_CPU(int, sd_llc_id);
 892DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 893DECLARE_PER_CPU(struct sched_domain *, sd_numa);
 894DECLARE_PER_CPU(struct sched_domain *, sd_asym);
 895
 896struct sched_group_capacity {
 897	atomic_t ref;
 898	/*
 899	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
 900	 * for a single CPU.
 901	 */
 902	unsigned long capacity;
 903	unsigned long min_capacity; /* Min per-CPU capacity in group */
 904	unsigned long next_update;
 905	int imbalance; /* XXX unrelated to capacity but shared group state */
 906
 907	unsigned long cpumask[0]; /* iteration mask */
 908};
 909
 910struct sched_group {
 911	struct sched_group *next;	/* Must be a circular list */
 912	atomic_t ref;
 913
 914	unsigned int group_weight;
 915	struct sched_group_capacity *sgc;
 916	int asym_prefer_cpu;		/* cpu of highest priority in group */
 917
 918	/*
 919	 * The CPUs this group covers.
 920	 *
 921	 * NOTE: this field is variable length. (Allocated dynamically
 922	 * by attaching extra space to the end of the structure,
 923	 * depending on how many CPUs the kernel has booted up with)
 924	 */
 925	unsigned long cpumask[0];
 926};
 927
 928static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
 929{
 930	return to_cpumask(sg->cpumask);
 931}
 932
 933/*
 934 * cpumask masking which cpus in the group are allowed to iterate up the domain
 935 * tree.
 936 */
 937static inline struct cpumask *sched_group_mask(struct sched_group *sg)
 938{
 939	return to_cpumask(sg->sgc->cpumask);
 940}
 941
 942/**
 943 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 944 * @group: The group whose first cpu is to be returned.
 945 */
 946static inline unsigned int group_first_cpu(struct sched_group *group)
 947{
 948	return cpumask_first(sched_group_cpus(group));
 949}
 950
 951extern int group_balance_cpu(struct sched_group *sg);
 952
 953#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
 954void register_sched_domain_sysctl(void);
 
 955void unregister_sched_domain_sysctl(void);
 956#else
 957static inline void register_sched_domain_sysctl(void)
 958{
 959}
 960static inline void unregister_sched_domain_sysctl(void)
 961{
 962}
 963#endif
 964
 965#else
 966
 967static inline void sched_ttwu_pending(void) { }
 968
 969#endif /* CONFIG_SMP */
 970
 971#include "stats.h"
 972#include "auto_group.h"
 973
 974#ifdef CONFIG_CGROUP_SCHED
 975
 976/*
 977 * Return the group to which this task belongs.
 978 *
 979 * We cannot use task_css() and friends because the cgroup subsystem
 980 * changes that value before the cgroup_subsys::attach() method is called,
 981 * therefore we cannot pin it and might observe the wrong value.
 982 *
 983 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 984 * core changes this before calling sched_move_task().
 985 *
 986 * Instead we use a 'copy' which is updated from sched_move_task() while
 987 * holding both task_struct::pi_lock and rq::lock.
 988 */
 989static inline struct task_group *task_group(struct task_struct *p)
 990{
 991	return p->sched_task_group;
 992}
 993
 994/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 995static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 996{
 997#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
 998	struct task_group *tg = task_group(p);
 999#endif
1000
1001#ifdef CONFIG_FAIR_GROUP_SCHED
1002	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
1003	p->se.cfs_rq = tg->cfs_rq[cpu];
1004	p->se.parent = tg->se[cpu];
1005#endif
1006
1007#ifdef CONFIG_RT_GROUP_SCHED
1008	p->rt.rt_rq  = tg->rt_rq[cpu];
1009	p->rt.parent = tg->rt_se[cpu];
1010#endif
1011}
1012
1013#else /* CONFIG_CGROUP_SCHED */
1014
1015static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
1016static inline struct task_group *task_group(struct task_struct *p)
1017{
1018	return NULL;
1019}
1020
1021#endif /* CONFIG_CGROUP_SCHED */
1022
1023static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1024{
1025	set_task_rq(p, cpu);
1026#ifdef CONFIG_SMP
1027	/*
1028	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1029	 * successfully executed on another CPU. We must ensure that updates of
1030	 * per-task data have been completed by this moment.
1031	 */
1032	smp_wmb();
1033#ifdef CONFIG_THREAD_INFO_IN_TASK
1034	p->cpu = cpu;
1035#else
1036	task_thread_info(p)->cpu = cpu;
1037#endif
1038	p->wake_cpu = cpu;
1039#endif
1040}
1041
1042/*
1043 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
1044 */
1045#ifdef CONFIG_SCHED_DEBUG
1046# include <linux/static_key.h>
1047# define const_debug __read_mostly
1048#else
1049# define const_debug const
1050#endif
1051
1052extern const_debug unsigned int sysctl_sched_features;
1053
1054#define SCHED_FEAT(name, enabled)	\
1055	__SCHED_FEAT_##name ,
1056
1057enum {
1058#include "features.h"
1059	__SCHED_FEAT_NR,
1060};
1061
1062#undef SCHED_FEAT
1063
1064#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
1065#define SCHED_FEAT(name, enabled)					\
1066static __always_inline bool static_branch_##name(struct static_key *key) \
1067{									\
1068	return static_key_##enabled(key);				\
1069}
1070
1071#include "features.h"
1072
1073#undef SCHED_FEAT
1074
1075extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1076#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
 
1077#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
1078#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
 
1079#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
1080
1081extern struct static_key_false sched_numa_balancing;
1082extern struct static_key_false sched_schedstats;
1083
1084static inline u64 global_rt_period(void)
1085{
1086	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1087}
1088
1089static inline u64 global_rt_runtime(void)
1090{
1091	if (sysctl_sched_rt_runtime < 0)
1092		return RUNTIME_INF;
1093
1094	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
1095}
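/*
 * With the default sysctls (sched_rt_period_us = 1000000,
 * sched_rt_runtime_us = 950000) these return 1s and 0.95s: RT tasks may
 * consume at most 95% of every period. Writing -1 to sched_rt_runtime_us
 * makes global_rt_runtime() return RUNTIME_INF and disables RT throttling.
 */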
1096
1097static inline int task_current(struct rq *rq, struct task_struct *p)
1098{
1099	return rq->curr == p;
1100}
1101
1102static inline int task_running(struct rq *rq, struct task_struct *p)
1103{
1104#ifdef CONFIG_SMP
1105	return p->on_cpu;
1106#else
1107	return task_current(rq, p);
1108#endif
1109}
1110
1111static inline int task_on_rq_queued(struct task_struct *p)
1112{
1113	return p->on_rq == TASK_ON_RQ_QUEUED;
1114}
1115
1116static inline int task_on_rq_migrating(struct task_struct *p)
1117{
1118	return p->on_rq == TASK_ON_RQ_MIGRATING;
1119}
1120
1121#ifndef prepare_arch_switch
1122# define prepare_arch_switch(next)	do { } while (0)
1123#endif
1124#ifndef finish_arch_post_lock_switch
1125# define finish_arch_post_lock_switch()	do { } while (0)
1126#endif
1127
1128static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
1129{
1130#ifdef CONFIG_SMP
1131	/*
1132	 * We can optimise this out completely for !SMP, because the
1133	 * SMP rebalancing from interrupt is the only thing that cares
1134	 * here.
1135	 */
1136	next->on_cpu = 1;
1137#endif
1138}
1139
1140static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
1141{
1142#ifdef CONFIG_SMP
1143	/*
1144	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
1145	 * We must ensure this doesn't happen until the switch is completely
1146	 * finished.
1147	 *
1148	 * In particular, the load of prev->state in finish_task_switch() must
1149	 * happen before this.
1150	 *
1151	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
1152	 */
1153	smp_store_release(&prev->on_cpu, 0);
1154#endif
1155#ifdef CONFIG_DEBUG_SPINLOCK
1156	/* this is a valid case when another task releases the spinlock */
1157	rq->lock.owner = current;
1158#endif
1159	/*
1160	 * If we are tracking spinlock dependencies then we have to
1161	 * fix up the runqueue lock - which gets 'carried over' from
1162	 * prev into current:
1163	 */
1164	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
1165
1166	raw_spin_unlock_irq(&rq->lock);
1167}
1168
1169/*
1170 * wake flags
1171 */
1172#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
1173#define WF_FORK		0x02		/* child wakeup after fork */
1174#define WF_MIGRATED	0x4		/* internal use, task got migrated */
1175
1176/*
1177 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1178 * of tasks with abnormal "nice" values across CPUs, the contribution that
1179 * each task makes to its run queue's load is weighted according to its
1180 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1181 * scaled version of the new time slice allocation that they receive on time
1182 * slice expiry etc.
1183 */
1184
1185#define WEIGHT_IDLEPRIO                3
1186#define WMULT_IDLEPRIO         1431655765
1187
1188extern const int sched_prio_to_weight[40];
1189extern const u32 sched_prio_to_wmult[40];
1190
1191/*
1192 * {de,en}queue flags:
1193 *
1194 * DEQUEUE_SLEEP  - task is no longer runnable
1195 * ENQUEUE_WAKEUP - task just became runnable
1196 *
1197 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
1198 *                are in a known state which allows modification. Such pairs
1199 *                should preserve as much state as possible.
1200 *
1201 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
1202 *        in the runqueue.
1203 *
1204 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
1205 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
1206 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
1207 *
1208 */
1209
1210#define DEQUEUE_SLEEP		0x01
1211#define DEQUEUE_SAVE		0x02 /* matches ENQUEUE_RESTORE */
1212#define DEQUEUE_MOVE		0x04 /* matches ENQUEUE_MOVE */
 
1213
1214#define ENQUEUE_WAKEUP		0x01
1215#define ENQUEUE_RESTORE		0x02
1216#define ENQUEUE_MOVE		0x04
 
1217
1218#define ENQUEUE_HEAD		0x08
1219#define ENQUEUE_REPLENISH	0x10
1220#ifdef CONFIG_SMP
1221#define ENQUEUE_MIGRATED	0x20
1222#else
1223#define ENQUEUE_MIGRATED	0x00
1224#endif
1225
1226#define RETRY_TASK		((void *)-1UL)
1227
1228struct sched_class {
1229	const struct sched_class *next;
1230
1231	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1232	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1233	void (*yield_task) (struct rq *rq);
1234	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1235
1236	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1237
1238	/*
1239	 * It is the responsibility of the pick_next_task() method that will
1240	 * return the next task to call put_prev_task() on the @prev task or
1241	 * something equivalent.
1242	 *
1243	 * May return RETRY_TASK when it finds a higher prio class has runnable
1244	 * tasks.
1245	 */
1246	struct task_struct * (*pick_next_task) (struct rq *rq,
1247						struct task_struct *prev,
1248						struct pin_cookie cookie);
1249	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1250
1251#ifdef CONFIG_SMP
1252	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1253	void (*migrate_task_rq)(struct task_struct *p);
1254
1255	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1256
1257	void (*set_cpus_allowed)(struct task_struct *p,
1258				 const struct cpumask *newmask);
1259
1260	void (*rq_online)(struct rq *rq);
1261	void (*rq_offline)(struct rq *rq);
1262#endif
1263
1264	void (*set_curr_task) (struct rq *rq);
1265	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1266	void (*task_fork) (struct task_struct *p);
1267	void (*task_dead) (struct task_struct *p);
1268
1269	/*
1270	 * The switched_from() call is allowed to drop rq->lock, therefore we
1271	 * cannot assume the switched_from/switched_to pair is serialized by
1272	 * rq->lock. They are however serialized by p->pi_lock.
1273	 */
1274	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1275	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1276	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1277			     int oldprio);
1278
1279	unsigned int (*get_rr_interval) (struct rq *rq,
1280					 struct task_struct *task);
1281
1282	void (*update_curr) (struct rq *rq);
1283
1284#define TASK_SET_GROUP  0
1285#define TASK_MOVE_GROUP	1
1286
1287#ifdef CONFIG_FAIR_GROUP_SCHED
1288	void (*task_change_group) (struct task_struct *p, int type);
1289#endif
1290};
1291
1292static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1293{
1294	prev->sched_class->put_prev_task(rq, prev);
1295}
1296
1297static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
1298{
1299	curr->sched_class->set_curr_task(rq);
1300}
1301
 
1302#define sched_class_highest (&stop_sched_class)
1303#define for_each_class(class) \
1304   for (class = sched_class_highest; class; class = class->next)
1305
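/*
 * Usage sketch, condensed from pick_next_task() in core.c: the core
 * scheduler walks the classes from highest to lowest priority and takes
 * the first task it is offered, restarting if a class reports RETRY_TASK.
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq, prev, cookie);
 *		if (p) {
 *			if (unlikely(p == RETRY_TASK))
 *				goto again;
 *			return p;
 *		}
 *	}
 */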
1306extern const struct sched_class stop_sched_class;
1307extern const struct sched_class dl_sched_class;
1308extern const struct sched_class rt_sched_class;
1309extern const struct sched_class fair_sched_class;
1310extern const struct sched_class idle_sched_class;
1311
1312
1313#ifdef CONFIG_SMP
1314
1315extern void update_group_capacity(struct sched_domain *sd, int cpu);
1316
1317extern void trigger_load_balance(struct rq *rq);
1318
1319extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1320
1321#endif
1322
1323#ifdef CONFIG_CPU_IDLE
1324static inline void idle_set_state(struct rq *rq,
1325				  struct cpuidle_state *idle_state)
1326{
1327	rq->idle_state = idle_state;
1328}
1329
1330static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1331{
1332	SCHED_WARN_ON(!rcu_read_lock_held());
 
1333	return rq->idle_state;
1334}
1335#else
1336static inline void idle_set_state(struct rq *rq,
1337				  struct cpuidle_state *idle_state)
1338{
1339}
1340
1341static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1342{
1343	return NULL;
1344}
1345#endif
1346
1347extern void sysrq_sched_debug_show(void);
1348extern void sched_init_granularity(void);
1349extern void update_max_interval(void);
1350
1351extern void init_sched_dl_class(void);
1352extern void init_sched_rt_class(void);
1353extern void init_sched_fair_class(void);
1354
1355extern void resched_curr(struct rq *rq);
1356extern void resched_cpu(int cpu);
1357
1358extern struct rt_bandwidth def_rt_bandwidth;
1359extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1360
1361extern struct dl_bandwidth def_dl_bandwidth;
1362extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1363extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1364
1365unsigned long to_ratio(u64 period, u64 runtime);
1366
1367extern void init_entity_runnable_average(struct sched_entity *se);
1368extern void post_init_entity_util_avg(struct sched_entity *se);
1369
1370#ifdef CONFIG_NO_HZ_FULL
1371extern bool sched_can_stop_tick(struct rq *rq);
 
1372
1373/*
1374 * Tick may be needed by tasks in the runqueue depending on their policy and
1375 * requirements. If the tick is needed, let's send the target an IPI to kick it out of
1376 * nohz mode if necessary.
1377 */
1378static inline void sched_update_tick_dependency(struct rq *rq)
1379{
1380	int cpu;
1381
1382	if (!tick_nohz_full_enabled())
1383		return;
1384
1385	cpu = cpu_of(rq);
1386
1387	if (!tick_nohz_full_cpu(cpu))
1388		return;
1389
1390	if (sched_can_stop_tick(rq))
1391		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
1392	else
1393		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
1394}
1395#else
 
1396static inline void sched_update_tick_dependency(struct rq *rq) { }
1397#endif
1398
1399static inline void add_nr_running(struct rq *rq, unsigned count)
1400{
1401	unsigned prev_nr = rq->nr_running;
1402
1403	rq->nr_running = prev_nr + count;
1404
1405	if (prev_nr < 2 && rq->nr_running >= 2) {
1406#ifdef CONFIG_SMP
1407		if (!rq->rd->overload)
1408			rq->rd->overload = true;
1409#endif
1410	}
1411
1412	sched_update_tick_dependency(rq);
1413}
1414
1415static inline void sub_nr_running(struct rq *rq, unsigned count)
1416{
1417	rq->nr_running -= count;
1418	/* Check if we still need preemption */
1419	sched_update_tick_dependency(rq);
1420}
1421
1422static inline void rq_last_tick_reset(struct rq *rq)
1423{
1424#ifdef CONFIG_NO_HZ_FULL
1425	rq->last_sched_tick = jiffies;
1426#endif
1427}
1428
1429extern void update_rq_clock(struct rq *rq);
1430
1431extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1432extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1433
1434extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1435
1436extern const_debug unsigned int sysctl_sched_time_avg;
1437extern const_debug unsigned int sysctl_sched_nr_migrate;
1438extern const_debug unsigned int sysctl_sched_migration_cost;
1439
1440static inline u64 sched_avg_period(void)
1441{
1442	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1443}
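/*
 * With the default sysctl_sched_time_avg of 1000 (ms) this is 0.5s;
 * sched_avg_update() roughly halves rq->rt_avg once per such period, so
 * rt_avg acts as a decaying measure of time spent in RT (and IRQ) context,
 * which the fair class later uses to scale down available CFS capacity.
 */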
1444
1445#ifdef CONFIG_SCHED_HRTICK
1446
1447/*
1448 * Use hrtick when:
1449 *  - enabled by features
1450 *  - hrtimer is actually high res
1451 */
1452static inline int hrtick_enabled(struct rq *rq)
1453{
1454	if (!sched_feat(HRTICK))
1455		return 0;
1456	if (!cpu_active(cpu_of(rq)))
1457		return 0;
1458	return hrtimer_is_hres_active(&rq->hrtick_timer);
1459}
1460
1461void hrtick_start(struct rq *rq, u64 delay);
1462
1463#else
1464
1465static inline int hrtick_enabled(struct rq *rq)
1466{
1467	return 0;
1468}
1469
1470#endif /* CONFIG_SCHED_HRTICK */
1471
1472#ifdef CONFIG_SMP
1473extern void sched_avg_update(struct rq *rq);
1474
1475#ifndef arch_scale_freq_capacity
1476static __always_inline
1477unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
1478{
1479	return SCHED_CAPACITY_SCALE;
1480}
1481#endif
1482
1483#ifndef arch_scale_cpu_capacity
1484static __always_inline
1485unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
1486{
1487	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
1488		return sd->smt_gain / sd->span_weight;
1489
1490	return SCHED_CAPACITY_SCALE;
1491}
1492#endif
1493
1494static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1495{
1496	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
1497	sched_avg_update(rq);
1498}
1499#else
1500static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1501static inline void sched_avg_update(struct rq *rq) { }
1502#endif
1503
1504struct rq_flags {
1505	unsigned long flags;
1506	struct pin_cookie cookie;
1507};
1508
1509struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1510	__acquires(rq->lock);
 
1511struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1512	__acquires(p->pi_lock)
1513	__acquires(rq->lock);
1514
1515static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1516	__releases(rq->lock)
1517{
1518	lockdep_unpin_lock(&rq->lock, rf->cookie);
1519	raw_spin_unlock(&rq->lock);
1520}
1521
1522static inline void
1523task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1524	__releases(rq->lock)
1525	__releases(p->pi_lock)
1526{
1527	lockdep_unpin_lock(&rq->lock, rf->cookie);
1528	raw_spin_unlock(&rq->lock);
1529	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
1530}
1531
1532#ifdef CONFIG_SMP
1533#ifdef CONFIG_PREEMPT
1534
1535static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1536
1537/*
1538 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1539 * way at the expense of forcing extra atomic operations in all
1540 * invocations.  This assures that the double_lock is acquired using the
1541 * same underlying policy as the spinlock_t on this architecture, which
1542 * reduces latency compared to the unfair variant below.  However, it
1543 * also adds more overhead and therefore may reduce throughput.
1544 */
1545static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1546	__releases(this_rq->lock)
1547	__acquires(busiest->lock)
1548	__acquires(this_rq->lock)
1549{
1550	raw_spin_unlock(&this_rq->lock);
1551	double_rq_lock(this_rq, busiest);
1552
1553	return 1;
1554}
1555
1556#else
1557/*
1558 * Unfair double_lock_balance: Optimizes throughput at the expense of
1559 * latency by eliminating extra atomic operations when the locks are
1560 * already in proper order on entry.  This favors lower cpu-ids and will
1561 * grant the double lock to lower cpus over higher ids under contention,
1562 * regardless of entry order into the function.
1563 */
1564static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1565	__releases(this_rq->lock)
1566	__acquires(busiest->lock)
1567	__acquires(this_rq->lock)
1568{
1569	int ret = 0;
1570
1571	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1572		if (busiest < this_rq) {
1573			raw_spin_unlock(&this_rq->lock);
1574			raw_spin_lock(&busiest->lock);
1575			raw_spin_lock_nested(&this_rq->lock,
1576					      SINGLE_DEPTH_NESTING);
1577			ret = 1;
1578		} else
1579			raw_spin_lock_nested(&busiest->lock,
1580					      SINGLE_DEPTH_NESTING);
1581	}
1582	return ret;
1583}
1584
1585#endif /* CONFIG_PREEMPT */
1586
1587/*
1588 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1589 */
1590static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1591{
1592	if (unlikely(!irqs_disabled())) {
1593		/* printk() doesn't work well under rq->lock */
1594		raw_spin_unlock(&this_rq->lock);
1595		BUG_ON(1);
1596	}
1597
1598	return _double_lock_balance(this_rq, busiest);
1599}
1600
1601static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1602	__releases(busiest->lock)
1603{
1604	raw_spin_unlock(&busiest->lock);
1605	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1606}
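
/*
 * Illustrative usage sketch (not from the original file): balance paths
 * already hold this_rq->lock with interrupts disabled.  A non-zero return
 * means this_rq->lock was dropped and re-acquired, so any state cached
 * under it must be revalidated:
 *
 *	if (double_lock_balance(this_rq, busiest)) {
 *		... re-check the task/runqueue state read earlier ...
 *	}
 *	... pull or push tasks ...
 *	double_unlock_balance(this_rq, busiest);
 */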
1607
1608static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
1609{
1610	if (l1 > l2)
1611		swap(l1, l2);
1612
1613	spin_lock(l1);
1614	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1615}
1616
1617static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
1618{
1619	if (l1 > l2)
1620		swap(l1, l2);
1621
1622	spin_lock_irq(l1);
1623	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1624}
1625
1626static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
1627{
1628	if (l1 > l2)
1629		swap(l1, l2);
1630
1631	raw_spin_lock(l1);
1632	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1633}
1634
1635/*
1636 * double_rq_lock - safely lock two runqueues
1637 *
1638 * Note this does not disable interrupts like task_rq_lock;
1639 * you need to do so manually before calling.
1640 */
1641static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1642	__acquires(rq1->lock)
1643	__acquires(rq2->lock)
1644{
1645	BUG_ON(!irqs_disabled());
1646	if (rq1 == rq2) {
1647		raw_spin_lock(&rq1->lock);
1648		__acquire(rq2->lock);	/* Fake it out ;) */
1649	} else {
1650		if (rq1 < rq2) {
1651			raw_spin_lock(&rq1->lock);
1652			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1653		} else {
1654			raw_spin_lock(&rq2->lock);
1655			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1656		}
1657	}
1658}
1659
1660/*
1661 * double_rq_unlock - safely unlock two runqueues
1662 *
1663 * Note this does not restore interrupts like task_rq_unlock;
1664 * you need to do so manually after calling.
1665 */
1666static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1667	__releases(rq1->lock)
1668	__releases(rq2->lock)
1669{
1670	raw_spin_unlock(&rq1->lock);
1671	if (rq1 != rq2)
1672		raw_spin_unlock(&rq2->lock);
1673	else
1674		__release(rq2->lock);
1675}
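
/*
 * Illustrative usage sketch (not part of the original file): interrupts
 * must be disabled by the caller, e.g.:
 *
 *	local_irq_disable();
 *	double_rq_lock(rq1, rq2);
 *	... migrate tasks between rq1 and rq2 ...
 *	double_rq_unlock(rq1, rq2);
 *	local_irq_enable();
 */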
1676
1677#else /* CONFIG_SMP */
1678
1679/*
1680 * double_rq_lock - safely lock two runqueues
1681 *
1682 * Note this does not disable interrupts like task_rq_lock;
1683 * you need to do so manually before calling.
1684 */
1685static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1686	__acquires(rq1->lock)
1687	__acquires(rq2->lock)
1688{
1689	BUG_ON(!irqs_disabled());
1690	BUG_ON(rq1 != rq2);
1691	raw_spin_lock(&rq1->lock);
1692	__acquire(rq2->lock);	/* Fake it out ;) */
1693}
1694
1695/*
1696 * double_rq_unlock - safely unlock two runqueues
1697 *
1698 * Note this does not restore interrupts like task_rq_unlock;
1699 * you need to do so manually after calling.
1700 */
1701static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1702	__releases(rq1->lock)
1703	__releases(rq2->lock)
1704{
1705	BUG_ON(rq1 != rq2);
1706	raw_spin_unlock(&rq1->lock);
1707	__release(rq2->lock);
1708}
1709
1710#endif
1711
1712extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1713extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
1714
1715#ifdef	CONFIG_SCHED_DEBUG
1716extern void print_cfs_stats(struct seq_file *m, int cpu);
1717extern void print_rt_stats(struct seq_file *m, int cpu);
1718extern void print_dl_stats(struct seq_file *m, int cpu);
1719extern void
1720print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
1721
1722#ifdef CONFIG_NUMA_BALANCING
1723extern void
1724show_numa_stats(struct task_struct *p, struct seq_file *m);
1725extern void
1726print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
1727	unsigned long tpf, unsigned long gsf, unsigned long gpf);
1728#endif /* CONFIG_NUMA_BALANCING */
1729#endif /* CONFIG_SCHED_DEBUG */
1730
1731extern void init_cfs_rq(struct cfs_rq *cfs_rq);
1732extern void init_rt_rq(struct rt_rq *rt_rq);
1733extern void init_dl_rq(struct dl_rq *dl_rq);
1734
1735extern void cfs_bandwidth_usage_inc(void);
1736extern void cfs_bandwidth_usage_dec(void);
1737
1738#ifdef CONFIG_NO_HZ_COMMON
1739enum rq_nohz_flag_bits {
1740	NOHZ_TICK_STOPPED,
1741	NOHZ_BALANCE_KICK,
1742};
1743
1744#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
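
/*
 * Illustrative sketch (assumption, not from the original file): these flag
 * bits are typically manipulated with the atomic bitops on nohz_flags(cpu),
 * e.g.:
 *
 *	if (!test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
 *		set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 */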
1745
1746extern void nohz_balance_exit_idle(unsigned int cpu);
1747#else
1748static inline void nohz_balance_exit_idle(unsigned int cpu) { }
1749#endif
1750
1751#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1752struct irqtime {
1753	u64			hardirq_time;
1754	u64			softirq_time;
1755	u64			irq_start_time;
1756	struct u64_stats_sync	sync;
1757};
1758
1759DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
1760
1761static inline u64 irq_time_read(int cpu)
1762{
1763	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
1764	unsigned int seq;
1765	u64 total;
1766
1767	do {
1768		seq = __u64_stats_fetch_begin(&irqtime->sync);
1769		total = irqtime->softirq_time + irqtime->hardirq_time;
1770	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
1771
1772	return total;
1773}
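
/*
 * Illustrative usage sketch (not part of the original file): irq_time_read()
 * returns cumulative hard+soft IRQ time in nanoseconds, so consumers
 * typically sample it twice and account only the delta:
 *
 *	u64 prev = irq_time_read(cpu);
 *	...
 *	u64 delta = irq_time_read(cpu) - prev;
 */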
1774#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
1775
1776#ifdef CONFIG_CPU_FREQ
1777DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
1778
1779/**
1780 * cpufreq_update_util - Take a note about CPU utilization changes.
1781 * @rq: Runqueue to carry out the update for.
1782 * @flags: Update reason flags.
1783 *
1784 * This function is called by the scheduler on the CPU whose utilization is
1785 * being updated.
1786 *
1787 * It can only be called from RCU-sched read-side critical sections.
1788 *
1789 * The way cpufreq is currently arranged requires it to evaluate the CPU
1790 * performance state (frequency/voltage) on a regular basis to prevent it from
1791 * being stuck in a completely inadequate performance level for too long.
1792 * That is not guaranteed to happen if the updates are only triggered from CFS,
1793 * though, because they may stop coming in while RT or deadline tasks are
1794 * active all the time (or when only RT and DL tasks are runnable).
1795 *
1796 * As a workaround for that issue, this function is called by the RT and DL
1797 * sched classes to trigger extra cpufreq updates and keep cpufreq from stalling,
1798 * but that really is a band-aid.  Going forward it should be replaced with
1799 * solutions targeted more specifically at RT and DL tasks.
1800 */
1801static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
1802{
1803	struct update_util_data *data;
1804
1805	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
1806	if (data)
1807		data->func(data, rq_clock(rq), flags);
1808}
1809
1810static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
1811{
1812	if (cpu_of(rq) == smp_processor_id())
1813		cpufreq_update_util(rq, flags);
1814}
1815#else
1816static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
1817static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
1818#endif /* CONFIG_CPU_FREQ */
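
/*
 * Illustrative call-site sketch (not from the original file; flag names are
 * assumed from <linux/sched/cpufreq.h> of this era):
 *
 *	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 *
 * or, from a path that may be running on a remote CPU:
 *
 *	cpufreq_update_this_cpu(rq, 0);
 */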
1819
1820#ifdef arch_scale_freq_capacity
1821#ifndef arch_scale_freq_invariant
1822#define arch_scale_freq_invariant()	(true)
1823#endif
1824#else /* arch_scale_freq_capacity */
1825#define arch_scale_freq_invariant()	(false)
1826#endif