
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>

#include "cpupri.h"

extern __read_mostly int scheduler_running;

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters,
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
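
/*
 * Worked example (editor's illustration, assuming the usual values from
 * <linux/sched.h>: MAX_RT_PRIO == 100 and MAX_PRIO == 140):
 *
 *	NICE_TO_PRIO(0)   == 100 + 0 + 20  == 120	(default static_prio)
 *	NICE_TO_PRIO(-20) == 100 - 20 + 20 == 100	== MAX_RT_PRIO
 *	USER_PRIO(120)    == 120 - 100     == 20	(middle of [ 0 ... 39 ])
 *	MAX_USER_PRIO     == 140 - 100     == 40
 */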

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
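
/*
 * For example (editor's illustration only): with HZ == 1000, NSEC_PER_SEC / HZ
 * is 1000000 ns per tick, so NS_TO_JIFFIES(2000000) evaluates to 2 jiffies.
 */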

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (policy == SCHED_FIFO || policy == SCHED_RR)
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchal_quota;
	u64 runtime_expires;

	int idle, timer_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

	atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

/* Default task group.
 *	Every task in the system belongs to this group at bootup.
 */
extern struct task_group root_task_group;

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
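
/*
 * Hypothetical usage sketch (editor's illustration, not part of the original
 * header): a down/up visitor pair that counts the groups in the hierarchy.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static int count_tg(struct task_group *tg, void *data)
{
	(*(int *)data)++;
	return 0;			/* a non-zero return aborts the walk */
}

static int nr_task_groups(void)
{
	int count = 0;

	rcu_read_lock();		/* walk_tg_tree() requires rcu_lock */
	walk_tg_tree(count_tg, tg_nop, &count);
	rcu_read_unlock();

	return count;
}
#endif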

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef	CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SMP
	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;

	/*
	 * Maintaining per-cpu shares distribution for group scheduling
	 *
	 * load_stamp is the last time we updated the load average
	 * load_last is the last time we updated the load average and saw load
	 * load_unacc_exec_time is currently unaccounted execution time
	 */
	u64 load_avg;
	u64 load_period;
	u64 load_stamp, load_last, load_unacc_exec_time;

	unsigned long load_contribution;
#endif /* CONFIG_SMP */
#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_timestamp;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that needs to lock multiple runqueues (such as
 * the load balancing or the thread migration code) must acquire the
 * locks in ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
	int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_balance;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))

#ifdef CONFIG_SMP

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}
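
/*
 * Editor's note (illustrative): the per-cpu sd_llc pointer declared below is
 * typically derived with a call along the lines of
 * highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES), i.e. the widest domain
 * whose CPUs still share a last-level cache.
 */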

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_id);

extern int group_balance_cpu(struct sched_group *sg);

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_subsys_state() and friends because the cgroup
 * subsystem changes that value before the cgroup_subsys::attach() method
 * is called, therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
static __always_inline bool static_branch__true(struct static_key *key)
{
	return static_key_true(key); /* Not out of line branch. */
}

static __always_inline bool static_branch__false(struct static_key *key)
{
	return static_key_false(key); /* Out of line branch. */
}

#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_branch__##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
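
/*
 * Worked example (editor's illustration): with the usual defaults of
 * sysctl_sched_rt_period == 1000000 us and sysctl_sched_rt_runtime == 950000 us,
 * global_rt_period() is 1 second and global_rt_runtime() is 0.95 seconds,
 * i.e. RT tasks may consume at most 95% of every period.
 */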



static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}


#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	raw_spin_unlock_irq(&rq->lock);
#else
	raw_spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */


static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO                3
#define WMULT_IDLEPRIO         1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
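
/*
 * Worked example (editor's illustration): prio_to_weight[20] is the nice-0
 * weight of 1024; one nice level lower (nice 1, index 21) is 820, and
 * 1024 / 820 is roughly the 1.25 multiplier described above. Two CPU-bound
 * tasks at nice 0 and nice 1 therefore split the CPU roughly
 * 1024/(1024+820) ~= 55% versus 820/(1024+820) ~= 45%.
 */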

/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
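
/*
 * Editor's note (illustrative): a division by a task's weight can thus be
 * replaced by multiplying by its precalculated 2^32/weight inverse and
 * shifting right by 32, roughly
 *
 *	delta ~= (delta_exec * NICE_0_LOAD * prio_to_wmult[prio]) >> 32
 *
 * For nice 0 this is exact: 1024 * 4194304 == 2^32, so the factor is 1.
 */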

/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
	CPUACCT_STAT_USER,	/* ... user mode */
	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */

	CPUACCT_STAT_NSTATS,
};


#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;


#ifdef CONFIG_SMP

extern void trigger_load_balance(struct rq *rq, int cpu);
extern void idle_balance(int this_cpu, struct rq *this_rq);

#else	/* CONFIG_SMP */

static inline void idle_balance(int cpu, struct rq *rq)
{
}

#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);
extern void update_group_power(struct sched_domain *sd, int cpu);
extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_task(struct task_struct *p);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern void update_idle_cpu_load(struct rq *this_rq);

#ifdef CONFIG_CGROUP_CPUACCT
#include <linux/cgroup.h>
/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state css;
	/* cpuusage holds pointer to a u64-type object on every cpu */
	u64 __percpu *cpuusage;
	struct kernel_cpustat __percpu *cpustat;
};

/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
			    struct cpuacct, css);
}

static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
	if (!ca || !ca->css.cgroup->parent)
		return NULL;
	return cgroup_ca(ca->css.cgroup->parent);
}

extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
#endif

static inline void inc_nr_running(struct rq *rq)
{
	rq->nr_running++;
}

static inline void dec_nr_running(struct rq *rq)
{
	rq->nr_running--;
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta;
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations.  This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below.  However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry.  This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
extern void unthrottle_offline_cfs_rqs(struct rq *rq);

extern void account_cfs_bandwidth_used(int enabled, int was_enabled);

#ifdef CONFIG_NO_HZ
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
	NOHZ_IDLE,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
#endif