Note: kernel/sched.c does not exist in v5.4; the scheduler code was later split into the kernel/sched/ directory.
   1/*
   2 *  kernel/sched.c
   3 *
   4 *  Kernel scheduler and related syscalls
   5 *
   6 *  Copyright (C) 1991-2002  Linus Torvalds
   7 *
   8 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
   9 *		make semaphores SMP safe
  10 *  1998-11-19	Implemented schedule_timeout() and related stuff
  11 *		by Andrea Arcangeli
  12 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
  13 *		hybrid priority-list and round-robin design with
  14 *		an array-switch method of distributing timeslices
  15 *		and per-CPU runqueues.  Cleanups and useful suggestions
  16 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
  17 *  2003-09-03	Interactivity tuning by Con Kolivas.
  18 *  2004-04-02	Scheduler domains code by Nick Piggin
  19 *  2007-04-15  Work begun on replacing all interactivity tuning with a
  20 *              fair scheduling design by Con Kolivas.
  21 *  2007-05-05  Load balancing (smp-nice) and other improvements
  22 *              by Peter Williams
  23 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
  24 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
  25 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
  26 *              Thomas Gleixner, Mike Kravetz
  27 */
  28
  29#include <linux/mm.h>
  30#include <linux/module.h>
  31#include <linux/nmi.h>
  32#include <linux/init.h>
  33#include <linux/uaccess.h>
  34#include <linux/highmem.h>
  35#include <asm/mmu_context.h>
  36#include <linux/interrupt.h>
  37#include <linux/capability.h>
  38#include <linux/completion.h>
  39#include <linux/kernel_stat.h>
  40#include <linux/debug_locks.h>
  41#include <linux/perf_event.h>
  42#include <linux/security.h>
  43#include <linux/notifier.h>
  44#include <linux/profile.h>
  45#include <linux/freezer.h>
  46#include <linux/vmalloc.h>
  47#include <linux/blkdev.h>
  48#include <linux/delay.h>
  49#include <linux/pid_namespace.h>
  50#include <linux/smp.h>
  51#include <linux/threads.h>
  52#include <linux/timer.h>
  53#include <linux/rcupdate.h>
  54#include <linux/cpu.h>
  55#include <linux/cpuset.h>
  56#include <linux/percpu.h>
  57#include <linux/proc_fs.h>
  58#include <linux/seq_file.h>
  59#include <linux/stop_machine.h>
  60#include <linux/sysctl.h>
  61#include <linux/syscalls.h>
  62#include <linux/times.h>
  63#include <linux/tsacct_kern.h>
  64#include <linux/kprobes.h>
  65#include <linux/delayacct.h>
  66#include <linux/unistd.h>
  67#include <linux/pagemap.h>
  68#include <linux/hrtimer.h>
  69#include <linux/tick.h>
  70#include <linux/debugfs.h>
  71#include <linux/ctype.h>
  72#include <linux/ftrace.h>
  73#include <linux/slab.h>
  74
  75#include <asm/tlb.h>
  76#include <asm/irq_regs.h>
  77#include <asm/mutex.h>
  78#ifdef CONFIG_PARAVIRT
  79#include <asm/paravirt.h>
  80#endif
  81
  82#include "sched_cpupri.h"
  83#include "workqueue_sched.h"
  84#include "sched_autogroup.h"
  85
  86#define CREATE_TRACE_POINTS
  87#include <trace/events/sched.h>
  88
  89/*
  90 * Convert user-nice values [ -20 ... 0 ... 19 ]
  91 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
  92 * and back.
  93 */
  94#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
  95#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
  96#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
  97
  98/*
  99 * 'User priority' is the nice value converted to something we
 100 * can work with more easily when scaling various scheduler parameters;
 101 * it's a [ 0 ... 39 ] range.
 102 */
 103#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
 104#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
 105#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
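/*
 * Editor's note (not part of the original kernel/sched.c): a worked example
 * of the mappings above, assuming MAX_RT_PRIO == 100 and MAX_PRIO == 140 as
 * defined in the sched headers of this era:
 *
 *   NICE_TO_PRIO(-20) == 100    NICE_TO_PRIO(0) == 120    NICE_TO_PRIO(19) == 139
 *   PRIO_TO_NICE(120) ==   0    USER_PRIO(120)  ==  20    MAX_USER_PRIO    ==  40
 *
 * i.e. the nice range [-20..19] occupies static priorities [100..139],
 * directly above the RT priority range [0..99].
 */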
 106
 107/*
 108 * Helpers for converting nanosecond timing to jiffy resolution
 109 */
 110#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
 111
 112#define NICE_0_LOAD		SCHED_LOAD_SCALE
 113#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
 114
 115/*
 116 * These are the 'tuning knobs' of the scheduler:
 117 *
 118 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 119 * Timeslices get refilled after they expire.
 120 */
 121#define DEF_TIMESLICE		(100 * HZ / 1000)
 122
 123/*
 124 * single value that denotes runtime == period, ie unlimited time.
 125 */
 126#define RUNTIME_INF	((u64)~0ULL)
 127
 128static inline int rt_policy(int policy)
 129{
 130	if (policy == SCHED_FIFO || policy == SCHED_RR)
 131		return 1;
 132	return 0;
 133}
 134
 135static inline int task_has_rt_policy(struct task_struct *p)
 136{
 137	return rt_policy(p->policy);
 138}
 139
 140/*
 141 * This is the priority-queue data structure of the RT scheduling class:
 142 */
 143struct rt_prio_array {
 144	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
 145	struct list_head queue[MAX_RT_PRIO];
 146};
 147
 148struct rt_bandwidth {
 149	/* nests inside the rq lock: */
 150	raw_spinlock_t		rt_runtime_lock;
 151	ktime_t			rt_period;
 152	u64			rt_runtime;
 153	struct hrtimer		rt_period_timer;
 154};
 155
 156static struct rt_bandwidth def_rt_bandwidth;
 157
 158static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
 159
 160static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
 161{
 162	struct rt_bandwidth *rt_b =
 163		container_of(timer, struct rt_bandwidth, rt_period_timer);
 164	ktime_t now;
 165	int overrun;
 166	int idle = 0;
 167
 168	for (;;) {
 169		now = hrtimer_cb_get_time(timer);
 170		overrun = hrtimer_forward(timer, now, rt_b->rt_period);
 171
 172		if (!overrun)
 173			break;
 174
 175		idle = do_sched_rt_period_timer(rt_b, overrun);
 176	}
 177
 178	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
 179}
 180
 181static
 182void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
 183{
 184	rt_b->rt_period = ns_to_ktime(period);
 185	rt_b->rt_runtime = runtime;
 186
 187	raw_spin_lock_init(&rt_b->rt_runtime_lock);
 188
 189	hrtimer_init(&rt_b->rt_period_timer,
 190			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 191	rt_b->rt_period_timer.function = sched_rt_period_timer;
 192}
 193
 194static inline int rt_bandwidth_enabled(void)
 195{
 196	return sysctl_sched_rt_runtime >= 0;
 197}
 198
 199static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 200{
 201	ktime_t now;
 202
 203	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 204		return;
 205
 206	if (hrtimer_active(&rt_b->rt_period_timer))
 207		return;
 208
 209	raw_spin_lock(&rt_b->rt_runtime_lock);
 210	for (;;) {
 211		unsigned long delta;
 212		ktime_t soft, hard;
 213
 214		if (hrtimer_active(&rt_b->rt_period_timer))
 215			break;
 216
 217		now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
 218		hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
 219
 220		soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
 221		hard = hrtimer_get_expires(&rt_b->rt_period_timer);
 222		delta = ktime_to_ns(ktime_sub(hard, soft));
 223		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
 224				HRTIMER_MODE_ABS_PINNED, 0);
 225	}
 226	raw_spin_unlock(&rt_b->rt_runtime_lock);
 227}
 228
 229#ifdef CONFIG_RT_GROUP_SCHED
 230static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
 231{
 232	hrtimer_cancel(&rt_b->rt_period_timer);
 233}
 234#endif
 235
 236/*
 237 * sched_domains_mutex serializes calls to init_sched_domains,
 238 * detach_destroy_domains and partition_sched_domains.
 239 */
 240static DEFINE_MUTEX(sched_domains_mutex);
 241
 242#ifdef CONFIG_CGROUP_SCHED
 243
 244#include <linux/cgroup.h>
 245
 246struct cfs_rq;
 247
 248static LIST_HEAD(task_groups);
 249
 250/* task group related information */
 251struct task_group {
 252	struct cgroup_subsys_state css;
 253
 254#ifdef CONFIG_FAIR_GROUP_SCHED
 255	/* schedulable entities of this group on each cpu */
 256	struct sched_entity **se;
 257	/* runqueue "owned" by this group on each cpu */
 258	struct cfs_rq **cfs_rq;
 259	unsigned long shares;
 260
 261	atomic_t load_weight;
 262#endif
 263
 264#ifdef CONFIG_RT_GROUP_SCHED
 265	struct sched_rt_entity **rt_se;
 266	struct rt_rq **rt_rq;
 267
 268	struct rt_bandwidth rt_bandwidth;
 269#endif
 270
 271	struct rcu_head rcu;
 272	struct list_head list;
 273
 274	struct task_group *parent;
 275	struct list_head siblings;
 276	struct list_head children;
 277
 278#ifdef CONFIG_SCHED_AUTOGROUP
 279	struct autogroup *autogroup;
 280#endif
 281};
 282
 283/* task_group_lock serializes the addition/removal of task groups */
 284static DEFINE_SPINLOCK(task_group_lock);
 285
 286#ifdef CONFIG_FAIR_GROUP_SCHED
 287
 288# define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
 289
 290/*
 291 * A weight of 0 or 1 can cause arithmetic problems.
 292 * The weight of a cfs_rq is the sum of the weights of the entities
 293 * queued on it, so the weight of a single entity should not be too
 294 * large, and neither should the shares value of a task group.
 295 * (The default weight is 1024 - so there's no practical
 296 *  limitation from this.)
 297 */
 298#define MIN_SHARES	(1UL <<  1)
 299#define MAX_SHARES	(1UL << 18)
 300
 301static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 302#endif
 303
 304/* Default task group.
 305 *	Every task in the system belongs to this group at bootup.
 306 */
 307struct task_group root_task_group;
 308
 309#endif	/* CONFIG_CGROUP_SCHED */
 310
 311/* CFS-related fields in a runqueue */
 312struct cfs_rq {
 313	struct load_weight load;
 314	unsigned long nr_running;
 315
 316	u64 exec_clock;
 317	u64 min_vruntime;
 318#ifndef CONFIG_64BIT
 319	u64 min_vruntime_copy;
 320#endif
 321
 322	struct rb_root tasks_timeline;
 323	struct rb_node *rb_leftmost;
 324
 325	struct list_head tasks;
 326	struct list_head *balance_iterator;
 327
 328	/*
 329	 * 'curr' points to currently running entity on this cfs_rq.
 330	 * It is set to NULL otherwise (i.e when none are currently running).
 331	 */
 332	struct sched_entity *curr, *next, *last, *skip;
 333
 334#ifdef	CONFIG_SCHED_DEBUG
 335	unsigned int nr_spread_over;
 336#endif
 337
 338#ifdef CONFIG_FAIR_GROUP_SCHED
 339	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
 340
 341	/*
 342	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
 343	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
 344	 * (like users, containers etc.)
 345	 *
 346	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
 347	 * list is used during load balance.
 348	 */
 349	int on_list;
 350	struct list_head leaf_cfs_rq_list;
 351	struct task_group *tg;	/* group that "owns" this runqueue */
 352
 353#ifdef CONFIG_SMP
 354	/*
 355	 * the part of load.weight contributed by tasks
 356	 */
 357	unsigned long task_weight;
 358
 359	/*
 360	 *   h_load = weight * f(tg)
 361	 *
 362	 * Where f(tg) is the recursive weight fraction assigned to
 363	 * this group.
 364	 */
 365	unsigned long h_load;
 366
 367	/*
 368	 * Maintaining per-cpu shares distribution for group scheduling
 369	 *
 370	 * load_stamp is the last time we updated the load average
 371	 * load_last is the last time we updated the load average and saw load
 372	 * load_unacc_exec_time is currently unaccounted execution time
 373	 */
 374	u64 load_avg;
 375	u64 load_period;
 376	u64 load_stamp, load_last, load_unacc_exec_time;
 377
 378	unsigned long load_contribution;
 379#endif
 380#endif
 381};
 382
 383/* Real-Time classes' related field in a runqueue: */
 384struct rt_rq {
 385	struct rt_prio_array active;
 386	unsigned long rt_nr_running;
 387#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 388	struct {
 389		int curr; /* highest queued rt task prio */
 390#ifdef CONFIG_SMP
 391		int next; /* next highest */
 392#endif
 393	} highest_prio;
 394#endif
 395#ifdef CONFIG_SMP
 396	unsigned long rt_nr_migratory;
 397	unsigned long rt_nr_total;
 398	int overloaded;
 399	struct plist_head pushable_tasks;
 400#endif
 401	int rt_throttled;
 402	u64 rt_time;
 403	u64 rt_runtime;
 404	/* Nests inside the rq lock: */
 405	raw_spinlock_t rt_runtime_lock;
 406
 407#ifdef CONFIG_RT_GROUP_SCHED
 408	unsigned long rt_nr_boosted;
 409
 410	struct rq *rq;
 411	struct list_head leaf_rt_rq_list;
 412	struct task_group *tg;
 413#endif
 414};
 415
 416#ifdef CONFIG_SMP
 417
 418/*
 419 * We add the notion of a root-domain which will be used to define per-domain
 420 * variables. Each exclusive cpuset essentially defines an island domain by
 421 * fully partitioning the member cpus from any other cpuset. Whenever a new
 422 * exclusive cpuset is created, we also create and attach a new root-domain
 423 * object.
 424 *
 425 */
 426struct root_domain {
 427	atomic_t refcount;
 428	atomic_t rto_count;
 429	struct rcu_head rcu;
 430	cpumask_var_t span;
 431	cpumask_var_t online;
 432
 433	/*
 434	 * The "RT overload" flag: it gets set if a CPU has more than
 435	 * one runnable RT task.
 436	 */
 437	cpumask_var_t rto_mask;
 438	struct cpupri cpupri;
 439};
 440
 441/*
 442 * By default the system creates a single root-domain with all cpus as
 443 * members (mimicking the global state we have today).
 444 */
 445static struct root_domain def_root_domain;
 446
 447#endif /* CONFIG_SMP */
 448
 449/*
 450 * This is the main, per-CPU runqueue data structure.
 451 *
 452 * Locking rule: in places that need to lock multiple runqueues
 453 * (such as the load-balancing or thread-migration code), the lock
 454 * acquisitions must be ordered by ascending runqueue address.
 455 */
 456struct rq {
 457	/* runqueue lock: */
 458	raw_spinlock_t lock;
 459
 460	/*
 461	 * nr_running and cpu_load should be in the same cacheline because
 462	 * remote CPUs use both these fields when doing load calculation.
 463	 */
 464	unsigned long nr_running;
 465	#define CPU_LOAD_IDX_MAX 5
 466	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 467	unsigned long last_load_update_tick;
 468#ifdef CONFIG_NO_HZ
 469	u64 nohz_stamp;
 470	unsigned char nohz_balance_kick;
 471#endif
 472	int skip_clock_update;
 473
 474	/* capture load from *all* tasks on this cpu: */
 475	struct load_weight load;
 476	unsigned long nr_load_updates;
 477	u64 nr_switches;
 478
 479	struct cfs_rq cfs;
 480	struct rt_rq rt;
 481
 482#ifdef CONFIG_FAIR_GROUP_SCHED
 483	/* list of leaf cfs_rq on this cpu: */
 484	struct list_head leaf_cfs_rq_list;
 485#endif
 486#ifdef CONFIG_RT_GROUP_SCHED
 487	struct list_head leaf_rt_rq_list;
 488#endif
 489
 490	/*
 491	 * This is part of a global counter where only the total sum
 492	 * over all CPUs matters. A task can increase this counter on
 493	 * one CPU and if it got migrated afterwards it may decrease
 494	 * it on another CPU. Always updated under the runqueue lock:
 495	 */
 496	unsigned long nr_uninterruptible;
 497
 498	struct task_struct *curr, *idle, *stop;
 499	unsigned long next_balance;
 500	struct mm_struct *prev_mm;
 501
 502	u64 clock;
 503	u64 clock_task;
 504
 505	atomic_t nr_iowait;
 506
 507#ifdef CONFIG_SMP
 508	struct root_domain *rd;
 509	struct sched_domain *sd;
 510
 511	unsigned long cpu_power;
 512
 513	unsigned char idle_at_tick;
 514	/* For active balancing */
 515	int post_schedule;
 516	int active_balance;
 517	int push_cpu;
 518	struct cpu_stop_work active_balance_work;
 519	/* cpu of this runqueue: */
 520	int cpu;
 521	int online;
 522
 523	unsigned long avg_load_per_task;
 524
 525	u64 rt_avg;
 526	u64 age_stamp;
 527	u64 idle_stamp;
 528	u64 avg_idle;
 529#endif
 530
 531#ifdef CONFIG_IRQ_TIME_ACCOUNTING
 532	u64 prev_irq_time;
 533#endif
 534#ifdef CONFIG_PARAVIRT
 535	u64 prev_steal_time;
 536#endif
 537#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
 538	u64 prev_steal_time_rq;
 539#endif
 540
 541	/* calc_load related fields */
 542	unsigned long calc_load_update;
 543	long calc_load_active;
 544
 545#ifdef CONFIG_SCHED_HRTICK
 546#ifdef CONFIG_SMP
 547	int hrtick_csd_pending;
 548	struct call_single_data hrtick_csd;
 549#endif
 550	struct hrtimer hrtick_timer;
 551#endif
 552
 553#ifdef CONFIG_SCHEDSTATS
 554	/* latency stats */
 555	struct sched_info rq_sched_info;
 556	unsigned long long rq_cpu_time;
 557	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 558
 559	/* sys_sched_yield() stats */
 560	unsigned int yld_count;
 561
 562	/* schedule() stats */
 563	unsigned int sched_switch;
 564	unsigned int sched_count;
 565	unsigned int sched_goidle;
 566
 567	/* try_to_wake_up() stats */
 568	unsigned int ttwu_count;
 569	unsigned int ttwu_local;
 570#endif
 571
 572#ifdef CONFIG_SMP
 573	struct task_struct *wake_list;
 574#endif
 575};
 576
 577static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 578
 579
 580static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 581
 582static inline int cpu_of(struct rq *rq)
 583{
 584#ifdef CONFIG_SMP
 585	return rq->cpu;
 586#else
 587	return 0;
 588#endif
 589}
 590
 591#define rcu_dereference_check_sched_domain(p) \
 592	rcu_dereference_check((p), \
 593			      lockdep_is_held(&sched_domains_mutex))
 594
 595/*
 596 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 597 * See detach_destroy_domains: synchronize_sched for details.
 598 *
 599 * The domain tree of any CPU may only be accessed from within
 600 * preempt-disabled sections.
 601 */
 602#define for_each_domain(cpu, __sd) \
 603	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
 604
 605#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
 606#define this_rq()		(&__get_cpu_var(runqueues))
 607#define task_rq(p)		cpu_rq(task_cpu(p))
 608#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 609#define raw_rq()		(&__raw_get_cpu_var(runqueues))
 610
 611#ifdef CONFIG_CGROUP_SCHED
 612
 613/*
 614 * Return the group to which this task belongs.
 615 *
 616 * We use task_subsys_state_check() and extend the RCU verification with
 617 * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
 618 * task it moves into the cgroup. Therefore by holding either of those locks,
 619 * we pin the task to the current cgroup.
 620 */
 621static inline struct task_group *task_group(struct task_struct *p)
 622{
 623	struct task_group *tg;
 624	struct cgroup_subsys_state *css;
 625
 626	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
 627			lockdep_is_held(&p->pi_lock) ||
 628			lockdep_is_held(&task_rq(p)->lock));
 629	tg = container_of(css, struct task_group, css);
 630
 631	return autogroup_task_group(p, tg);
 632}
 633
 634/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 635static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 636{
 637#ifdef CONFIG_FAIR_GROUP_SCHED
 638	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
 639	p->se.parent = task_group(p)->se[cpu];
 640#endif
 641
 642#ifdef CONFIG_RT_GROUP_SCHED
 643	p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
 644	p->rt.parent = task_group(p)->rt_se[cpu];
 645#endif
 646}
 647
 648#else /* CONFIG_CGROUP_SCHED */
 649
 650static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
 651static inline struct task_group *task_group(struct task_struct *p)
 652{
 653	return NULL;
 654}
 655
 656#endif /* CONFIG_CGROUP_SCHED */
 657
 658static void update_rq_clock_task(struct rq *rq, s64 delta);
 659
 660static void update_rq_clock(struct rq *rq)
 661{
 662	s64 delta;
 663
 664	if (rq->skip_clock_update > 0)
 665		return;
 666
 667	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
 668	rq->clock += delta;
 669	update_rq_clock_task(rq, delta);
 670}
 671
 672/*
 673 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 674 */
 675#ifdef CONFIG_SCHED_DEBUG
 676# define const_debug __read_mostly
 677#else
 678# define const_debug static const
 679#endif
 680
 681/**
 682 * runqueue_is_locked - Returns true if the current cpu runqueue is locked
 683 * @cpu: the processor in question.
 684 *
 685 * This interface allows printk to be called with the runqueue lock
 686 * held and know whether or not it is OK to wake up the klogd.
 687 */
 688int runqueue_is_locked(int cpu)
 689{
 690	return raw_spin_is_locked(&cpu_rq(cpu)->lock);
 691}
 692
 693/*
 694 * Debugging: various feature bits
 695 */
 696
 697#define SCHED_FEAT(name, enabled)	\
 698	__SCHED_FEAT_##name ,
 699
 700enum {
 701#include "sched_features.h"
 702};
 703
 704#undef SCHED_FEAT
 705
 706#define SCHED_FEAT(name, enabled)	\
 707	(1UL << __SCHED_FEAT_##name) * enabled |
 708
 709const_debug unsigned int sysctl_sched_features =
 710#include "sched_features.h"
 711	0;
 712
 713#undef SCHED_FEAT
 714
 715#ifdef CONFIG_SCHED_DEBUG
 716#define SCHED_FEAT(name, enabled)	\
 717	#name ,
 718
 719static __read_mostly char *sched_feat_names[] = {
 720#include "sched_features.h"
 721	NULL
 722};
 723
 724#undef SCHED_FEAT
 725
 726static int sched_feat_show(struct seq_file *m, void *v)
 727{
 728	int i;
 729
 730	for (i = 0; sched_feat_names[i]; i++) {
 731		if (!(sysctl_sched_features & (1UL << i)))
 732			seq_puts(m, "NO_");
 733		seq_printf(m, "%s ", sched_feat_names[i]);
 734	}
 735	seq_puts(m, "\n");
 736
 737	return 0;
 738}
 739
 740static ssize_t
 741sched_feat_write(struct file *filp, const char __user *ubuf,
 742		size_t cnt, loff_t *ppos)
 743{
 744	char buf[64];
 745	char *cmp;
 746	int neg = 0;
 747	int i;
 748
 749	if (cnt > 63)
 750		cnt = 63;
 751
 752	if (copy_from_user(&buf, ubuf, cnt))
 753		return -EFAULT;
 754
 755	buf[cnt] = 0;
 756	cmp = strstrip(buf);
 757
 758	if (strncmp(cmp, "NO_", 3) == 0) {
 759		neg = 1;
 760		cmp += 3;
 761	}
 762
 763	for (i = 0; sched_feat_names[i]; i++) {
 764		if (strcmp(cmp, sched_feat_names[i]) == 0) {
 765			if (neg)
 766				sysctl_sched_features &= ~(1UL << i);
 767			else
 768				sysctl_sched_features |= (1UL << i);
 769			break;
 770		}
 771	}
 772
 773	if (!sched_feat_names[i])
 774		return -EINVAL;
 775
 776	*ppos += cnt;
 777
 778	return cnt;
 779}
 780
 781static int sched_feat_open(struct inode *inode, struct file *filp)
 782{
 783	return single_open(filp, sched_feat_show, NULL);
 784}
 785
 786static const struct file_operations sched_feat_fops = {
 787	.open		= sched_feat_open,
 788	.write		= sched_feat_write,
 789	.read		= seq_read,
 790	.llseek		= seq_lseek,
 791	.release	= single_release,
 792};
 793
 794static __init int sched_init_debug(void)
 795{
 796	debugfs_create_file("sched_features", 0644, NULL, NULL,
 797			&sched_feat_fops);
 798
 799	return 0;
 800}
 801late_initcall(sched_init_debug);
 802
 803#endif
 804
 805#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
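/*
 * Editor's note (not part of the original kernel/sched.c): the block above is
 * the usual "include the same header several times" trick. sched_features.h
 * contains entries of the form SCHED_FEAT(name, default) -- e.g. the HRTICK,
 * LB_BIAS and NONTASK_POWER features tested elsewhere in this file -- and is
 * expanded once to build the __SCHED_FEAT_* enum of bit indices, once to OR
 * the default-enabled bits into sysctl_sched_features, and (under
 * CONFIG_SCHED_DEBUG) once more to build the name table behind the debugfs
 * "sched_features" file. sched_feat(x) then simply tests the matching bit.
 */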
 806
 807/*
 808 * Number of tasks to iterate in a single balance run.
 809 * Limited because this is done with IRQs disabled.
 810 */
 811const_debug unsigned int sysctl_sched_nr_migrate = 32;
 812
 813/*
 814 * period over which we average the RT time consumption, measured
 815 * in ms.
 816 *
 817 * default: 1s
 818 */
 819const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
 820
 821/*
 822 * period over which we measure -rt task cpu usage in us.
 823 * default: 1s
 824 */
 825unsigned int sysctl_sched_rt_period = 1000000;
 826
 827static __read_mostly int scheduler_running;
 828
 829/*
 830 * part of the period that we allow rt tasks to run in us.
 831 * default: 0.95s
 832 */
 833int sysctl_sched_rt_runtime = 950000;
 834
 835static inline u64 global_rt_period(void)
 836{
 837	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
 838}
 839
 840static inline u64 global_rt_runtime(void)
 841{
 842	if (sysctl_sched_rt_runtime < 0)
 843		return RUNTIME_INF;
 844
 845	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
 846}
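/*
 * Editor's note (not part of the original kernel/sched.c): with the defaults
 * above (sched_rt_period_us == 1,000,000 and sched_rt_runtime_us == 950,000),
 * global_rt_period() returns 1e9 ns and global_rt_runtime() returns 9.5e8 ns,
 * so SCHED_FIFO/SCHED_RR tasks may consume at most 0.95 s of every 1 s period
 * before being throttled. Writing -1 to sched_rt_runtime_us makes
 * global_rt_runtime() return RUNTIME_INF, i.e. no RT throttling at all.
 */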
 847
 848#ifndef prepare_arch_switch
 849# define prepare_arch_switch(next)	do { } while (0)
 850#endif
 851#ifndef finish_arch_switch
 852# define finish_arch_switch(prev)	do { } while (0)
 853#endif
 854
 855static inline int task_current(struct rq *rq, struct task_struct *p)
 856{
 857	return rq->curr == p;
 858}
 859
 860static inline int task_running(struct rq *rq, struct task_struct *p)
 861{
 862#ifdef CONFIG_SMP
 863	return p->on_cpu;
 864#else
 865	return task_current(rq, p);
 866#endif
 867}
 868
 869#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 870static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 871{
 872#ifdef CONFIG_SMP
 873	/*
 874	 * We can optimise this out completely for !SMP, because the
 875	 * SMP rebalancing from interrupt is the only thing that cares
 876	 * here.
 877	 */
 878	next->on_cpu = 1;
 879#endif
 880}
 881
 882static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 883{
 884#ifdef CONFIG_SMP
 885	/*
 886	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
 887	 * We must ensure this doesn't happen until the switch is completely
 888	 * finished.
 889	 */
 890	smp_wmb();
 891	prev->on_cpu = 0;
 892#endif
 893#ifdef CONFIG_DEBUG_SPINLOCK
 894	/* this is a valid case when another task releases the spinlock */
 895	rq->lock.owner = current;
 896#endif
 897	/*
 898	 * If we are tracking spinlock dependencies then we have to
 899	 * fix up the runqueue lock - which gets 'carried over' from
 900	 * prev into current:
 901	 */
 902	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
 903
 904	raw_spin_unlock_irq(&rq->lock);
 905}
 906
 907#else /* __ARCH_WANT_UNLOCKED_CTXSW */
 908static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 909{
 910#ifdef CONFIG_SMP
 911	/*
 912	 * We can optimise this out completely for !SMP, because the
 913	 * SMP rebalancing from interrupt is the only thing that cares
 914	 * here.
 915	 */
 916	next->on_cpu = 1;
 917#endif
 918#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 919	raw_spin_unlock_irq(&rq->lock);
 920#else
 921	raw_spin_unlock(&rq->lock);
 922#endif
 923}
 924
 925static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 926{
 927#ifdef CONFIG_SMP
 928	/*
 929	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
 930	 * We must ensure this doesn't happen until the switch is completely
 931	 * finished.
 932	 */
 933	smp_wmb();
 934	prev->on_cpu = 0;
 935#endif
 936#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 937	local_irq_enable();
 938#endif
 939}
 940#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 941
 942/*
 943 * __task_rq_lock - lock the rq @p resides on.
 944 */
 945static inline struct rq *__task_rq_lock(struct task_struct *p)
 946	__acquires(rq->lock)
 947{
 948	struct rq *rq;
 949
 950	lockdep_assert_held(&p->pi_lock);
 951
 952	for (;;) {
 953		rq = task_rq(p);
 954		raw_spin_lock(&rq->lock);
 955		if (likely(rq == task_rq(p)))
 956			return rq;
 957		raw_spin_unlock(&rq->lock);
 958	}
 959}
 960
 961/*
 962 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 963 */
 964static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 965	__acquires(p->pi_lock)
 966	__acquires(rq->lock)
 967{
 968	struct rq *rq;
 969
 970	for (;;) {
 971		raw_spin_lock_irqsave(&p->pi_lock, *flags);
 972		rq = task_rq(p);
 973		raw_spin_lock(&rq->lock);
 974		if (likely(rq == task_rq(p)))
 975			return rq;
 976		raw_spin_unlock(&rq->lock);
 977		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
 978	}
 979}
 980
 981static void __task_rq_unlock(struct rq *rq)
 982	__releases(rq->lock)
 983{
 984	raw_spin_unlock(&rq->lock);
 985}
 986
 987static inline void
 988task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
 989	__releases(rq->lock)
 990	__releases(p->pi_lock)
 991{
 992	raw_spin_unlock(&rq->lock);
 993	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
 994}
 995
 996/*
 997 * this_rq_lock - lock this runqueue and disable interrupts.
 998 */
 999static struct rq *this_rq_lock(void)
1000	__acquires(rq->lock)
1001{
1002	struct rq *rq;
1003
1004	local_irq_disable();
1005	rq = this_rq();
1006	raw_spin_lock(&rq->lock);
1007
1008	return rq;
1009}
1010
1011#ifdef CONFIG_SCHED_HRTICK
1012/*
1013 * Use HR-timers to deliver accurate preemption points.
1014 *
1015 * It's all a bit involved since we cannot program an hrtimer while holding
1016 * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
1017 * reschedule event.
1018 *
1019 * When we get rescheduled we reprogram the hrtick_timer outside of the
1020 * rq->lock.
1021 */
1022
1023/*
1024 * Use hrtick when:
1025 *  - enabled by features
1026 *  - hrtimer is actually high res
1027 */
1028static inline int hrtick_enabled(struct rq *rq)
1029{
1030	if (!sched_feat(HRTICK))
1031		return 0;
1032	if (!cpu_active(cpu_of(rq)))
1033		return 0;
1034	return hrtimer_is_hres_active(&rq->hrtick_timer);
1035}
1036
1037static void hrtick_clear(struct rq *rq)
1038{
1039	if (hrtimer_active(&rq->hrtick_timer))
1040		hrtimer_cancel(&rq->hrtick_timer);
1041}
1042
1043/*
1044 * High-resolution timer tick.
1045 * Runs from hardirq context with interrupts disabled.
1046 */
1047static enum hrtimer_restart hrtick(struct hrtimer *timer)
1048{
1049	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
1050
1051	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1052
1053	raw_spin_lock(&rq->lock);
1054	update_rq_clock(rq);
1055	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
1056	raw_spin_unlock(&rq->lock);
1057
1058	return HRTIMER_NORESTART;
1059}
1060
1061#ifdef CONFIG_SMP
1062/*
1063 * called from hardirq (IPI) context
1064 */
1065static void __hrtick_start(void *arg)
1066{
1067	struct rq *rq = arg;
1068
1069	raw_spin_lock(&rq->lock);
1070	hrtimer_restart(&rq->hrtick_timer);
1071	rq->hrtick_csd_pending = 0;
1072	raw_spin_unlock(&rq->lock);
1073}
1074
1075/*
1076 * Called to set the hrtick timer state.
1077 *
1078 * called with rq->lock held and irqs disabled
1079 */
1080static void hrtick_start(struct rq *rq, u64 delay)
1081{
1082	struct hrtimer *timer = &rq->hrtick_timer;
1083	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
1084
1085	hrtimer_set_expires(timer, time);
1086
1087	if (rq == this_rq()) {
1088		hrtimer_restart(timer);
1089	} else if (!rq->hrtick_csd_pending) {
1090		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
1091		rq->hrtick_csd_pending = 1;
1092	}
1093}
1094
1095static int
1096hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
1097{
1098	int cpu = (int)(long)hcpu;
1099
1100	switch (action) {
1101	case CPU_UP_CANCELED:
1102	case CPU_UP_CANCELED_FROZEN:
1103	case CPU_DOWN_PREPARE:
1104	case CPU_DOWN_PREPARE_FROZEN:
1105	case CPU_DEAD:
1106	case CPU_DEAD_FROZEN:
1107		hrtick_clear(cpu_rq(cpu));
1108		return NOTIFY_OK;
1109	}
1110
1111	return NOTIFY_DONE;
1112}
1113
1114static __init void init_hrtick(void)
1115{
1116	hotcpu_notifier(hotplug_hrtick, 0);
1117}
1118#else
1119/*
1120 * Called to set the hrtick timer state.
1121 *
1122 * called with rq->lock held and irqs disabled
1123 */
1124static void hrtick_start(struct rq *rq, u64 delay)
1125{
1126	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
1127			HRTIMER_MODE_REL_PINNED, 0);
1128}
1129
1130static inline void init_hrtick(void)
1131{
1132}
1133#endif /* CONFIG_SMP */
1134
1135static void init_rq_hrtick(struct rq *rq)
1136{
1137#ifdef CONFIG_SMP
1138	rq->hrtick_csd_pending = 0;
1139
1140	rq->hrtick_csd.flags = 0;
1141	rq->hrtick_csd.func = __hrtick_start;
1142	rq->hrtick_csd.info = rq;
1143#endif
1144
1145	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1146	rq->hrtick_timer.function = hrtick;
1147}
1148#else	/* CONFIG_SCHED_HRTICK */
1149static inline void hrtick_clear(struct rq *rq)
1150{
1151}
1152
1153static inline void init_rq_hrtick(struct rq *rq)
1154{
1155}
1156
1157static inline void init_hrtick(void)
1158{
1159}
1160#endif	/* CONFIG_SCHED_HRTICK */
1161
1162/*
1163 * resched_task - mark a task 'to be rescheduled now'.
1164 *
1165 * On UP this means the setting of the need_resched flag, on SMP it
1166 * might also involve a cross-CPU call to trigger the scheduler on
1167 * the target CPU.
1168 */
1169#ifdef CONFIG_SMP
1170
1171#ifndef tsk_is_polling
1172#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
1173#endif
1174
1175static void resched_task(struct task_struct *p)
1176{
1177	int cpu;
1178
1179	assert_raw_spin_locked(&task_rq(p)->lock);
1180
1181	if (test_tsk_need_resched(p))
1182		return;
1183
1184	set_tsk_need_resched(p);
1185
1186	cpu = task_cpu(p);
1187	if (cpu == smp_processor_id())
1188		return;
1189
1190	/* NEED_RESCHED must be visible before we test polling */
1191	smp_mb();
1192	if (!tsk_is_polling(p))
1193		smp_send_reschedule(cpu);
1194}
1195
1196static void resched_cpu(int cpu)
1197{
1198	struct rq *rq = cpu_rq(cpu);
1199	unsigned long flags;
1200
1201	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
1202		return;
1203	resched_task(cpu_curr(cpu));
1204	raw_spin_unlock_irqrestore(&rq->lock, flags);
1205}
1206
1207#ifdef CONFIG_NO_HZ
1208/*
1209 * In the semi idle case, use the nearest busy cpu for migrating timers
1210 * from an idle cpu.  This is good for power-savings.
1211 *
1212 * We don't do similar optimization for completely idle system, as
1213 * selecting an idle cpu will add more delays to the timers than intended
1214 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
1215 */
1216int get_nohz_timer_target(void)
1217{
1218	int cpu = smp_processor_id();
1219	int i;
1220	struct sched_domain *sd;
1221
1222	rcu_read_lock();
1223	for_each_domain(cpu, sd) {
1224		for_each_cpu(i, sched_domain_span(sd)) {
1225			if (!idle_cpu(i)) {
1226				cpu = i;
1227				goto unlock;
1228			}
1229		}
1230	}
1231unlock:
1232	rcu_read_unlock();
1233	return cpu;
1234}
1235/*
1236 * When add_timer_on() enqueues a timer into the timer wheel of an
1237 * idle CPU then this timer might expire before the next timer event
1238 * which is scheduled to wake up that CPU. In case of a completely
1239 * idle system the next event might even be infinite time into the
1240 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1241 * leaves the inner idle loop so the newly added timer is taken into
1242 * account when the CPU goes back to idle and evaluates the timer
1243 * wheel for the next timer event.
1244 */
1245void wake_up_idle_cpu(int cpu)
1246{
1247	struct rq *rq = cpu_rq(cpu);
1248
1249	if (cpu == smp_processor_id())
1250		return;
1251
1252	/*
1253	 * This is safe, as this function is called with the timer
1254	 * wheel base lock of (cpu) held. When the CPU is on the way
1255	 * to idle and has not yet set rq->curr to idle then it will
1256	 * be serialized on the timer wheel base lock and take the new
1257	 * timer into account automatically.
1258	 */
1259	if (rq->curr != rq->idle)
1260		return;
1261
1262	/*
1263	 * We can set TIF_RESCHED on the idle task of the other CPU
1264	 * lockless. The worst case is that the other CPU runs the
1265	 * idle task through an additional NOOP schedule()
1266	 */
1267	set_tsk_need_resched(rq->idle);
1268
1269	/* NEED_RESCHED must be visible before we test polling */
1270	smp_mb();
1271	if (!tsk_is_polling(rq->idle))
1272		smp_send_reschedule(cpu);
1273}
1274
1275#endif /* CONFIG_NO_HZ */
1276
1277static u64 sched_avg_period(void)
1278{
1279	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1280}
1281
1282static void sched_avg_update(struct rq *rq)
1283{
1284	s64 period = sched_avg_period();
1285
1286	while ((s64)(rq->clock - rq->age_stamp) > period) {
1287		/*
1288		 * Inline assembly required to prevent the compiler
1289		 * optimising this loop into a divmod call.
1290		 * See __iter_div_u64_rem() for another example of this.
1291		 */
1292		asm("" : "+rm" (rq->age_stamp));
1293		rq->age_stamp += period;
1294		rq->rt_avg /= 2;
1295	}
1296}
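/*
 * Editor's note (not part of the original kernel/sched.c): with the default
 * sysctl_sched_time_avg of 1000 ms, sched_avg_period() is 500 ms expressed in
 * nanoseconds, so the loop above halves rq->rt_avg once for every 500 ms by
 * which rq->clock has moved past rq->age_stamp -- a cheap exponential decay
 * of the accumulated RT (and, via NONTASK_POWER, irq/steal) time.
 */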
1297
1298static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1299{
1300	rq->rt_avg += rt_delta;
1301	sched_avg_update(rq);
1302}
1303
1304#else /* !CONFIG_SMP */
1305static void resched_task(struct task_struct *p)
1306{
1307	assert_raw_spin_locked(&task_rq(p)->lock);
1308	set_tsk_need_resched(p);
1309}
1310
1311static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1312{
1313}
1314
1315static void sched_avg_update(struct rq *rq)
1316{
1317}
1318#endif /* CONFIG_SMP */
1319
1320#if BITS_PER_LONG == 32
1321# define WMULT_CONST	(~0UL)
1322#else
1323# define WMULT_CONST	(1UL << 32)
1324#endif
1325
1326#define WMULT_SHIFT	32
1327
1328/*
1329 * Shift right and round:
1330 */
1331#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
1332
1333/*
1334 * delta *= weight / lw
1335 */
1336static unsigned long
1337calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1338		struct load_weight *lw)
1339{
1340	u64 tmp;
1341
1342	/*
1343	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
1344	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
1345	 * 2^SCHED_LOAD_RESOLUTION.
1346	 */
1347	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
1348		tmp = (u64)delta_exec * scale_load_down(weight);
1349	else
1350		tmp = (u64)delta_exec;
1351
1352	if (!lw->inv_weight) {
1353		unsigned long w = scale_load_down(lw->weight);
1354
1355		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
1356			lw->inv_weight = 1;
1357		else if (unlikely(!w))
1358			lw->inv_weight = WMULT_CONST;
1359		else
1360			lw->inv_weight = WMULT_CONST / w;
1361	}
1362
1363	/*
1364	 * Check whether we'd overflow the 64-bit multiplication:
1365	 */
1366	if (unlikely(tmp > WMULT_CONST))
1367		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
1368			WMULT_SHIFT/2);
1369	else
1370		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
1371
1372	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
1373}
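/*
 * Editor's note (not part of the original kernel/sched.c): calc_delta_mine()
 * computes (approximately) delta_exec * weight / lw->weight, with the division
 * replaced by a multiply by the cached inverse and a 32-bit shift. A worked
 * example, taking WMULT_CONST as 2^32 (the 64-bit case) and treating
 * scale_load_down() as a no-op (SCHED_LOAD_RESOLUTION == 0):
 *
 *   delta_exec = 1,000,000 ns, weight = 1024, lw->weight = 2048
 *   lw->inv_weight = 2^32 / 2048          = 2,097,152
 *   tmp            = 1,000,000 * 1024     = 1,024,000,000   (< WMULT_CONST)
 *   SRR(tmp * inv_weight, 32)             = 500,000 ns      == delta_exec / 2
 *
 * i.e. an entity holding half of the queue's weight is credited half of the
 * elapsed time, which is the proportional-share arithmetic CFS is built on.
 */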
1374
1375static inline void update_load_add(struct load_weight *lw, unsigned long inc)
1376{
1377	lw->weight += inc;
1378	lw->inv_weight = 0;
1379}
1380
1381static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
1382{
1383	lw->weight -= dec;
1384	lw->inv_weight = 0;
1385}
1386
1387static inline void update_load_set(struct load_weight *lw, unsigned long w)
1388{
1389	lw->weight = w;
1390	lw->inv_weight = 0;
1391}
1392
1393/*
1394 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1395 * of tasks with abnormal "nice" values across CPUs the contribution that
1396 * each task makes to its run queue's load is weighted according to its
1397 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1398 * scaled version of the new time slice allocation that they receive on time
1399 * slice expiry etc.
1400 */
1401
1402#define WEIGHT_IDLEPRIO                3
1403#define WMULT_IDLEPRIO         1431655765
1404
1405/*
1406 * Nice levels are multiplicative, with a gentle 10% change for every
1407 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1408 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1409 * that remained on nice 0.
1410 *
1411 * The "10% effect" is relative and cumulative: from _any_ nice level,
1412 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
1413 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1414 * If a task goes up by ~10% and another task goes down by ~10% then
1415 * the relative distance between them is ~25%.)
1416 */
1417static const int prio_to_weight[40] = {
1418 /* -20 */     88761,     71755,     56483,     46273,     36291,
1419 /* -15 */     29154,     23254,     18705,     14949,     11916,
1420 /* -10 */      9548,      7620,      6100,      4904,      3906,
1421 /*  -5 */      3121,      2501,      1991,      1586,      1277,
1422 /*   0 */      1024,       820,       655,       526,       423,
1423 /*   5 */       335,       272,       215,       172,       137,
1424 /*  10 */       110,        87,        70,        56,        45,
1425 /*  15 */        36,        29,        23,        18,        15,
1426};
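/*
 * Editor's note (not part of the original kernel/sched.c): a quick sanity
 * check of the "10% effect" using the table above. Two CPU-bound tasks at
 * nice 0 (weight 1024) and nice 1 (weight 820) sharing one CPU receive
 * 1024/1844 ~= 55.5% and 820/1844 ~= 44.5% of it respectively, roughly a
 * 10 percentage-point gap; the per-level ratio 1024/820 ~= 1.25 is the
 * multiplier mentioned in the comment.
 */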
1427
1428/*
1429 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1430 *
1431 * In cases where the weight does not change often, we can use the
 1432 * precalculated inverse to speed up the arithmetic by turning divisions
1433 * into multiplications:
1434 */
1435static const u32 prio_to_wmult[40] = {
1436 /* -20 */     48388,     59856,     76040,     92818,    118348,
1437 /* -15 */    147320,    184698,    229616,    287308,    360437,
1438 /* -10 */    449829,    563644,    704093,    875809,   1099582,
1439 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
1440 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
1441 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
1442 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
1443 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
1444};
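/*
 * Editor's note (not part of the original kernel/sched.c): each entry above
 * is 2^32 divided by the corresponding prio_to_weight[] entry, e.g.
 * 2^32 / 1024 == 4,194,304 for nice 0 and 2^32 / 88761 ~= 48,388 for nice -20.
 * The same relationship holds for WMULT_IDLEPRIO (1431655765 ~= 2^32 / 3)
 * and WEIGHT_IDLEPRIO (3) defined earlier.
 */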
1445
1446/* Time spent by the tasks of the cpu accounting group executing in ... */
1447enum cpuacct_stat_index {
1448	CPUACCT_STAT_USER,	/* ... user mode */
1449	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */
1450
1451	CPUACCT_STAT_NSTATS,
1452};
1453
1454#ifdef CONFIG_CGROUP_CPUACCT
1455static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
1456static void cpuacct_update_stats(struct task_struct *tsk,
1457		enum cpuacct_stat_index idx, cputime_t val);
1458#else
1459static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
1460static inline void cpuacct_update_stats(struct task_struct *tsk,
1461		enum cpuacct_stat_index idx, cputime_t val) {}
1462#endif
1463
1464static inline void inc_cpu_load(struct rq *rq, unsigned long load)
1465{
1466	update_load_add(&rq->load, load);
1467}
1468
1469static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1470{
1471	update_load_sub(&rq->load, load);
1472}
1473
1474#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
1475typedef int (*tg_visitor)(struct task_group *, void *);
1476
1477/*
1478 * Iterate the full tree, calling @down when first entering a node and @up when
1479 * leaving it for the final time.
1480 */
1481static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
1482{
1483	struct task_group *parent, *child;
1484	int ret;
1485
1486	rcu_read_lock();
1487	parent = &root_task_group;
1488down:
1489	ret = (*down)(parent, data);
1490	if (ret)
1491		goto out_unlock;
1492	list_for_each_entry_rcu(child, &parent->children, siblings) {
1493		parent = child;
1494		goto down;
1495
1496up:
1497		continue;
1498	}
1499	ret = (*up)(parent, data);
1500	if (ret)
1501		goto out_unlock;
1502
1503	child = parent;
1504	parent = parent->parent;
1505	if (parent)
1506		goto up;
1507out_unlock:
1508	rcu_read_unlock();
1509
1510	return ret;
1511}
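/*
 * Editor's note (not part of the original kernel/sched.c): walk_tg_tree()
 * above is an iterative depth-first walk of the task_group hierarchy rooted
 * at root_task_group, written with gotos instead of recursion to keep kernel
 * stack usage flat. (*down) runs when a group is first entered, its children
 * are then visited, and (*up) runs when the group is left for the last time;
 * a non-zero return from either visitor aborts the walk. tg_nop() is the
 * no-op visitor for callers that only care about one direction.
 */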
1512
1513static int tg_nop(struct task_group *tg, void *data)
1514{
1515	return 0;
1516}
1517#endif
1518
1519#ifdef CONFIG_SMP
1520/* Used instead of source_load when we know the type == 0 */
1521static unsigned long weighted_cpuload(const int cpu)
1522{
1523	return cpu_rq(cpu)->load.weight;
1524}
1525
1526/*
1527 * Return a low guess at the load of a migration-source cpu weighted
1528 * according to the scheduling class and "nice" value.
1529 *
1530 * We want to under-estimate the load of migration sources, to
1531 * balance conservatively.
1532 */
1533static unsigned long source_load(int cpu, int type)
1534{
1535	struct rq *rq = cpu_rq(cpu);
1536	unsigned long total = weighted_cpuload(cpu);
1537
1538	if (type == 0 || !sched_feat(LB_BIAS))
1539		return total;
1540
1541	return min(rq->cpu_load[type-1], total);
1542}
1543
1544/*
1545 * Return a high guess at the load of a migration-target cpu weighted
1546 * according to the scheduling class and "nice" value.
1547 */
1548static unsigned long target_load(int cpu, int type)
1549{
1550	struct rq *rq = cpu_rq(cpu);
1551	unsigned long total = weighted_cpuload(cpu);
1552
1553	if (type == 0 || !sched_feat(LB_BIAS))
1554		return total;
1555
1556	return max(rq->cpu_load[type-1], total);
1557}
1558
1559static unsigned long power_of(int cpu)
1560{
1561	return cpu_rq(cpu)->cpu_power;
1562}
1563
1564static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
1565
1566static unsigned long cpu_avg_load_per_task(int cpu)
1567{
1568	struct rq *rq = cpu_rq(cpu);
1569	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
1570
1571	if (nr_running)
1572		rq->avg_load_per_task = rq->load.weight / nr_running;
1573	else
1574		rq->avg_load_per_task = 0;
1575
1576	return rq->avg_load_per_task;
1577}
1578
1579#ifdef CONFIG_PREEMPT
1580
1581static void double_rq_lock(struct rq *rq1, struct rq *rq2);
1582
1583/*
1584 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1585 * way at the expense of forcing extra atomic operations in all
1586 * invocations.  This assures that the double_lock is acquired using the
1587 * same underlying policy as the spinlock_t on this architecture, which
1588 * reduces latency compared to the unfair variant below.  However, it
1589 * also adds more overhead and therefore may reduce throughput.
1590 */
1591static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1592	__releases(this_rq->lock)
1593	__acquires(busiest->lock)
1594	__acquires(this_rq->lock)
1595{
1596	raw_spin_unlock(&this_rq->lock);
1597	double_rq_lock(this_rq, busiest);
1598
1599	return 1;
1600}
1601
1602#else
1603/*
1604 * Unfair double_lock_balance: Optimizes throughput at the expense of
1605 * latency by eliminating extra atomic operations when the locks are
1606 * already in proper order on entry.  This favors lower cpu-ids and will
1607 * grant the double lock to lower cpus over higher ids under contention,
1608 * regardless of entry order into the function.
1609 */
1610static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1611	__releases(this_rq->lock)
1612	__acquires(busiest->lock)
1613	__acquires(this_rq->lock)
1614{
1615	int ret = 0;
1616
1617	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1618		if (busiest < this_rq) {
1619			raw_spin_unlock(&this_rq->lock);
1620			raw_spin_lock(&busiest->lock);
1621			raw_spin_lock_nested(&this_rq->lock,
1622					      SINGLE_DEPTH_NESTING);
1623			ret = 1;
1624		} else
1625			raw_spin_lock_nested(&busiest->lock,
1626					      SINGLE_DEPTH_NESTING);
1627	}
1628	return ret;
1629}
1630
1631#endif /* CONFIG_PREEMPT */
1632
1633/*
1634 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1635 */
1636static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1637{
1638	if (unlikely(!irqs_disabled())) {
 1639		/* printk() doesn't work well under rq->lock */
1640		raw_spin_unlock(&this_rq->lock);
1641		BUG_ON(1);
1642	}
1643
1644	return _double_lock_balance(this_rq, busiest);
1645}
1646
1647static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1648	__releases(busiest->lock)
1649{
1650	raw_spin_unlock(&busiest->lock);
1651	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1652}
1653
1654/*
1655 * double_rq_lock - safely lock two runqueues
1656 *
1657 * Note this does not disable interrupts like task_rq_lock,
1658 * you need to do so manually before calling.
1659 */
1660static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1661	__acquires(rq1->lock)
1662	__acquires(rq2->lock)
1663{
1664	BUG_ON(!irqs_disabled());
1665	if (rq1 == rq2) {
1666		raw_spin_lock(&rq1->lock);
1667		__acquire(rq2->lock);	/* Fake it out ;) */
1668	} else {
1669		if (rq1 < rq2) {
1670			raw_spin_lock(&rq1->lock);
1671			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1672		} else {
1673			raw_spin_lock(&rq2->lock);
1674			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1675		}
1676	}
1677}
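/*
 * Editor's note (not part of the original kernel/sched.c): when the two
 * runqueues differ, the lock at the lower address is always taken first,
 * which implements the "ordered by ascending runqueue address" rule stated
 * in the struct rq comment and prevents an AB/BA deadlock between two CPUs
 * locking the same pair. raw_spin_lock_nested(..., SINGLE_DEPTH_NESTING)
 * tells lockdep that taking a second lock of the same class is intentional.
 */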
1678
1679/*
1680 * double_rq_unlock - safely unlock two runqueues
1681 *
1682 * Note this does not restore interrupts like task_rq_unlock,
1683 * you need to do so manually after calling.
1684 */
1685static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1686	__releases(rq1->lock)
1687	__releases(rq2->lock)
1688{
1689	raw_spin_unlock(&rq1->lock);
1690	if (rq1 != rq2)
1691		raw_spin_unlock(&rq2->lock);
1692	else
1693		__release(rq2->lock);
1694}
1695
1696#else /* CONFIG_SMP */
1697
1698/*
1699 * double_rq_lock - safely lock two runqueues
1700 *
1701 * Note this does not disable interrupts like task_rq_lock,
1702 * you need to do so manually before calling.
1703 */
1704static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1705	__acquires(rq1->lock)
1706	__acquires(rq2->lock)
1707{
1708	BUG_ON(!irqs_disabled());
1709	BUG_ON(rq1 != rq2);
1710	raw_spin_lock(&rq1->lock);
1711	__acquire(rq2->lock);	/* Fake it out ;) */
1712}
1713
1714/*
1715 * double_rq_unlock - safely unlock two runqueues
1716 *
1717 * Note this does not restore interrupts like task_rq_unlock,
1718 * you need to do so manually after calling.
1719 */
1720static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1721	__releases(rq1->lock)
1722	__releases(rq2->lock)
1723{
1724	BUG_ON(rq1 != rq2);
1725	raw_spin_unlock(&rq1->lock);
1726	__release(rq2->lock);
1727}
1728
1729#endif
1730
1731static void calc_load_account_idle(struct rq *this_rq);
1732static void update_sysctl(void);
1733static int get_update_sysctl_factor(void);
1734static void update_cpu_load(struct rq *this_rq);
1735
1736static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1737{
1738	set_task_rq(p, cpu);
1739#ifdef CONFIG_SMP
1740	/*
1741	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
 1742	 * successfully executed on another CPU. We must ensure that updates of
1743	 * per-task data have been completed by this moment.
1744	 */
1745	smp_wmb();
1746	task_thread_info(p)->cpu = cpu;
1747#endif
1748}
1749
1750static const struct sched_class rt_sched_class;
1751
1752#define sched_class_highest (&stop_sched_class)
1753#define for_each_class(class) \
1754   for (class = sched_class_highest; class; class = class->next)
1755
1756#include "sched_stats.h"
1757
1758static void inc_nr_running(struct rq *rq)
1759{
1760	rq->nr_running++;
1761}
1762
1763static void dec_nr_running(struct rq *rq)
1764{
1765	rq->nr_running--;
1766}
1767
1768static void set_load_weight(struct task_struct *p)
1769{
1770	int prio = p->static_prio - MAX_RT_PRIO;
1771	struct load_weight *load = &p->se.load;
1772
1773	/*
1774	 * SCHED_IDLE tasks get minimal weight:
1775	 */
1776	if (p->policy == SCHED_IDLE) {
1777		load->weight = scale_load(WEIGHT_IDLEPRIO);
1778		load->inv_weight = WMULT_IDLEPRIO;
1779		return;
1780	}
1781
1782	load->weight = scale_load(prio_to_weight[prio]);
1783	load->inv_weight = prio_to_wmult[prio];
1784}
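/*
 * Editor's note (not part of the original kernel/sched.c): the index used
 * above, static_prio - MAX_RT_PRIO, maps nice -20..19 onto entries 0..39 of
 * prio_to_weight[]/prio_to_wmult[]. SCHED_IDLE tasks bypass the table and get
 * WEIGHT_IDLEPRIO (3), so against a single nice-0 task (weight 1024) an idle
 * task receives roughly 3/1027 ~= 0.3% of the CPU.
 */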
1785
1786static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
1787{
1788	update_rq_clock(rq);
1789	sched_info_queued(p);
1790	p->sched_class->enqueue_task(rq, p, flags);
1791}
1792
1793static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
1794{
1795	update_rq_clock(rq);
1796	sched_info_dequeued(p);
1797	p->sched_class->dequeue_task(rq, p, flags);
1798}
1799
1800/*
1801 * activate_task - move a task to the runqueue.
1802 */
1803static void activate_task(struct rq *rq, struct task_struct *p, int flags)
1804{
1805	if (task_contributes_to_load(p))
1806		rq->nr_uninterruptible--;
1807
1808	enqueue_task(rq, p, flags);
1809	inc_nr_running(rq);
1810}
1811
1812/*
1813 * deactivate_task - remove a task from the runqueue.
1814 */
1815static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1816{
1817	if (task_contributes_to_load(p))
1818		rq->nr_uninterruptible++;
1819
1820	dequeue_task(rq, p, flags);
1821	dec_nr_running(rq);
1822}
1823
1824#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1825
1826/*
1827 * There are no locks covering percpu hardirq/softirq time.
1828 * They are only modified in account_system_vtime, on corresponding CPU
1829 * with interrupts disabled. So, writes are safe.
1830 * They are read and saved off onto struct rq in update_rq_clock().
1831 * This may result in other CPU reading this CPU's irq time and can
1832 * race with irq/account_system_vtime on this CPU. We would either get old
1833 * or new value with a side effect of accounting a slice of irq time to wrong
1834 * task when irq is in progress while we read rq->clock. That is a worthy
1835 * compromise in place of having locks on each irq in account_system_time.
1836 */
1837static DEFINE_PER_CPU(u64, cpu_hardirq_time);
1838static DEFINE_PER_CPU(u64, cpu_softirq_time);
1839
1840static DEFINE_PER_CPU(u64, irq_start_time);
1841static int sched_clock_irqtime;
1842
1843void enable_sched_clock_irqtime(void)
1844{
1845	sched_clock_irqtime = 1;
1846}
1847
1848void disable_sched_clock_irqtime(void)
1849{
1850	sched_clock_irqtime = 0;
1851}
1852
1853#ifndef CONFIG_64BIT
1854static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
1855
1856static inline void irq_time_write_begin(void)
1857{
1858	__this_cpu_inc(irq_time_seq.sequence);
1859	smp_wmb();
1860}
1861
1862static inline void irq_time_write_end(void)
1863{
1864	smp_wmb();
1865	__this_cpu_inc(irq_time_seq.sequence);
1866}
1867
1868static inline u64 irq_time_read(int cpu)
1869{
1870	u64 irq_time;
1871	unsigned seq;
1872
1873	do {
1874		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
1875		irq_time = per_cpu(cpu_softirq_time, cpu) +
1876			   per_cpu(cpu_hardirq_time, cpu);
1877	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
1878
1879	return irq_time;
1880}
1881#else /* CONFIG_64BIT */
1882static inline void irq_time_write_begin(void)
1883{
1884}
1885
1886static inline void irq_time_write_end(void)
1887{
1888}
1889
1890static inline u64 irq_time_read(int cpu)
1891{
1892	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
1893}
1894#endif /* CONFIG_64BIT */
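/*
 * Editor's note (not part of the original kernel/sched.c): the 32-bit variant
 * above exists because a 64-bit per-cpu counter cannot be read atomically on
 * a 32-bit machine. The writer (account_system_vtime() below) brackets its
 * updates with irq_time_write_begin()/end(), which bump the sequence counter,
 * and the reader retries until it observes the same even sequence before and
 * after -- the standard seqcount pattern, open-coded. On 64-bit the reads are
 * naturally atomic and the helpers collapse to plain loads.
 */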
1895
1896/*
1897 * Called before incrementing preempt_count on {soft,}irq_enter
1898 * and before decrementing preempt_count on {soft,}irq_exit.
1899 */
1900void account_system_vtime(struct task_struct *curr)
1901{
1902	unsigned long flags;
1903	s64 delta;
1904	int cpu;
1905
1906	if (!sched_clock_irqtime)
1907		return;
1908
1909	local_irq_save(flags);
1910
1911	cpu = smp_processor_id();
1912	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
1913	__this_cpu_add(irq_start_time, delta);
1914
1915	irq_time_write_begin();
1916	/*
1917	 * We do not account for softirq time from ksoftirqd here.
1918	 * We want to continue accounting softirq time to ksoftirqd thread
1919	 * in that case, so as not to confuse scheduler with a special task
 1920	 * in that case, so as not to confuse the scheduler with a special task
 1921	 * that does not consume any time but still wants to run.
1922	if (hardirq_count())
1923		__this_cpu_add(cpu_hardirq_time, delta);
1924	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
1925		__this_cpu_add(cpu_softirq_time, delta);
1926
1927	irq_time_write_end();
1928	local_irq_restore(flags);
1929}
1930EXPORT_SYMBOL_GPL(account_system_vtime);
1931
1932#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
1933
1934#ifdef CONFIG_PARAVIRT
1935static inline u64 steal_ticks(u64 steal)
1936{
1937	if (unlikely(steal > NSEC_PER_SEC))
1938		return div_u64(steal, TICK_NSEC);
1939
1940	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
1941}
1942#endif
1943
1944static void update_rq_clock_task(struct rq *rq, s64 delta)
1945{
1946/*
 1947 * In theory, the compiler should just see 0 here, and optimize out the call
1948 * to sched_rt_avg_update. But I don't trust it...
1949 */
1950#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
1951	s64 steal = 0, irq_delta = 0;
1952#endif
1953#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1954	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
1955
1956	/*
1957	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
1958	 * this case when a previous update_rq_clock() happened inside a
1959	 * {soft,}irq region.
1960	 *
1961	 * When this happens, we stop ->clock_task and only update the
1962	 * prev_irq_time stamp to account for the part that fit, so that a next
1963	 * update will consume the rest. This ensures ->clock_task is
1964	 * monotonic.
1965	 *
 1966	 * It does however cause some slight mis-attribution of {soft,}irq
1967	 * time, a more accurate solution would be to update the irq_time using
1968	 * the current rq->clock timestamp, except that would require using
1969	 * atomic ops.
1970	 */
1971	if (irq_delta > delta)
1972		irq_delta = delta;
1973
1974	rq->prev_irq_time += irq_delta;
1975	delta -= irq_delta;
1976#endif
1977#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
1978	if (static_branch((&paravirt_steal_rq_enabled))) {
1979		u64 st;
1980
1981		steal = paravirt_steal_clock(cpu_of(rq));
1982		steal -= rq->prev_steal_time_rq;
1983
1984		if (unlikely(steal > delta))
1985			steal = delta;
1986
1987		st = steal_ticks(steal);
1988		steal = st * TICK_NSEC;
1989
1990		rq->prev_steal_time_rq += steal;
1991
1992		delta -= steal;
1993	}
1994#endif
1995
1996	rq->clock_task += delta;
1997
1998#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
1999	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
2000		sched_rt_avg_update(rq, irq_delta + steal);
2001#endif
2002}
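
/*
 * A small worked example of the clamping above (illustrative numbers only):
 * suppose 100us of wall clock passed since the last update (delta = 100us),
 * but irq_time grew by 150us because the previous update ran inside an irq
 * region. irq_delta is then clamped to 100us, ->clock_task does not advance
 * at all this round, and the remaining 50us of irq time is consumed by the
 * next update, keeping ->clock_task monotonic.
 */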
2003
2004#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2005static int irqtime_account_hi_update(void)
2006{
2007	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2008	unsigned long flags;
2009	u64 latest_ns;
2010	int ret = 0;
2011
2012	local_irq_save(flags);
2013	latest_ns = this_cpu_read(cpu_hardirq_time);
2014	if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
2015		ret = 1;
2016	local_irq_restore(flags);
2017	return ret;
2018}
2019
2020static int irqtime_account_si_update(void)
2021{
2022	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2023	unsigned long flags;
2024	u64 latest_ns;
2025	int ret = 0;
2026
2027	local_irq_save(flags);
2028	latest_ns = this_cpu_read(cpu_softirq_time);
2029	if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
2030		ret = 1;
2031	local_irq_restore(flags);
2032	return ret;
2033}
2034
2035#else /* CONFIG_IRQ_TIME_ACCOUNTING */
2036
2037#define sched_clock_irqtime	(0)
2038
2039#endif
2040
2041#include "sched_idletask.c"
2042#include "sched_fair.c"
2043#include "sched_rt.c"
2044#include "sched_autogroup.c"
2045#include "sched_stoptask.c"
2046#ifdef CONFIG_SCHED_DEBUG
2047# include "sched_debug.c"
2048#endif
2049
2050void sched_set_stop_task(int cpu, struct task_struct *stop)
2051{
2052	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
2053	struct task_struct *old_stop = cpu_rq(cpu)->stop;
2054
2055	if (stop) {
2056		/*
2057		 * Make it appear like a SCHED_FIFO task, which is something
2058		 * userspace knows about and won't get confused by.
2059		 *
2060		 * Also, it will make PI more or less work without too
2061		 * much confusion -- but then, stop work should not
2062		 * rely on PI working anyway.
2063		 */
2064		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
2065
2066		stop->sched_class = &stop_sched_class;
2067	}
2068
2069	cpu_rq(cpu)->stop = stop;
2070
2071	if (old_stop) {
2072		/*
2073		 * Reset it back to a normal scheduling class so that
2074		 * it can die in pieces.
2075		 */
2076		old_stop->sched_class = &rt_sched_class;
2077	}
2078}
2079
2080/*
2081 * __normal_prio - return the priority that is based on the static prio
2082 */
2083static inline int __normal_prio(struct task_struct *p)
2084{
2085	return p->static_prio;
2086}
2087
2088/*
2089 * Calculate the expected normal priority: i.e. priority
2090 * without taking RT-inheritance into account. Might be
2091 * boosted by interactivity modifiers. Changes upon fork,
2092 * setprio syscalls, and whenever the interactivity
2093 * estimator recalculates.
2094 */
2095static inline int normal_prio(struct task_struct *p)
2096{
2097	int prio;
2098
2099	if (task_has_rt_policy(p))
2100		prio = MAX_RT_PRIO-1 - p->rt_priority;
2101	else
2102		prio = __normal_prio(p);
2103	return prio;
2104}
2105
2106/*
2107 * Calculate the current priority, i.e. the priority
2108 * taken into account by the scheduler. This value might
2109 * be boosted by RT tasks, or might be boosted by
2110 * interactivity modifiers. Will be RT if the task got
2111 * RT-boosted. If not then it returns p->normal_prio.
2112 */
2113static int effective_prio(struct task_struct *p)
2114{
2115	p->normal_prio = normal_prio(p);
2116	/*
2117	 * If we are RT tasks or we were boosted to RT priority,
2118	 * keep the priority unchanged. Otherwise, update priority
2119	 * to the normal priority:
2120	 */
2121	if (!rt_prio(p->prio))
2122		return p->normal_prio;
2123	return p->prio;
2124}
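
/*
 * A worked example of the priority ranges above, assuming the usual
 * MAX_RT_PRIO = 100 and MAX_PRIO = 140 (lower value means higher priority):
 *
 *   nice -20 -> static_prio 100        rt_priority  1 -> prio 98
 *   nice   0 -> static_prio 120        rt_priority 50 -> prio 49
 *   nice +19 -> static_prio 139        rt_priority 99 -> prio  0
 *
 * So SCHED_NORMAL tasks always land in [100, 139] and RT tasks in [0, 99];
 * effective_prio() only deviates from normal_prio() while a task is
 * PI-boosted into the RT range.
 */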
2125
2126/**
2127 * task_curr - is this task currently executing on a CPU?
2128 * @p: the task in question.
2129 */
2130inline int task_curr(const struct task_struct *p)
2131{
2132	return cpu_curr(task_cpu(p)) == p;
2133}
2134
2135static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2136				       const struct sched_class *prev_class,
2137				       int oldprio)
2138{
2139	if (prev_class != p->sched_class) {
2140		if (prev_class->switched_from)
2141			prev_class->switched_from(rq, p);
2142		p->sched_class->switched_to(rq, p);
2143	} else if (oldprio != p->prio)
2144		p->sched_class->prio_changed(rq, p, oldprio);
2145}
2146
2147static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
2148{
2149	const struct sched_class *class;
2150
2151	if (p->sched_class == rq->curr->sched_class) {
2152		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
2153	} else {
2154		for_each_class(class) {
2155			if (class == rq->curr->sched_class)
2156				break;
2157			if (class == p->sched_class) {
2158				resched_task(rq->curr);
2159				break;
2160			}
2161		}
2162	}
2163
2164	/*
2165	 * A queue event has occurred, and we're going to schedule.  In
2166	 * this case, we can save a useless back-to-back clock update.
2167	 */
2168	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
2169		rq->skip_clock_update = 1;
2170}
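
/*
 * Note that for_each_class() above walks the classes from highest to lowest
 * priority (stop, rt, fair, idle in this kernel), so the loop reduces to:
 * preempt if the waking task's class outranks the running task's class.
 * E.g. an RT task waking on a CPU currently running a CFS task always ends
 * up in resched_task().
 */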
2171
2172#ifdef CONFIG_SMP
2173/*
2174 * Is this task likely cache-hot:
2175 */
2176static int
2177task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2178{
2179	s64 delta;
2180
2181	if (p->sched_class != &fair_sched_class)
2182		return 0;
2183
2184	if (unlikely(p->policy == SCHED_IDLE))
2185		return 0;
2186
2187	/*
2188	 * Buddy candidates are cache hot:
2189	 */
2190	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
2191			(&p->se == cfs_rq_of(&p->se)->next ||
2192			 &p->se == cfs_rq_of(&p->se)->last))
2193		return 1;
2194
2195	if (sysctl_sched_migration_cost == -1)
2196		return 1;
2197	if (sysctl_sched_migration_cost == 0)
2198		return 0;
2199
2200	delta = now - p->se.exec_start;
2201
2202	return delta < (s64)sysctl_sched_migration_cost;
2203}
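
/*
 * The cut-off above is sysctl_sched_migration_cost (kernel.sched_migration_cost,
 * typically 0.5 ms by default): a fair task that has been off the CPU for
 * longer than that is no longer considered cache hot and becomes a cheaper
 * migration candidate. Setting the sysctl to -1 treats every fair task as
 * hot, 0 treats none as hot.
 */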
2204
2205void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2206{
2207#ifdef CONFIG_SCHED_DEBUG
2208	/*
2209	 * We should never call set_task_cpu() on a blocked task;
2210	 * ttwu() will sort out the placement.
2211	 */
2212	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2213			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
2214
2215#ifdef CONFIG_LOCKDEP
2216	/*
2217	 * The caller should hold either p->pi_lock or rq->lock when changing
2218	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
2219	 *
2220	 * sched_move_task() holds both and thus holding either pins the cgroup,
2221	 * see set_task_rq().
2222	 *
2223	 * Furthermore, all task_rq users should acquire both locks, see
2224	 * task_rq_lock().
2225	 */
2226	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
2227				      lockdep_is_held(&task_rq(p)->lock)));
2228#endif
2229#endif
2230
2231	trace_sched_migrate_task(p, new_cpu);
2232
2233	if (task_cpu(p) != new_cpu) {
2234		p->se.nr_migrations++;
2235		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
2236	}
2237
2238	__set_task_cpu(p, new_cpu);
2239}
2240
2241struct migration_arg {
2242	struct task_struct *task;
2243	int dest_cpu;
2244};
2245
2246static int migration_cpu_stop(void *data);
2247
2248/*
2249 * wait_task_inactive - wait for a thread to unschedule.
2250 *
2251 * If @match_state is nonzero, it's the @p->state value just checked and
2252 * not expected to change.  If it changes, i.e. @p might have woken up,
2253 * then return zero.  When we succeed in waiting for @p to be off its CPU,
2254 * we return a positive number (its total switch count).  If a second call
2255 * a short while later returns the same number, the caller can be sure that
2256 * @p has remained unscheduled the whole time.
2257 *
2258 * The caller must ensure that the task *will* unschedule sometime soon,
2259 * else this function might spin for a *long* time. This function can't
2260 * be called with interrupts off, or it may introduce deadlock with
2261 * smp_call_function() if an IPI is sent by the same process we are
2262 * waiting to become inactive.
2263 */
2264unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2265{
2266	unsigned long flags;
2267	int running, on_rq;
2268	unsigned long ncsw;
2269	struct rq *rq;
2270
2271	for (;;) {
2272		/*
2273		 * We do the initial early heuristics without holding
2274		 * any task-queue locks at all. We'll only try to get
2275		 * the runqueue lock when things look like they will
2276		 * work out!
2277		 */
2278		rq = task_rq(p);
2279
2280		/*
2281		 * If the task is actively running on another CPU
2282		 * still, just relax and busy-wait without holding
2283		 * any locks.
2284		 *
2285		 * NOTE! Since we don't hold any locks, we can't
2286		 * even be sure that "rq" stays the right runqueue!
2287		 * But we don't care, since "task_running()" will
2288		 * return false if the runqueue has changed and p
2289		 * is actually now running somewhere else!
2290		 */
2291		while (task_running(rq, p)) {
2292			if (match_state && unlikely(p->state != match_state))
2293				return 0;
2294			cpu_relax();
2295		}
2296
2297		/*
2298		 * Ok, time to look more closely! We need the rq
2299		 * lock now, to be *sure*. If we're wrong, we'll
2300		 * just go back and repeat.
2301		 */
2302		rq = task_rq_lock(p, &flags);
2303		trace_sched_wait_task(p);
2304		running = task_running(rq, p);
2305		on_rq = p->on_rq;
2306		ncsw = 0;
2307		if (!match_state || p->state == match_state)
2308			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2309		task_rq_unlock(rq, p, &flags);
2310
2311		/*
2312		 * If it changed from the expected state, bail out now.
2313		 */
2314		if (unlikely(!ncsw))
2315			break;
2316
2317		/*
2318		 * Was it really running after all now that we
2319		 * checked with the proper locks actually held?
2320		 *
2321		 * Oops. Go back and try again..
2322		 */
2323		if (unlikely(running)) {
2324			cpu_relax();
2325			continue;
2326		}
2327
2328		/*
2329		 * It's not enough that it's not actively running,
2330		 * it must be off the runqueue _entirely_, and not
2331		 * preempted!
2332		 *
2333		 * So if it was still runnable (but just not actively
2334		 * running right now), it's preempted, and we should
2335		 * yield - it could be a while.
2336		 */
2337		if (unlikely(on_rq)) {
2338			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
2339
2340			set_current_state(TASK_UNINTERRUPTIBLE);
2341			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
2342			continue;
2343		}
2344
2345		/*
2346		 * Ahh, all good. It wasn't running, and it wasn't
2347		 * runnable, which means that it will never become
2348		 * running in the future either. We're all done!
2349		 */
2350		break;
2351	}
2352
2353	return ncsw;
2354}
2355
2356/***
2357 * kick_process - kick a running thread to enter/exit the kernel
2358 * @p: the to-be-kicked thread
2359 *
2360 * Cause a process which is running on another CPU to enter
2361 * kernel-mode, without any delay. (to get signals handled.)
2362 *
2363 * NOTE: this function doesn't have to take the runqueue lock,
2364 * because all it wants to ensure is that the remote task enters
2365 * the kernel. If the IPI races and the task has been migrated
2366 * to another CPU then no harm is done and the purpose has been
2367 * achieved as well.
2368 */
2369void kick_process(struct task_struct *p)
2370{
2371	int cpu;
2372
2373	preempt_disable();
2374	cpu = task_cpu(p);
2375	if ((cpu != smp_processor_id()) && task_curr(p))
2376		smp_send_reschedule(cpu);
2377	preempt_enable();
2378}
2379EXPORT_SYMBOL_GPL(kick_process);
2380#endif /* CONFIG_SMP */
2381
2382#ifdef CONFIG_SMP
2383/*
2384 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
2385 */
2386static int select_fallback_rq(int cpu, struct task_struct *p)
2387{
2388	int dest_cpu;
2389	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2390
2391	/* Look for allowed, online CPU in same node. */
2392	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2393		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2394			return dest_cpu;
2395
2396	/* Any allowed, online CPU? */
2397	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2398	if (dest_cpu < nr_cpu_ids)
2399		return dest_cpu;
2400
2401	/* No more Mr. Nice Guy. */
2402	dest_cpu = cpuset_cpus_allowed_fallback(p);
2403	/*
2404	 * Don't tell them about moving exiting tasks or
2405	 * kernel threads (both mm NULL), since they never
2406	 * leave the kernel.
2407	 */
2408	if (p->mm && printk_ratelimit()) {
2409		printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
2410				task_pid_nr(p), p->comm, cpu);
2411	}
2412
2413	return dest_cpu;
2414}
2415
2416/*
2417 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
2418 */
2419static inline
2420int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
2421{
2422	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
2423
2424	/*
2425	 * In order not to call set_task_cpu() on a blocking task we need
2426	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2427	 * cpu.
2428	 *
2429	 * Since this is common to all placement strategies, this lives here.
2430	 *
2431	 * [ this allows ->select_task_rq() to simply return task_cpu(p) and
2432	 *   not worry about this generic constraint ]
2433	 */
2434	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
2435		     !cpu_online(cpu)))
2436		cpu = select_fallback_rq(task_cpu(p), p);
2437
2438	return cpu;
2439}
2440
2441static void update_avg(u64 *avg, u64 sample)
2442{
2443	s64 diff = sample - *avg;
2444	*avg += diff >> 3;
2445}
2446#endif
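
/*
 * update_avg() above is an integer exponential moving average with weight
 * 1/8: avg' = avg + (sample - avg)/8 = 7/8 * avg + 1/8 * sample. E.g. with
 * avg = 800000 ns and a new sample of 1600000 ns, avg moves to 900000 ns.
 * It is used below for rq->avg_idle in ttwu_do_wakeup().
 */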
2447
2448static void
2449ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
2450{
2451#ifdef CONFIG_SCHEDSTATS
2452	struct rq *rq = this_rq();
2453
2454#ifdef CONFIG_SMP
2455	int this_cpu = smp_processor_id();
2456
2457	if (cpu == this_cpu) {
2458		schedstat_inc(rq, ttwu_local);
2459		schedstat_inc(p, se.statistics.nr_wakeups_local);
2460	} else {
2461		struct sched_domain *sd;
2462
2463		schedstat_inc(p, se.statistics.nr_wakeups_remote);
2464		rcu_read_lock();
2465		for_each_domain(this_cpu, sd) {
2466			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2467				schedstat_inc(sd, ttwu_wake_remote);
2468				break;
2469			}
2470		}
2471		rcu_read_unlock();
2472	}
2473
2474	if (wake_flags & WF_MIGRATED)
2475		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2476
2477#endif /* CONFIG_SMP */
2478
2479	schedstat_inc(rq, ttwu_count);
2480	schedstat_inc(p, se.statistics.nr_wakeups);
2481
2482	if (wake_flags & WF_SYNC)
2483		schedstat_inc(p, se.statistics.nr_wakeups_sync);
2484
2485#endif /* CONFIG_SCHEDSTATS */
2486}
2487
2488static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
2489{
2490	activate_task(rq, p, en_flags);
2491	p->on_rq = 1;
2492
2493	/* if a worker is waking up, notify workqueue */
2494	if (p->flags & PF_WQ_WORKER)
2495		wq_worker_waking_up(p, cpu_of(rq));
2496}
2497
2498/*
2499 * Mark the task runnable and perform wakeup-preemption.
2500 */
2501static void
2502ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
2503{
2504	trace_sched_wakeup(p, true);
2505	check_preempt_curr(rq, p, wake_flags);
2506
2507	p->state = TASK_RUNNING;
2508#ifdef CONFIG_SMP
2509	if (p->sched_class->task_woken)
2510		p->sched_class->task_woken(rq, p);
2511
2512	if (rq->idle_stamp) {
2513		u64 delta = rq->clock - rq->idle_stamp;
2514		u64 max = 2*sysctl_sched_migration_cost;
2515
2516		if (delta > max)
2517			rq->avg_idle = max;
2518		else
2519			update_avg(&rq->avg_idle, delta);
2520		rq->idle_stamp = 0;
2521	}
2522#endif
2523}
2524
2525static void
2526ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
2527{
2528#ifdef CONFIG_SMP
2529	if (p->sched_contributes_to_load)
2530		rq->nr_uninterruptible--;
2531#endif
2532
2533	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
2534	ttwu_do_wakeup(rq, p, wake_flags);
2535}
2536
2537/*
2538 * Called in case the task @p isn't fully descheduled from its runqueue;
2539 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
2540 * since all we need to do is flip p->state to TASK_RUNNING, as
2541 * the task is still ->on_rq.
2542 */
2543static int ttwu_remote(struct task_struct *p, int wake_flags)
2544{
2545	struct rq *rq;
2546	int ret = 0;
2547
2548	rq = __task_rq_lock(p);
2549	if (p->on_rq) {
2550		ttwu_do_wakeup(rq, p, wake_flags);
2551		ret = 1;
2552	}
2553	__task_rq_unlock(rq);
2554
2555	return ret;
2556}
2557
2558#ifdef CONFIG_SMP
2559static void sched_ttwu_do_pending(struct task_struct *list)
2560{
2561	struct rq *rq = this_rq();
2562
2563	raw_spin_lock(&rq->lock);
2564
2565	while (list) {
2566		struct task_struct *p = list;
2567		list = list->wake_entry;
2568		ttwu_do_activate(rq, p, 0);
2569	}
2570
2571	raw_spin_unlock(&rq->lock);
2572}
2573
2574#ifdef CONFIG_HOTPLUG_CPU
2575
2576static void sched_ttwu_pending(void)
2577{
2578	struct rq *rq = this_rq();
2579	struct task_struct *list = xchg(&rq->wake_list, NULL);
2580
2581	if (!list)
2582		return;
2583
2584	sched_ttwu_do_pending(list);
2585}
2586
2587#endif /* CONFIG_HOTPLUG_CPU */
2588
2589void scheduler_ipi(void)
2590{
2591	struct rq *rq = this_rq();
2592	struct task_struct *list = xchg(&rq->wake_list, NULL);
2593
2594	if (!list)
2595		return;
2596
2597	/*
2598	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
2599	 * traditionally all their work was done from the interrupt return
2600	 * path. Now that we actually do some work, we need to make sure
2601	 * we do call them.
2602	 *
2603	 * Some archs already do call them, luckily irq_enter/exit nest
2604	 * properly.
2605	 *
2606	 * Arguably we should visit all archs and update all handlers,
2607	 * however a fair share of IPIs are still resched-only, so this would
2608	 * somewhat pessimize the simple resched case.
2609	 */
2610	irq_enter();
2611	sched_ttwu_do_pending(list);
2612	irq_exit();
2613}
2614
2615static void ttwu_queue_remote(struct task_struct *p, int cpu)
2616{
2617	struct rq *rq = cpu_rq(cpu);
2618	struct task_struct *next = rq->wake_list;
2619
2620	for (;;) {
2621		struct task_struct *old = next;
2622
2623		p->wake_entry = next;
2624		next = cmpxchg(&rq->wake_list, old, p);
2625		if (next == old)
2626			break;
2627	}
2628
2629	if (!next)
2630		smp_send_reschedule(cpu);
2631}
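
/*
 * The loop above is a lock-free single-linked push (cmpxchg retry on the
 * list head); the IPI is sent only when the list went from empty to
 * non-empty, so back-to-back remote wakeups share a single IPI. A minimal
 * userspace sketch of the same pattern, using C11 <stdatomic.h> instead of
 * the kernel's cmpxchg() (illustrative only, not kernel code):
 *
 *	struct node { struct node *next; };
 *	static _Atomic(struct node *) head;
 *
 *	static int push(struct node *n)
 *	{
 *		struct node *old = atomic_load(&head);
 *
 *		do {
 *			n->next = old;
 *		} while (!atomic_compare_exchange_weak(&head, &old, n));
 *
 *		return old == NULL;	(caller sends the IPI only then)
 *	}
 */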
2632
2633#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2634static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
2635{
2636	struct rq *rq;
2637	int ret = 0;
2638
2639	rq = __task_rq_lock(p);
2640	if (p->on_cpu) {
2641		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2642		ttwu_do_wakeup(rq, p, wake_flags);
2643		ret = 1;
2644	}
2645	__task_rq_unlock(rq);
2646
2647	return ret;
2648
2649}
2650#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
2651#endif /* CONFIG_SMP */
2652
2653static void ttwu_queue(struct task_struct *p, int cpu)
2654{
2655	struct rq *rq = cpu_rq(cpu);
2656
2657#if defined(CONFIG_SMP)
2658	if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
2659		sched_clock_cpu(cpu); /* sync clocks x-cpu */
2660		ttwu_queue_remote(p, cpu);
2661		return;
2662	}
2663#endif
2664
2665	raw_spin_lock(&rq->lock);
2666	ttwu_do_activate(rq, p, 0);
2667	raw_spin_unlock(&rq->lock);
2668}
2669
2670/**
2671 * try_to_wake_up - wake up a thread
2672 * @p: the thread to be awakened
2673 * @state: the mask of task states that can be woken
2674 * @wake_flags: wake modifier flags (WF_*)
2675 *
2676 * Put it on the run-queue if it's not already there. The "current"
2677 * thread is always on the run-queue (except when the actual
2678 * re-schedule is in progress), and as such you're allowed to do
2679 * the simpler "current->state = TASK_RUNNING" to mark yourself
2680 * runnable without the overhead of this.
2681 *
2682 * Returns %true if @p was woken up, %false if it was already running
2683 * or @state didn't match @p's state.
2684 */
2685static int
2686try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
2687{
2688	unsigned long flags;
2689	int cpu, success = 0;
2690
2691	smp_wmb();
2692	raw_spin_lock_irqsave(&p->pi_lock, flags);
2693	if (!(p->state & state))
2694		goto out;
2695
2696	success = 1; /* we're going to change ->state */
2697	cpu = task_cpu(p);
2698
2699	if (p->on_rq && ttwu_remote(p, wake_flags))
2700		goto stat;
2701
2702#ifdef CONFIG_SMP
2703	/*
2704	 * If the owning (remote) cpu is still in the middle of schedule() with
2705	 * this task as prev, wait until it's done referencing the task.
2706	 */
2707	while (p->on_cpu) {
2708#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2709		/*
2710		 * In case the architecture enables interrupts in
2711		 * context_switch(), we cannot busy wait, since that
2712		 * would lead to deadlocks when an interrupt hits and
2713		 * tries to wake up @prev. So bail and do a complete
2714		 * remote wakeup.
2715		 */
2716		if (ttwu_activate_remote(p, wake_flags))
2717			goto stat;
2718#else
2719		cpu_relax();
2720#endif
2721	}
2722	/*
2723	 * Pairs with the smp_wmb() in finish_lock_switch().
2724	 */
2725	smp_rmb();
2726
2727	p->sched_contributes_to_load = !!task_contributes_to_load(p);
2728	p->state = TASK_WAKING;
2729
2730	if (p->sched_class->task_waking)
2731		p->sched_class->task_waking(p);
2732
2733	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
2734	if (task_cpu(p) != cpu) {
2735		wake_flags |= WF_MIGRATED;
2736		set_task_cpu(p, cpu);
2737	}
2738#endif /* CONFIG_SMP */
2739
2740	ttwu_queue(p, cpu);
2741stat:
2742	ttwu_stat(p, cpu, wake_flags);
2743out:
2744	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2745
2746	return success;
2747}
2748
2749/**
2750 * try_to_wake_up_local - try to wake up a local task with rq lock held
2751 * @p: the thread to be awakened
2752 *
2753 * Put @p on the run-queue if it's not already there. The caller must
2754 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2755 * the current task.
2756 */
2757static void try_to_wake_up_local(struct task_struct *p)
2758{
2759	struct rq *rq = task_rq(p);
2760
2761	BUG_ON(rq != this_rq());
2762	BUG_ON(p == current);
2763	lockdep_assert_held(&rq->lock);
2764
2765	if (!raw_spin_trylock(&p->pi_lock)) {
2766		raw_spin_unlock(&rq->lock);
2767		raw_spin_lock(&p->pi_lock);
2768		raw_spin_lock(&rq->lock);
2769	}
2770
2771	if (!(p->state & TASK_NORMAL))
2772		goto out;
2773
2774	if (!p->on_rq)
2775		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2776
2777	ttwu_do_wakeup(rq, p, 0);
2778	ttwu_stat(p, smp_processor_id(), 0);
2779out:
2780	raw_spin_unlock(&p->pi_lock);
2781}
2782
2783/**
2784 * wake_up_process - Wake up a specific process
2785 * @p: The process to be woken up.
2786 *
2787 * Attempt to wake up the nominated process and move it to the set of runnable
2788 * processes.  Returns 1 if the process was woken up, 0 if it was already
2789 * running.
2790 *
2791 * It may be assumed that this function implies a write memory barrier before
2792 * changing the task state if and only if any tasks are woken up.
2793 */
2794int wake_up_process(struct task_struct *p)
2795{
2796	return try_to_wake_up(p, TASK_ALL, 0);
2797}
2798EXPORT_SYMBOL(wake_up_process);
2799
2800int wake_up_state(struct task_struct *p, unsigned int state)
2801{
2802	return try_to_wake_up(p, state, 0);
2803}
2804
2805/*
2806 * Perform scheduler related setup for a newly forked process p.
2807 * p is forked by current.
2808 *
2809 * __sched_fork() is basic setup used by init_idle() too:
2810 */
2811static void __sched_fork(struct task_struct *p)
2812{
2813	p->on_rq			= 0;
2814
2815	p->se.on_rq			= 0;
2816	p->se.exec_start		= 0;
2817	p->se.sum_exec_runtime		= 0;
2818	p->se.prev_sum_exec_runtime	= 0;
2819	p->se.nr_migrations		= 0;
2820	p->se.vruntime			= 0;
2821	INIT_LIST_HEAD(&p->se.group_node);
2822
2823#ifdef CONFIG_SCHEDSTATS
2824	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
2825#endif
2826
2827	INIT_LIST_HEAD(&p->rt.run_list);
2828
2829#ifdef CONFIG_PREEMPT_NOTIFIERS
2830	INIT_HLIST_HEAD(&p->preempt_notifiers);
2831#endif
2832}
2833
2834/*
2835 * fork()/clone()-time setup:
2836 */
2837void sched_fork(struct task_struct *p)
2838{
2839	unsigned long flags;
2840	int cpu = get_cpu();
2841
2842	__sched_fork(p);
2843	/*
2844	 * We mark the process as running here. This guarantees that
2845	 * nobody will actually run it, and a signal or other external
2846	 * event cannot wake it up and insert it on the runqueue either.
2847	 */
2848	p->state = TASK_RUNNING;
2849
2850	/*
2851	 * Revert to default priority/policy on fork if requested.
2852	 */
2853	if (unlikely(p->sched_reset_on_fork)) {
2854		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
2855			p->policy = SCHED_NORMAL;
2856			p->normal_prio = p->static_prio;
2857		}
2858
2859		if (PRIO_TO_NICE(p->static_prio) < 0) {
2860			p->static_prio = NICE_TO_PRIO(0);
2861			p->normal_prio = p->static_prio;
2862			set_load_weight(p);
2863		}
2864
2865		/*
2866		 * We don't need the reset flag anymore after the fork. It has
2867		 * fulfilled its duty:
2868		 */
2869		p->sched_reset_on_fork = 0;
2870	}
2871
2872	/*
2873	 * Make sure we do not leak PI boosting priority to the child.
2874	 */
2875	p->prio = current->normal_prio;
2876
2877	if (!rt_prio(p->prio))
2878		p->sched_class = &fair_sched_class;
2879
2880	if (p->sched_class->task_fork)
2881		p->sched_class->task_fork(p);
2882
2883	/*
2884	 * The child is not yet in the pid-hash so no cgroup attach races,
2885	 * and the cgroup is pinned to this child because cgroup_fork()
2886	 * is run before sched_fork().
2887	 *
2888	 * Silence PROVE_RCU.
2889	 */
2890	raw_spin_lock_irqsave(&p->pi_lock, flags);
2891	set_task_cpu(p, cpu);
2892	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2893
2894#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
2895	if (likely(sched_info_on()))
2896		memset(&p->sched_info, 0, sizeof(p->sched_info));
2897#endif
2898#if defined(CONFIG_SMP)
2899	p->on_cpu = 0;
2900#endif
2901#ifdef CONFIG_PREEMPT_COUNT
2902	/* Want to start with kernel preemption disabled. */
2903	task_thread_info(p)->preempt_count = 1;
2904#endif
2905#ifdef CONFIG_SMP
2906	plist_node_init(&p->pushable_tasks, MAX_PRIO);
2907#endif
2908
2909	put_cpu();
2910}
2911
2912/*
2913 * wake_up_new_task - wake up a newly created task for the first time.
2914 *
2915 * This function will do some initial scheduler statistics housekeeping
2916 * that must be done for every newly created context, then puts the task
2917 * on the runqueue and wakes it.
2918 */
2919void wake_up_new_task(struct task_struct *p)
2920{
2921	unsigned long flags;
2922	struct rq *rq;
2923
2924	raw_spin_lock_irqsave(&p->pi_lock, flags);
2925#ifdef CONFIG_SMP
2926	/*
2927	 * Fork balancing, do it here and not earlier because:
2928	 *  - cpus_allowed can change in the fork path
2929	 *  - any previously selected cpu might disappear through hotplug
2930	 */
2931	set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
2932#endif
2933
2934	rq = __task_rq_lock(p);
2935	activate_task(rq, p, 0);
2936	p->on_rq = 1;
2937	trace_sched_wakeup_new(p, true);
2938	check_preempt_curr(rq, p, WF_FORK);
2939#ifdef CONFIG_SMP
2940	if (p->sched_class->task_woken)
2941		p->sched_class->task_woken(rq, p);
2942#endif
2943	task_rq_unlock(rq, p, &flags);
2944}
2945
2946#ifdef CONFIG_PREEMPT_NOTIFIERS
2947
2948/**
2949 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2950 * @notifier: notifier struct to register
2951 */
2952void preempt_notifier_register(struct preempt_notifier *notifier)
2953{
2954	hlist_add_head(&notifier->link, &current->preempt_notifiers);
2955}
2956EXPORT_SYMBOL_GPL(preempt_notifier_register);
2957
2958/**
2959 * preempt_notifier_unregister - no longer interested in preemption notifications
2960 * @notifier: notifier struct to unregister
2961 *
2962 * This is safe to call from within a preemption notifier.
2963 */
2964void preempt_notifier_unregister(struct preempt_notifier *notifier)
2965{
2966	hlist_del(&notifier->link);
2967}
2968EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2969
2970static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2971{
2972	struct preempt_notifier *notifier;
2973	struct hlist_node *node;
2974
2975	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2976		notifier->ops->sched_in(notifier, raw_smp_processor_id());
2977}
2978
2979static void
2980fire_sched_out_preempt_notifiers(struct task_struct *curr,
2981				 struct task_struct *next)
2982{
2983	struct preempt_notifier *notifier;
2984	struct hlist_node *node;
2985
2986	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2987		notifier->ops->sched_out(notifier, next);
2988}
2989
2990#else /* !CONFIG_PREEMPT_NOTIFIERS */
2991
2992static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2993{
2994}
2995
2996static void
2997fire_sched_out_preempt_notifiers(struct task_struct *curr,
2998				 struct task_struct *next)
2999{
3000}
3001
3002#endif /* CONFIG_PREEMPT_NOTIFIERS */
3003
3004/**
3005 * prepare_task_switch - prepare to switch tasks
3006 * @rq: the runqueue preparing to switch
3007 * @prev: the current task that is being switched out
3008 * @next: the task we are going to switch to.
3009 *
3010 * This is called with the rq lock held and interrupts off. It must
3011 * be paired with a subsequent finish_task_switch after the context
3012 * switch.
3013 *
3014 * prepare_task_switch sets up locking and calls architecture specific
3015 * hooks.
3016 */
3017static inline void
3018prepare_task_switch(struct rq *rq, struct task_struct *prev,
3019		    struct task_struct *next)
3020{
3021	sched_info_switch(prev, next);
3022	perf_event_task_sched_out(prev, next);
3023	fire_sched_out_preempt_notifiers(prev, next);
3024	prepare_lock_switch(rq, next);
3025	prepare_arch_switch(next);
3026	trace_sched_switch(prev, next);
3027}
3028
3029/**
3030 * finish_task_switch - clean up after a task-switch
3031 * @rq: runqueue associated with task-switch
3032 * @prev: the thread we just switched away from.
3033 *
3034 * finish_task_switch must be called after the context switch, paired
3035 * with a prepare_task_switch call before the context switch.
3036 * finish_task_switch will reconcile locking set up by prepare_task_switch,
3037 * and do any other architecture-specific cleanup actions.
3038 *
3039 * Note that we may have delayed dropping an mm in context_switch(). If
3040 * so, we finish that here outside of the runqueue lock. (Doing it
3041 * with the lock held can cause deadlocks; see schedule() for
3042 * details.)
3043 */
3044static void finish_task_switch(struct rq *rq, struct task_struct *prev)
3045	__releases(rq->lock)
3046{
3047	struct mm_struct *mm = rq->prev_mm;
3048	long prev_state;
3049
3050	rq->prev_mm = NULL;
3051
3052	/*
3053	 * A task struct has one reference for the use as "current".
3054	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
3055	 * schedule one last time. The schedule call will never return, and
3056	 * the scheduled task must drop that reference.
3057	 * The test for TASK_DEAD must occur while the runqueue locks are
3058	 * still held, otherwise prev could be scheduled on another cpu, die
3059	 * there before we look at prev->state, and then the reference would
3060	 * be dropped twice.
3061	 *		Manfred Spraul <manfred@colorfullife.com>
3062	 */
3063	prev_state = prev->state;
3064	finish_arch_switch(prev);
3065#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
3066	local_irq_disable();
3067#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
3068	perf_event_task_sched_in(prev, current);
3069#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
3070	local_irq_enable();
3071#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
3072	finish_lock_switch(rq, prev);
3073
3074	fire_sched_in_preempt_notifiers(current);
3075	if (mm)
3076		mmdrop(mm);
3077	if (unlikely(prev_state == TASK_DEAD)) {
3078		/*
3079		 * Remove function-return probe instances associated with this
3080		 * task and put them back on the free list.
3081		 */
3082		kprobe_flush_task(prev);
3083		put_task_struct(prev);
3084	}
3085}
3086
3087#ifdef CONFIG_SMP
3088
3089/* assumes rq->lock is held */
3090static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
3091{
3092	if (prev->sched_class->pre_schedule)
3093		prev->sched_class->pre_schedule(rq, prev);
3094}
3095
3096/* rq->lock is NOT held, but preemption is disabled */
3097static inline void post_schedule(struct rq *rq)
3098{
3099	if (rq->post_schedule) {
3100		unsigned long flags;
3101
3102		raw_spin_lock_irqsave(&rq->lock, flags);
3103		if (rq->curr->sched_class->post_schedule)
3104			rq->curr->sched_class->post_schedule(rq);
3105		raw_spin_unlock_irqrestore(&rq->lock, flags);
3106
3107		rq->post_schedule = 0;
3108	}
3109}
3110
3111#else
3112
3113static inline void pre_schedule(struct rq *rq, struct task_struct *p)
3114{
3115}
3116
3117static inline void post_schedule(struct rq *rq)
3118{
3119}
3120
3121#endif
3122
3123/**
3124 * schedule_tail - first thing a freshly forked thread must call.
3125 * @prev: the thread we just switched away from.
3126 */
3127asmlinkage void schedule_tail(struct task_struct *prev)
3128	__releases(rq->lock)
3129{
3130	struct rq *rq = this_rq();
3131
3132	finish_task_switch(rq, prev);
3133
3134	/*
3135	 * FIXME: do we need to worry about rq being invalidated by the
3136	 * task_switch?
3137	 */
3138	post_schedule(rq);
3139
3140#ifdef __ARCH_WANT_UNLOCKED_CTXSW
3141	/* In this case, finish_task_switch does not reenable preemption */
3142	preempt_enable();
3143#endif
3144	if (current->set_child_tid)
3145		put_user(task_pid_vnr(current), current->set_child_tid);
3146}
3147
3148/*
3149 * context_switch - switch to the new MM and the new
3150 * thread's register state.
3151 */
3152static inline void
3153context_switch(struct rq *rq, struct task_struct *prev,
3154	       struct task_struct *next)
3155{
3156	struct mm_struct *mm, *oldmm;
3157
3158	prepare_task_switch(rq, prev, next);
3159
3160	mm = next->mm;
3161	oldmm = prev->active_mm;
3162	/*
3163	 * For paravirt, this is coupled with an exit in switch_to to
3164	 * combine the page table reload and the switch backend into
3165	 * one hypercall.
3166	 */
3167	arch_start_context_switch(prev);
3168
3169	if (!mm) {
3170		next->active_mm = oldmm;
3171		atomic_inc(&oldmm->mm_count);
3172		enter_lazy_tlb(oldmm, next);
3173	} else
3174		switch_mm(oldmm, mm, next);
3175
3176	if (!prev->mm) {
3177		prev->active_mm = NULL;
3178		rq->prev_mm = oldmm;
3179	}
3180	/*
3181	 * The runqueue lock will be released by the next
3182	 * task (which is an invalid locking op, but in the case
3183	 * of the scheduler it's an obvious special case), so we
3184	 * do an early lockdep release here:
3185	 */
3186#ifndef __ARCH_WANT_UNLOCKED_CTXSW
3187	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
3188#endif
3189
3190	/* Here we just switch the register state and the stack. */
3191	switch_to(prev, next, prev);
3192
3193	barrier();
3194	/*
3195	 * this_rq must be evaluated again because prev may have moved
3196	 * CPUs since it called schedule(), thus the 'rq' on its stack
3197	 * frame will be invalid.
3198	 */
3199	finish_task_switch(this_rq(), prev);
3200}
3201
3202/*
3203 * nr_running, nr_uninterruptible and nr_context_switches:
3204 *
3205 * externally visible scheduler statistics: current number of runnable
3206 * threads, current number of uninterruptible-sleeping threads, total
3207 * number of context switches performed since bootup.
3208 */
3209unsigned long nr_running(void)
3210{
3211	unsigned long i, sum = 0;
3212
3213	for_each_online_cpu(i)
3214		sum += cpu_rq(i)->nr_running;
3215
3216	return sum;
3217}
3218
3219unsigned long nr_uninterruptible(void)
3220{
3221	unsigned long i, sum = 0;
3222
3223	for_each_possible_cpu(i)
3224		sum += cpu_rq(i)->nr_uninterruptible;
3225
3226	/*
3227	 * Since we read the counters locklessly, the sum might be slightly
3228	 * inaccurate. Do not allow it to go below zero though:
3229	 */
3230	if (unlikely((long)sum < 0))
3231		sum = 0;
3232
3233	return sum;
3234}
3235
3236unsigned long long nr_context_switches(void)
3237{
3238	int i;
3239	unsigned long long sum = 0;
3240
3241	for_each_possible_cpu(i)
3242		sum += cpu_rq(i)->nr_switches;
3243
3244	return sum;
3245}
3246
3247unsigned long nr_iowait(void)
3248{
3249	unsigned long i, sum = 0;
3250
3251	for_each_possible_cpu(i)
3252		sum += atomic_read(&cpu_rq(i)->nr_iowait);
3253
3254	return sum;
3255}
3256
3257unsigned long nr_iowait_cpu(int cpu)
3258{
3259	struct rq *this = cpu_rq(cpu);
3260	return atomic_read(&this->nr_iowait);
3261}
3262
3263unsigned long this_cpu_load(void)
3264{
3265	struct rq *this = this_rq();
3266	return this->cpu_load[0];
3267}
3268
3269
3270/* Variables and functions for calc_load */
3271static atomic_long_t calc_load_tasks;
3272static unsigned long calc_load_update;
3273unsigned long avenrun[3];
3274EXPORT_SYMBOL(avenrun);
3275
3276static long calc_load_fold_active(struct rq *this_rq)
3277{
3278	long nr_active, delta = 0;
3279
3280	nr_active = this_rq->nr_running;
3281	nr_active += (long) this_rq->nr_uninterruptible;
3282
3283	if (nr_active != this_rq->calc_load_active) {
3284		delta = nr_active - this_rq->calc_load_active;
3285		this_rq->calc_load_active = nr_active;
3286	}
3287
3288	return delta;
3289}
3290
3291static unsigned long
3292calc_load(unsigned long load, unsigned long exp, unsigned long active)
3293{
3294	load *= exp;
3295	load += active * (FIXED_1 - exp);
3296	load += 1UL << (FSHIFT - 1);
3297	return load >> FSHIFT;
3298}
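
/*
 * calc_load() is the classic fixed-point load-average step
 *	load' = load * e + active * (1 - e)
 * done in 11-bit fixed point (FIXED_1 = 2048) with rounding. A worked
 * example, assuming the usual constants from <linux/sched.h> (EXP_1 = 1884,
 * i.e. exp(-5s/1min) in fixed point): starting from avenrun[0] = 0 with
 * 3 runnable tasks, active = 3 * 2048 = 6144 and
 *	load' = (0 * 1884 + 6144 * (2048 - 1884) + 1024) >> 11 = 492,
 * so the 1-minute average rises to 492/2048 ~= 0.24 after one 5-second
 * LOAD_FREQ interval.
 */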
3299
3300#ifdef CONFIG_NO_HZ
3301/*
3302 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
3303 *
3304 * When making the ILB scale, we should try to pull this in as well.
3305 */
3306static atomic_long_t calc_load_tasks_idle;
3307
3308static void calc_load_account_idle(struct rq *this_rq)
3309{
3310	long delta;
3311
3312	delta = calc_load_fold_active(this_rq);
3313	if (delta)
3314		atomic_long_add(delta, &calc_load_tasks_idle);
3315}
3316
3317static long calc_load_fold_idle(void)
3318{
3319	long delta = 0;
3320
3321	/*
3322	 * It's got a race; we don't care...
3323	 */
3324	if (atomic_long_read(&calc_load_tasks_idle))
3325		delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
3326
3327	return delta;
3328}
3329
3330/**
3331 * fixed_power_int - compute: x^n, in O(log n) time
3332 *
3333 * @x:         base of the power
3334 * @frac_bits: fractional bits of @x
3335 * @n:         power to raise @x to.
3336 *
3337 * By exploiting the relation between the definition of the natural power
3338 * function: x^n := x*x*...*x (x multiplied by itself n times), and
3339 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
3340 * (where: n_i \elem {0, 1}, the binary vector representing n),
3341 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
3342 * of course trivially computable in O(log_2 n), the length of our binary
3343 * vector.
3344 */
3345static unsigned long
3346fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
3347{
3348	unsigned long result = 1UL << frac_bits;
3349
3350	if (n) for (;;) {
3351		if (n & 1) {
3352			result *= x;
3353			result += 1UL << (frac_bits - 1);
3354			result >>= frac_bits;
3355		}
3356		n >>= 1;
3357		if (!n)
3358			break;
3359		x *= x;
3360		x += 1UL << (frac_bits - 1);
3361		x >>= frac_bits;
3362	}
3363
3364	return result;
3365}
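
/*
 * fixed_power_int() is plain exponentiation by squaring in fixed point: the
 * bits of n are consumed LSB first, x is squared at every step and folded
 * into the result only for the 1 bits. E.g. for n = 5 (binary 101) it
 * computes result = x * x^4 with two squarings and two multiplies into the
 * result, instead of the four multiplications of the naive definition.
 */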
3366
3367/*
3368 * a1 = a0 * e + a * (1 - e)
3369 *
3370 * a2 = a1 * e + a * (1 - e)
3371 *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
3372 *    = a0 * e^2 + a * (1 - e) * (1 + e)
3373 *
3374 * a3 = a2 * e + a * (1 - e)
3375 *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
3376 *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
3377 *
3378 *  ...
3379 *
3380 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
3381 *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
3382 *    = a0 * e^n + a * (1 - e^n)
3383 *
3384 * [1] application of the geometric series:
3385 *
3386 *              n         1 - x^(n+1)
3387 *     S_n := \Sum x^i = -------------
3388 *             i=0          1 - x
3389 */
3390static unsigned long
3391calc_load_n(unsigned long load, unsigned long exp,
3392	    unsigned long active, unsigned int n)
3393{
3394
3395	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
3396}
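
/*
 * Worked example: after being completely idle (active == 0) for n = 3
 * missed LOAD_FREQ cycles (~15 seconds), the 1-minute average decays by
 * (EXP_1/FIXED_1)^3 = (1884/2048)^3 ~= 0.78 in a single call, so a load of
 * exactly 1.00 (avenrun[0] = 2048) drops to roughly 0.78 without iterating
 * calc_load() three times.
 */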
3397
3398/*
3399 * NO_HZ can leave us missing all per-cpu ticks calling
3400 * calc_load_account_active(), but since an idle CPU folds its delta into
3401 * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
3402 * in the pending idle delta if our idle period crossed a load cycle boundary.
3403 *
3404 * Once we've updated the global active value, we need to apply the exponential
3405 * weights adjusted to the number of cycles missed.
3406 */
3407static void calc_global_nohz(unsigned long ticks)
3408{
3409	long delta, active, n;
3410
3411	if (time_before(jiffies, calc_load_update))
3412		return;
3413
3414	/*
3415	 * If we crossed a calc_load_update boundary, make sure to fold
3416	 * any pending idle changes; the respective CPUs might have
3417	 * missed the tick-driven calc_load_account_active() update
3418	 * due to NO_HZ.
3419	 */
3420	delta = calc_load_fold_idle();
3421	if (delta)
3422		atomic_long_add(delta, &calc_load_tasks);
3423
3424	/*
3425	 * If we were idle for multiple load cycles, apply them.
3426	 */
3427	if (ticks >= LOAD_FREQ) {
3428		n = ticks / LOAD_FREQ;
3429
3430		active = atomic_long_read(&calc_load_tasks);
3431		active = active > 0 ? active * FIXED_1 : 0;
3432
3433		avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
3434		avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
3435		avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
3436
3437		calc_load_update += n * LOAD_FREQ;
3438	}
3439
3440	/*
3441	 * It's possible the remainder of the above division also crosses
3442	 * a LOAD_FREQ period; the regular check in calc_global_load(),
3443	 * which comes after this, will take care of that.
3444	 *
3445	 * For example, suppose we are 11 ticks before a cycle completion
3446	 * and sleep for 4*LOAD_FREQ + 22 ticks; then the above code will
3447	 * age us 4 cycles, and the check in calc_global_load() will
3448	 * pick up the final one.
3449	 */
3450}
3451#else
3452static void calc_load_account_idle(struct rq *this_rq)
3453{
3454}
3455
3456static inline long calc_load_fold_idle(void)
3457{
3458	return 0;
3459}
3460
3461static void calc_global_nohz(unsigned long ticks)
3462{
3463}
3464#endif
3465
3466/**
3467 * get_avenrun - get the load average array
3468 * @loads:	pointer to dest load array
3469 * @offset:	offset to add
3470 * @shift:	shift count to shift the result left
3471 *
3472 * These values are estimates at best, so no need for locking.
3473 */
3474void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
3475{
3476	loads[0] = (avenrun[0] + offset) << shift;
3477	loads[1] = (avenrun[1] + offset) << shift;
3478	loads[2] = (avenrun[2] + offset) << shift;
3479}
3480
3481/*
3482 * calc_global_load - update the avenrun load estimates 10 ticks after the
3483 * CPUs have updated calc_load_tasks.
3484 */
3485void calc_global_load(unsigned long ticks)
3486{
3487	long active;
3488
3489	calc_global_nohz(ticks);
3490
3491	if (time_before(jiffies, calc_load_update + 10))
3492		return;
3493
3494	active = atomic_long_read(&calc_load_tasks);
3495	active = active > 0 ? active * FIXED_1 : 0;
3496
3497	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
3498	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
3499	avenrun[2] = calc_load(avenrun[2], EXP_15, active);
3500
3501	calc_load_update += LOAD_FREQ;
3502}
3503
3504/*
3505 * Called from update_cpu_load() to periodically update this CPU's
3506 * active count.
3507 */
3508static void calc_load_account_active(struct rq *this_rq)
3509{
3510	long delta;
3511
3512	if (time_before(jiffies, this_rq->calc_load_update))
3513		return;
3514
3515	delta  = calc_load_fold_active(this_rq);
3516	delta += calc_load_fold_idle();
3517	if (delta)
3518		atomic_long_add(delta, &calc_load_tasks);
3519
3520	this_rq->calc_load_update += LOAD_FREQ;
3521}
3522
3523/*
3524 * The exact cpuload at various idx values, calculated at every tick, would be
3525 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
3526 *
3527 * If a cpu misses updates for n-1 ticks (as it was idle) and the update gets
3528 * called on the nth tick when the cpu may be busy, then we have:
3529 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3530 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
3531 *
3532 * decay_load_missed() below does efficient calculation of
3533 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3534 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
3535 *
3536 * The calculation is approximated on a 128 point scale.
3537 * degrade_zero_ticks is the number of ticks after which load at any
3538 * particular idx is approximated to be zero.
3539 * degrade_factor is a precomputed table, a row for each load idx.
3540 * Each column corresponds to degradation factor for a power of two ticks,
3541 * based on 128 point scale.
3542 * Example:
3543 * row 2, col 3 (=12) says that the degradation at load idx 2 after
3544 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
3545 *
3546 * With this power of 2 load factors, we can degrade the load n times
3547 * by looking at 1 bits in n and doing as many mult/shift instead of
3548 * n mult/shifts needed by the exact degradation.
3549 */
3550#define DEGRADE_SHIFT		7
3551static const unsigned char
3552		degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
3553static const unsigned char
3554		degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
3555					{0, 0, 0, 0, 0, 0, 0, 0},
3556					{64, 32, 8, 0, 0, 0, 0, 0},
3557					{96, 72, 40, 12, 1, 0, 0},
3558					{112, 98, 75, 43, 15, 1, 0},
3559					{120, 112, 98, 76, 45, 16, 2} };
3560
3561/*
3562 * Update cpu_load for any missed ticks due to tickless idle. The backlog
3563 * can only build up while the CPU is idle, so we just decay the old load
3564 * without adding any new load.
3565 */
3566static unsigned long
3567decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
3568{
3569	int j = 0;
3570
3571	if (!missed_updates)
3572		return load;
3573
3574	if (missed_updates >= degrade_zero_ticks[idx])
3575		return 0;
3576
3577	if (idx == 1)
3578		return load >> missed_updates;
3579
3580	while (missed_updates) {
3581		if (missed_updates % 2)
3582			load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
3583
3584		missed_updates >>= 1;
3585		j++;
3586	}
3587	return load;
3588}
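
/*
 * Worked example: with idx = 2 and missed_updates = 5 (binary 101), the
 * loop above applies columns 0 and 2 of row 2: load = load * 96/128 * 40/128.
 * A load of 128 thus decays to 128 * 96/128 = 96 and then 96 * 40/128 = 30,
 * a close approximation of the exact (3/4)^5 * 128 ~= 30.4.
 */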
3589
3590/*
3591 * Update rq->cpu_load[] statistics. This function is usually called every
3592 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
3593 * every tick. We fix it up based on jiffies.
3594 */
3595static void update_cpu_load(struct rq *this_rq)
3596{
3597	unsigned long this_load = this_rq->load.weight;
3598	unsigned long curr_jiffies = jiffies;
3599	unsigned long pending_updates;
3600	int i, scale;
3601
3602	this_rq->nr_load_updates++;
3603
3604	/* Avoid repeated calls on the same jiffy when moving in and out of idle */
3605	if (curr_jiffies == this_rq->last_load_update_tick)
3606		return;
3607
3608	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
3609	this_rq->last_load_update_tick = curr_jiffies;
3610
3611	/* Update our load: */
3612	this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
3613	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
3614		unsigned long old_load, new_load;
3615
3616		/* scale is effectively 1 << i now, and >> i divides by scale */
3617
3618		old_load = this_rq->cpu_load[i];
3619		old_load = decay_load_missed(old_load, pending_updates - 1, i);
3620		new_load = this_load;
3621		/*
3622		 * Round up the averaging division if load is increasing. This
3623		 * prevents us from getting stuck on 9 if the load is 10, for
3624		 * example.
3625		 */
3626		if (new_load > old_load)
3627			new_load += scale - 1;
3628
3629		this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
3630	}
3631
3632	sched_avg_update(this_rq);
3633}
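
/*
 * Per index the update above is
 *	cpu_load[i]' = ((2^i - 1) * cpu_load[i] + this_load) / 2^i
 * so higher indexes react more slowly. Worked example for i = 2 (scale = 4):
 * old load 100, new weight 200. Because the load is rising, new_load is
 * rounded up to 203 and cpu_load[2] becomes (100 * 3 + 203) >> 2 = 125.
 */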
3634
3635static void update_cpu_load_active(struct rq *this_rq)
3636{
3637	update_cpu_load(this_rq);
3638
3639	calc_load_account_active(this_rq);
3640}
3641
3642#ifdef CONFIG_SMP
3643
3644/*
3645 * sched_exec - execve() is a valuable balancing opportunity, because at
3646 * this point the task has the smallest effective memory and cache footprint.
3647 */
3648void sched_exec(void)
3649{
3650	struct task_struct *p = current;
3651	unsigned long flags;
3652	int dest_cpu;
3653
3654	raw_spin_lock_irqsave(&p->pi_lock, flags);
3655	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
3656	if (dest_cpu == smp_processor_id())
3657		goto unlock;
3658
3659	if (likely(cpu_active(dest_cpu))) {
3660		struct migration_arg arg = { p, dest_cpu };
3661
3662		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3663		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
3664		return;
3665	}
3666unlock:
3667	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3668}
3669
3670#endif
3671
3672DEFINE_PER_CPU(struct kernel_stat, kstat);
3673
3674EXPORT_PER_CPU_SYMBOL(kstat);
3675
3676/*
3677 * Return any ns on the sched_clock that have not yet been accounted to
3678 * @p, in case that task is currently running.
3679 *
3680 * Called with task_rq_lock() held on @rq.
3681 */
3682static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
3683{
3684	u64 ns = 0;
3685
3686	if (task_current(rq, p)) {
3687		update_rq_clock(rq);
3688		ns = rq->clock_task - p->se.exec_start;
3689		if ((s64)ns < 0)
3690			ns = 0;
3691	}
3692
3693	return ns;
3694}
3695
3696unsigned long long task_delta_exec(struct task_struct *p)
3697{
3698	unsigned long flags;
3699	struct rq *rq;
3700	u64 ns = 0;
3701
3702	rq = task_rq_lock(p, &flags);
3703	ns = do_task_delta_exec(p, rq);
3704	task_rq_unlock(rq, p, &flags);
3705
3706	return ns;
3707}
3708
3709/*
3710 * Return accounted runtime for the task.
3711 * In case the task is currently running, return the runtime plus the task's
3712 * pending runtime that has not been accounted yet.
3713 */
3714unsigned long long task_sched_runtime(struct task_struct *p)
3715{
3716	unsigned long flags;
3717	struct rq *rq;
3718	u64 ns = 0;
3719
3720	rq = task_rq_lock(p, &flags);
3721	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
3722	task_rq_unlock(rq, p, &flags);
3723
3724	return ns;
3725}
3726
3727/*
3728 * Account user cpu time to a process.
3729 * @p: the process that the cpu time gets accounted to
3730 * @cputime: the cpu time spent in user space since the last update
3731 * @cputime_scaled: cputime scaled by cpu frequency
3732 */
3733void account_user_time(struct task_struct *p, cputime_t cputime,
3734		       cputime_t cputime_scaled)
3735{
3736	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3737	cputime64_t tmp;
3738
3739	/* Add user time to process. */
3740	p->utime = cputime_add(p->utime, cputime);
3741	p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
3742	account_group_user_time(p, cputime);
3743
3744	/* Add user time to cpustat. */
3745	tmp = cputime_to_cputime64(cputime);
3746	if (TASK_NICE(p) > 0)
3747		cpustat->nice = cputime64_add(cpustat->nice, tmp);
3748	else
3749		cpustat->user = cputime64_add(cpustat->user, tmp);
3750
3751	cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
3752	/* Account for user time used */
3753	acct_update_integrals(p);
3754}
3755
3756/*
3757 * Account guest cpu time to a process.
3758 * @p: the process that the cpu time gets accounted to
3759 * @cputime: the cpu time spent in virtual machine since the last update
3760 * @cputime_scaled: cputime scaled by cpu frequency
3761 */
3762static void account_guest_time(struct task_struct *p, cputime_t cputime,
3763			       cputime_t cputime_scaled)
3764{
3765	cputime64_t tmp;
3766	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3767
3768	tmp = cputime_to_cputime64(cputime);
3769
3770	/* Add guest time to process. */
3771	p->utime = cputime_add(p->utime, cputime);
3772	p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
3773	account_group_user_time(p, cputime);
3774	p->gtime = cputime_add(p->gtime, cputime);
3775
3776	/* Add guest time to cpustat. */
3777	if (TASK_NICE(p) > 0) {
3778		cpustat->nice = cputime64_add(cpustat->nice, tmp);
3779		cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
3780	} else {
3781		cpustat->user = cputime64_add(cpustat->user, tmp);
3782		cpustat->guest = cputime64_add(cpustat->guest, tmp);
3783	}
3784}
3785
3786/*
3787 * Account system cpu time to a process and desired cpustat field
3788 * @p: the process that the cpu time gets accounted to
3789 * @cputime: the cpu time spent in kernel space since the last update
3790 * @cputime_scaled: cputime scaled by cpu frequency
3791 * @target_cputime64: pointer to cpustat field that has to be updated
3792 */
3793static inline
3794void __account_system_time(struct task_struct *p, cputime_t cputime,
3795			cputime_t cputime_scaled, cputime64_t *target_cputime64)
3796{
3797	cputime64_t tmp = cputime_to_cputime64(cputime);
3798
3799	/* Add system time to process. */
3800	p->stime = cputime_add(p->stime, cputime);
3801	p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
3802	account_group_system_time(p, cputime);
3803
3804	/* Add system time to cpustat. */
3805	*target_cputime64 = cputime64_add(*target_cputime64, tmp);
3806	cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
3807
3808	/* Account for system time used */
3809	acct_update_integrals(p);
3810}
3811
3812/*
3813 * Account system cpu time to a process.
3814 * @p: the process that the cpu time gets accounted to
3815 * @hardirq_offset: the offset to subtract from hardirq_count()
3816 * @cputime: the cpu time spent in kernel space since the last update
3817 * @cputime_scaled: cputime scaled by cpu frequency
3818 */
3819void account_system_time(struct task_struct *p, int hardirq_offset,
3820			 cputime_t cputime, cputime_t cputime_scaled)
3821{
3822	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3823	cputime64_t *target_cputime64;
3824
3825	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
3826		account_guest_time(p, cputime, cputime_scaled);
3827		return;
3828	}
3829
3830	if (hardirq_count() - hardirq_offset)
3831		target_cputime64 = &cpustat->irq;
3832	else if (in_serving_softirq())
3833		target_cputime64 = &cpustat->softirq;
3834	else
3835		target_cputime64 = &cpustat->system;
3836
3837	__account_system_time(p, cputime, cputime_scaled, target_cputime64);
3838}
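
/*
 * Example of the demultiplexing above: a tick landing while the cpu is
 * serving a softirq on behalf of a normal task is charged to
 * cpustat->softirq; the same tick in plain syscall context goes to
 * cpustat->system, and for a vcpu thread (PF_VCPU) outside hardirq context
 * it is redirected to guest time instead.
 */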
3839
3840/*
3841 * Account for involuntary wait time.
3842 * @cputime: the cpu time spent in involuntary wait
3843 */
3844void account_steal_time(cputime_t cputime)
3845{
3846	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3847	cputime64_t cputime64 = cputime_to_cputime64(cputime);
3848
3849	cpustat->steal = cputime64_add(cpustat->steal, cputime64);
3850}
3851
3852/*
3853 * Account for idle time.
3854 * @cputime: the cpu time spent in idle wait
3855 */
3856void account_idle_time(cputime_t cputime)
3857{
3858	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3859	cputime64_t cputime64 = cputime_to_cputime64(cputime);
3860	struct rq *rq = this_rq();
3861
3862	if (atomic_read(&rq->nr_iowait) > 0)
3863		cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
3864	else
3865		cpustat->idle = cputime64_add(cpustat->idle, cputime64);
3866}
3867
3868static __always_inline bool steal_account_process_tick(void)
3869{
3870#ifdef CONFIG_PARAVIRT
3871	if (static_branch(&paravirt_steal_enabled)) {
3872		u64 steal, st = 0;
3873
3874		steal = paravirt_steal_clock(smp_processor_id());
3875		steal -= this_rq()->prev_steal_time;
3876
3877		st = steal_ticks(steal);
3878		this_rq()->prev_steal_time += st * TICK_NSEC;
3879
3880		account_steal_time(st);
3881		return st;
3882	}
3883#endif
3884	return false;
3885}
3886
3887#ifndef CONFIG_VIRT_CPU_ACCOUNTING
3888
3889#ifdef CONFIG_IRQ_TIME_ACCOUNTING
3890/*
3891 * Account a tick to a process and cpustat
3892 * @p: the process that the cpu time gets accounted to
3893 * @user_tick: is the tick from userspace
3894 * @rq: the pointer to rq
3895 *
3896 * Tick demultiplexing follows the order
3897 * - pending hardirq update
3898 * - pending softirq update
3899 * - user_time
3900 * - idle_time
3901 * - system time
3902 *   - check for guest_time
3903 *   - else account as system_time
3904 *
3905 * The hardirq check is done for both system and user time, as there is
3906 * no timer going off while we are in a hardirq and hence we may never get
3907 * an opportunity to update it solely on system time.
3908 * p->stime and friends are only updated on system time, not on irq or
3909 * softirq time, as those no longer count toward the task's exec_runtime.
3910 */
3911static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3912						struct rq *rq)
3913{
3914	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
3915	cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
3916	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3917
3918	if (steal_account_process_tick())
3919		return;
3920
3921	if (irqtime_account_hi_update()) {
3922		cpustat->irq = cputime64_add(cpustat->irq, tmp);
3923	} else if (irqtime_account_si_update()) {
3924		cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
3925	} else if (this_cpu_ksoftirqd() == p) {
3926		/*
3927		 * ksoftirqd time does not get accounted in cpu_softirq_time.
3928		 * So, we have to handle it separately here.
3929		 * Also, p->stime needs to be updated for ksoftirqd.
3930		 */
3931		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3932					&cpustat->softirq);
3933	} else if (user_tick) {
3934		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
3935	} else if (p == rq->idle) {
3936		account_idle_time(cputime_one_jiffy);
3937	} else if (p->flags & PF_VCPU) { /* System time or guest time */
3938		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
3939	} else {
3940		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3941					&cpustat->system);
3942	}
3943}
3944
3945static void irqtime_account_idle_ticks(int ticks)
3946{
3947	int i;
3948	struct rq *rq = this_rq();
3949
3950	for (i = 0; i < ticks; i++)
3951		irqtime_account_process_tick(current, 0, rq);
3952}
3953#else /* CONFIG_IRQ_TIME_ACCOUNTING */
3954static void irqtime_account_idle_ticks(int ticks) {}
3955static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3956						struct rq *rq) {}
3957#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
3958
3959/*
3960 * Account a single tick of cpu time.
3961 * @p: the process that the cpu time gets accounted to
3962 * @user_tick: indicates if the tick is a user or a system tick
3963 */
3964void account_process_tick(struct task_struct *p, int user_tick)
3965{
3966	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
3967	struct rq *rq = this_rq();
3968
3969	if (sched_clock_irqtime) {
3970		irqtime_account_process_tick(p, user_tick, rq);
3971		return;
3972	}
3973
3974	if (steal_account_process_tick())
3975		return;
3976
3977	if (user_tick)
3978		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
3979	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
3980		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
3981				    one_jiffy_scaled);
3982	else
3983		account_idle_time(cputime_one_jiffy);
3984}
3985
3986/*
3987 * Account multiple ticks of steal time.
3988 * Steal time is accounted per-cpu, so there is no task argument.
3989 * @ticks: number of stolen ticks
3990 */
3991void account_steal_ticks(unsigned long ticks)
3992{
3993	account_steal_time(jiffies_to_cputime(ticks));
3994}
3995
3996/*
3997 * Account multiple ticks of idle time.
3998 * @ticks: number of idle ticks
3999 */
4000void account_idle_ticks(unsigned long ticks)
4001{
4002
4003	if (sched_clock_irqtime) {
4004		irqtime_account_idle_ticks(ticks);
4005		return;
4006	}
4007
4008	account_idle_time(jiffies_to_cputime(ticks));
4009}
4010
4011#endif
4012
4013/*
4014 * Use precise platform statistics if available:
4015 */
4016#ifdef CONFIG_VIRT_CPU_ACCOUNTING
4017void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
4018{
4019	*ut = p->utime;
4020	*st = p->stime;
4021}
4022
4023void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
4024{
4025	struct task_cputime cputime;
4026
4027	thread_group_cputime(p, &cputime);
4028
4029	*ut = cputime.utime;
4030	*st = cputime.stime;
4031}
4032#else
4033
4034#ifndef nsecs_to_cputime
4035# define nsecs_to_cputime(__nsecs)	nsecs_to_jiffies(__nsecs)
4036#endif
4037
4038void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
4039{
4040	cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
4041
4042	/*
4043	 * Use CFS's precise accounting:
4044	 */
4045	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
4046
4047	if (total) {
4048		u64 temp = rtime;
4049
4050		temp *= utime;
4051		do_div(temp, total);
4052		utime = (cputime_t)temp;
4053	} else
4054		utime = rtime;
4055
4056	/*
4057	 * Compare with previous values, to keep monotonicity:
4058	 */
4059	p->prev_utime = max(p->prev_utime, utime);
4060	p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
4061
4062	*ut = p->prev_utime;
4063	*st = p->prev_stime;
4064}
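/*
 * Editor's illustrative sketch (not part of kernel/sched.c): the split
 * above keeps the utime/stime ratio from the tick samples but rescales
 * it so the pair sums to CFS's precise rtime.  For example, with
 * utime = 300 and stime = 100 ticks but rtime = 200 ticks:
 *
 *	scaled utime = 200 * 300 / 400 = 150
 *	scaled stime = 200 - 150       =  50	(assuming no monotonicity clamp)
 *
 * A minimal stand-alone model of the same arithmetic (name hypothetical):
 */
#if 0
static u64 demo_scale_utime(u64 rtime, u64 utime, u64 stime)
{
	u64 total = utime + stime;

	/* keep the utime:stime ratio, but make the pair sum to rtime */
	return total ? div64_u64(rtime * utime, total) : rtime;
}
#endif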
4065
4066/*
4067 * Must be called with siglock held.
4068 */
4069void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
4070{
4071	struct signal_struct *sig = p->signal;
4072	struct task_cputime cputime;
4073	cputime_t rtime, utime, total;
4074
4075	thread_group_cputime(p, &cputime);
4076
4077	total = cputime_add(cputime.utime, cputime.stime);
4078	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
4079
4080	if (total) {
4081		u64 temp = rtime;
4082
4083		temp *= cputime.utime;
4084		do_div(temp, total);
4085		utime = (cputime_t)temp;
4086	} else
4087		utime = rtime;
4088
4089	sig->prev_utime = max(sig->prev_utime, utime);
4090	sig->prev_stime = max(sig->prev_stime,
4091			      cputime_sub(rtime, sig->prev_utime));
4092
4093	*ut = sig->prev_utime;
4094	*st = sig->prev_stime;
4095}
4096#endif
4097
4098/*
4099 * This function gets called by the timer code, with HZ frequency.
4100 * We call it with interrupts disabled.
4101 */
4102void scheduler_tick(void)
4103{
4104	int cpu = smp_processor_id();
4105	struct rq *rq = cpu_rq(cpu);
4106	struct task_struct *curr = rq->curr;
4107
4108	sched_clock_tick();
4109
4110	raw_spin_lock(&rq->lock);
4111	update_rq_clock(rq);
4112	update_cpu_load_active(rq);
4113	curr->sched_class->task_tick(rq, curr, 0);
4114	raw_spin_unlock(&rq->lock);
4115
4116	perf_event_task_tick();
4117
4118#ifdef CONFIG_SMP
4119	rq->idle_at_tick = idle_cpu(cpu);
4120	trigger_load_balance(rq, cpu);
4121#endif
4122}
4123
4124notrace unsigned long get_parent_ip(unsigned long addr)
4125{
4126	if (in_lock_functions(addr)) {
4127		addr = CALLER_ADDR2;
4128		if (in_lock_functions(addr))
4129			addr = CALLER_ADDR3;
4130	}
4131	return addr;
4132}
4133
4134#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
4135				defined(CONFIG_PREEMPT_TRACER))
4136
4137void __kprobes add_preempt_count(int val)
4138{
4139#ifdef CONFIG_DEBUG_PREEMPT
4140	/*
4141	 * Underflow?
4142	 */
4143	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
4144		return;
4145#endif
4146	preempt_count() += val;
4147#ifdef CONFIG_DEBUG_PREEMPT
4148	/*
4149	 * Spinlock count overflowing soon?
4150	 */
4151	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
4152				PREEMPT_MASK - 10);
4153#endif
4154	if (preempt_count() == val)
4155		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
4156}
4157EXPORT_SYMBOL(add_preempt_count);
4158
4159void __kprobes sub_preempt_count(int val)
4160{
4161#ifdef CONFIG_DEBUG_PREEMPT
4162	/*
4163	 * Underflow?
4164	 */
4165	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
4166		return;
4167	/*
4168	 * Is the spinlock portion underflowing?
4169	 */
4170	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
4171			!(preempt_count() & PREEMPT_MASK)))
4172		return;
4173#endif
4174
4175	if (preempt_count() == val)
4176		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
4177	preempt_count() -= val;
4178}
4179EXPORT_SYMBOL(sub_preempt_count);
4180
4181#endif
4182
4183/*
4184 * Print scheduling while atomic bug:
4185 */
4186static noinline void __schedule_bug(struct task_struct *prev)
4187{
4188	struct pt_regs *regs = get_irq_regs();
4189
4190	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
4191		prev->comm, prev->pid, preempt_count());
4192
4193	debug_show_held_locks(prev);
4194	print_modules();
4195	if (irqs_disabled())
4196		print_irqtrace_events(prev);
4197
4198	if (regs)
4199		show_regs(regs);
4200	else
4201		dump_stack();
4202}
4203
4204/*
4205 * Various schedule()-time debugging checks and statistics:
4206 */
4207static inline void schedule_debug(struct task_struct *prev)
4208{
4209	/*
4210	 * Test if we are atomic. Since do_exit() needs to call into
4211	 * schedule() atomically, we ignore that path for now.
4212	 * Otherwise, whine if we are scheduling when we should not be.
4213	 */
4214	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
4215		__schedule_bug(prev);
4216
4217	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4218
4219	schedstat_inc(this_rq(), sched_count);
4220}
4221
4222static void put_prev_task(struct rq *rq, struct task_struct *prev)
4223{
4224	if (prev->on_rq || rq->skip_clock_update < 0)
4225		update_rq_clock(rq);
4226	prev->sched_class->put_prev_task(rq, prev);
4227}
4228
4229/*
4230 * Pick up the highest-prio task:
4231 */
4232static inline struct task_struct *
4233pick_next_task(struct rq *rq)
4234{
4235	const struct sched_class *class;
4236	struct task_struct *p;
4237
4238	/*
4239	 * Optimization: we know that if all tasks are in
4240	 * the fair class we can call that function directly:
4241	 */
4242	if (likely(rq->nr_running == rq->cfs.nr_running)) {
4243		p = fair_sched_class.pick_next_task(rq);
4244		if (likely(p))
4245			return p;
4246	}
4247
4248	for_each_class(class) {
4249		p = class->pick_next_task(rq);
4250		if (p)
4251			return p;
4252	}
4253
4254	BUG(); /* the idle class will always have a runnable task */
4255}
4256
4257/*
4258 * __schedule() is the main scheduler function.
4259 */
4260static void __sched __schedule(void)
4261{
4262	struct task_struct *prev, *next;
4263	unsigned long *switch_count;
4264	struct rq *rq;
4265	int cpu;
4266
4267need_resched:
4268	preempt_disable();
4269	cpu = smp_processor_id();
4270	rq = cpu_rq(cpu);
4271	rcu_note_context_switch(cpu);
4272	prev = rq->curr;
4273
4274	schedule_debug(prev);
4275
4276	if (sched_feat(HRTICK))
4277		hrtick_clear(rq);
4278
4279	raw_spin_lock_irq(&rq->lock);
4280
4281	switch_count = &prev->nivcsw;
4282	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
4283		if (unlikely(signal_pending_state(prev->state, prev))) {
4284			prev->state = TASK_RUNNING;
4285		} else {
4286			deactivate_task(rq, prev, DEQUEUE_SLEEP);
4287			prev->on_rq = 0;
4288
4289			/*
4290			 * If a worker went to sleep, notify and ask workqueue
4291			 * whether it wants to wake up a task to maintain
4292			 * concurrency.
4293			 */
4294			if (prev->flags & PF_WQ_WORKER) {
4295				struct task_struct *to_wakeup;
4296
4297				to_wakeup = wq_worker_sleeping(prev, cpu);
4298				if (to_wakeup)
4299					try_to_wake_up_local(to_wakeup);
4300			}
4301		}
4302		switch_count = &prev->nvcsw;
4303	}
4304
4305	pre_schedule(rq, prev);
4306
4307	if (unlikely(!rq->nr_running))
4308		idle_balance(cpu, rq);
4309
4310	put_prev_task(rq, prev);
4311	next = pick_next_task(rq);
4312	clear_tsk_need_resched(prev);
4313	rq->skip_clock_update = 0;
4314
4315	if (likely(prev != next)) {
4316		rq->nr_switches++;
4317		rq->curr = next;
4318		++*switch_count;
4319
4320		context_switch(rq, prev, next); /* unlocks the rq */
4321		/*
4322		 * The context switch has flipped the stack from under us
4323		 * and restored the local variables which were saved when
4324		 * this task called schedule() in the past. prev == current
4325		 * is still correct, but it can be moved to another cpu/rq.
4326		 */
4327		cpu = smp_processor_id();
4328		rq = cpu_rq(cpu);
4329	} else
4330		raw_spin_unlock_irq(&rq->lock);
4331
4332	post_schedule(rq);
4333
4334	preempt_enable_no_resched();
4335	if (need_resched())
4336		goto need_resched;
4337}
4338
4339static inline void sched_submit_work(struct task_struct *tsk)
4340{
4341	if (!tsk->state)
4342		return;
4343	/*
4344	 * If we are going to sleep and we have plugged IO queued,
4345	 * make sure to submit it to avoid deadlocks.
4346	 */
4347	if (blk_needs_flush_plug(tsk))
4348		blk_schedule_flush_plug(tsk);
4349}
4350
4351asmlinkage void __sched schedule(void)
4352{
4353	struct task_struct *tsk = current;
4354
4355	sched_submit_work(tsk);
4356	__schedule();
4357}
4358EXPORT_SYMBOL(schedule);
4359
4360#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
4361
4362static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
4363{
4364	if (lock->owner != owner)
4365		return false;
4366
4367	/*
4368	 * Ensure we emit the owner->on_cpu dereference _after_ checking
4369	 * that lock->owner still matches owner. If that fails, owner might
4370	 * point to free()d memory; if it still matches, the rcu_read_lock()
4371	 * ensures the memory stays valid.
4372	 */
4373	barrier();
4374
4375	return owner->on_cpu;
4376}
4377
4378/*
4379 * Look out! "owner" is an entirely speculative pointer
4380 * access and not reliable.
4381 */
4382int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
4383{
4384	if (!sched_feat(OWNER_SPIN))
4385		return 0;
4386
4387	rcu_read_lock();
4388	while (owner_running(lock, owner)) {
4389		if (need_resched())
4390			break;
4391
4392		arch_mutex_cpu_relax();
4393	}
4394	rcu_read_unlock();
4395
4396	/*
4397	 * We break out of the loop above on need_resched() and when the
4398	 * owner changes, which is a sign of heavy contention. Return
4399	 * success only when lock->owner is NULL.
4400	 */
4401	return lock->owner == NULL;
4402}
4403#endif
4404
4405#ifdef CONFIG_PREEMPT
4406/*
4407 * This is the entry point to schedule() from in-kernel preemption
4408 * off of preempt_enable. Kernel preemption off the return-from-interrupt
4409 * path goes through preempt_schedule_irq() below and calls the scheduler directly.
4410 */
4411asmlinkage void __sched notrace preempt_schedule(void)
4412{
4413	struct thread_info *ti = current_thread_info();
4414
4415	/*
4416	 * If there is a non-zero preempt_count or interrupts are disabled,
4417	 * we do not want to preempt the current task. Just return..
4418	 */
4419	if (likely(ti->preempt_count || irqs_disabled()))
4420		return;
4421
4422	do {
4423		add_preempt_count_notrace(PREEMPT_ACTIVE);
4424		__schedule();
4425		sub_preempt_count_notrace(PREEMPT_ACTIVE);
4426
4427		/*
4428		 * Check again in case we missed a preemption opportunity
4429		 * between schedule and now.
4430		 */
4431		barrier();
4432	} while (need_resched());
4433}
4434EXPORT_SYMBOL(preempt_schedule);
4435
4436/*
4437 * This is the entry point to schedule() from kernel preemption
4438 * off of irq context.
4439 * Note that this is called and returns with irqs disabled. This
4440 * protects us against recursive calls from irq.
4441 */
4442asmlinkage void __sched preempt_schedule_irq(void)
4443{
4444	struct thread_info *ti = current_thread_info();
4445
4446	/* Catch callers which need to be fixed */
4447	BUG_ON(ti->preempt_count || !irqs_disabled());
4448
4449	do {
4450		add_preempt_count(PREEMPT_ACTIVE);
4451		local_irq_enable();
4452		__schedule();
4453		local_irq_disable();
4454		sub_preempt_count(PREEMPT_ACTIVE);
4455
4456		/*
4457		 * Check again in case we missed a preemption opportunity
4458		 * between schedule and now.
4459		 */
4460		barrier();
4461	} while (need_resched());
4462}
4463
4464#endif /* CONFIG_PREEMPT */
4465
4466int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
4467			  void *key)
4468{
4469	return try_to_wake_up(curr->private, mode, wake_flags);
4470}
4471EXPORT_SYMBOL(default_wake_function);
4472
4473/*
4474 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
4475 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
4476 * number) then we wake all the non-exclusive tasks and one exclusive task.
4477 *
4478 * There are circumstances in which we can try to wake a task which has already
4479 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
4480 * zero in this (rare) case, and we handle it by continuing to scan the queue.
4481 */
4482static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
4483			int nr_exclusive, int wake_flags, void *key)
4484{
4485	wait_queue_t *curr, *next;
4486
4487	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
4488		unsigned flags = curr->flags;
4489
4490		if (curr->func(curr, mode, wake_flags, key) &&
4491				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
4492			break;
4493	}
4494}
4495
4496/**
4497 * __wake_up - wake up threads blocked on a waitqueue.
4498 * @q: the waitqueue
4499 * @mode: which threads
4500 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4501 * @key: is directly passed to the wakeup function
4502 *
4503 * It may be assumed that this function implies a write memory barrier before
4504 * changing the task state if and only if any tasks are woken up.
4505 */
4506void __wake_up(wait_queue_head_t *q, unsigned int mode,
4507			int nr_exclusive, void *key)
4508{
4509	unsigned long flags;
4510
4511	spin_lock_irqsave(&q->lock, flags);
4512	__wake_up_common(q, mode, nr_exclusive, 0, key);
4513	spin_unlock_irqrestore(&q->lock, flags);
4514}
4515EXPORT_SYMBOL(__wake_up);
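/*
 * Editor's illustrative sketch (not part of kernel/sched.c): a typical
 * producer/consumer pairing around the wakeup path above, using the
 * wake_up() wrapper, which ends up in __wake_up(q, TASK_NORMAL, 1, NULL).
 * All identifiers prefixed with demo_ are hypothetical.
 */
#if 0
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_ready;

static int demo_consumer(void)
{
	/* sleep until demo_ready becomes non-zero (or a signal arrives) */
	return wait_event_interruptible(demo_wq, demo_ready);
}

static void demo_producer(void)
{
	demo_ready = 1;
	wake_up(&demo_wq);
}
#endif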
4516
4517/*
4518 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4519 */
4520void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
4521{
4522	__wake_up_common(q, mode, 1, 0, NULL);
4523}
4524EXPORT_SYMBOL_GPL(__wake_up_locked);
4525
4526void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
4527{
4528	__wake_up_common(q, mode, 1, 0, key);
4529}
4530EXPORT_SYMBOL_GPL(__wake_up_locked_key);
4531
4532/**
4533 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
4534 * @q: the waitqueue
4535 * @mode: which threads
4536 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4537 * @key: opaque value to be passed to wakeup targets
4538 *
4539 * The sync wakeup differs in that the waker knows that it will schedule
4540 * away soon, so while the target thread will be woken up, it will not
4541 * be migrated to another CPU - ie. the two threads are 'synchronized'
4542 * with each other. This can prevent needless bouncing between CPUs.
4543 *
4544 * On UP it can prevent extra preemption.
4545 *
4546 * It may be assumed that this function implies a write memory barrier before
4547 * changing the task state if and only if any tasks are woken up.
4548 */
4549void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
4550			int nr_exclusive, void *key)
4551{
4552	unsigned long flags;
4553	int wake_flags = WF_SYNC;
4554
4555	if (unlikely(!q))
4556		return;
4557
4558	if (unlikely(!nr_exclusive))
4559		wake_flags = 0;
4560
4561	spin_lock_irqsave(&q->lock, flags);
4562	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
4563	spin_unlock_irqrestore(&q->lock, flags);
4564}
4565EXPORT_SYMBOL_GPL(__wake_up_sync_key);
4566
4567/*
4568 * __wake_up_sync - see __wake_up_sync_key()
4569 */
4570void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4571{
4572	__wake_up_sync_key(q, mode, nr_exclusive, NULL);
4573}
4574EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
4575
4576/**
4577 * complete: - signals a single thread waiting on this completion
4578 * @x:  holds the state of this particular completion
4579 *
4580 * This will wake up a single thread waiting on this completion. Threads will be
4581 * awakened in the same order in which they were queued.
4582 *
4583 * See also complete_all(), wait_for_completion() and related routines.
4584 *
4585 * It may be assumed that this function implies a write memory barrier before
4586 * changing the task state if and only if any tasks are woken up.
4587 */
4588void complete(struct completion *x)
4589{
4590	unsigned long flags;
4591
4592	spin_lock_irqsave(&x->wait.lock, flags);
4593	x->done++;
4594	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
4595	spin_unlock_irqrestore(&x->wait.lock, flags);
4596}
4597EXPORT_SYMBOL(complete);
4598
4599/**
4600 * complete_all: - signals all threads waiting on this completion
4601 * @x:  holds the state of this particular completion
4602 *
4603 * This will wake up all threads waiting on this particular completion event.
4604 *
4605 * It may be assumed that this function implies a write memory barrier before
4606 * changing the task state if and only if any tasks are woken up.
4607 */
4608void complete_all(struct completion *x)
4609{
4610	unsigned long flags;
4611
4612	spin_lock_irqsave(&x->wait.lock, flags);
4613	x->done += UINT_MAX/2;
4614	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
4615	spin_unlock_irqrestore(&x->wait.lock, flags);
4616}
4617EXPORT_SYMBOL(complete_all);
4618
4619static inline long __sched
4620do_wait_for_common(struct completion *x, long timeout, int state)
4621{
4622	if (!x->done) {
4623		DECLARE_WAITQUEUE(wait, current);
4624
4625		__add_wait_queue_tail_exclusive(&x->wait, &wait);
4626		do {
4627			if (signal_pending_state(state, current)) {
4628				timeout = -ERESTARTSYS;
4629				break;
4630			}
4631			__set_current_state(state);
4632			spin_unlock_irq(&x->wait.lock);
4633			timeout = schedule_timeout(timeout);
4634			spin_lock_irq(&x->wait.lock);
4635		} while (!x->done && timeout);
4636		__remove_wait_queue(&x->wait, &wait);
4637		if (!x->done)
4638			return timeout;
4639	}
4640	x->done--;
4641	return timeout ?: 1;
4642}
4643
4644static long __sched
4645wait_for_common(struct completion *x, long timeout, int state)
4646{
4647	might_sleep();
4648
4649	spin_lock_irq(&x->wait.lock);
4650	timeout = do_wait_for_common(x, timeout, state);
4651	spin_unlock_irq(&x->wait.lock);
4652	return timeout;
4653}
4654
4655/**
4656 * wait_for_completion: - waits for completion of a task
4657 * @x:  holds the state of this particular completion
4658 *
4659 * This waits to be signaled for completion of a specific task. It is NOT
4660 * interruptible and there is no timeout.
4661 *
4662 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
4663 * and interrupt capability. Also see complete().
4664 */
4665void __sched wait_for_completion(struct completion *x)
4666{
4667	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
4668}
4669EXPORT_SYMBOL(wait_for_completion);
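/*
 * Editor's illustrative sketch (not part of kernel/sched.c): typical
 * one-shot use of the completion API documented above.  demo_start_work()
 * is hypothetical and is assumed to call complete(&done) when finished.
 */
#if 0
static int demo_run_and_wait(void)
{
	DECLARE_COMPLETION_ONSTACK(done);

	demo_start_work(&done);		/* hypothetical async work */
	wait_for_completion(&done);	/* uninterruptible, no timeout */
	return 0;
}
#endif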
4670
4671/**
4672 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4673 * @x:  holds the state of this particular completion
4674 * @timeout:  timeout value in jiffies
4675 *
4676 * This waits for either a completion of a specific task to be signaled or for a
4677 * specified timeout to expire. The timeout is in jiffies. It is not
4678 * interruptible.
4679 */
4680unsigned long __sched
4681wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4682{
4683	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
4684}
4685EXPORT_SYMBOL(wait_for_completion_timeout);
4686
4687/**
4688 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
4689 * @x:  holds the state of this particular completion
4690 *
4691 * This waits for completion of a specific task to be signaled. It is
4692 * interruptible.
4693 */
4694int __sched wait_for_completion_interruptible(struct completion *x)
4695{
4696	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4697	if (t == -ERESTARTSYS)
4698		return t;
4699	return 0;
4700}
4701EXPORT_SYMBOL(wait_for_completion_interruptible);
4702
4703/**
4704 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
4705 * @x:  holds the state of this particular completion
4706 * @timeout:  timeout value in jiffies
4707 *
4708 * This waits for either a completion of a specific task to be signaled or for a
4709 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4710 */
4711long __sched
4712wait_for_completion_interruptible_timeout(struct completion *x,
4713					  unsigned long timeout)
4714{
4715	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
4716}
4717EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
4718
4719/**
4720 * wait_for_completion_killable: - waits for completion of a task (killable)
4721 * @x:  holds the state of this particular completion
4722 *
4723 * This waits to be signaled for completion of a specific task. It can be
4724 * interrupted by a kill signal.
4725 */
4726int __sched wait_for_completion_killable(struct completion *x)
4727{
4728	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
4729	if (t == -ERESTARTSYS)
4730		return t;
4731	return 0;
4732}
4733EXPORT_SYMBOL(wait_for_completion_killable);
4734
4735/**
4736 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
4737 * @x:  holds the state of this particular completion
4738 * @timeout:  timeout value in jiffies
4739 *
4740 * This waits for either a completion of a specific task to be
4741 * signaled or for a specified timeout to expire. It can be
4742 * interrupted by a kill signal. The timeout is in jiffies.
4743 */
4744long __sched
4745wait_for_completion_killable_timeout(struct completion *x,
4746				     unsigned long timeout)
4747{
4748	return wait_for_common(x, timeout, TASK_KILLABLE);
4749}
4750EXPORT_SYMBOL(wait_for_completion_killable_timeout);
4751
4752/**
4753 *	try_wait_for_completion - try to decrement a completion without blocking
4754 *	@x:	completion structure
4755 *
4756 *	Returns: 0 if a decrement cannot be done without blocking
4757 *		 1 if a decrement succeeded.
4758 *
4759 *	If a completion is being used as a counting completion,
4760 *	attempt to decrement the counter without blocking. This
4761 *	enables us to avoid waiting if the resource the completion
4762 *	is protecting is not available.
4763 */
4764bool try_wait_for_completion(struct completion *x)
4765{
4766	unsigned long flags;
4767	int ret = 1;
4768
4769	spin_lock_irqsave(&x->wait.lock, flags);
4770	if (!x->done)
4771		ret = 0;
4772	else
4773		x->done--;
4774	spin_unlock_irqrestore(&x->wait.lock, flags);
4775	return ret;
4776}
4777EXPORT_SYMBOL(try_wait_for_completion);
4778
4779/**
4780 *	completion_done - Test to see if a completion has any waiters
4781 *	@x:	completion structure
4782 *
4783 *	Returns: 0 if there are waiters (wait_for_completion() in progress)
4784 *		 1 if there are no waiters.
4785 *
4786 */
4787bool completion_done(struct completion *x)
4788{
4789	unsigned long flags;
4790	int ret = 1;
4791
4792	spin_lock_irqsave(&x->wait.lock, flags);
4793	if (!x->done)
4794		ret = 0;
4795	spin_unlock_irqrestore(&x->wait.lock, flags);
4796	return ret;
4797}
4798EXPORT_SYMBOL(completion_done);
4799
4800static long __sched
4801sleep_on_common(wait_queue_head_t *q, int state, long timeout)
4802{
4803	unsigned long flags;
4804	wait_queue_t wait;
4805
4806	init_waitqueue_entry(&wait, current);
4807
4808	__set_current_state(state);
4809
4810	spin_lock_irqsave(&q->lock, flags);
4811	__add_wait_queue(q, &wait);
4812	spin_unlock(&q->lock);
4813	timeout = schedule_timeout(timeout);
4814	spin_lock_irq(&q->lock);
4815	__remove_wait_queue(q, &wait);
4816	spin_unlock_irqrestore(&q->lock, flags);
4817
4818	return timeout;
4819}
4820
4821void __sched interruptible_sleep_on(wait_queue_head_t *q)
4822{
4823	sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
4824}
4825EXPORT_SYMBOL(interruptible_sleep_on);
4826
4827long __sched
4828interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
4829{
4830	return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
4831}
4832EXPORT_SYMBOL(interruptible_sleep_on_timeout);
4833
4834void __sched sleep_on(wait_queue_head_t *q)
4835{
4836	sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
4837}
4838EXPORT_SYMBOL(sleep_on);
4839
4840long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
4841{
4842	return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
4843}
4844EXPORT_SYMBOL(sleep_on_timeout);
4845
4846#ifdef CONFIG_RT_MUTEXES
4847
4848/*
4849 * rt_mutex_setprio - set the current priority of a task
4850 * @p: task
4851 * @prio: prio value (kernel-internal form)
4852 *
4853 * This function changes the 'effective' priority of a task. It does
4854 * not touch ->normal_prio like __setscheduler().
4855 *
4856 * Used by the rt_mutex code to implement priority inheritance logic.
4857 */
4858void rt_mutex_setprio(struct task_struct *p, int prio)
4859{
4860	int oldprio, on_rq, running;
4861	struct rq *rq;
4862	const struct sched_class *prev_class;
4863
4864	BUG_ON(prio < 0 || prio > MAX_PRIO);
4865
4866	rq = __task_rq_lock(p);
4867
4868	trace_sched_pi_setprio(p, prio);
4869	oldprio = p->prio;
4870	prev_class = p->sched_class;
4871	on_rq = p->on_rq;
4872	running = task_current(rq, p);
4873	if (on_rq)
4874		dequeue_task(rq, p, 0);
4875	if (running)
4876		p->sched_class->put_prev_task(rq, p);
4877
4878	if (rt_prio(prio))
4879		p->sched_class = &rt_sched_class;
4880	else
4881		p->sched_class = &fair_sched_class;
4882
4883	p->prio = prio;
4884
4885	if (running)
4886		p->sched_class->set_curr_task(rq);
4887	if (on_rq)
4888		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
4889
4890	check_class_changed(rq, p, prev_class, oldprio);
4891	__task_rq_unlock(rq);
4892}
4893
4894#endif
4895
4896void set_user_nice(struct task_struct *p, long nice)
4897{
4898	int old_prio, delta, on_rq;
4899	unsigned long flags;
4900	struct rq *rq;
4901
4902	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
4903		return;
4904	/*
4905	 * We have to be careful, if called from sys_setpriority(),
4906	 * the task might be in the middle of scheduling on another CPU.
4907	 */
4908	rq = task_rq_lock(p, &flags);
4909	/*
4910	 * The RT priorities are set via sched_setscheduler(), but we still
4911	 * allow the 'normal' nice value to be set - but as expected
4912	 * it won't have any effect on scheduling as long as the task is
4913	 * SCHED_FIFO/SCHED_RR:
4914	 */
4915	if (task_has_rt_policy(p)) {
4916		p->static_prio = NICE_TO_PRIO(nice);
4917		goto out_unlock;
4918	}
4919	on_rq = p->on_rq;
4920	if (on_rq)
4921		dequeue_task(rq, p, 0);
4922
4923	p->static_prio = NICE_TO_PRIO(nice);
4924	set_load_weight(p);
4925	old_prio = p->prio;
4926	p->prio = effective_prio(p);
4927	delta = p->prio - old_prio;
4928
4929	if (on_rq) {
4930		enqueue_task(rq, p, 0);
4931		/*
4932		 * If the task increased its priority or is running and
4933		 * lowered its priority, then reschedule its CPU:
4934		 */
4935		if (delta < 0 || (delta > 0 && task_running(rq, p)))
4936			resched_task(rq->curr);
4937	}
4938out_unlock:
4939	task_rq_unlock(rq, p, &flags);
4940}
4941EXPORT_SYMBOL(set_user_nice);
4942
4943/*
4944 * can_nice - check if a task can reduce its nice value
4945 * @p: task
4946 * @nice: nice value
4947 */
4948int can_nice(const struct task_struct *p, const int nice)
4949{
4950	/* convert nice value [19,-20] to rlimit style value [1,40] */
4951	int nice_rlim = 20 - nice;
4952
4953	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
4954		capable(CAP_SYS_NICE));
4955}
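/*
 * Editor's note (worked example, not part of kernel/sched.c): a request
 * for nice -5 maps to nice_rlim = 20 - (-5) = 25, so an unprivileged
 * task needs RLIMIT_NICE >= 25 (or CAP_SYS_NICE) for can_nice() to
 * allow it.
 */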
4956
4957#ifdef __ARCH_WANT_SYS_NICE
4958
4959/*
4960 * sys_nice - change the priority of the current process.
4961 * @increment: priority increment
4962 *
4963 * sys_setpriority is a more generic, but much slower function that
4964 * does similar things.
4965 */
4966SYSCALL_DEFINE1(nice, int, increment)
4967{
4968	long nice, retval;
4969
4970	/*
4971	 * Setpriority might change our priority at the same moment.
4972	 * We don't have to worry. Conceptually one call occurs first
4973	 * and we have a single winner.
4974	 */
4975	if (increment < -40)
4976		increment = -40;
4977	if (increment > 40)
4978		increment = 40;
4979
4980	nice = TASK_NICE(current) + increment;
4981	if (nice < -20)
4982		nice = -20;
4983	if (nice > 19)
4984		nice = 19;
4985
4986	if (increment < 0 && !can_nice(current, nice))
4987		return -EPERM;
4988
4989	retval = security_task_setnice(current, nice);
4990	if (retval)
4991		return retval;
4992
4993	set_user_nice(current, nice);
4994	return 0;
4995}
4996
4997#endif
4998
4999/**
5000 * task_prio - return the priority value of a given task.
5001 * @p: the task in question.
5002 *
5003 * This is the priority value as seen by users in /proc.
5004 * RT tasks are offset by -200. Normal tasks are centered
5005 * around 0, value goes from -16 to +15.
5006 */
5007int task_prio(const struct task_struct *p)
5008{
5009	return p->prio - MAX_RT_PRIO;
5010}
5011
5012/**
5013 * task_nice - return the nice value of a given task.
5014 * @p: the task in question.
5015 */
5016int task_nice(const struct task_struct *p)
5017{
5018	return TASK_NICE(p);
5019}
5020EXPORT_SYMBOL(task_nice);
5021
5022/**
5023 * idle_cpu - is a given cpu idle currently?
5024 * @cpu: the processor in question.
5025 */
5026int idle_cpu(int cpu)
5027{
5028	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
5029}
5030
5031/**
5032 * idle_task - return the idle task for a given cpu.
5033 * @cpu: the processor in question.
5034 */
5035struct task_struct *idle_task(int cpu)
5036{
5037	return cpu_rq(cpu)->idle;
5038}
5039
5040/**
5041 * find_process_by_pid - find a process with a matching PID value.
5042 * @pid: the pid in question.
5043 */
5044static struct task_struct *find_process_by_pid(pid_t pid)
5045{
5046	return pid ? find_task_by_vpid(pid) : current;
5047}
5048
5049/* Actually do priority change: must hold rq lock. */
5050static void
5051__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
5052{
5053	p->policy = policy;
5054	p->rt_priority = prio;
5055	p->normal_prio = normal_prio(p);
5056	/* we are holding p->pi_lock already */
5057	p->prio = rt_mutex_getprio(p);
5058	if (rt_prio(p->prio))
5059		p->sched_class = &rt_sched_class;
5060	else
5061		p->sched_class = &fair_sched_class;
5062	set_load_weight(p);
5063}
5064
5065/*
5066 * Check that the target process has a UID that matches the current process's
5067 */
5068static bool check_same_owner(struct task_struct *p)
5069{
5070	const struct cred *cred = current_cred(), *pcred;
5071	bool match;
5072
5073	rcu_read_lock();
5074	pcred = __task_cred(p);
5075	if (cred->user->user_ns == pcred->user->user_ns)
5076		match = (cred->euid == pcred->euid ||
5077			 cred->euid == pcred->uid);
5078	else
5079		match = false;
5080	rcu_read_unlock();
5081	return match;
5082}
5083
5084static int __sched_setscheduler(struct task_struct *p, int policy,
5085				const struct sched_param *param, bool user)
5086{
5087	int retval, oldprio, oldpolicy = -1, on_rq, running;
5088	unsigned long flags;
5089	const struct sched_class *prev_class;
5090	struct rq *rq;
5091	int reset_on_fork;
5092
5093	/* may grab non-irq protected spin_locks */
5094	BUG_ON(in_interrupt());
5095recheck:
5096	/* double check policy once rq lock held */
5097	if (policy < 0) {
5098		reset_on_fork = p->sched_reset_on_fork;
5099		policy = oldpolicy = p->policy;
5100	} else {
5101		reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
5102		policy &= ~SCHED_RESET_ON_FORK;
5103
5104		if (policy != SCHED_FIFO && policy != SCHED_RR &&
5105				policy != SCHED_NORMAL && policy != SCHED_BATCH &&
5106				policy != SCHED_IDLE)
5107			return -EINVAL;
5108	}
5109
5110	/*
5111	 * Valid priorities for SCHED_FIFO and SCHED_RR are
5112	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
5113	 * SCHED_BATCH and SCHED_IDLE is 0.
5114	 */
5115	if (param->sched_priority < 0 ||
5116	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
5117	    (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
5118		return -EINVAL;
5119	if (rt_policy(policy) != (param->sched_priority != 0))
5120		return -EINVAL;
5121
5122	/*
5123	 * Allow unprivileged RT tasks to decrease priority:
5124	 */
5125	if (user && !capable(CAP_SYS_NICE)) {
5126		if (rt_policy(policy)) {
5127			unsigned long rlim_rtprio =
5128					task_rlimit(p, RLIMIT_RTPRIO);
5129
5130			/* can't set/change the rt policy */
5131			if (policy != p->policy && !rlim_rtprio)
5132				return -EPERM;
5133
5134			/* can't increase priority */
5135			if (param->sched_priority > p->rt_priority &&
5136			    param->sched_priority > rlim_rtprio)
5137				return -EPERM;
5138		}
5139
5140		/*
5141		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
5142		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
5143		 */
5144		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
5145			if (!can_nice(p, TASK_NICE(p)))
5146				return -EPERM;
5147		}
5148
5149		/* can't change other user's priorities */
5150		if (!check_same_owner(p))
5151			return -EPERM;
5152
5153		/* Normal users shall not reset the sched_reset_on_fork flag */
5154		if (p->sched_reset_on_fork && !reset_on_fork)
5155			return -EPERM;
5156	}
5157
5158	if (user) {
5159		retval = security_task_setscheduler(p);
5160		if (retval)
5161			return retval;
5162	}
5163
5164	/*
5165	 * make sure no PI-waiters arrive (or leave) while we are
5166	 * changing the priority of the task:
5167	 *
5168	 * To be able to change p->policy safely, the appropriate
5169	 * runqueue lock must be held.
5170	 */
5171	rq = task_rq_lock(p, &flags);
5172
5173	/*
5174	 * Changing the policy of the stop threads is a very bad idea
5175	 */
5176	if (p == rq->stop) {
5177		task_rq_unlock(rq, p, &flags);
5178		return -EINVAL;
5179	}
5180
5181	/*
5182	 * If not changing anything there's no need to proceed further:
5183	 */
5184	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
5185			param->sched_priority == p->rt_priority))) {
5186
5187		__task_rq_unlock(rq);
5188		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5189		return 0;
5190	}
5191
5192#ifdef CONFIG_RT_GROUP_SCHED
5193	if (user) {
5194		/*
5195		 * Do not allow realtime tasks into groups that have no runtime
5196		 * assigned.
5197		 */
5198		if (rt_bandwidth_enabled() && rt_policy(policy) &&
5199				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
5200				!task_group_is_autogroup(task_group(p))) {
5201			task_rq_unlock(rq, p, &flags);
5202			return -EPERM;
5203		}
5204	}
5205#endif
5206
5207	/* recheck policy now with rq lock held */
5208	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
5209		policy = oldpolicy = -1;
5210		task_rq_unlock(rq, p, &flags);
5211		goto recheck;
5212	}
5213	on_rq = p->on_rq;
5214	running = task_current(rq, p);
5215	if (on_rq)
5216		deactivate_task(rq, p, 0);
5217	if (running)
5218		p->sched_class->put_prev_task(rq, p);
5219
5220	p->sched_reset_on_fork = reset_on_fork;
5221
5222	oldprio = p->prio;
5223	prev_class = p->sched_class;
5224	__setscheduler(rq, p, policy, param->sched_priority);
5225
5226	if (running)
5227		p->sched_class->set_curr_task(rq);
5228	if (on_rq)
5229		activate_task(rq, p, 0);
5230
5231	check_class_changed(rq, p, prev_class, oldprio);
5232	task_rq_unlock(rq, p, &flags);
5233
5234	rt_mutex_adjust_pi(p);
5235
5236	return 0;
5237}
5238
5239/**
5240 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
5241 * @p: the task in question.
5242 * @policy: new policy.
5243 * @param: structure containing the new RT priority.
5244 *
5245 * NOTE that the task may be already dead.
5246 */
5247int sched_setscheduler(struct task_struct *p, int policy,
5248		       const struct sched_param *param)
5249{
5250	return __sched_setscheduler(p, policy, param, true);
5251}
5252EXPORT_SYMBOL_GPL(sched_setscheduler);
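/*
 * Editor's illustrative sketch (not part of kernel/sched.c): an in-kernel
 * caller switching one of its kthreads to SCHED_FIFO priority 1.  The
 * demo_ function name is hypothetical.
 */
#if 0
static void demo_make_rt(struct task_struct *tsk)
{
	struct sched_param param = { .sched_priority = 1 };

	if (sched_setscheduler(tsk, SCHED_FIFO, &param))
		printk(KERN_WARNING "demo: failed to set SCHED_FIFO for %s\n",
		       tsk->comm);
}
#endif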
5253
5254/**
5255 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
5256 * @p: the task in question.
5257 * @policy: new policy.
5258 * @param: structure containing the new RT priority.
5259 *
5260 * Just like sched_setscheduler, only don't bother checking if the
5261 * current context has permission.  For example, this is needed in
5262 * stop_machine(): we create temporary high priority worker threads,
5263 * but our caller might not have that capability.
5264 */
5265int sched_setscheduler_nocheck(struct task_struct *p, int policy,
5266			       const struct sched_param *param)
5267{
5268	return __sched_setscheduler(p, policy, param, false);
5269}
5270
5271static int
5272do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
5273{
5274	struct sched_param lparam;
5275	struct task_struct *p;
5276	int retval;
5277
5278	if (!param || pid < 0)
5279		return -EINVAL;
5280	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
5281		return -EFAULT;
5282
5283	rcu_read_lock();
5284	retval = -ESRCH;
5285	p = find_process_by_pid(pid);
5286	if (p != NULL)
5287		retval = sched_setscheduler(p, policy, &lparam);
5288	rcu_read_unlock();
5289
5290	return retval;
5291}
5292
5293/**
5294 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
5295 * @pid: the pid in question.
5296 * @policy: new policy.
5297 * @param: structure containing the new RT priority.
5298 */
5299SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
5300		struct sched_param __user *, param)
5301{
5302	/* negative values for policy are not valid */
5303	if (policy < 0)
5304		return -EINVAL;
5305
5306	return do_sched_setscheduler(pid, policy, param);
5307}
5308
5309/**
5310 * sys_sched_setparam - set/change the RT priority of a thread
5311 * @pid: the pid in question.
5312 * @param: structure containing the new RT priority.
5313 */
5314SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
5315{
5316	return do_sched_setscheduler(pid, -1, param);
5317}
5318
5319/**
5320 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
5321 * @pid: the pid in question.
5322 */
5323SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
5324{
5325	struct task_struct *p;
5326	int retval;
5327
5328	if (pid < 0)
5329		return -EINVAL;
5330
5331	retval = -ESRCH;
5332	rcu_read_lock();
5333	p = find_process_by_pid(pid);
5334	if (p) {
5335		retval = security_task_getscheduler(p);
5336		if (!retval)
5337			retval = p->policy
5338				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
5339	}
5340	rcu_read_unlock();
5341	return retval;
5342}
5343
5344/**
5345 * sys_sched_getparam - get the RT priority of a thread
5346 * @pid: the pid in question.
5347 * @param: structure containing the RT priority.
5348 */
5349SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
5350{
5351	struct sched_param lp;
5352	struct task_struct *p;
5353	int retval;
5354
5355	if (!param || pid < 0)
5356		return -EINVAL;
5357
5358	rcu_read_lock();
5359	p = find_process_by_pid(pid);
5360	retval = -ESRCH;
5361	if (!p)
5362		goto out_unlock;
5363
5364	retval = security_task_getscheduler(p);
5365	if (retval)
5366		goto out_unlock;
5367
5368	lp.sched_priority = p->rt_priority;
5369	rcu_read_unlock();
5370
5371	/*
5372	 * This one might sleep, we cannot do it with a spinlock held ...
5373	 */
5374	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
5375
5376	return retval;
5377
5378out_unlock:
5379	rcu_read_unlock();
5380	return retval;
5381}
5382
5383long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
5384{
5385	cpumask_var_t cpus_allowed, new_mask;
5386	struct task_struct *p;
5387	int retval;
5388
5389	get_online_cpus();
5390	rcu_read_lock();
5391
5392	p = find_process_by_pid(pid);
5393	if (!p) {
5394		rcu_read_unlock();
5395		put_online_cpus();
5396		return -ESRCH;
5397	}
5398
5399	/* Prevent p going away */
5400	get_task_struct(p);
5401	rcu_read_unlock();
5402
5403	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
5404		retval = -ENOMEM;
5405		goto out_put_task;
5406	}
5407	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
5408		retval = -ENOMEM;
5409		goto out_free_cpus_allowed;
5410	}
5411	retval = -EPERM;
5412	if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
5413		goto out_unlock;
5414
5415	retval = security_task_setscheduler(p);
5416	if (retval)
5417		goto out_unlock;
5418
5419	cpuset_cpus_allowed(p, cpus_allowed);
5420	cpumask_and(new_mask, in_mask, cpus_allowed);
5421again:
5422	retval = set_cpus_allowed_ptr(p, new_mask);
5423
5424	if (!retval) {
5425		cpuset_cpus_allowed(p, cpus_allowed);
5426		if (!cpumask_subset(new_mask, cpus_allowed)) {
5427			/*
5428			 * We must have raced with a concurrent cpuset
5429			 * update. Just reset the cpus_allowed to the
5430			 * cpuset's cpus_allowed
5431			 */
5432			cpumask_copy(new_mask, cpus_allowed);
5433			goto again;
5434		}
5435	}
5436out_unlock:
5437	free_cpumask_var(new_mask);
5438out_free_cpus_allowed:
5439	free_cpumask_var(cpus_allowed);
5440out_put_task:
5441	put_task_struct(p);
5442	put_online_cpus();
5443	return retval;
5444}
5445
5446static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
5447			     struct cpumask *new_mask)
5448{
5449	if (len < cpumask_size())
5450		cpumask_clear(new_mask);
5451	else if (len > cpumask_size())
5452		len = cpumask_size();
5453
5454	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
5455}
5456
5457/**
5458 * sys_sched_setaffinity - set the cpu affinity of a process
5459 * @pid: pid of the process
5460 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5461 * @user_mask_ptr: user-space pointer to the new cpu mask
5462 */
5463SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
5464		unsigned long __user *, user_mask_ptr)
5465{
5466	cpumask_var_t new_mask;
5467	int retval;
5468
5469	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
5470		return -ENOMEM;
5471
5472	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
5473	if (retval == 0)
5474		retval = sched_setaffinity(pid, new_mask);
5475	free_cpumask_var(new_mask);
5476	return retval;
5477}
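/*
 * Editor's illustrative sketch (not part of kernel/sched.c): pinning a
 * task to CPU 0 through the kernel-side sched_setaffinity() helper used
 * by the syscall above.  demo_pin_to_cpu0() is hypothetical.
 */
#if 0
static long demo_pin_to_cpu0(pid_t pid)
{
	cpumask_var_t mask;
	long ret;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(0, mask);
	ret = sched_setaffinity(pid, mask);
	free_cpumask_var(mask);
	return ret;
}
#endif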
5478
5479long sched_getaffinity(pid_t pid, struct cpumask *mask)
5480{
5481	struct task_struct *p;
5482	unsigned long flags;
5483	int retval;
5484
5485	get_online_cpus();
5486	rcu_read_lock();
5487
5488	retval = -ESRCH;
5489	p = find_process_by_pid(pid);
5490	if (!p)
5491		goto out_unlock;
5492
5493	retval = security_task_getscheduler(p);
5494	if (retval)
5495		goto out_unlock;
5496
5497	raw_spin_lock_irqsave(&p->pi_lock, flags);
5498	cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
5499	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5500
5501out_unlock:
5502	rcu_read_unlock();
5503	put_online_cpus();
5504
5505	return retval;
5506}
5507
5508/**
5509 * sys_sched_getaffinity - get the cpu affinity of a process
5510 * @pid: pid of the process
5511 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5512 * @user_mask_ptr: user-space pointer to hold the current cpu mask
5513 */
5514SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
5515		unsigned long __user *, user_mask_ptr)
5516{
5517	int ret;
5518	cpumask_var_t mask;
5519
5520	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
5521		return -EINVAL;
5522	if (len & (sizeof(unsigned long)-1))
5523		return -EINVAL;
5524
5525	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5526		return -ENOMEM;
5527
5528	ret = sched_getaffinity(pid, mask);
5529	if (ret == 0) {
5530		size_t retlen = min_t(size_t, len, cpumask_size());
5531
5532		if (copy_to_user(user_mask_ptr, mask, retlen))
5533			ret = -EFAULT;
5534		else
5535			ret = retlen;
5536	}
5537	free_cpumask_var(mask);
5538
5539	return ret;
5540}
5541
5542/**
5543 * sys_sched_yield - yield the current processor to other threads.
5544 *
5545 * This function yields the current CPU to other tasks. If there are no
5546 * other threads running on this CPU then this function will return.
5547 */
5548SYSCALL_DEFINE0(sched_yield)
5549{
5550	struct rq *rq = this_rq_lock();
5551
5552	schedstat_inc(rq, yld_count);
5553	current->sched_class->yield_task(rq);
5554
5555	/*
5556	 * Since we are going to call schedule() anyway, there's
5557	 * no need to preempt or enable interrupts:
5558	 */
5559	__release(rq->lock);
5560	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
5561	do_raw_spin_unlock(&rq->lock);
5562	preempt_enable_no_resched();
5563
5564	schedule();
5565
5566	return 0;
5567}
5568
5569static inline int should_resched(void)
5570{
5571	return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
5572}
5573
5574static void __cond_resched(void)
5575{
5576	add_preempt_count(PREEMPT_ACTIVE);
5577	__schedule();
5578	sub_preempt_count(PREEMPT_ACTIVE);
5579}
5580
5581int __sched _cond_resched(void)
5582{
5583	if (should_resched()) {
5584		__cond_resched();
5585		return 1;
5586	}
5587	return 0;
5588}
5589EXPORT_SYMBOL(_cond_resched);
5590
5591/*
5592 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
5593 * call schedule, and on return reacquire the lock.
5594 *
5595 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
5596 * operations here to prevent schedule() from being called twice (once via
5597 * spin_unlock(), once by hand).
5598 */
5599int __cond_resched_lock(spinlock_t *lock)
5600{
5601	int resched = should_resched();
5602	int ret = 0;
5603
5604	lockdep_assert_held(lock);
5605
5606	if (spin_needbreak(lock) || resched) {
5607		spin_unlock(lock);
5608		if (resched)
5609			__cond_resched();
5610		else
5611			cpu_relax();
5612		ret = 1;
5613		spin_lock(lock);
5614	}
5615	return ret;
5616}
5617EXPORT_SYMBOL(__cond_resched_lock);
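/*
 * Editor's illustrative sketch (not part of kernel/sched.c): a long-running
 * loop under a spinlock that periodically yields through the
 * cond_resched_lock() wrapper around __cond_resched_lock().  The demo_
 * helpers are hypothetical.
 */
#if 0
static void demo_drain(spinlock_t *lock)
{
	spin_lock(lock);
	while (demo_more_work()) {
		demo_do_one_unit();
		/* drop the lock, schedule() if needed, then retake it */
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
}
#endif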
5618
5619int __sched __cond_resched_softirq(void)
5620{
5621	BUG_ON(!in_softirq());
5622
5623	if (should_resched()) {
5624		local_bh_enable();
5625		__cond_resched();
5626		local_bh_disable();
5627		return 1;
5628	}
5629	return 0;
5630}
5631EXPORT_SYMBOL(__cond_resched_softirq);
5632
5633/**
5634 * yield - yield the current processor to other threads.
5635 *
5636 * This is a shortcut for kernel-space yielding - it marks the
5637 * thread runnable and calls sys_sched_yield().
5638 */
5639void __sched yield(void)
5640{
5641	set_current_state(TASK_RUNNING);
5642	sys_sched_yield();
5643}
5644EXPORT_SYMBOL(yield);
5645
5646/**
5647 * yield_to - yield the current processor to another thread in
5648 * your thread group, or accelerate that thread toward the
5649 * processor it's on.
5650 * @p: target task
5651 * @preempt: whether task preemption is allowed or not
5652 *
5653 * It's the caller's job to ensure that the target task struct
5654 * can't go away on us before we can do any checks.
5655 *
5656 * Returns true if we indeed boosted the target task.
5657 */
5658bool __sched yield_to(struct task_struct *p, bool preempt)
5659{
5660	struct task_struct *curr = current;
5661	struct rq *rq, *p_rq;
5662	unsigned long flags;
5663	bool yielded = 0;
5664
5665	local_irq_save(flags);
5666	rq = this_rq();
5667
5668again:
5669	p_rq = task_rq(p);
5670	double_rq_lock(rq, p_rq);
5671	while (task_rq(p) != p_rq) {
5672		double_rq_unlock(rq, p_rq);
5673		goto again;
5674	}
5675
5676	if (!curr->sched_class->yield_to_task)
5677		goto out;
5678
5679	if (curr->sched_class != p->sched_class)
5680		goto out;
5681
5682	if (task_running(p_rq, p) || p->state)
5683		goto out;
5684
5685	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
5686	if (yielded) {
5687		schedstat_inc(rq, yld_count);
5688		/*
5689		 * Make p's CPU reschedule; pick_next_entity takes care of
5690		 * fairness.
5691		 */
5692		if (preempt && rq != p_rq)
5693			resched_task(p_rq->curr);
5694	}
5695
5696out:
5697	double_rq_unlock(rq, p_rq);
5698	local_irq_restore(flags);
5699
5700	if (yielded)
5701		schedule();
5702
5703	return yielded;
5704}
5705EXPORT_SYMBOL_GPL(yield_to);
5706
5707/*
5708 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
5709 * that process accounting knows that this is a task in IO wait state.
5710 */
5711void __sched io_schedule(void)
5712{
5713	struct rq *rq = raw_rq();
5714
5715	delayacct_blkio_start();
5716	atomic_inc(&rq->nr_iowait);
5717	blk_flush_plug(current);
5718	current->in_iowait = 1;
5719	schedule();
5720	current->in_iowait = 0;
5721	atomic_dec(&rq->nr_iowait);
5722	delayacct_blkio_end();
5723}
5724EXPORT_SYMBOL(io_schedule);
5725
5726long __sched io_schedule_timeout(long timeout)
5727{
5728	struct rq *rq = raw_rq();
5729	long ret;
5730
5731	delayacct_blkio_start();
5732	atomic_inc(&rq->nr_iowait);
5733	blk_flush_plug(current);
5734	current->in_iowait = 1;
5735	ret = schedule_timeout(timeout);
5736	current->in_iowait = 0;
5737	atomic_dec(&rq->nr_iowait);
5738	delayacct_blkio_end();
5739	return ret;
5740}
5741
5742/**
5743 * sys_sched_get_priority_max - return maximum RT priority.
5744 * @policy: scheduling class.
5745 *
5746 * this syscall returns the maximum rt_priority that can be used
5747 * by a given scheduling class.
5748 */
5749SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
5750{
5751	int ret = -EINVAL;
5752
5753	switch (policy) {
5754	case SCHED_FIFO:
5755	case SCHED_RR:
5756		ret = MAX_USER_RT_PRIO-1;
5757		break;
5758	case SCHED_NORMAL:
5759	case SCHED_BATCH:
5760	case SCHED_IDLE:
5761		ret = 0;
5762		break;
5763	}
5764	return ret;
5765}
5766
5767/**
5768 * sys_sched_get_priority_min - return minimum RT priority.
5769 * @policy: scheduling class.
5770 *
5771 * this syscall returns the minimum rt_priority that can be used
5772 * by a given scheduling class.
5773 */
5774SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
5775{
5776	int ret = -EINVAL;
5777
5778	switch (policy) {
5779	case SCHED_FIFO:
5780	case SCHED_RR:
5781		ret = 1;
5782		break;
5783	case SCHED_NORMAL:
5784	case SCHED_BATCH:
5785	case SCHED_IDLE:
5786		ret = 0;
5787	}
5788	return ret;
5789}
5790
5791/**
5792 * sys_sched_rr_get_interval - return the default timeslice of a process.
5793 * @pid: pid of the process.
5794 * @interval: userspace pointer to the timeslice value.
5795 *
5796 * this syscall writes the default timeslice value of a given process
5797 * into the user-space timespec buffer. A value of '0' means infinity.
5798 */
5799SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
5800		struct timespec __user *, interval)
5801{
5802	struct task_struct *p;
5803	unsigned int time_slice;
5804	unsigned long flags;
5805	struct rq *rq;
5806	int retval;
5807	struct timespec t;
5808
5809	if (pid < 0)
5810		return -EINVAL;
5811
5812	retval = -ESRCH;
5813	rcu_read_lock();
5814	p = find_process_by_pid(pid);
5815	if (!p)
5816		goto out_unlock;
5817
5818	retval = security_task_getscheduler(p);
5819	if (retval)
5820		goto out_unlock;
5821
5822	rq = task_rq_lock(p, &flags);
5823	time_slice = p->sched_class->get_rr_interval(rq, p);
5824	task_rq_unlock(rq, p, &flags);
5825
5826	rcu_read_unlock();
5827	jiffies_to_timespec(time_slice, &t);
5828	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
5829	return retval;
5830
5831out_unlock:
5832	rcu_read_unlock();
5833	return retval;
5834}
5835
5836static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
5837
5838void sched_show_task(struct task_struct *p)
5839{
5840	unsigned long free = 0;
5841	unsigned state;
5842
5843	state = p->state ? __ffs(p->state) + 1 : 0;
5844	printk(KERN_INFO "%-15.15s %c", p->comm,
5845		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
5846#if BITS_PER_LONG == 32
5847	if (state == TASK_RUNNING)
5848		printk(KERN_CONT " running  ");
5849	else
5850		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
5851#else
5852	if (state == TASK_RUNNING)
5853		printk(KERN_CONT "  running task    ");
5854	else
5855		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
5856#endif
5857#ifdef CONFIG_DEBUG_STACK_USAGE
5858	free = stack_not_used(p);
5859#endif
5860	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
5861		task_pid_nr(p), task_pid_nr(p->real_parent),
5862		(unsigned long)task_thread_info(p)->flags);
5863
5864	show_stack(p, NULL);
5865}
5866
5867void show_state_filter(unsigned long state_filter)
5868{
5869	struct task_struct *g, *p;
5870
5871#if BITS_PER_LONG == 32
5872	printk(KERN_INFO
5873		"  task                PC stack   pid father\n");
5874#else
5875	printk(KERN_INFO
5876		"  task                        PC stack   pid father\n");
5877#endif
5878	read_lock(&tasklist_lock);
5879	do_each_thread(g, p) {
5880		/*
5881		 * reset the NMI-timeout, listing all tasks on a slow
5882		 * console might take a lot of time:
5883		 */
5884		touch_nmi_watchdog();
5885		if (!state_filter || (p->state & state_filter))
5886			sched_show_task(p);
5887	} while_each_thread(g, p);
5888
5889	touch_all_softlockup_watchdogs();
5890
5891#ifdef CONFIG_SCHED_DEBUG
5892	sysrq_sched_debug_show();
5893#endif
5894	read_unlock(&tasklist_lock);
5895	/*
5896	 * Only show locks if all tasks are dumped:
5897	 */
5898	if (!state_filter)
5899		debug_show_all_locks();
5900}
5901
5902void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5903{
5904	idle->sched_class = &idle_sched_class;
5905}
5906
5907/**
5908 * init_idle - set up an idle thread for a given CPU
5909 * @idle: task in question
5910 * @cpu: cpu the idle task belongs to
5911 *
5912 * NOTE: this function does not set the idle thread's NEED_RESCHED
5913 * flag, to make booting more robust.
5914 */
5915void __cpuinit init_idle(struct task_struct *idle, int cpu)
5916{
5917	struct rq *rq = cpu_rq(cpu);
5918	unsigned long flags;
5919
5920	raw_spin_lock_irqsave(&rq->lock, flags);
5921
5922	__sched_fork(idle);
5923	idle->state = TASK_RUNNING;
5924	idle->se.exec_start = sched_clock();
5925
5926	do_set_cpus_allowed(idle, cpumask_of(cpu));
5927	/*
5928	 * We're having a chicken and egg problem: even though we are
5929	 * holding rq->lock, the cpu isn't yet set to this cpu, so the
5930	 * lockdep check in task_group() will fail.
5931	 *
5932	 * Similar case to sched_fork(). / Alternatively we could
5933	 * use task_rq_lock() here and obtain the other rq->lock.
5934	 *
5935	 * Silence PROVE_RCU
5936	 */
5937	rcu_read_lock();
5938	__set_task_cpu(idle, cpu);
5939	rcu_read_unlock();
5940
5941	rq->curr = rq->idle = idle;
5942#if defined(CONFIG_SMP)
5943	idle->on_cpu = 1;
5944#endif
5945	raw_spin_unlock_irqrestore(&rq->lock, flags);
5946
5947	/* Set the preempt count _outside_ the spinlocks! */
5948	task_thread_info(idle)->preempt_count = 0;
5949
5950	/*
5951	 * The idle tasks have their own, simple scheduling class:
5952	 */
5953	idle->sched_class = &idle_sched_class;
5954	ftrace_graph_init_idle_task(idle, cpu);
5955}
5956
5957/*
5958 * In a system that switches off the HZ timer, nohz_cpu_mask
5959 * indicates which cpus have entered this state. This is used
5960 * by the RCU update code to wait only for active cpus. On systems
5961 * which do not switch off the HZ timer, nohz_cpu_mask should
5962 * always be CPU_BITS_NONE.
5963 */
5964cpumask_var_t nohz_cpu_mask;
5965
5966/*
5967 * Increase the granularity value when there are more CPUs,
5968 * because with more CPUs the 'effective latency' as visible
5969 * to users decreases. But the relationship is not linear,
5970 * so pick a second-best guess by going with the log2 of the
5971 * number of CPUs.
5972 *
5973 * This idea comes from the SD scheduler of Con Kolivas:
5974 */
5975static int get_update_sysctl_factor(void)
5976{
5977	unsigned int cpus = min_t(int, num_online_cpus(), 8);
5978	unsigned int factor;
5979
5980	switch (sysctl_sched_tunable_scaling) {
5981	case SCHED_TUNABLESCALING_NONE:
5982		factor = 1;
5983		break;
5984	case SCHED_TUNABLESCALING_LINEAR:
5985		factor = cpus;
5986		break;
5987	case SCHED_TUNABLESCALING_LOG:
5988	default:
5989		factor = 1 + ilog2(cpus);
5990		break;
5991	}
5992
5993	return factor;
5994}
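/*
 * Worked example (illustrative): with the SCHED_TUNABLESCALING_LOG policy
 * (the default case above), 8 or more online CPUs give factor = 1 + ilog2(8)
 * = 4, two CPUs give factor = 2, and a single CPU gives factor = 1.
 * update_sysctl() below multiplies the normalized tunables by this factor,
 * so e.g. a normalized sched_latency of 6ms would become 24ms on the 8-CPU
 * box.
 */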
5995
5996static void update_sysctl(void)
5997{
5998	unsigned int factor = get_update_sysctl_factor();
5999
6000#define SET_SYSCTL(name) \
6001	(sysctl_##name = (factor) * normalized_sysctl_##name)
6002	SET_SYSCTL(sched_min_granularity);
6003	SET_SYSCTL(sched_latency);
6004	SET_SYSCTL(sched_wakeup_granularity);
6005#undef SET_SYSCTL
6006}
6007
6008static inline void sched_init_granularity(void)
6009{
6010	update_sysctl();
6011}
6012
6013#ifdef CONFIG_SMP
6014void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
6015{
6016	if (p->sched_class && p->sched_class->set_cpus_allowed)
6017		p->sched_class->set_cpus_allowed(p, new_mask);
6018	else {
6019		cpumask_copy(&p->cpus_allowed, new_mask);
6020		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
6021	}
6022}
6023
6024/*
6025 * This is how migration works:
6026 *
6027 * 1) we invoke migration_cpu_stop() on the target CPU using
6028 *    stop_one_cpu().
6029 * 2) stopper starts to run (implicitly forcing the migrated thread
6030 *    off the CPU)
6031 * 3) it checks whether the migrated task is still in the wrong runqueue.
6032 * 4) if it's in the wrong runqueue then the migration thread removes
6033 *    it and puts it into the right queue.
6034 * 5) stopper completes and stop_one_cpu() returns and the migration
6035 *    is done.
6036 */
6037
6038/*
6039 * Change a given task's CPU affinity. Migrate the thread to a
6040 * proper CPU and schedule it away if the CPU it's executing on
6041 * is removed from the allowed bitmask.
6042 *
6043 * NOTE: the caller must have a valid reference to the task, the
6044 * task must not exit() & deallocate itself prematurely. The
6045 * call is not atomic; no spinlocks may be held.
6046 */
6047int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
6048{
6049	unsigned long flags;
6050	struct rq *rq;
6051	unsigned int dest_cpu;
6052	int ret = 0;
6053
6054	rq = task_rq_lock(p, &flags);
6055
6056	if (cpumask_equal(&p->cpus_allowed, new_mask))
6057		goto out;
6058
6059	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
6060		ret = -EINVAL;
6061		goto out;
6062	}
6063
6064	if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
6065		ret = -EINVAL;
6066		goto out;
6067	}
6068
6069	do_set_cpus_allowed(p, new_mask);
6070
6071	/* Can the task run on the task's current CPU? If so, we're done */
6072	if (cpumask_test_cpu(task_cpu(p), new_mask))
6073		goto out;
6074
6075	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
6076	if (p->on_rq) {
6077		struct migration_arg arg = { p, dest_cpu };
6078		/* Need help from migration thread: drop lock and wait. */
6079		task_rq_unlock(rq, p, &flags);
6080		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
6081		tlb_migrate_finish(p->mm);
6082		return 0;
6083	}
6084out:
6085	task_rq_unlock(rq, p, &flags);
6086
6087	return ret;
6088}
6089EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
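/*
 * Illustrative use of the exported helper above (hypothetical names): a
 * driver that wants its worker thread to run only on CPU 2 could do
 * something like
 *
 *	struct task_struct *tsk = kthread_create(my_worker_fn, NULL, "my_worker");
 *
 *	if (!IS_ERR(tsk)) {
 *		set_cpus_allowed_ptr(tsk, cpumask_of(2));
 *		wake_up_process(tsk);
 *	}
 *
 * The migration machinery below takes care of moving the thread if it is
 * currently running on a CPU outside the new mask.
 */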
6090
6091/*
6092 * Move a (non-current) task off this cpu, onto the dest cpu. We're doing
6093 * this either because the task can't run here any more (its affinity was
6094 * changed via set_cpus_allowed() away from this CPU, or the CPU is going
6095 * down), or because we're attempting to rebalance it on exec (sched_exec).
6096 *
6097 * So we race with normal scheduler movements, but that's OK, as long
6098 * as the task is no longer on this CPU.
6099 *
6100 * Returns non-zero if task was successfully migrated.
6101 */
6102static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
6103{
6104	struct rq *rq_dest, *rq_src;
6105	int ret = 0;
6106
6107	if (unlikely(!cpu_active(dest_cpu)))
6108		return ret;
6109
6110	rq_src = cpu_rq(src_cpu);
6111	rq_dest = cpu_rq(dest_cpu);
6112
6113	raw_spin_lock(&p->pi_lock);
6114	double_rq_lock(rq_src, rq_dest);
6115	/* Already moved. */
6116	if (task_cpu(p) != src_cpu)
6117		goto done;
6118	/* Affinity changed (again). */
6119	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
6120		goto fail;
6121
6122	/*
6123	 * If we're not on a rq, the next wake-up will ensure we're
6124	 * placed properly.
6125	 */
6126	if (p->on_rq) {
6127		deactivate_task(rq_src, p, 0);
6128		set_task_cpu(p, dest_cpu);
6129		activate_task(rq_dest, p, 0);
6130		check_preempt_curr(rq_dest, p, 0);
6131	}
6132done:
6133	ret = 1;
6134fail:
6135	double_rq_unlock(rq_src, rq_dest);
6136	raw_spin_unlock(&p->pi_lock);
6137	return ret;
6138}
6139
6140/*
6141 * migration_cpu_stop - this will be executed by a high-priority stopper thread
6142 * and performs thread migration by bumping the thread off its CPU and then
6143 * 'pushing' it onto another runqueue.
6144 */
6145static int migration_cpu_stop(void *data)
6146{
6147	struct migration_arg *arg = data;
6148
6149	/*
6150	 * The original target cpu might have gone down and we might
6151	 * be on another cpu but it doesn't matter.
6152	 */
6153	local_irq_disable();
6154	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
6155	local_irq_enable();
6156	return 0;
6157}
6158
6159#ifdef CONFIG_HOTPLUG_CPU
6160
6161/*
6162 * Ensures that the idle task is using init_mm right before its cpu goes
6163 * offline.
6164 */
6165void idle_task_exit(void)
6166{
6167	struct mm_struct *mm = current->active_mm;
6168
6169	BUG_ON(cpu_online(smp_processor_id()));
6170
6171	if (mm != &init_mm)
6172		switch_mm(mm, &init_mm, current);
6173	mmdrop(mm);
6174}
6175
6176/*
6177 * While a dead CPU has no uninterruptible tasks queued at this point,
6178 * it might still have a nonzero ->nr_uninterruptible counter, because
6179 * for performance reasons the counter is not strictly tracking tasks to
6180 * their home CPUs. So we just add the counter to another CPU's counter,
6181 * to keep the global sum constant after CPU-down:
6182 */
6183static void migrate_nr_uninterruptible(struct rq *rq_src)
6184{
6185	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
6186
6187	rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
6188	rq_src->nr_uninterruptible = 0;
6189}
6190
6191/*
6192 * remove the tasks which were accounted by rq from calc_load_tasks.
6193 */
6194static void calc_global_load_remove(struct rq *rq)
6195{
6196	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
6197	rq->calc_load_active = 0;
6198}
6199
6200/*
6201 * Migrate all tasks from the rq; sleeping tasks will be migrated by
6202 * try_to_wake_up()->select_task_rq().
6203 *
6204 * Called with rq->lock held. Even though we're in stop_machine() and
6205 * there's no concurrency possible, we hold the required locks anyway
6206 * because of lock validation efforts.
6207 */
6208static void migrate_tasks(unsigned int dead_cpu)
6209{
6210	struct rq *rq = cpu_rq(dead_cpu);
6211	struct task_struct *next, *stop = rq->stop;
6212	int dest_cpu;
6213
6214	/*
6215	 * Fudge the rq selection such that the task selection loop below
6216	 * doesn't get stuck on the currently eligible stop task.
6217	 *
6218	 * We're currently inside stop_machine() and the rq is either stuck
6219	 * in the stop_machine_cpu_stop() loop or executing this code;
6220	 * either way we should never end up calling schedule() until we're
6221	 * done here.
6222	 */
6223	rq->stop = NULL;
6224
6225	for ( ; ; ) {
6226		/*
6227		 * There's this thread running, bail when that's the only
6228		 * remaining thread.
6229		 */
6230		if (rq->nr_running == 1)
6231			break;
6232
6233		next = pick_next_task(rq);
6234		BUG_ON(!next);
6235		next->sched_class->put_prev_task(rq, next);
6236
6237		/* Find suitable destination for @next, with force if needed. */
6238		dest_cpu = select_fallback_rq(dead_cpu, next);
6239		raw_spin_unlock(&rq->lock);
6240
6241		__migrate_task(next, dead_cpu, dest_cpu);
6242
6243		raw_spin_lock(&rq->lock);
6244	}
6245
6246	rq->stop = stop;
6247}
6248
6249#endif /* CONFIG_HOTPLUG_CPU */
6250
6251#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
6252
6253static struct ctl_table sd_ctl_dir[] = {
6254	{
6255		.procname	= "sched_domain",
6256		.mode		= 0555,
6257	},
6258	{}
6259};
6260
6261static struct ctl_table sd_ctl_root[] = {
6262	{
6263		.procname	= "kernel",
6264		.mode		= 0555,
6265		.child		= sd_ctl_dir,
6266	},
6267	{}
6268};
6269
6270static struct ctl_table *sd_alloc_ctl_entry(int n)
6271{
6272	struct ctl_table *entry =
6273		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
6274
6275	return entry;
6276}
6277
6278static void sd_free_ctl_entry(struct ctl_table **tablep)
6279{
6280	struct ctl_table *entry;
6281
6282	/*
6283	 * In the intermediate directories, both the child directory and
6284	 * procname are dynamically allocated and their allocation could have
6285	 * failed, but the mode will always be set. In the lowest directory
6286	 * the names are static strings and all have proc handlers.
6287	 */
6288	for (entry = *tablep; entry->mode; entry++) {
6289		if (entry->child)
6290			sd_free_ctl_entry(&entry->child);
6291		if (entry->proc_handler == NULL)
6292			kfree(entry->procname);
6293	}
6294
6295	kfree(*tablep);
6296	*tablep = NULL;
6297}
6298
6299static void
6300set_table_entry(struct ctl_table *entry,
6301		const char *procname, void *data, int maxlen,
6302		mode_t mode, proc_handler *proc_handler)
6303{
6304	entry->procname = procname;
6305	entry->data = data;
6306	entry->maxlen = maxlen;
6307	entry->mode = mode;
6308	entry->proc_handler = proc_handler;
6309}
6310
6311static struct ctl_table *
6312sd_alloc_ctl_domain_table(struct sched_domain *sd)
6313{
6314	struct ctl_table *table = sd_alloc_ctl_entry(13);
6315
6316	if (table == NULL)
6317		return NULL;
6318
6319	set_table_entry(&table[0], "min_interval", &sd->min_interval,
6320		sizeof(long), 0644, proc_doulongvec_minmax);
6321	set_table_entry(&table[1], "max_interval", &sd->max_interval,
6322		sizeof(long), 0644, proc_doulongvec_minmax);
6323	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
6324		sizeof(int), 0644, proc_dointvec_minmax);
6325	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
6326		sizeof(int), 0644, proc_dointvec_minmax);
6327	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
6328		sizeof(int), 0644, proc_dointvec_minmax);
6329	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
6330		sizeof(int), 0644, proc_dointvec_minmax);
6331	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
6332		sizeof(int), 0644, proc_dointvec_minmax);
6333	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
6334		sizeof(int), 0644, proc_dointvec_minmax);
6335	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
6336		sizeof(int), 0644, proc_dointvec_minmax);
6337	set_table_entry(&table[9], "cache_nice_tries",
6338		&sd->cache_nice_tries,
6339		sizeof(int), 0644, proc_dointvec_minmax);
6340	set_table_entry(&table[10], "flags", &sd->flags,
6341		sizeof(int), 0644, proc_dointvec_minmax);
6342	set_table_entry(&table[11], "name", sd->name,
6343		CORENAME_MAX_SIZE, 0444, proc_dostring);
6344	/* &table[12] is terminator */
6345
6346	return table;
6347}
6348
6349static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
6350{
6351	struct ctl_table *entry, *table;
6352	struct sched_domain *sd;
6353	int domain_num = 0, i;
6354	char buf[32];
6355
6356	for_each_domain(cpu, sd)
6357		domain_num++;
6358	entry = table = sd_alloc_ctl_entry(domain_num + 1);
6359	if (table == NULL)
6360		return NULL;
6361
6362	i = 0;
6363	for_each_domain(cpu, sd) {
6364		snprintf(buf, 32, "domain%d", i);
6365		entry->procname = kstrdup(buf, GFP_KERNEL);
6366		entry->mode = 0555;
6367		entry->child = sd_alloc_ctl_domain_table(sd);
6368		entry++;
6369		i++;
6370	}
6371	return table;
6372}
6373
6374static struct ctl_table_header *sd_sysctl_header;
6375static void register_sched_domain_sysctl(void)
6376{
6377	int i, cpu_num = num_possible_cpus();
6378	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
6379	char buf[32];
6380
6381	WARN_ON(sd_ctl_dir[0].child);
6382	sd_ctl_dir[0].child = entry;
6383
6384	if (entry == NULL)
6385		return;
6386
6387	for_each_possible_cpu(i) {
6388		snprintf(buf, 32, "cpu%d", i);
6389		entry->procname = kstrdup(buf, GFP_KERNEL);
6390		entry->mode = 0555;
6391		entry->child = sd_alloc_ctl_cpu_table(i);
6392		entry++;
6393	}
6394
6395	WARN_ON(sd_sysctl_header);
6396	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
6397}
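/*
 * Resulting procfs layout (sketch): the tables built above appear as
 *
 *	/proc/sys/kernel/sched_domain/cpu<N>/domain<M>/min_interval
 *	/proc/sys/kernel/sched_domain/cpu<N>/domain<M>/max_interval
 *	/proc/sys/kernel/sched_domain/cpu<N>/domain<M>/busy_factor
 *	/proc/sys/kernel/sched_domain/cpu<N>/domain<M>/flags
 *	/proc/sys/kernel/sched_domain/cpu<N>/domain<M>/name
 *	...
 *
 * with one cpu<N> directory per possible CPU and one domain<M> directory per
 * sched_domain level attached to that CPU.
 */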
6398
6399/* may be called multiple times per register */
6400static void unregister_sched_domain_sysctl(void)
6401{
6402	if (sd_sysctl_header)
6403		unregister_sysctl_table(sd_sysctl_header);
6404	sd_sysctl_header = NULL;
6405	if (sd_ctl_dir[0].child)
6406		sd_free_ctl_entry(&sd_ctl_dir[0].child);
6407}
6408#else
6409static void register_sched_domain_sysctl(void)
6410{
6411}
6412static void unregister_sched_domain_sysctl(void)
6413{
6414}
6415#endif
6416
6417static void set_rq_online(struct rq *rq)
6418{
6419	if (!rq->online) {
6420		const struct sched_class *class;
6421
6422		cpumask_set_cpu(rq->cpu, rq->rd->online);
6423		rq->online = 1;
6424
6425		for_each_class(class) {
6426			if (class->rq_online)
6427				class->rq_online(rq);
6428		}
6429	}
6430}
6431
6432static void set_rq_offline(struct rq *rq)
6433{
6434	if (rq->online) {
6435		const struct sched_class *class;
6436
6437		for_each_class(class) {
6438			if (class->rq_offline)
6439				class->rq_offline(rq);
6440		}
6441
6442		cpumask_clear_cpu(rq->cpu, rq->rd->online);
6443		rq->online = 0;
6444	}
6445}
6446
6447/*
6448 * migration_call - callback that gets triggered when a CPU is added.
6449 * Here we can start up the necessary migration thread for the new CPU.
6450 */
6451static int __cpuinit
6452migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
6453{
6454	int cpu = (long)hcpu;
6455	unsigned long flags;
6456	struct rq *rq = cpu_rq(cpu);
6457
6458	switch (action & ~CPU_TASKS_FROZEN) {
6459
6460	case CPU_UP_PREPARE:
6461		rq->calc_load_update = calc_load_update;
6462		break;
6463
6464	case CPU_ONLINE:
6465		/* Update our root-domain */
6466		raw_spin_lock_irqsave(&rq->lock, flags);
6467		if (rq->rd) {
6468			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
6469
6470			set_rq_online(rq);
6471		}
6472		raw_spin_unlock_irqrestore(&rq->lock, flags);
6473		break;
6474
6475#ifdef CONFIG_HOTPLUG_CPU
6476	case CPU_DYING:
6477		sched_ttwu_pending();
6478		/* Update our root-domain */
6479		raw_spin_lock_irqsave(&rq->lock, flags);
6480		if (rq->rd) {
6481			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
6482			set_rq_offline(rq);
6483		}
6484		migrate_tasks(cpu);
6485		BUG_ON(rq->nr_running != 1); /* the migration thread */
6486		raw_spin_unlock_irqrestore(&rq->lock, flags);
6487
6488		migrate_nr_uninterruptible(rq);
6489		calc_global_load_remove(rq);
6490		break;
6491#endif
6492	}
6493
6494	update_max_interval();
6495
6496	return NOTIFY_OK;
6497}
6498
6499/*
6500 * Register at high priority so that task migration (migrate_all_tasks)
6501 * happens before everything else.  This has to be lower priority than
6502 * the notifier in the perf_event subsystem, though.
6503 */
6504static struct notifier_block __cpuinitdata migration_notifier = {
6505	.notifier_call = migration_call,
6506	.priority = CPU_PRI_MIGRATION,
6507};
6508
6509static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
6510				      unsigned long action, void *hcpu)
6511{
6512	switch (action & ~CPU_TASKS_FROZEN) {
6513	case CPU_ONLINE:
6514	case CPU_DOWN_FAILED:
6515		set_cpu_active((long)hcpu, true);
6516		return NOTIFY_OK;
6517	default:
6518		return NOTIFY_DONE;
6519	}
6520}
6521
6522static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
6523					unsigned long action, void *hcpu)
6524{
6525	switch (action & ~CPU_TASKS_FROZEN) {
6526	case CPU_DOWN_PREPARE:
6527		set_cpu_active((long)hcpu, false);
6528		return NOTIFY_OK;
6529	default:
6530		return NOTIFY_DONE;
6531	}
6532}
6533
6534static int __init migration_init(void)
6535{
6536	void *cpu = (void *)(long)smp_processor_id();
6537	int err;
6538
6539	/* Initialize migration for the boot CPU */
6540	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6541	BUG_ON(err == NOTIFY_BAD);
6542	migration_call(&migration_notifier, CPU_ONLINE, cpu);
6543	register_cpu_notifier(&migration_notifier);
6544
6545	/* Register cpu active notifiers */
6546	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
6547	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
6548
6549	return 0;
6550}
6551early_initcall(migration_init);
6552#endif
6553
6554#ifdef CONFIG_SMP
6555
6556static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
6557
6558#ifdef CONFIG_SCHED_DEBUG
6559
6560static __read_mostly int sched_domain_debug_enabled;
6561
6562static int __init sched_domain_debug_setup(char *str)
6563{
6564	sched_domain_debug_enabled = 1;
6565
6566	return 0;
6567}
6568early_param("sched_debug", sched_domain_debug_setup);
6569
6570static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
6571				  struct cpumask *groupmask)
6572{
6573	struct sched_group *group = sd->groups;
6574	char str[256];
6575
6576	cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
6577	cpumask_clear(groupmask);
6578
6579	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6580
6581	if (!(sd->flags & SD_LOAD_BALANCE)) {
6582		printk("does not load-balance\n");
6583		if (sd->parent)
6584			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6585					" has parent");
6586		return -1;
6587	}
6588
6589	printk(KERN_CONT "span %s level %s\n", str, sd->name);
6590
6591	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
6592		printk(KERN_ERR "ERROR: domain->span does not contain "
6593				"CPU%d\n", cpu);
6594	}
6595	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
6596		printk(KERN_ERR "ERROR: domain->groups does not contain"
6597				" CPU%d\n", cpu);
6598	}
6599
6600	printk(KERN_DEBUG "%*s groups:", level + 1, "");
6601	do {
6602		if (!group) {
6603			printk("\n");
6604			printk(KERN_ERR "ERROR: group is NULL\n");
6605			break;
6606		}
6607
6608		if (!group->sgp->power) {
6609			printk(KERN_CONT "\n");
6610			printk(KERN_ERR "ERROR: domain->cpu_power not "
6611					"set\n");
6612			break;
6613		}
6614
6615		if (!cpumask_weight(sched_group_cpus(group))) {
6616			printk(KERN_CONT "\n");
6617			printk(KERN_ERR "ERROR: empty group\n");
6618			break;
6619		}
6620
6621		if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
6622			printk(KERN_CONT "\n");
6623			printk(KERN_ERR "ERROR: repeated CPUs\n");
6624			break;
6625		}
6626
6627		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
6628
6629		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
6630
6631		printk(KERN_CONT " %s", str);
6632		if (group->sgp->power != SCHED_POWER_SCALE) {
6633			printk(KERN_CONT " (cpu_power = %d)",
6634				group->sgp->power);
6635		}
6636
6637		group = group->next;
6638	} while (group != sd->groups);
6639	printk(KERN_CONT "\n");
6640
6641	if (!cpumask_equal(sched_domain_span(sd), groupmask))
6642		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
6643
6644	if (sd->parent &&
6645	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
6646		printk(KERN_ERR "ERROR: parent span is not a superset "
6647			"of domain->span\n");
6648	return 0;
6649}
6650
6651static void sched_domain_debug(struct sched_domain *sd, int cpu)
6652{
6653	int level = 0;
6654
6655	if (!sched_domain_debug_enabled)
6656		return;
6657
6658	if (!sd) {
6659		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6660		return;
6661	}
6662
6663	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6664
6665	for (;;) {
6666		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
6667			break;
6668		level++;
6669		sd = sd->parent;
6670		if (!sd)
6671			break;
6672	}
6673}
6674#else /* !CONFIG_SCHED_DEBUG */
6675# define sched_domain_debug(sd, cpu) do { } while (0)
6676#endif /* CONFIG_SCHED_DEBUG */
6677
6678static int sd_degenerate(struct sched_domain *sd)
6679{
6680	if (cpumask_weight(sched_domain_span(sd)) == 1)
6681		return 1;
6682
6683	/* Following flags need at least 2 groups */
6684	if (sd->flags & (SD_LOAD_BALANCE |
6685			 SD_BALANCE_NEWIDLE |
6686			 SD_BALANCE_FORK |
6687			 SD_BALANCE_EXEC |
6688			 SD_SHARE_CPUPOWER |
6689			 SD_SHARE_PKG_RESOURCES)) {
6690		if (sd->groups != sd->groups->next)
6691			return 0;
6692	}
6693
6694	/* Following flags don't use groups */
6695	if (sd->flags & (SD_WAKE_AFFINE))
6696		return 0;
6697
6698	return 1;
6699}
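/*
 * Example (illustrative): with CONFIG_SCHED_SMT enabled on a machine without
 * hyperthreading, the SIBLING level spans a single CPU per domain, so the
 * cpumask_weight() == 1 check above marks it degenerate and
 * cpu_attach_domain() below drops it from the hierarchy.
 */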
6700
6701static int
6702sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
6703{
6704	unsigned long cflags = sd->flags, pflags = parent->flags;
6705
6706	if (sd_degenerate(parent))
6707		return 1;
6708
6709	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
6710		return 0;
6711
6712	/* Flags needing groups don't count if only 1 group in parent */
6713	if (parent->groups == parent->groups->next) {
6714		pflags &= ~(SD_LOAD_BALANCE |
6715				SD_BALANCE_NEWIDLE |
6716				SD_BALANCE_FORK |
6717				SD_BALANCE_EXEC |
6718				SD_SHARE_CPUPOWER |
6719				SD_SHARE_PKG_RESOURCES);
6720		if (nr_node_ids == 1)
6721			pflags &= ~SD_SERIALIZE;
6722	}
6723	if (~cflags & pflags)
6724		return 0;
6725
6726	return 1;
6727}
6728
6729static void free_rootdomain(struct rcu_head *rcu)
6730{
6731	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
6732
6733	cpupri_cleanup(&rd->cpupri);
6734	free_cpumask_var(rd->rto_mask);
6735	free_cpumask_var(rd->online);
6736	free_cpumask_var(rd->span);
6737	kfree(rd);
6738}
6739
6740static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6741{
6742	struct root_domain *old_rd = NULL;
6743	unsigned long flags;
6744
6745	raw_spin_lock_irqsave(&rq->lock, flags);
6746
6747	if (rq->rd) {
6748		old_rd = rq->rd;
6749
6750		if (cpumask_test_cpu(rq->cpu, old_rd->online))
6751			set_rq_offline(rq);
6752
6753		cpumask_clear_cpu(rq->cpu, old_rd->span);
6754
6755		/*
6756		 * If we don't want to free the old_rd yet then
6757		 * set old_rd to NULL to skip the freeing later
6758		 * in this function:
6759		 */
6760		if (!atomic_dec_and_test(&old_rd->refcount))
6761			old_rd = NULL;
6762	}
6763
6764	atomic_inc(&rd->refcount);
6765	rq->rd = rd;
6766
6767	cpumask_set_cpu(rq->cpu, rd->span);
6768	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
6769		set_rq_online(rq);
6770
6771	raw_spin_unlock_irqrestore(&rq->lock, flags);
6772
6773	if (old_rd)
6774		call_rcu_sched(&old_rd->rcu, free_rootdomain);
6775}
6776
6777static int init_rootdomain(struct root_domain *rd)
6778{
6779	memset(rd, 0, sizeof(*rd));
6780
6781	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
6782		goto out;
6783	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
6784		goto free_span;
6785	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
6786		goto free_online;
6787
6788	if (cpupri_init(&rd->cpupri) != 0)
6789		goto free_rto_mask;
6790	return 0;
6791
6792free_rto_mask:
6793	free_cpumask_var(rd->rto_mask);
6794free_online:
6795	free_cpumask_var(rd->online);
6796free_span:
6797	free_cpumask_var(rd->span);
6798out:
6799	return -ENOMEM;
6800}
6801
6802static void init_defrootdomain(void)
6803{
6804	init_rootdomain(&def_root_domain);
6805
6806	atomic_set(&def_root_domain.refcount, 1);
6807}
6808
6809static struct root_domain *alloc_rootdomain(void)
6810{
6811	struct root_domain *rd;
6812
6813	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
6814	if (!rd)
6815		return NULL;
6816
6817	if (init_rootdomain(rd) != 0) {
6818		kfree(rd);
6819		return NULL;
6820	}
6821
6822	return rd;
6823}
6824
6825static void free_sched_groups(struct sched_group *sg, int free_sgp)
6826{
6827	struct sched_group *tmp, *first;
6828
6829	if (!sg)
6830		return;
6831
6832	first = sg;
6833	do {
6834		tmp = sg->next;
6835
6836		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
6837			kfree(sg->sgp);
6838
6839		kfree(sg);
6840		sg = tmp;
6841	} while (sg != first);
6842}
6843
6844static void free_sched_domain(struct rcu_head *rcu)
6845{
6846	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
6847
6848	/*
6849	 * If it's an overlapping domain it has private groups; iterate and
6850	 * nuke them all.
6851	 */
6852	if (sd->flags & SD_OVERLAP) {
6853		free_sched_groups(sd->groups, 1);
6854	} else if (atomic_dec_and_test(&sd->groups->ref)) {
6855		kfree(sd->groups->sgp);
6856		kfree(sd->groups);
6857	}
6858	kfree(sd);
6859}
6860
6861static void destroy_sched_domain(struct sched_domain *sd, int cpu)
6862{
6863	call_rcu(&sd->rcu, free_sched_domain);
6864}
6865
6866static void destroy_sched_domains(struct sched_domain *sd, int cpu)
6867{
6868	for (; sd; sd = sd->parent)
6869		destroy_sched_domain(sd, cpu);
6870}
6871
6872/*
6873 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
6874 * hold the hotplug lock.
6875 */
6876static void
6877cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
6878{
6879	struct rq *rq = cpu_rq(cpu);
6880	struct sched_domain *tmp;
6881
6882	/* Remove the sched domains which do not contribute to scheduling. */
6883	for (tmp = sd; tmp; ) {
6884		struct sched_domain *parent = tmp->parent;
6885		if (!parent)
6886			break;
6887
6888		if (sd_parent_degenerate(tmp, parent)) {
6889			tmp->parent = parent->parent;
6890			if (parent->parent)
6891				parent->parent->child = tmp;
6892			destroy_sched_domain(parent, cpu);
6893		} else
6894			tmp = tmp->parent;
6895	}
6896
6897	if (sd && sd_degenerate(sd)) {
6898		tmp = sd;
6899		sd = sd->parent;
6900		destroy_sched_domain(tmp, cpu);
6901		if (sd)
6902			sd->child = NULL;
6903	}
6904
6905	sched_domain_debug(sd, cpu);
6906
6907	rq_attach_root(rq, rd);
6908	tmp = rq->sd;
6909	rcu_assign_pointer(rq->sd, sd);
6910	destroy_sched_domains(tmp, cpu);
6911}
6912
6913/* cpus with isolated domains */
6914static cpumask_var_t cpu_isolated_map;
6915
6916/* Setup the mask of cpus configured for isolated domains */
6917static int __init isolated_cpu_setup(char *str)
6918{
6919	alloc_bootmem_cpumask_var(&cpu_isolated_map);
6920	cpulist_parse(str, cpu_isolated_map);
6921	return 1;
6922}
6923
6924__setup("isolcpus=", isolated_cpu_setup);
6925
6926#define SD_NODES_PER_DOMAIN 16
6927
6928#ifdef CONFIG_NUMA
6929
6930/**
6931 * find_next_best_node - find the next node to include in a sched_domain
6932 * @node: node whose sched_domain we're building
6933 * @used_nodes: nodes already in the sched_domain
6934 *
6935 * Find the next node to include in a given scheduling domain. Simply
6936 * finds the closest node not already in the @used_nodes map.
6937 *
6938 * Should use nodemask_t.
6939 */
6940static int find_next_best_node(int node, nodemask_t *used_nodes)
6941{
6942	int i, n, val, min_val, best_node = -1;
6943
6944	min_val = INT_MAX;
6945
6946	for (i = 0; i < nr_node_ids; i++) {
6947		/* Start at @node */
6948		n = (node + i) % nr_node_ids;
6949
6950		if (!nr_cpus_node(n))
6951			continue;
6952
6953		/* Skip already used nodes */
6954		if (node_isset(n, *used_nodes))
6955			continue;
6956
6957		/* Simple min distance search */
6958		val = node_distance(node, n);
6959
6960		if (val < min_val) {
6961			min_val = val;
6962			best_node = n;
6963		}
6964	}
6965
6966	if (best_node != -1)
6967		node_set(best_node, *used_nodes);
6968	return best_node;
6969}
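/*
 * Example (hypothetical distances): with @node = 0 already set in
 * @used_nodes (as sched_domain_node_span() below arranges) and
 * node_distance(0, {1, 2, 3}) = {20, 40, 40}, the first call returns node 1
 * and marks it used; the next call returns node 2, since ties between the
 * remaining equal-distance nodes go to the candidate scanned first.
 */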
6970
6971/**
6972 * sched_domain_node_span - get a cpumask for a node's sched_domain
6973 * @node: node whose cpumask we're constructing
6974 * @span: resulting cpumask
6975 *
6976 * Given a node, construct a good cpumask for its sched_domain to span. It
6977 * should be one that prevents unnecessary balancing, but also spreads tasks
6978 * out optimally.
6979 */
6980static void sched_domain_node_span(int node, struct cpumask *span)
6981{
6982	nodemask_t used_nodes;
6983	int i;
6984
6985	cpumask_clear(span);
6986	nodes_clear(used_nodes);
6987
6988	cpumask_or(span, span, cpumask_of_node(node));
6989	node_set(node, used_nodes);
6990
6991	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
6992		int next_node = find_next_best_node(node, &used_nodes);
6993		if (next_node < 0)
6994			break;
6995		cpumask_or(span, span, cpumask_of_node(next_node));
6996	}
6997}
6998
6999static const struct cpumask *cpu_node_mask(int cpu)
7000{
7001	lockdep_assert_held(&sched_domains_mutex);
7002
7003	sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask);
7004
7005	return sched_domains_tmpmask;
7006}
7007
7008static const struct cpumask *cpu_allnodes_mask(int cpu)
7009{
7010	return cpu_possible_mask;
7011}
7012#endif /* CONFIG_NUMA */
7013
7014static const struct cpumask *cpu_cpu_mask(int cpu)
7015{
7016	return cpumask_of_node(cpu_to_node(cpu));
7017}
7018
7019int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
7020
7021struct sd_data {
7022	struct sched_domain **__percpu sd;
7023	struct sched_group **__percpu sg;
7024	struct sched_group_power **__percpu sgp;
7025};
7026
7027struct s_data {
7028	struct sched_domain ** __percpu sd;
7029	struct root_domain	*rd;
7030};
7031
7032enum s_alloc {
7033	sa_rootdomain,
7034	sa_sd,
7035	sa_sd_storage,
7036	sa_none,
7037};
7038
7039struct sched_domain_topology_level;
7040
7041typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
7042typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
7043
7044#define SDTL_OVERLAP	0x01
7045
7046struct sched_domain_topology_level {
7047	sched_domain_init_f init;
7048	sched_domain_mask_f mask;
7049	int		    flags;
7050	struct sd_data      data;
7051};
7052
7053static int
7054build_overlap_sched_groups(struct sched_domain *sd, int cpu)
7055{
7056	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
7057	const struct cpumask *span = sched_domain_span(sd);
7058	struct cpumask *covered = sched_domains_tmpmask;
7059	struct sd_data *sdd = sd->private;
7060	struct sched_domain *child;
7061	int i;
7062
7063	cpumask_clear(covered);
7064
7065	for_each_cpu(i, span) {
7066		struct cpumask *sg_span;
7067
7068		if (cpumask_test_cpu(i, covered))
7069			continue;
7070
7071		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
7072				GFP_KERNEL, cpu_to_node(i));
7073
7074		if (!sg)
7075			goto fail;
7076
7077		sg_span = sched_group_cpus(sg);
7078
7079		child = *per_cpu_ptr(sdd->sd, i);
7080		if (child->child) {
7081			child = child->child;
7082			cpumask_copy(sg_span, sched_domain_span(child));
7083		} else
7084			cpumask_set_cpu(i, sg_span);
7085
7086		cpumask_or(covered, covered, sg_span);
7087
7088		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
7089		atomic_inc(&sg->sgp->ref);
7090
7091		if (cpumask_test_cpu(cpu, sg_span))
7092			groups = sg;
7093
7094		if (!first)
7095			first = sg;
7096		if (last)
7097			last->next = sg;
7098		last = sg;
7099		last->next = first;
7100	}
7101	sd->groups = groups;
7102
7103	return 0;
7104
7105fail:
7106	free_sched_groups(first, 0);
7107
7108	return -ENOMEM;
7109}
7110
7111static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
7112{
7113	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
7114	struct sched_domain *child = sd->child;
7115
7116	if (child)
7117		cpu = cpumask_first(sched_domain_span(child));
7118
7119	if (sg) {
7120		*sg = *per_cpu_ptr(sdd->sg, cpu);
7121		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
7122		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
7123	}
7124
7125	return cpu;
7126}
7127
7128/*
7129 * build_sched_groups will build a circular linked list of the groups
7130 * covered by the given span, set each group's ->cpumask correctly,
7131 * and initialize each group's ->cpu_power to 0.
7132 *
7133 * Assumes the sched_domain tree is fully constructed
7134 */
7135static int
7136build_sched_groups(struct sched_domain *sd, int cpu)
7137{
7138	struct sched_group *first = NULL, *last = NULL;
7139	struct sd_data *sdd = sd->private;
7140	const struct cpumask *span = sched_domain_span(sd);
7141	struct cpumask *covered;
7142	int i;
7143
7144	get_group(cpu, sdd, &sd->groups);
7145	atomic_inc(&sd->groups->ref);
7146
7147	if (cpu != cpumask_first(sched_domain_span(sd)))
7148		return 0;
7149
7150	lockdep_assert_held(&sched_domains_mutex);
7151	covered = sched_domains_tmpmask;
7152
7153	cpumask_clear(covered);
7154
7155	for_each_cpu(i, span) {
7156		struct sched_group *sg;
7157		int group = get_group(i, sdd, &sg);
7158		int j;
7159
7160		if (cpumask_test_cpu(i, covered))
7161			continue;
7162
7163		cpumask_clear(sched_group_cpus(sg));
7164		sg->sgp->power = 0;
7165
7166		for_each_cpu(j, span) {
7167			if (get_group(j, sdd, NULL) != group)
7168				continue;
7169
7170			cpumask_set_cpu(j, covered);
7171			cpumask_set_cpu(j, sched_group_cpus(sg));
7172		}
7173
7174		if (!first)
7175			first = sg;
7176		if (last)
7177			last->next = sg;
7178		last = sg;
7179	}
7180	last->next = first;
7181
7182	return 0;
7183}
7184
7185/*
7186 * Initialize sched groups cpu_power.
7187 *
7188 * cpu_power indicates the capacity of a sched group, which is used while
7189 * distributing the load between different sched groups in a sched domain.
7190 * Typically cpu_power for all the groups in a sched domain will be the same
7191 * unless there are asymmetries in the topology. If there are asymmetries,
7192 * the group having more cpu_power will pick up more load compared to the
7193 * group having less cpu_power.
7194 */
7195static void init_sched_groups_power(int cpu, struct sched_domain *sd)
7196{
7197	struct sched_group *sg = sd->groups;
7198
7199	WARN_ON(!sd || !sg);
7200
7201	do {
7202		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
7203		sg = sg->next;
7204	} while (sg != sd->groups);
7205
7206	if (cpu != group_first_cpu(sg))
7207		return;
7208
7209	update_group_power(sd, cpu);
7210}
7211
7212/*
7213 * Initializers for sched domains.
7214 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
7215 */
7216
7217#ifdef CONFIG_SCHED_DEBUG
7218# define SD_INIT_NAME(sd, type)		sd->name = #type
7219#else
7220# define SD_INIT_NAME(sd, type)		do { } while (0)
7221#endif
7222
7223#define SD_INIT_FUNC(type)						\
7224static noinline struct sched_domain *					\
7225sd_init_##type(struct sched_domain_topology_level *tl, int cpu) 	\
7226{									\
7227	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);	\
7228	*sd = SD_##type##_INIT;						\
7229	SD_INIT_NAME(sd, type);						\
7230	sd->private = &tl->data;					\
7231	return sd;							\
7232}
7233
7234SD_INIT_FUNC(CPU)
7235#ifdef CONFIG_NUMA
7236 SD_INIT_FUNC(ALLNODES)
7237 SD_INIT_FUNC(NODE)
7238#endif
7239#ifdef CONFIG_SCHED_SMT
7240 SD_INIT_FUNC(SIBLING)
7241#endif
7242#ifdef CONFIG_SCHED_MC
7243 SD_INIT_FUNC(MC)
7244#endif
7245#ifdef CONFIG_SCHED_BOOK
7246 SD_INIT_FUNC(BOOK)
7247#endif
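/*
 * For reference, SD_INIT_FUNC(CPU) above expands to roughly:
 *
 *	static noinline struct sched_domain *
 *	sd_init_CPU(struct sched_domain_topology_level *tl, int cpu)
 *	{
 *		struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
 *		*sd = SD_CPU_INIT;
 *		SD_INIT_NAME(sd, CPU);
 *		sd->private = &tl->data;
 *		return sd;
 *	}
 *
 * i.e. it fills the preallocated per-cpu sched_domain with the static
 * SD_CPU_INIT template and, under CONFIG_SCHED_DEBUG, names it "CPU".
 */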
7248
7249static int default_relax_domain_level = -1;
7250int sched_domain_level_max;
7251
7252static int __init setup_relax_domain_level(char *str)
7253{
7254	unsigned long val;
7255
7256	val = simple_strtoul(str, NULL, 0);
7257	if (val < sched_domain_level_max)
7258		default_relax_domain_level = val;
7259
7260	return 1;
7261}
7262__setup("relax_domain_level=", setup_relax_domain_level);
7263
7264static void set_domain_attribute(struct sched_domain *sd,
7265				 struct sched_domain_attr *attr)
7266{
7267	int request;
7268
7269	if (!attr || attr->relax_domain_level < 0) {
7270		if (default_relax_domain_level < 0)
7271			return;
7272		else
7273			request = default_relax_domain_level;
7274	} else
7275		request = attr->relax_domain_level;
7276	if (request < sd->level) {
7277		/* turn off idle balance on this domain */
7278		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
7279	} else {
7280		/* turn on idle balance on this domain */
7281		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
7282	}
7283}
7284
7285static void __sdt_free(const struct cpumask *cpu_map);
7286static int __sdt_alloc(const struct cpumask *cpu_map);
7287
7288static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
7289				 const struct cpumask *cpu_map)
7290{
7291	switch (what) {
7292	case sa_rootdomain:
7293		if (!atomic_read(&d->rd->refcount))
7294			free_rootdomain(&d->rd->rcu); /* fall through */
7295	case sa_sd:
7296		free_percpu(d->sd); /* fall through */
7297	case sa_sd_storage:
7298		__sdt_free(cpu_map); /* fall through */
7299	case sa_none:
7300		break;
7301	}
7302}
7303
7304static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
7305						   const struct cpumask *cpu_map)
7306{
7307	memset(d, 0, sizeof(*d));
7308
7309	if (__sdt_alloc(cpu_map))
7310		return sa_sd_storage;
7311	d->sd = alloc_percpu(struct sched_domain *);
7312	if (!d->sd)
7313		return sa_sd_storage;
7314	d->rd = alloc_rootdomain();
7315	if (!d->rd)
7316		return sa_sd;
7317	return sa_rootdomain;
7318}
7319
7320/*
7321 * NULL the sd_data elements we've used to build the sched_domain and
7322 * sched_group structure so that the subsequent __free_domain_allocs()
7323 * will not free the data we're using.
7324 */
7325static void claim_allocations(int cpu, struct sched_domain *sd)
7326{
7327	struct sd_data *sdd = sd->private;
7328
7329	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
7330	*per_cpu_ptr(sdd->sd, cpu) = NULL;
7331
7332	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
7333		*per_cpu_ptr(sdd->sg, cpu) = NULL;
7334
7335	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
7336		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
7337}
7338
7339#ifdef CONFIG_SCHED_SMT
7340static const struct cpumask *cpu_smt_mask(int cpu)
7341{
7342	return topology_thread_cpumask(cpu);
7343}
7344#endif
7345
7346/*
7347 * Topology list, bottom-up.
7348 */
7349static struct sched_domain_topology_level default_topology[] = {
7350#ifdef CONFIG_SCHED_SMT
7351	{ sd_init_SIBLING, cpu_smt_mask, },
7352#endif
7353#ifdef CONFIG_SCHED_MC
7354	{ sd_init_MC, cpu_coregroup_mask, },
7355#endif
7356#ifdef CONFIG_SCHED_BOOK
7357	{ sd_init_BOOK, cpu_book_mask, },
7358#endif
7359	{ sd_init_CPU, cpu_cpu_mask, },
7360#ifdef CONFIG_NUMA
7361	{ sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
7362	{ sd_init_ALLNODES, cpu_allnodes_mask, },
7363#endif
7364	{ NULL, },
7365};
7366
7367static struct sched_domain_topology_level *sched_domain_topology = default_topology;
7368
7369static int __sdt_alloc(const struct cpumask *cpu_map)
7370{
7371	struct sched_domain_topology_level *tl;
7372	int j;
7373
7374	for (tl = sched_domain_topology; tl->init; tl++) {
7375		struct sd_data *sdd = &tl->data;
7376
7377		sdd->sd = alloc_percpu(struct sched_domain *);
7378		if (!sdd->sd)
7379			return -ENOMEM;
7380
7381		sdd->sg = alloc_percpu(struct sched_group *);
7382		if (!sdd->sg)
7383			return -ENOMEM;
7384
7385		sdd->sgp = alloc_percpu(struct sched_group_power *);
7386		if (!sdd->sgp)
7387			return -ENOMEM;
7388
7389		for_each_cpu(j, cpu_map) {
7390			struct sched_domain *sd;
7391			struct sched_group *sg;
7392			struct sched_group_power *sgp;
7393
7394			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
7395					GFP_KERNEL, cpu_to_node(j));
7396			if (!sd)
7397				return -ENOMEM;
7398
7399			*per_cpu_ptr(sdd->sd, j) = sd;
7400
7401			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
7402					GFP_KERNEL, cpu_to_node(j));
7403			if (!sg)
7404				return -ENOMEM;
7405
7406			*per_cpu_ptr(sdd->sg, j) = sg;
7407
7408			sgp = kzalloc_node(sizeof(struct sched_group_power),
7409					GFP_KERNEL, cpu_to_node(j));
7410			if (!sgp)
7411				return -ENOMEM;
7412
7413			*per_cpu_ptr(sdd->sgp, j) = sgp;
7414		}
7415	}
7416
7417	return 0;
7418}
7419
7420static void __sdt_free(const struct cpumask *cpu_map)
7421{
7422	struct sched_domain_topology_level *tl;
7423	int j;
7424
7425	for (tl = sched_domain_topology; tl->init; tl++) {
7426		struct sd_data *sdd = &tl->data;
7427
7428		for_each_cpu(j, cpu_map) {
7429			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
7430			if (sd && (sd->flags & SD_OVERLAP))
7431				free_sched_groups(sd->groups, 0);
7432			kfree(*per_cpu_ptr(sdd->sd, j));
7433			kfree(*per_cpu_ptr(sdd->sg, j));
7434			kfree(*per_cpu_ptr(sdd->sgp, j));
7435		}
7436		free_percpu(sdd->sd);
7437		free_percpu(sdd->sg);
7438		free_percpu(sdd->sgp);
7439	}
7440}
7441
7442struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
7443		struct s_data *d, const struct cpumask *cpu_map,
7444		struct sched_domain_attr *attr, struct sched_domain *child,
7445		int cpu)
7446{
7447	struct sched_domain *sd = tl->init(tl, cpu);
7448	if (!sd)
7449		return child;
7450
7451	set_domain_attribute(sd, attr);
7452	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
7453	if (child) {
7454		sd->level = child->level + 1;
7455		sched_domain_level_max = max(sched_domain_level_max, sd->level);
7456		child->parent = sd;
7457	}
7458	sd->child = child;
7459
7460	return sd;
7461}
7462
7463/*
7464 * Build sched domains for a given set of cpus and attach the sched domains
7465 * to the individual cpus
7466 */
7467static int build_sched_domains(const struct cpumask *cpu_map,
7468			       struct sched_domain_attr *attr)
7469{
7470	enum s_alloc alloc_state = sa_none;
7471	struct sched_domain *sd;
7472	struct s_data d;
7473	int i, ret = -ENOMEM;
7474
7475	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
7476	if (alloc_state != sa_rootdomain)
7477		goto error;
7478
7479	/* Set up domains for cpus specified by the cpu_map. */
7480	for_each_cpu(i, cpu_map) {
7481		struct sched_domain_topology_level *tl;
7482
7483		sd = NULL;
7484		for (tl = sched_domain_topology; tl->init; tl++) {
7485			sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
7486			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
7487				sd->flags |= SD_OVERLAP;
7488			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
7489				break;
7490		}
7491
7492		while (sd->child)
7493			sd = sd->child;
7494
7495		*per_cpu_ptr(d.sd, i) = sd;
7496	}
7497
7498	/* Build the groups for the domains */
7499	for_each_cpu(i, cpu_map) {
7500		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
7501			sd->span_weight = cpumask_weight(sched_domain_span(sd));
7502			if (sd->flags & SD_OVERLAP) {
7503				if (build_overlap_sched_groups(sd, i))
7504					goto error;
7505			} else {
7506				if (build_sched_groups(sd, i))
7507					goto error;
7508			}
7509		}
7510	}
7511
7512	/* Calculate CPU power for physical packages and nodes */
7513	for (i = nr_cpumask_bits-1; i >= 0; i--) {
7514		if (!cpumask_test_cpu(i, cpu_map))
7515			continue;
7516
7517		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
7518			claim_allocations(i, sd);
7519			init_sched_groups_power(i, sd);
7520		}
7521	}
7522
7523	/* Attach the domains */
7524	rcu_read_lock();
7525	for_each_cpu(i, cpu_map) {
7526		sd = *per_cpu_ptr(d.sd, i);
7527		cpu_attach_domain(sd, d.rd, i);
7528	}
7529	rcu_read_unlock();
7530
7531	ret = 0;
7532error:
7533	__free_domain_allocs(&d, alloc_state, cpu_map);
7534	return ret;
7535}
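/*
 * Illustrative result: on a two-socket, dual-core, hyperthreaded box using
 * the default_topology[] above, each CPU would typically end up with a
 * bottom-up chain of SIBLING -> MC -> CPU domains (plus NODE/ALLNODES on
 * NUMA configurations), minus whatever levels sd_degenerate() and
 * sd_parent_degenerate() collapse.
 */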
7536
7537static cpumask_var_t *doms_cur;	/* current sched domains */
7538static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
7539static struct sched_domain_attr *dattr_cur;
7540				/* attributes of custom domains in 'doms_cur' */
7541
7542/*
7543 * Special case: If a kmalloc of a doms_cur partition (array of
7544 * cpumask) fails, then fall back to a single sched domain,
7545 * as determined by the single cpumask fallback_doms.
7546 */
7547static cpumask_var_t fallback_doms;
7548
7549/*
7550 * arch_update_cpu_topology lets virtualized architectures update the
7551 * cpu core maps. It is supposed to return 1 if the topology changed
7552 * or 0 if it stayed the same.
7553 */
7554int __attribute__((weak)) arch_update_cpu_topology(void)
7555{
7556	return 0;
7557}
7558
7559cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7560{
7561	int i;
7562	cpumask_var_t *doms;
7563
7564	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7565	if (!doms)
7566		return NULL;
7567	for (i = 0; i < ndoms; i++) {
7568		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7569			free_sched_domains(doms, i);
7570			return NULL;
7571		}
7572	}
7573	return doms;
7574}
7575
7576void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7577{
7578	unsigned int i;
7579	for (i = 0; i < ndoms; i++)
7580		free_cpumask_var(doms[i]);
7581	kfree(doms);
7582}
7583
7584/*
7585 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
7586 * For now this just excludes isolated cpus, but could be used to
7587 * exclude other special cases in the future.
7588 */
7589static int init_sched_domains(const struct cpumask *cpu_map)
7590{
7591	int err;
7592
7593	arch_update_cpu_topology();
7594	ndoms_cur = 1;
7595	doms_cur = alloc_sched_domains(ndoms_cur);
7596	if (!doms_cur)
7597		doms_cur = &fallback_doms;
7598	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
7599	dattr_cur = NULL;
7600	err = build_sched_domains(doms_cur[0], NULL);
7601	register_sched_domain_sysctl();
7602
7603	return err;
7604}
7605
7606/*
7607 * Detach sched domains from a group of cpus specified in cpu_map
7608 * These cpus will now be attached to the NULL domain
7609 */
7610static void detach_destroy_domains(const struct cpumask *cpu_map)
7611{
7612	int i;
7613
7614	rcu_read_lock();
7615	for_each_cpu(i, cpu_map)
7616		cpu_attach_domain(NULL, &def_root_domain, i);
7617	rcu_read_unlock();
7618}
7619
7620/* handle null as "default" */
7621static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7622			struct sched_domain_attr *new, int idx_new)
7623{
7624	struct sched_domain_attr tmp;
7625
7626	/* fast path */
7627	if (!new && !cur)
7628		return 1;
7629
7630	tmp = SD_ATTR_INIT;
7631	return !memcmp(cur ? (cur + idx_cur) : &tmp,
7632			new ? (new + idx_new) : &tmp,
7633			sizeof(struct sched_domain_attr));
7634}
7635
7636/*
7637 * Partition sched domains as specified by the 'ndoms_new'
7638 * cpumasks in the array doms_new[] of cpumasks. This compares
7639 * doms_new[] to the current sched domain partitioning, doms_cur[].
7640 * It destroys each deleted domain and builds each new domain.
7641 *
7642 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
7643 * The masks don't intersect (don't overlap). We set up one
7644 * sched domain for each mask. CPUs not in any of the cpumasks will
7645 * not be load balanced. If the same cpumask appears both in the
7646 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7647 * it as it is.
7648 *
7649 * The passed in 'doms_new' should be allocated using
7650 * alloc_sched_domains.  This routine takes ownership of it and will
7651 * free_sched_domains it when done with it. If the caller failed the
7652 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
7653 * and partition_sched_domains() will fall back to the single partition
7654 * 'fallback_doms'; this also forces the domains to be rebuilt.
7655 *
7656 * If doms_new == NULL it will be replaced with cpu_online_mask.
7657 * ndoms_new == 0 is a special case for destroying existing domains,
7658 * and it will not create the default domain.
7659 *
7660 * Call with hotplug lock held
7661 */
7662void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
7663			     struct sched_domain_attr *dattr_new)
7664{
7665	int i, j, n;
7666	int new_topology;
7667
7668	mutex_lock(&sched_domains_mutex);
7669
7670	/* always unregister in case we don't destroy any domains */
7671	unregister_sched_domain_sysctl();
7672
7673	/* Let architecture update cpu core mappings. */
7674	new_topology = arch_update_cpu_topology();
7675
7676	n = doms_new ? ndoms_new : 0;
7677
7678	/* Destroy deleted domains */
7679	for (i = 0; i < ndoms_cur; i++) {
7680		for (j = 0; j < n && !new_topology; j++) {
7681			if (cpumask_equal(doms_cur[i], doms_new[j])
7682			    && dattrs_equal(dattr_cur, i, dattr_new, j))
7683				goto match1;
7684		}
7685		/* no match - a current sched domain not in new doms_new[] */
7686		detach_destroy_domains(doms_cur[i]);
7687match1:
7688		;
7689	}
7690
7691	if (doms_new == NULL) {
7692		ndoms_cur = 0;
7693		doms_new = &fallback_doms;
7694		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
7695		WARN_ON_ONCE(dattr_new);
7696	}
7697
7698	/* Build new domains */
7699	for (i = 0; i < ndoms_new; i++) {
7700		for (j = 0; j < ndoms_cur && !new_topology; j++) {
7701			if (cpumask_equal(doms_new[i], doms_cur[j])
7702			    && dattrs_equal(dattr_new, i, dattr_cur, j))
7703				goto match2;
7704		}
7705		/* no match - add a new doms_new */
7706		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
7707match2:
7708		;
7709	}
7710
7711	/* Remember the new sched domains */
7712	if (doms_cur != &fallback_doms)
7713		free_sched_domains(doms_cur, ndoms_cur);
7714	kfree(dattr_cur);	/* kfree(NULL) is safe */
7715	doms_cur = doms_new;
7716	dattr_cur = dattr_new;
7717	ndoms_cur = ndoms_new;
7718
7719	register_sched_domain_sysctl();
7720
7721	mutex_unlock(&sched_domains_mutex);
7722}
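/*
 * Caller sketch (illustrative; cpusets is the main in-tree user): building a
 * single partition that load-balances across all active CPUs could look like
 *
 *	cpumask_var_t *doms;
 *
 *	get_online_cpus();
 *	doms = alloc_sched_domains(1);
 *	if (doms)
 *		cpumask_copy(doms[0], cpu_active_mask);
 *	partition_sched_domains(1, doms, NULL);
 *	put_online_cpus();
 *
 * Per the comment above, ownership of 'doms' passes to
 * partition_sched_domains(), and a NULL 'doms' makes it fall back to
 * 'fallback_doms'.
 */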
7723
7724#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
7725static void reinit_sched_domains(void)
7726{
7727	get_online_cpus();
7728
7729	/* Destroy domains first to force the rebuild */
7730	partition_sched_domains(0, NULL, NULL);
7731
7732	rebuild_sched_domains();
7733	put_online_cpus();
7734}
7735
7736static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7737{
7738	unsigned int level = 0;
7739
7740	if (sscanf(buf, "%u", &level) != 1)
7741		return -EINVAL;
7742
7743	/*
7744	 * level is always positive, so don't check for
7745	 * level < POWERSAVINGS_BALANCE_NONE, which is 0.
7746	 * What happens on a 0- or 1-byte write?
7747	 * Do we need to check count as well?
7748	 */
7749
7750	if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
7751		return -EINVAL;
7752
7753	if (smt)
7754		sched_smt_power_savings = level;
7755	else
7756		sched_mc_power_savings = level;
7757
7758	reinit_sched_domains();
7759
7760	return count;
7761}
7762
7763#ifdef CONFIG_SCHED_MC
7764static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
7765					   struct sysdev_class_attribute *attr,
7766					   char *page)
7767{
7768	return sprintf(page, "%u\n", sched_mc_power_savings);
7769}
7770static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
7771					    struct sysdev_class_attribute *attr,
7772					    const char *buf, size_t count)
7773{
7774	return sched_power_savings_store(buf, count, 0);
7775}
7776static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
7777			 sched_mc_power_savings_show,
7778			 sched_mc_power_savings_store);
7779#endif
7780
7781#ifdef CONFIG_SCHED_SMT
7782static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
7783					    struct sysdev_class_attribute *attr,
7784					    char *page)
7785{
7786	return sprintf(page, "%u\n", sched_smt_power_savings);
7787}
7788static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
7789					     struct sysdev_class_attribute *attr,
7790					     const char *buf, size_t count)
7791{
7792	return sched_power_savings_store(buf, count, 1);
7793}
7794static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
7795		   sched_smt_power_savings_show,
7796		   sched_smt_power_savings_store);
7797#endif
7798
7799int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
7800{
7801	int err = 0;
7802
7803#ifdef CONFIG_SCHED_SMT
7804	if (smt_capable())
7805		err = sysfs_create_file(&cls->kset.kobj,
7806					&attr_sched_smt_power_savings.attr);
7807#endif
7808#ifdef CONFIG_SCHED_MC
7809	if (!err && mc_capable())
7810		err = sysfs_create_file(&cls->kset.kobj,
7811					&attr_sched_mc_power_savings.attr);
7812#endif
7813	return err;
7814}
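/*
 * Usage sketch (illustrative): the attributes registered above show up as
 * /sys/devices/system/cpu/sched_mc_power_savings and, on SMT-capable
 * systems, /sys/devices/system/cpu/sched_smt_power_savings. Writing a level
 * such as "1" or "2" to them lands in sched_power_savings_store() and
 * triggers reinit_sched_domains().
 */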
7815#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
7816
7817/*
7818 * Update cpusets according to cpu_active mask.  If cpusets are
7819 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7820 * around partition_sched_domains().
7821 */
7822static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7823			     void *hcpu)
7824{
7825	switch (action & ~CPU_TASKS_FROZEN) {
7826	case CPU_ONLINE:
7827	case CPU_DOWN_FAILED:
7828		cpuset_update_active_cpus();
7829		return NOTIFY_OK;
7830	default:
7831		return NOTIFY_DONE;
7832	}
7833}
7834
7835static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7836			       void *hcpu)
7837{
7838	switch (action & ~CPU_TASKS_FROZEN) {
7839	case CPU_DOWN_PREPARE:
7840		cpuset_update_active_cpus();
7841		return NOTIFY_OK;
7842	default:
7843		return NOTIFY_DONE;
7844	}
7845}
7846
7847static int update_runtime(struct notifier_block *nfb,
7848				unsigned long action, void *hcpu)
7849{
7850	int cpu = (int)(long)hcpu;
7851
7852	switch (action) {
7853	case CPU_DOWN_PREPARE:
7854	case CPU_DOWN_PREPARE_FROZEN:
7855		disable_runtime(cpu_rq(cpu));
7856		return NOTIFY_OK;
7857
7858	case CPU_DOWN_FAILED:
7859	case CPU_DOWN_FAILED_FROZEN:
7860	case CPU_ONLINE:
7861	case CPU_ONLINE_FROZEN:
7862		enable_runtime(cpu_rq(cpu));
7863		return NOTIFY_OK;
7864
7865	default:
7866		return NOTIFY_DONE;
7867	}
7868}
7869
7870void __init sched_init_smp(void)
7871{
7872	cpumask_var_t non_isolated_cpus;
7873
7874	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
7875	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
7876
7877	get_online_cpus();
7878	mutex_lock(&sched_domains_mutex);
7879	init_sched_domains(cpu_active_mask);
7880	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7881	if (cpumask_empty(non_isolated_cpus))
7882		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
7883	mutex_unlock(&sched_domains_mutex);
7884	put_online_cpus();
7885
7886	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7887	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
7888
7889	/* RT runtime code needs to handle some hotplug events */
7890	hotcpu_notifier(update_runtime, 0);
7891
7892	init_hrtick();
7893
7894	/* Move init over to a non-isolated CPU */
7895	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
7896		BUG();
7897	sched_init_granularity();
7898	free_cpumask_var(non_isolated_cpus);
7899
7900	init_sched_rt_class();
7901}
7902#else
7903void __init sched_init_smp(void)
7904{
7905	sched_init_granularity();
7906}
7907#endif /* CONFIG_SMP */
7908
7909const_debug unsigned int sysctl_timer_migration = 1;
7910
7911int in_sched_functions(unsigned long addr)
7912{
7913	return in_lock_functions(addr) ||
7914		(addr >= (unsigned long)__sched_text_start
7915		&& addr < (unsigned long)__sched_text_end);
7916}
7917
7918static void init_cfs_rq(struct cfs_rq *cfs_rq)
7919{
7920	cfs_rq->tasks_timeline = RB_ROOT;
7921	INIT_LIST_HEAD(&cfs_rq->tasks);
7922	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
7923#ifndef CONFIG_64BIT
7924	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
7925#endif
7926}
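/*
 * Note (illustrative): initializing min_vruntime just below the u64 wrap
 * point at (u64)(-(1LL << 20)) makes the counter wrap early in the
 * runqueue's life, so vruntime comparisons that are not wraparound-safe
 * tend to surface quickly instead of only after a very long uptime.
 */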
7927
7928static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
7929{
7930	struct rt_prio_array *array;
7931	int i;
7932
7933	array = &rt_rq->active;
7934	for (i = 0; i < MAX_RT_PRIO; i++) {
7935		INIT_LIST_HEAD(array->queue + i);
7936		__clear_bit(i, array->bitmap);
7937	}
7938	/* delimiter for bitsearch: */
7939	__set_bit(MAX_RT_PRIO, array->bitmap);
7940
7941#if defined CONFIG_SMP
7942	rt_rq->highest_prio.curr = MAX_RT_PRIO;
7943	rt_rq->highest_prio.next = MAX_RT_PRIO;
7944	rt_rq->rt_nr_migratory = 0;
7945	rt_rq->overloaded = 0;
7946	plist_head_init(&rt_rq->pushable_tasks);
7947#endif
7948
7949	rt_rq->rt_time = 0;
7950	rt_rq->rt_throttled = 0;
7951	rt_rq->rt_runtime = 0;
7952	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
7953}
7954
7955#ifdef CONFIG_FAIR_GROUP_SCHED
7956static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
7957				struct sched_entity *se, int cpu,
7958				struct sched_entity *parent)
7959{
7960	struct rq *rq = cpu_rq(cpu);
7961
7962	cfs_rq->tg = tg;
7963	cfs_rq->rq = rq;
7964#ifdef CONFIG_SMP
7965	/* allow initial update_cfs_load() to truncate */
7966	cfs_rq->load_stamp = 1;
7967#endif
7968
7969	tg->cfs_rq[cpu] = cfs_rq;
7970	tg->se[cpu] = se;
7971
7972	/* se could be NULL for root_task_group */
7973	if (!se)
7974		return;
7975
7976	if (!parent)
7977		se->cfs_rq = &rq->cfs;
7978	else
7979		se->cfs_rq = parent->my_q;
7980
7981	se->my_q = cfs_rq;
7982	update_load_set(&se->load, 0);
7983	se->parent = parent;
7984}
7985#endif
7986
7987#ifdef CONFIG_RT_GROUP_SCHED
7988static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
7989		struct sched_rt_entity *rt_se, int cpu,
7990		struct sched_rt_entity *parent)
7991{
7992	struct rq *rq = cpu_rq(cpu);
7993
7994	rt_rq->highest_prio.curr = MAX_RT_PRIO;
7995	rt_rq->rt_nr_boosted = 0;
7996	rt_rq->rq = rq;
7997	rt_rq->tg = tg;
7998
7999	tg->rt_rq[cpu] = rt_rq;
8000	tg->rt_se[cpu] = rt_se;
8001
8002	if (!rt_se)
8003		return;
8004
8005	if (!parent)
8006		rt_se->rt_rq = &rq->rt;
8007	else
8008		rt_se->rt_rq = parent->my_q;
8009
8010	rt_se->my_q = rt_rq;
8011	rt_se->parent = parent;
8012	INIT_LIST_HEAD(&rt_se->run_list);
8013}
8014#endif
8015
8016void __init sched_init(void)
8017{
8018	int i, j;
8019	unsigned long alloc_size = 0, ptr;
8020
8021#ifdef CONFIG_FAIR_GROUP_SCHED
8022	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8023#endif
8024#ifdef CONFIG_RT_GROUP_SCHED
8025	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8026#endif
8027#ifdef CONFIG_CPUMASK_OFFSTACK
8028	alloc_size += num_possible_cpus() * cpumask_size();
8029#endif
8030	if (alloc_size) {
8031		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
8032
8033#ifdef CONFIG_FAIR_GROUP_SCHED
8034		root_task_group.se = (struct sched_entity **)ptr;
8035		ptr += nr_cpu_ids * sizeof(void **);
8036
8037		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8038		ptr += nr_cpu_ids * sizeof(void **);
8039
8040#endif /* CONFIG_FAIR_GROUP_SCHED */
8041#ifdef CONFIG_RT_GROUP_SCHED
8042		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8043		ptr += nr_cpu_ids * sizeof(void **);
8044
8045		root_task_group.rt_rq = (struct rt_rq **)ptr;
8046		ptr += nr_cpu_ids * sizeof(void **);
8047
8048#endif /* CONFIG_RT_GROUP_SCHED */
8049#ifdef CONFIG_CPUMASK_OFFSTACK
8050		for_each_possible_cpu(i) {
8051			per_cpu(load_balance_tmpmask, i) = (void *)ptr;
8052			ptr += cpumask_size();
8053		}
8054#endif /* CONFIG_CPUMASK_OFFSTACK */
8055	}
8056
8057#ifdef CONFIG_SMP
8058	init_defrootdomain();
8059#endif
8060
8061	init_rt_bandwidth(&def_rt_bandwidth,
8062			global_rt_period(), global_rt_runtime());
8063
8064#ifdef CONFIG_RT_GROUP_SCHED
8065	init_rt_bandwidth(&root_task_group.rt_bandwidth,
8066			global_rt_period(), global_rt_runtime());
8067#endif /* CONFIG_RT_GROUP_SCHED */
8068
8069#ifdef CONFIG_CGROUP_SCHED
8070	list_add(&root_task_group.list, &task_groups);
8071	INIT_LIST_HEAD(&root_task_group.children);
8072	autogroup_init(&init_task);
8073#endif /* CONFIG_CGROUP_SCHED */
8074
8075	for_each_possible_cpu(i) {
8076		struct rq *rq;
8077
8078		rq = cpu_rq(i);
8079		raw_spin_lock_init(&rq->lock);
8080		rq->nr_running = 0;
8081		rq->calc_load_active = 0;
8082		rq->calc_load_update = jiffies + LOAD_FREQ;
8083		init_cfs_rq(&rq->cfs);
8084		init_rt_rq(&rq->rt, rq);
8085#ifdef CONFIG_FAIR_GROUP_SCHED
8086		root_task_group.shares = root_task_group_load;
8087		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8088		/*
8089		 * How much cpu bandwidth does root_task_group get?
8090		 *
8091		 * In case of task-groups formed through the cgroup filesystem, it
8092		 * gets 100% of the cpu resources in the system. This overall
8093		 * system cpu resource is divided among the tasks of
8094		 * root_task_group and its child task-groups in a fair manner,
8095		 * based on each entity's (task or task-group's) weight
8096		 * (se->load.weight).
8097		 *
8098		 * In other words, if root_task_group has 10 tasks (of weight
8099		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
8100		 * then A0's share of the cpu resource is:
8101		 *
8102		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8103		 *
8104		 * We achieve this by letting root_task_group's tasks sit
8105		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
8106		 */
8107		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8108#endif /* CONFIG_FAIR_GROUP_SCHED */
8109
8110		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
8111#ifdef CONFIG_RT_GROUP_SCHED
8112		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
8113		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8114#endif
8115
8116		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
8117			rq->cpu_load[j] = 0;
8118
8119		rq->last_load_update_tick = jiffies;
8120
8121#ifdef CONFIG_SMP
8122		rq->sd = NULL;
8123		rq->rd = NULL;
8124		rq->cpu_power = SCHED_POWER_SCALE;
8125		rq->post_schedule = 0;
8126		rq->active_balance = 0;
8127		rq->next_balance = jiffies;
8128		rq->push_cpu = 0;
8129		rq->cpu = i;
8130		rq->online = 0;
8131		rq->idle_stamp = 0;
8132		rq->avg_idle = 2*sysctl_sched_migration_cost;
8133		rq_attach_root(rq, &def_root_domain);
8134#ifdef CONFIG_NO_HZ
8135		rq->nohz_balance_kick = 0;
8136		init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i));
8137#endif
8138#endif
8139		init_rq_hrtick(rq);
8140		atomic_set(&rq->nr_iowait, 0);
8141	}
8142
8143	set_load_weight(&init_task);
8144
8145#ifdef CONFIG_PREEMPT_NOTIFIERS
8146	INIT_HLIST_HEAD(&init_task.preempt_notifiers);
8147#endif
8148
8149#ifdef CONFIG_SMP
8150	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
8151#endif
8152
8153#ifdef CONFIG_RT_MUTEXES
8154	plist_head_init(&init_task.pi_waiters);
8155#endif
8156
8157	/*
8158	 * The boot idle thread does lazy MMU switching as well:
8159	 */
8160	atomic_inc(&init_mm.mm_count);
8161	enter_lazy_tlb(&init_mm, current);
8162
8163	/*
8164	 * Make us the idle thread. Technically, schedule() should not be
8165	 * called from this thread; however, somewhere below it might be.
8166	 * Because we are the idle thread, we just pick up running again
8167	 * when this runqueue becomes "idle".
8168	 */
8169	init_idle(current, smp_processor_id());
8170
8171	calc_load_update = jiffies + LOAD_FREQ;
8172
8173	/*
8174	 * During early bootup we pretend to be a normal task:
8175	 */
8176	current->sched_class = &fair_sched_class;
8177
8178	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
8179	zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
8180#ifdef CONFIG_SMP
8181	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
8182#ifdef CONFIG_NO_HZ
8183	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
8184	alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
8185	atomic_set(&nohz.load_balancer, nr_cpu_ids);
8186	atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
8187	atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
8188#endif
8189	/* May be allocated at isolcpus cmdline parse time */
8190	if (cpu_isolated_map == NULL)
8191		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
8192#endif /* SMP */
8193
8194	scheduler_running = 1;
8195}
8196
8197#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
8198static inline int preempt_count_equals(int preempt_offset)
8199{
8200	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
8201
8202	return (nested == preempt_offset);
8203}
8204
8205void __might_sleep(const char *file, int line, int preempt_offset)
8206{
8207	static unsigned long prev_jiffy;	/* ratelimiting */
8208
8209	if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
8210	    system_state != SYSTEM_RUNNING || oops_in_progress)
8211		return;
8212	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8213		return;
8214	prev_jiffy = jiffies;
8215
8216	printk(KERN_ERR
8217		"BUG: sleeping function called from invalid context at %s:%d\n",
8218			file, line);
8219	printk(KERN_ERR
8220		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8221			in_atomic(), irqs_disabled(),
8222			current->pid, current->comm);
8223
8224	debug_show_held_locks(current);
8225	if (irqs_disabled())
8226		print_irqtrace_events(current);
8227	dump_stack();
8228}
8229EXPORT_SYMBOL(__might_sleep);
8230#endif
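
/*
 * Illustrative example (hypothetical lock/mutex names): the warning issued
 * by __might_sleep() typically fires when a sleeping primitive is called
 * from atomic context, e.g. with a spinlock held, since spin_lock() raises
 * the preempt count checked in preempt_count_equals():
 *
 *	spin_lock(&some_lock);
 *	mutex_lock(&some_mutex);	<-- may sleep, triggers the splat
 *	spin_unlock(&some_lock);
 */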
8231
8232#ifdef CONFIG_MAGIC_SYSRQ
8233static void normalize_task(struct rq *rq, struct task_struct *p)
8234{
8235	const struct sched_class *prev_class = p->sched_class;
8236	int old_prio = p->prio;
8237	int on_rq;
8238
8239	on_rq = p->on_rq;
8240	if (on_rq)
8241		deactivate_task(rq, p, 0);
8242	__setscheduler(rq, p, SCHED_NORMAL, 0);
8243	if (on_rq) {
8244		activate_task(rq, p, 0);
8245		resched_task(rq->curr);
8246	}
8247
8248	check_class_changed(rq, p, prev_class, old_prio);
8249}
8250
8251void normalize_rt_tasks(void)
8252{
8253	struct task_struct *g, *p;
8254	unsigned long flags;
8255	struct rq *rq;
8256
8257	read_lock_irqsave(&tasklist_lock, flags);
8258	do_each_thread(g, p) {
8259		/*
8260		 * Only normalize user tasks:
8261		 */
8262		if (!p->mm)
8263			continue;
8264
8265		p->se.exec_start		= 0;
8266#ifdef CONFIG_SCHEDSTATS
8267		p->se.statistics.wait_start	= 0;
8268		p->se.statistics.sleep_start	= 0;
8269		p->se.statistics.block_start	= 0;
8270#endif
8271
8272		if (!rt_task(p)) {
8273			/*
8274			 * Renice userspace tasks with a negative
8275			 * nice level back to 0:
8276			 */
8277			if (TASK_NICE(p) < 0 && p->mm)
8278				set_user_nice(p, 0);
8279			continue;
8280		}
8281
8282		raw_spin_lock(&p->pi_lock);
8283		rq = __task_rq_lock(p);
8284
8285		normalize_task(rq, p);
8286
8287		__task_rq_unlock(rq);
8288		raw_spin_unlock(&p->pi_lock);
8289	} while_each_thread(g, p);
8290
8291	read_unlock_irqrestore(&tasklist_lock, flags);
8292}
8293
8294#endif /* CONFIG_MAGIC_SYSRQ */
8295
8296#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
8297/*
8298 * These functions are only useful for the IA64 MCA handling, or kdb.
8299 *
8300 * They can only be called when the whole system has been
8301 * stopped - every CPU needs to be quiescent, and no scheduling
8302 * activity can take place. Using them for anything else would
8303 * be a serious bug, and as a result, they aren't even visible
8304 * under any other configuration.
8305 */
8306
8307/**
8308 * curr_task - return the current task for a given cpu.
8309 * @cpu: the processor in question.
8310 *
8311 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8312 */
8313struct task_struct *curr_task(int cpu)
8314{
8315	return cpu_curr(cpu);
8316}
8317
8318#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
8319
8320#ifdef CONFIG_IA64
8321/**
8322 * set_curr_task - set the current task for a given cpu.
8323 * @cpu: the processor in question.
8324 * @p: the task pointer to set.
8325 *
8326 * Description: This function must only be used when non-maskable interrupts
8327 * are serviced on a separate stack. It allows the architecture to switch the
8328 * notion of the current task on a cpu in a non-blocking manner. This function
8329 * must be called with all CPUs synchronized and interrupts disabled; the
8330 * caller must save the original value of the current task (see
8331 * curr_task() above) and restore that value before reenabling interrupts and
8332 * re-starting the system.
8333 *
8334 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8335 */
8336void set_curr_task(int cpu, struct task_struct *p)
8337{
8338	cpu_curr(cpu) = p;
8339}
8340
8341#endif
8342
8343#ifdef CONFIG_FAIR_GROUP_SCHED
8344static void free_fair_sched_group(struct task_group *tg)
8345{
8346	int i;
8347
8348	for_each_possible_cpu(i) {
8349		if (tg->cfs_rq)
8350			kfree(tg->cfs_rq[i]);
8351		if (tg->se)
8352			kfree(tg->se[i]);
8353	}
8354
8355	kfree(tg->cfs_rq);
8356	kfree(tg->se);
8357}
8358
8359static
8360int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8361{
8362	struct cfs_rq *cfs_rq;
8363	struct sched_entity *se;
8364	int i;
8365
8366	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
8367	if (!tg->cfs_rq)
8368		goto err;
8369	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
8370	if (!tg->se)
8371		goto err;
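
	/*
	 * Note: sizeof(cfs_rq) and sizeof(se) above are pointer sizes;
	 * tg->cfs_rq and tg->se are arrays of per-cpu pointers, and the
	 * per-cpu structures themselves are allocated in the loop below.
	 */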
8372
8373	tg->shares = NICE_0_LOAD;
8374
8375	for_each_possible_cpu(i) {
8376		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8377				      GFP_KERNEL, cpu_to_node(i));
8378		if (!cfs_rq)
8379			goto err;
8380
8381		se = kzalloc_node(sizeof(struct sched_entity),
8382				  GFP_KERNEL, cpu_to_node(i));
8383		if (!se)
8384			goto err_free_rq;
8385
8386		init_cfs_rq(cfs_rq);
8387		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
8388	}
8389
8390	return 1;
8391
8392err_free_rq:
8393	kfree(cfs_rq);
8394err:
8395	return 0;
8396}
8397
8398static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8399{
8400	struct rq *rq = cpu_rq(cpu);
8401	unsigned long flags;
8402
8403	/*
8404	 * Only empty task groups can be destroyed, so we can speculatively
8405	 * check on_list without danger of it being re-added.
8406	 */
8407	if (!tg->cfs_rq[cpu]->on_list)
8408		return;
8409
8410	raw_spin_lock_irqsave(&rq->lock, flags);
8411	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
8412	raw_spin_unlock_irqrestore(&rq->lock, flags);
8413}
8414#else /* !CONFIG_FAIR_GROUP_SCHED */
8415static inline void free_fair_sched_group(struct task_group *tg)
8416{
8417}
8418
8419static inline
8420int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8421{
8422	return 1;
8423}
8424
8425static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8426{
8427}
8428#endif /* CONFIG_FAIR_GROUP_SCHED */
8429
8430#ifdef CONFIG_RT_GROUP_SCHED
8431static void free_rt_sched_group(struct task_group *tg)
8432{
8433	int i;
8434
8435	if (tg->rt_se)
8436		destroy_rt_bandwidth(&tg->rt_bandwidth);
8437
8438	for_each_possible_cpu(i) {
8439		if (tg->rt_rq)
8440			kfree(tg->rt_rq[i]);
8441		if (tg->rt_se)
8442			kfree(tg->rt_se[i]);
8443	}
8444
8445	kfree(tg->rt_rq);
8446	kfree(tg->rt_se);
8447}
8448
8449static
8450int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
8451{
8452	struct rt_rq *rt_rq;
8453	struct sched_rt_entity *rt_se;
8454	int i;
8455
8456	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
8457	if (!tg->rt_rq)
8458		goto err;
8459	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
8460	if (!tg->rt_se)
8461		goto err;
8462
8463	init_rt_bandwidth(&tg->rt_bandwidth,
8464			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
8465
8466	for_each_possible_cpu(i) {
8467		rt_rq = kzalloc_node(sizeof(struct rt_rq),
8468				     GFP_KERNEL, cpu_to_node(i));
8469		if (!rt_rq)
8470			goto err;
8471
8472		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
8473				     GFP_KERNEL, cpu_to_node(i));
8474		if (!rt_se)
8475			goto err_free_rq;
8476
8477		init_rt_rq(rt_rq, cpu_rq(i));
8478		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
8479		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
8480	}
8481
8482	return 1;
8483
8484err_free_rq:
8485	kfree(rt_rq);
8486err:
8487	return 0;
8488}
8489#else /* !CONFIG_RT_GROUP_SCHED */
8490static inline void free_rt_sched_group(struct task_group *tg)
8491{
8492}
8493
8494static inline
8495int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
8496{
8497	return 1;
8498}
8499#endif /* CONFIG_RT_GROUP_SCHED */
8500
8501#ifdef CONFIG_CGROUP_SCHED
8502static void free_sched_group(struct task_group *tg)
8503{
8504	free_fair_sched_group(tg);
8505	free_rt_sched_group(tg);
8506	autogroup_free(tg);
8507	kfree(tg);
8508}
8509
8510/* allocate runqueue etc for a new task group */
8511struct task_group *sched_create_group(struct task_group *parent)
8512{
8513	struct task_group *tg;
8514	unsigned long flags;
8515
8516	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
8517	if (!tg)
8518		return ERR_PTR(-ENOMEM);
8519
8520	if (!alloc_fair_sched_group(tg, parent))
8521		goto err;
8522
8523	if (!alloc_rt_sched_group(tg, parent))
8524		goto err;
8525
8526	spin_lock_irqsave(&task_group_lock, flags);
8527	list_add_rcu(&tg->list, &task_groups);
8528
8529	WARN_ON(!parent); /* root should already exist */
8530
8531	tg->parent = parent;
8532	INIT_LIST_HEAD(&tg->children);
8533	list_add_rcu(&tg->siblings, &parent->children);
8534	spin_unlock_irqrestore(&task_group_lock, flags);
8535
8536	return tg;
8537
8538err:
8539	free_sched_group(tg);
8540	return ERR_PTR(-ENOMEM);
8541}
8542
8543/* rcu callback to free various structures associated with a task group */
8544static void free_sched_group_rcu(struct rcu_head *rhp)
8545{
8546	/* now it should be safe to free those cfs_rqs */
8547	free_sched_group(container_of(rhp, struct task_group, rcu));
8548}
8549
8550/* Destroy runqueue etc associated with a task group */
8551void sched_destroy_group(struct task_group *tg)
8552{
8553	unsigned long flags;
8554	int i;
8555
8556	/* end participation in shares distribution */
8557	for_each_possible_cpu(i)
8558		unregister_fair_sched_group(tg, i);
8559
8560	spin_lock_irqsave(&task_group_lock, flags);
8561	list_del_rcu(&tg->list);
8562	list_del_rcu(&tg->siblings);
8563	spin_unlock_irqrestore(&task_group_lock, flags);
8564
8565	/* wait for possible concurrent references to cfs_rqs to complete */
8566	call_rcu(&tg->rcu, free_sched_group_rcu);
8567}
8568
8569/* Change a task's runqueue when it moves between groups.
8570 *	The caller of this function should have put the task in its new group
8571 *	by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
8572 *	reflect its new group.
8573 */
8574void sched_move_task(struct task_struct *tsk)
8575{
8576	int on_rq, running;
8577	unsigned long flags;
8578	struct rq *rq;
8579
8580	rq = task_rq_lock(tsk, &flags);
8581
8582	running = task_current(rq, tsk);
8583	on_rq = tsk->on_rq;
8584
8585	if (on_rq)
8586		dequeue_task(rq, tsk, 0);
8587	if (unlikely(running))
8588		tsk->sched_class->put_prev_task(rq, tsk);
8589
8590#ifdef CONFIG_FAIR_GROUP_SCHED
8591	if (tsk->sched_class->task_move_group)
8592		tsk->sched_class->task_move_group(tsk, on_rq);
8593	else
8594#endif
8595		set_task_rq(tsk, task_cpu(tsk));
8596
8597	if (unlikely(running))
8598		tsk->sched_class->set_curr_task(rq);
8599	if (on_rq)
8600		enqueue_task(rq, tsk, 0);
8601
8602	task_rq_unlock(rq, tsk, &flags);
8603}
8604#endif /* CONFIG_CGROUP_SCHED */
8605
8606#ifdef CONFIG_FAIR_GROUP_SCHED
8607static DEFINE_MUTEX(shares_mutex);
8608
8609int sched_group_set_shares(struct task_group *tg, unsigned long shares)
8610{
8611	int i;
8612	unsigned long flags;
8613
8614	/*
8615	 * We can't change the weight of the root cgroup.
8616	 */
8617	if (!tg->se[0])
8618		return -EINVAL;
8619
8620	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
8621
8622	mutex_lock(&shares_mutex);
8623	if (tg->shares == shares)
8624		goto done;
8625
8626	tg->shares = shares;
8627	for_each_possible_cpu(i) {
8628		struct rq *rq = cpu_rq(i);
8629		struct sched_entity *se;
8630
8631		se = tg->se[i];
8632		/* Propagate contribution to hierarchy */
8633		raw_spin_lock_irqsave(&rq->lock, flags);
8634		for_each_sched_entity(se)
8635			update_cfs_shares(group_cfs_rq(se));
8636		raw_spin_unlock_irqrestore(&rq->lock, flags);
8637	}
8638
8639done:
8640	mutex_unlock(&shares_mutex);
8641	return 0;
8642}
8643
8644unsigned long sched_group_shares(struct task_group *tg)
8645{
8646	return tg->shares;
8647}
8648#endif
8649
8650#ifdef CONFIG_RT_GROUP_SCHED
8651/*
8652 * Ensure that the real time constraints are schedulable.
8653 */
8654static DEFINE_MUTEX(rt_constraints_mutex);
8655
8656static unsigned long to_ratio(u64 period, u64 runtime)
8657{
8658	if (runtime == RUNTIME_INF)
8659		return 1ULL << 20;
8660
8661	return div64_u64(runtime << 20, period);
8662}
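
/*
 * Worked example (illustrative): with the default global limits of
 * runtime = 950000us and period = 1000000us (passed here in nanoseconds),
 * to_ratio() yields roughly 0.95 in Q20 fixed point:
 *
 *	(950000000 << 20) / 1000000000  ~=  0.95 * 2^20  ~=  996147
 *
 * i.e. RT tasks may together consume at most about 95% of each period.
 */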
8663
8664/* Must be called with tasklist_lock held */
8665static inline int tg_has_rt_tasks(struct task_group *tg)
8666{
8667	struct task_struct *g, *p;
8668
8669	do_each_thread(g, p) {
8670		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
8671			return 1;
8672	} while_each_thread(g, p);
8673
8674	return 0;
8675}
8676
8677struct rt_schedulable_data {
8678	struct task_group *tg;
8679	u64 rt_period;
8680	u64 rt_runtime;
8681};
8682
8683static int tg_schedulable(struct task_group *tg, void *data)
8684{
8685	struct rt_schedulable_data *d = data;
8686	struct task_group *child;
8687	unsigned long total, sum = 0;
8688	u64 period, runtime;
8689
8690	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8691	runtime = tg->rt_bandwidth.rt_runtime;
8692
8693	if (tg == d->tg) {
8694		period = d->rt_period;
8695		runtime = d->rt_runtime;
8696	}
8697
8698	/*
8699	 * Cannot have more runtime than the period.
8700	 */
8701	if (runtime > period && runtime != RUNTIME_INF)
8702		return -EINVAL;
8703
8704	/*
8705	 * Ensure we don't starve existing RT tasks.
8706	 */
8707	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
8708		return -EBUSY;
8709
8710	total = to_ratio(period, runtime);
8711
8712	/*
8713	 * Nobody can have more than the global setting allows.
8714	 */
8715	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
8716		return -EINVAL;
8717
8718	/*
8719	 * The sum of our children's runtime should not exceed our own.
8720	 */
8721	list_for_each_entry_rcu(child, &tg->children, siblings) {
8722		period = ktime_to_ns(child->rt_bandwidth.rt_period);
8723		runtime = child->rt_bandwidth.rt_runtime;
8724
8725		if (child == d->tg) {
8726			period = d->rt_period;
8727			runtime = d->rt_runtime;
8728		}
8729
8730		sum += to_ratio(period, runtime);
8731	}
8732
8733	if (sum > total)
8734		return -EINVAL;
8735
8736	return 0;
8737}
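
/*
 * Worked example for the child check above (illustrative): if a group is
 * configured with runtime/period = 500000us/1s (ratio 0.5) and two of its
 * children each request 300000us/1s (ratio 0.3), the children sum to 0.6,
 * which exceeds the parent's 0.5, so tg_schedulable() returns -EINVAL.
 */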
8738
8739static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
8740{
8741	struct rt_schedulable_data data = {
8742		.tg = tg,
8743		.rt_period = period,
8744		.rt_runtime = runtime,
8745	};
8746
8747	return walk_tg_tree(tg_schedulable, tg_nop, &data);
8748}
8749
8750static int tg_set_bandwidth(struct task_group *tg,
8751		u64 rt_period, u64 rt_runtime)
8752{
8753	int i, err = 0;
8754
8755	mutex_lock(&rt_constraints_mutex);
8756	read_lock(&tasklist_lock);
8757	err = __rt_schedulable(tg, rt_period, rt_runtime);
8758	if (err)
8759		goto unlock;
8760
8761	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
8762	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
8763	tg->rt_bandwidth.rt_runtime = rt_runtime;
8764
8765	for_each_possible_cpu(i) {
8766		struct rt_rq *rt_rq = tg->rt_rq[i];
8767
8768		raw_spin_lock(&rt_rq->rt_runtime_lock);
8769		rt_rq->rt_runtime = rt_runtime;
8770		raw_spin_unlock(&rt_rq->rt_runtime_lock);
8771	}
8772	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
8773unlock:
8774	read_unlock(&tasklist_lock);
8775	mutex_unlock(&rt_constraints_mutex);
8776
8777	return err;
8778}
8779
8780int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
8781{
8782	u64 rt_runtime, rt_period;
8783
8784	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8785	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
8786	if (rt_runtime_us < 0)
8787		rt_runtime = RUNTIME_INF;
8788
8789	return tg_set_bandwidth(tg, rt_period, rt_runtime);
8790}
8791
8792long sched_group_rt_runtime(struct task_group *tg)
8793{
8794	u64 rt_runtime_us;
8795
8796	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
8797		return -1;
8798
8799	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
8800	do_div(rt_runtime_us, NSEC_PER_USEC);
8801	return rt_runtime_us;
8802}
8803
8804int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
8805{
8806	u64 rt_runtime, rt_period;
8807
8808	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
8809	rt_runtime = tg->rt_bandwidth.rt_runtime;
8810
8811	if (rt_period == 0)
8812		return -EINVAL;
8813
8814	return tg_set_bandwidth(tg, rt_period, rt_runtime);
8815}
8816
8817long sched_group_rt_period(struct task_group *tg)
8818{
8819	u64 rt_period_us;
8820
8821	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
8822	do_div(rt_period_us, NSEC_PER_USEC);
8823	return rt_period_us;
8824}
8825
8826static int sched_rt_global_constraints(void)
8827{
8828	u64 runtime, period;
8829	int ret = 0;
8830
8831	if (sysctl_sched_rt_period <= 0)
8832		return -EINVAL;
8833
8834	runtime = global_rt_runtime();
8835	period = global_rt_period();
8836
8837	/*
8838	 * Sanity check on the sysctl variables.
8839	 */
8840	if (runtime > period && runtime != RUNTIME_INF)
8841		return -EINVAL;
8842
8843	mutex_lock(&rt_constraints_mutex);
8844	read_lock(&tasklist_lock);
8845	ret = __rt_schedulable(NULL, 0, 0);
8846	read_unlock(&tasklist_lock);
8847	mutex_unlock(&rt_constraints_mutex);
8848
8849	return ret;
8850}
8851
8852int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
8853{
8854	/* Don't accept realtime tasks when there is no way for them to run */
8855	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
8856		return 0;
8857
8858	return 1;
8859}
8860
8861#else /* !CONFIG_RT_GROUP_SCHED */
8862static int sched_rt_global_constraints(void)
8863{
8864	unsigned long flags;
8865	int i;
8866
8867	if (sysctl_sched_rt_period <= 0)
8868		return -EINVAL;
8869
8870	/*
8871	 * There are always some RT tasks in the root group
8872	 * -- migration, kstopmachine etc.
8873	 */
8874	if (sysctl_sched_rt_runtime == 0)
8875		return -EBUSY;
8876
8877	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
8878	for_each_possible_cpu(i) {
8879		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
8880
8881		raw_spin_lock(&rt_rq->rt_runtime_lock);
8882		rt_rq->rt_runtime = global_rt_runtime();
8883		raw_spin_unlock(&rt_rq->rt_runtime_lock);
8884	}
8885	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
8886
8887	return 0;
8888}
8889#endif /* CONFIG_RT_GROUP_SCHED */
8890
8891int sched_rt_handler(struct ctl_table *table, int write,
8892		void __user *buffer, size_t *lenp,
8893		loff_t *ppos)
8894{
8895	int ret;
8896	int old_period, old_runtime;
8897	static DEFINE_MUTEX(mutex);
8898
8899	mutex_lock(&mutex);
8900	old_period = sysctl_sched_rt_period;
8901	old_runtime = sysctl_sched_rt_runtime;
8902
8903	ret = proc_dointvec(table, write, buffer, lenp, ppos);
8904
8905	if (!ret && write) {
8906		ret = sched_rt_global_constraints();
8907		if (ret) {
8908			sysctl_sched_rt_period = old_period;
8909			sysctl_sched_rt_runtime = old_runtime;
8910		} else {
8911			def_rt_bandwidth.rt_runtime = global_rt_runtime();
8912			def_rt_bandwidth.rt_period =
8913				ns_to_ktime(global_rt_period());
8914		}
8915	}
8916	mutex_unlock(&mutex);
8917
8918	return ret;
8919}
8920
8921#ifdef CONFIG_CGROUP_SCHED
8922
8923/* return corresponding task_group object of a cgroup */
8924static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
8925{
8926	return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
8927			    struct task_group, css);
8928}
8929
8930static struct cgroup_subsys_state *
8931cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
8932{
8933	struct task_group *tg, *parent;
8934
8935	if (!cgrp->parent) {
8936		/* This is early initialization for the top cgroup */
8937		return &root_task_group.css;
8938	}
8939
8940	parent = cgroup_tg(cgrp->parent);
8941	tg = sched_create_group(parent);
8942	if (IS_ERR(tg))
8943		return ERR_PTR(-ENOMEM);
8944
8945	return &tg->css;
8946}
8947
8948static void
8949cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
8950{
8951	struct task_group *tg = cgroup_tg(cgrp);
8952
8953	sched_destroy_group(tg);
8954}
8955
8956static int
8957cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
8958{
8959#ifdef CONFIG_RT_GROUP_SCHED
8960	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
8961		return -EINVAL;
8962#else
8963	/* We don't support RT-tasks being in separate groups */
8964	if (tsk->sched_class != &fair_sched_class)
8965		return -EINVAL;
8966#endif
8967	return 0;
8968}
8969
8970static void
8971cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
8972{
8973	sched_move_task(tsk);
8974}
8975
8976static void
8977cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
8978		struct cgroup *old_cgrp, struct task_struct *task)
8979{
8980	/*
8981	 * cgroup_exit() is called in the copy_process() failure path.
8982	 * Ignore this case since the task hasn't run yet; this avoids
8983	 * trying to poke a half-freed task state from generic code.
8984	 */
8985	if (!(task->flags & PF_EXITING))
8986		return;
8987
8988	sched_move_task(task);
8989}
8990
8991#ifdef CONFIG_FAIR_GROUP_SCHED
8992static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
8993				u64 shareval)
8994{
8995	return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
8996}
8997
8998static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
8999{
9000	struct task_group *tg = cgroup_tg(cgrp);
9001
9002	return (u64) scale_load_down(tg->shares);
9003}
9004#endif /* CONFIG_FAIR_GROUP_SCHED */
9005
9006#ifdef CONFIG_RT_GROUP_SCHED
9007static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
9008				s64 val)
9009{
9010	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
9011}
9012
9013static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
9014{
9015	return sched_group_rt_runtime(cgroup_tg(cgrp));
9016}
9017
9018static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
9019		u64 rt_period_us)
9020{
9021	return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
9022}
9023
9024static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
9025{
9026	return sched_group_rt_period(cgroup_tg(cgrp));
9027}
9028#endif /* CONFIG_RT_GROUP_SCHED */
9029
9030static struct cftype cpu_files[] = {
9031#ifdef CONFIG_FAIR_GROUP_SCHED
9032	{
9033		.name = "shares",
9034		.read_u64 = cpu_shares_read_u64,
9035		.write_u64 = cpu_shares_write_u64,
9036	},
9037#endif
9038#ifdef CONFIG_RT_GROUP_SCHED
9039	{
9040		.name = "rt_runtime_us",
9041		.read_s64 = cpu_rt_runtime_read,
9042		.write_s64 = cpu_rt_runtime_write,
9043	},
9044	{
9045		.name = "rt_period_us",
9046		.read_u64 = cpu_rt_period_read_uint,
9047		.write_u64 = cpu_rt_period_write_uint,
9048	},
9049#endif
9050};
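
/*
 * Minimal usage sketch for the files above (illustrative; the mount point
 * and group name are assumptions - any cgroup hierarchy with the "cpu"
 * subsystem mounted works, and which files exist depends on the
 * CONFIG_FAIR_GROUP_SCHED/CONFIG_RT_GROUP_SCHED options):
 *
 *	# mount -t cgroup -o cpu none /sys/fs/cgroup/cpu
 *	# mkdir /sys/fs/cgroup/cpu/mygroup
 *	# echo 512    > /sys/fs/cgroup/cpu/mygroup/cpu.shares
 *	# echo 300000 > /sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us
 *	# echo $$     > /sys/fs/cgroup/cpu/mygroup/tasks
 *
 * The writes land in cpu_shares_write_u64() and cpu_rt_runtime_write()
 * defined above.
 */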
9051
9052static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
9053{
9054	return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
9055}
9056
9057struct cgroup_subsys cpu_cgroup_subsys = {
9058	.name		= "cpu",
9059	.create		= cpu_cgroup_create,
9060	.destroy	= cpu_cgroup_destroy,
9061	.can_attach_task = cpu_cgroup_can_attach_task,
9062	.attach_task	= cpu_cgroup_attach_task,
9063	.exit		= cpu_cgroup_exit,
9064	.populate	= cpu_cgroup_populate,
9065	.subsys_id	= cpu_cgroup_subsys_id,
9066	.early_init	= 1,
9067};
9068
9069#endif	/* CONFIG_CGROUP_SCHED */
9070
9071#ifdef CONFIG_CGROUP_CPUACCT
9072
9073/*
9074 * CPU accounting code for task groups.
9075 *
9076 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
9077 * (balbir@in.ibm.com).
9078 */
9079
9080/* track cpu usage of a group of tasks and its child groups */
9081struct cpuacct {
9082	struct cgroup_subsys_state css;
9083	/* cpuusage holds a pointer to a u64-type object on every cpu */
9084	u64 __percpu *cpuusage;
9085	struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
9086	struct cpuacct *parent;
9087};
9088
9089struct cgroup_subsys cpuacct_subsys;
9090
9091/* return cpu accounting group corresponding to this container */
9092static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
9093{
9094	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
9095			    struct cpuacct, css);
9096}
9097
9098/* return cpu accounting group to which this task belongs */
9099static inline struct cpuacct *task_ca(struct task_struct *tsk)
9100{
9101	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
9102			    struct cpuacct, css);
9103}
9104
9105/* create a new cpu accounting group */
9106static struct cgroup_subsys_state *cpuacct_create(
9107	struct cgroup_subsys *ss, struct cgroup *cgrp)
9108{
9109	struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
9110	int i;
9111
9112	if (!ca)
9113		goto out;
9114
9115	ca->cpuusage = alloc_percpu(u64);
9116	if (!ca->cpuusage)
9117		goto out_free_ca;
9118
9119	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
9120		if (percpu_counter_init(&ca->cpustat[i], 0))
9121			goto out_free_counters;
9122
9123	if (cgrp->parent)
9124		ca->parent = cgroup_ca(cgrp->parent);
9125
9126	return &ca->css;
9127
9128out_free_counters:
9129	while (--i >= 0)
9130		percpu_counter_destroy(&ca->cpustat[i]);
9131	free_percpu(ca->cpuusage);
9132out_free_ca:
9133	kfree(ca);
9134out:
9135	return ERR_PTR(-ENOMEM);
9136}
9137
9138/* destroy an existing cpu accounting group */
9139static void
9140cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
9141{
9142	struct cpuacct *ca = cgroup_ca(cgrp);
9143	int i;
9144
9145	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
9146		percpu_counter_destroy(&ca->cpustat[i]);
9147	free_percpu(ca->cpuusage);
9148	kfree(ca);
9149}
9150
9151static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
9152{
9153	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
9154	u64 data;
9155
9156#ifndef CONFIG_64BIT
9157	/*
9158	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
9159	 */
9160	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
9161	data = *cpuusage;
9162	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
9163#else
9164	data = *cpuusage;
9165#endif
9166
9167	return data;
9168}
9169
9170static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
9171{
9172	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
9173
9174#ifndef CONFIG_64BIT
9175	/*
9176	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
9177	 */
9178	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
9179	*cpuusage = val;
9180	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
9181#else
9182	*cpuusage = val;
9183#endif
9184}
9185
9186/* return total cpu usage (in nanoseconds) of a group */
9187static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
9188{
9189	struct cpuacct *ca = cgroup_ca(cgrp);
9190	u64 totalcpuusage = 0;
9191	int i;
9192
9193	for_each_present_cpu(i)
9194		totalcpuusage += cpuacct_cpuusage_read(ca, i);
9195
9196	return totalcpuusage;
9197}
9198
9199static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
9200								u64 reset)
9201{
9202	struct cpuacct *ca = cgroup_ca(cgrp);
9203	int err = 0;
9204	int i;
9205
9206	if (reset) {
9207		err = -EINVAL;
9208		goto out;
9209	}
9210
9211	for_each_present_cpu(i)
9212		cpuacct_cpuusage_write(ca, i, 0);
9213
9214out:
9215	return err;
9216}
9217
9218static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
9219				   struct seq_file *m)
9220{
9221	struct cpuacct *ca = cgroup_ca(cgroup);
9222	u64 percpu;
9223	int i;
9224
9225	for_each_present_cpu(i) {
9226		percpu = cpuacct_cpuusage_read(ca, i);
9227		seq_printf(m, "%llu ", (unsigned long long) percpu);
9228	}
9229	seq_printf(m, "\n");
9230	return 0;
9231}
9232
9233static const char *cpuacct_stat_desc[] = {
9234	[CPUACCT_STAT_USER] = "user",
9235	[CPUACCT_STAT_SYSTEM] = "system",
9236};
9237
9238static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
9239		struct cgroup_map_cb *cb)
9240{
9241	struct cpuacct *ca = cgroup_ca(cgrp);
9242	int i;
9243
9244	for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
9245		s64 val = percpu_counter_read(&ca->cpustat[i]);
9246		val = cputime64_to_clock_t(val);
9247		cb->fill(cb, cpuacct_stat_desc[i], val);
9248	}
9249	return 0;
9250}
9251
9252static struct cftype files[] = {
9253	{
9254		.name = "usage",
9255		.read_u64 = cpuusage_read,
9256		.write_u64 = cpuusage_write,
9257	},
9258	{
9259		.name = "usage_percpu",
9260		.read_seq_string = cpuacct_percpu_seq_read,
9261	},
9262	{
9263		.name = "stat",
9264		.read_map = cpuacct_stats_show,
9265	},
9266};
9267
9268static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
9269{
9270	return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
9271}
9272
9273/*
9274 * charge this task's execution time to its accounting group.
9275 *
9276 * called with rq->lock held.
9277 */
9278static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9279{
9280	struct cpuacct *ca;
9281	int cpu;
9282
9283	if (unlikely(!cpuacct_subsys.active))
9284		return;
9285
9286	cpu = task_cpu(tsk);
9287
9288	rcu_read_lock();
9289
9290	ca = task_ca(tsk);
9291
9292	for (; ca; ca = ca->parent) {
9293		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
9294		*cpuusage += cputime;
9295	}
9296
9297	rcu_read_unlock();
9298}
9299
9300/*
9301 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled, one jiffy can be very large
9302 * in cputime_t units. As a result, cpuacct_update_stats calls
9303 * percpu_counter_add with values large enough to always overflow the
9304 * per-CPU batch limit, causing bad SMP scalability.
9305 *
9306 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
9307 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
9308 * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
9309 */
9310#ifdef CONFIG_SMP
9311#define CPUACCT_BATCH	\
9312	min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
9313#else
9314#define CPUACCT_BATCH	0
9315#endif
9316
9317/*
9318 * Charge the system/user time to the task's accounting group.
9319 */
9320static void cpuacct_update_stats(struct task_struct *tsk,
9321		enum cpuacct_stat_index idx, cputime_t val)
9322{
9323	struct cpuacct *ca;
9324	int batch = CPUACCT_BATCH;
9325
9326	if (unlikely(!cpuacct_subsys.active))
9327		return;
9328
9329	rcu_read_lock();
9330	ca = task_ca(tsk);
9331
9332	do {
9333		__percpu_counter_add(&ca->cpustat[idx], val, batch);
9334		ca = ca->parent;
9335	} while (ca);
9336	rcu_read_unlock();
9337}
9338
9339struct cgroup_subsys cpuacct_subsys = {
9340	.name = "cpuacct",
9341	.create = cpuacct_create,
9342	.destroy = cpuacct_destroy,
9343	.populate = cpuacct_populate,
9344	.subsys_id = cpuacct_subsys_id,
9345};
9346#endif	/* CONFIG_CGROUP_CPUACCT */
9347