Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
    1// SPDX-License-Identifier: GPL-2.0
    2/*
    3 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
    4 *
    5 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
    6 *
    7 *  Interactivity improvements by Mike Galbraith
    8 *  (C) 2007 Mike Galbraith <efault@gmx.de>
    9 *
   10 *  Various enhancements by Dmitry Adamushko.
   11 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
   12 *
   13 *  Group scheduling enhancements by Srivatsa Vaddagiri
   14 *  Copyright IBM Corporation, 2007
   15 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
   16 *
   17 *  Scaled math optimizations by Thomas Gleixner
   18 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
   19 *
   20 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
   21 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
   22 */
   23#include "sched.h"
   24
   25/*
   26 * Targeted preemption latency for CPU-bound tasks:
   27 *
   28 * NOTE: this latency value is not the same as the concept of
   29 * 'timeslice length' - timeslices in CFS are of variable length
   30 * and have no persistent notion like in traditional, time-slice
   31 * based scheduling concepts.
   32 *
   33 * (to see the precise effective timeslice length of your workload,
   34 *  run vmstat and monitor the context-switches (cs) field)
   35 *
   36 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
   37 */
   38unsigned int sysctl_sched_latency			= 6000000ULL;
   39static unsigned int normalized_sysctl_sched_latency	= 6000000ULL;
   40
   41/*
   42 * The initial- and re-scaling of tunables is configurable
   43 *
   44 * Options are:
   45 *
   46 *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
   47 *   SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
   48 *   SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
   49 *
   50 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
   51 */
   52unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
   53
   54/*
   55 * Minimal preemption granularity for CPU-bound tasks:
   56 *
   57 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
   58 */
   59unsigned int sysctl_sched_min_granularity			= 750000ULL;
   60static unsigned int normalized_sysctl_sched_min_granularity	= 750000ULL;
   61
   62/*
   63 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
   64 */
   65static unsigned int sched_nr_latency = 8;
   66
   67/*
   68 * After fork, child runs first. If set to 0 (default) then
   69 * parent will (try to) run first.
   70 */
   71unsigned int sysctl_sched_child_runs_first __read_mostly;
   72
   73/*
   74 * SCHED_OTHER wake-up granularity.
   75 *
   76 * This option delays the preemption effects of decoupled workloads
   77 * and reduces their over-scheduling. Synchronous workloads will still
   78 * have immediate wakeup/sleep latencies.
   79 *
   80 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
   81 */
   82unsigned int sysctl_sched_wakeup_granularity			= 1000000UL;
   83static unsigned int normalized_sysctl_sched_wakeup_granularity	= 1000000UL;
   84
   85const_debug unsigned int sysctl_sched_migration_cost	= 500000UL;
   86
   87int sched_thermal_decay_shift;
   88static int __init setup_sched_thermal_decay_shift(char *str)
   89{
   90	int _shift = 0;
   91
   92	if (kstrtoint(str, 0, &_shift))
   93		pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");
   94
   95	sched_thermal_decay_shift = clamp(_shift, 0, 10);
   96	return 1;
   97}
   98__setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);
   99
  100#ifdef CONFIG_SMP
  101/*
  102 * For asym packing, by default the lower numbered CPU has higher priority.
  103 */
  104int __weak arch_asym_cpu_priority(int cpu)
  105{
  106	return -cpu;
  107}
  108
  109/*
  110 * The margin used when comparing utilization with CPU capacity.
  111 *
  112 * (default: ~20%)
  113 */
  114#define fits_capacity(cap, max)	((cap) * 1280 < (max) * 1024)
  115
  116/*
  117 * The margin used when comparing CPU capacities.
  118 * is 'cap1' noticeably greater than 'cap2'
  119 *
  120 * (default: ~5%)
  121 */
  122#define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)
  123#endif
  124
  125#ifdef CONFIG_CFS_BANDWIDTH
  126/*
  127 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
  128 * each time a cfs_rq requests quota.
  129 *
  130 * Note: in the case that the slice exceeds the runtime remaining (either due
  131 * to consumption or the quota being specified to be smaller than the slice)
  132 * we will always only issue the remaining available time.
  133 *
  134 * (default: 5 msec, units: microseconds)
  135 */
  136unsigned int sysctl_sched_cfs_bandwidth_slice		= 5000UL;
  137#endif
  138
  139static inline void update_load_add(struct load_weight *lw, unsigned long inc)
  140{
  141	lw->weight += inc;
  142	lw->inv_weight = 0;
  143}
  144
  145static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
  146{
  147	lw->weight -= dec;
  148	lw->inv_weight = 0;
  149}
  150
  151static inline void update_load_set(struct load_weight *lw, unsigned long w)
  152{
  153	lw->weight = w;
  154	lw->inv_weight = 0;
  155}
  156
  157/*
  158 * Increase the granularity value when there are more CPUs,
  159 * because with more CPUs the 'effective latency' as visible
  160 * to users decreases. But the relationship is not linear,
  161 * so pick a second-best guess by going with the log2 of the
  162 * number of CPUs.
  163 *
  164 * This idea comes from the SD scheduler of Con Kolivas:
  165 */
  166static unsigned int get_update_sysctl_factor(void)
  167{
  168	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
  169	unsigned int factor;
  170
  171	switch (sysctl_sched_tunable_scaling) {
  172	case SCHED_TUNABLESCALING_NONE:
  173		factor = 1;
  174		break;
  175	case SCHED_TUNABLESCALING_LINEAR:
  176		factor = cpus;
  177		break;
  178	case SCHED_TUNABLESCALING_LOG:
  179	default:
  180		factor = 1 + ilog2(cpus);
  181		break;
  182	}
  183
  184	return factor;
  185}
  186
  187static void update_sysctl(void)
  188{
  189	unsigned int factor = get_update_sysctl_factor();
  190
  191#define SET_SYSCTL(name) \
  192	(sysctl_##name = (factor) * normalized_sysctl_##name)
  193	SET_SYSCTL(sched_min_granularity);
  194	SET_SYSCTL(sched_latency);
  195	SET_SYSCTL(sched_wakeup_granularity);
  196#undef SET_SYSCTL
  197}
  198
  199void __init sched_init_granularity(void)
  200{
  201	update_sysctl();
  202}
  203
  204#define WMULT_CONST	(~0U)
  205#define WMULT_SHIFT	32
  206
  207static void __update_inv_weight(struct load_weight *lw)
  208{
  209	unsigned long w;
  210
  211	if (likely(lw->inv_weight))
  212		return;
  213
  214	w = scale_load_down(lw->weight);
  215
  216	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
  217		lw->inv_weight = 1;
  218	else if (unlikely(!w))
  219		lw->inv_weight = WMULT_CONST;
  220	else
  221		lw->inv_weight = WMULT_CONST / w;
  222}
  223
  224/*
  225 * delta_exec * weight / lw.weight
  226 *   OR
  227 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
  228 *
  229 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
  230 * we're guaranteed shift stays positive because inv_weight is guaranteed to
  231 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
  232 *
  233 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
  234 * weight/lw.weight <= 1, and therefore our shift will also be positive.
  235 */
  236static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
  237{
  238	u64 fact = scale_load_down(weight);
  239	u32 fact_hi = (u32)(fact >> 32);
  240	int shift = WMULT_SHIFT;
  241	int fs;
  242
  243	__update_inv_weight(lw);
  244
  245	if (unlikely(fact_hi)) {
  246		fs = fls(fact_hi);
  247		shift -= fs;
  248		fact >>= fs;
  249	}
  250
  251	fact = mul_u32_u32(fact, lw->inv_weight);
  252
  253	fact_hi = (u32)(fact >> 32);
  254	if (fact_hi) {
  255		fs = fls(fact_hi);
  256		shift -= fs;
  257		fact >>= fs;
  258	}
  259
  260	return mul_u64_u32_shr(delta_exec, fact, shift);
  261}
  262
  263
  264const struct sched_class fair_sched_class;
  265
  266/**************************************************************
  267 * CFS operations on generic schedulable entities:
  268 */
  269
  270#ifdef CONFIG_FAIR_GROUP_SCHED
  271
  272/* Walk up scheduling entities hierarchy */
  273#define for_each_sched_entity(se) \
  274		for (; se; se = se->parent)
  275
  276static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
  277{
  278	if (!path)
  279		return;
  280
  281	if (cfs_rq && task_group_is_autogroup(cfs_rq->tg))
  282		autogroup_path(cfs_rq->tg, path, len);
  283	else if (cfs_rq && cfs_rq->tg->css.cgroup)
  284		cgroup_path(cfs_rq->tg->css.cgroup, path, len);
  285	else
  286		strlcpy(path, "(null)", len);
  287}
  288
  289static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
  290{
  291	struct rq *rq = rq_of(cfs_rq);
  292	int cpu = cpu_of(rq);
  293
  294	if (cfs_rq->on_list)
  295		return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
  296
  297	cfs_rq->on_list = 1;
  298
  299	/*
  300	 * Ensure we either appear before our parent (if already
  301	 * enqueued) or force our parent to appear after us when it is
  302	 * enqueued. The fact that we always enqueue bottom-up
  303	 * reduces this to two cases and a special case for the root
  304	 * cfs_rq. Furthermore, it also means that we will always reset
  305	 * tmp_alone_branch either when the branch is connected
  306	 * to a tree or when we reach the top of the tree
  307	 */
  308	if (cfs_rq->tg->parent &&
  309	    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
  310		/*
  311		 * If parent is already on the list, we add the child
  312		 * just before. Thanks to circular linked property of
  313		 * the list, this means to put the child at the tail
  314		 * of the list that starts by parent.
  315		 */
  316		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
  317			&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
  318		/*
  319		 * The branch is now connected to its tree so we can
  320		 * reset tmp_alone_branch to the beginning of the
  321		 * list.
  322		 */
  323		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
  324		return true;
  325	}
  326
  327	if (!cfs_rq->tg->parent) {
  328		/*
  329		 * cfs rq without parent should be put
  330		 * at the tail of the list.
  331		 */
  332		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
  333			&rq->leaf_cfs_rq_list);
  334		/*
  335		 * We have reach the top of a tree so we can reset
  336		 * tmp_alone_branch to the beginning of the list.
  337		 */
  338		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
  339		return true;
  340	}
  341
  342	/*
  343	 * The parent has not already been added so we want to
  344	 * make sure that it will be put after us.
  345	 * tmp_alone_branch points to the begin of the branch
  346	 * where we will add parent.
  347	 */
  348	list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
  349	/*
  350	 * update tmp_alone_branch to points to the new begin
  351	 * of the branch
  352	 */
  353	rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
  354	return false;
  355}
  356
  357static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
  358{
  359	if (cfs_rq->on_list) {
  360		struct rq *rq = rq_of(cfs_rq);
  361
  362		/*
  363		 * With cfs_rq being unthrottled/throttled during an enqueue,
  364		 * it can happen the tmp_alone_branch points the a leaf that
  365		 * we finally want to del. In this case, tmp_alone_branch moves
  366		 * to the prev element but it will point to rq->leaf_cfs_rq_list
  367		 * at the end of the enqueue.
  368		 */
  369		if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
  370			rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
  371
  372		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
  373		cfs_rq->on_list = 0;
  374	}
  375}
  376
  377static inline void assert_list_leaf_cfs_rq(struct rq *rq)
  378{
  379	SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
  380}
  381
  382/* Iterate thr' all leaf cfs_rq's on a runqueue */
  383#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
  384	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
  385				 leaf_cfs_rq_list)
  386
  387/* Do the two (enqueued) entities belong to the same group ? */
  388static inline struct cfs_rq *
  389is_same_group(struct sched_entity *se, struct sched_entity *pse)
  390{
  391	if (se->cfs_rq == pse->cfs_rq)
  392		return se->cfs_rq;
  393
  394	return NULL;
  395}
  396
  397static inline struct sched_entity *parent_entity(struct sched_entity *se)
  398{
  399	return se->parent;
  400}
  401
  402static void
  403find_matching_se(struct sched_entity **se, struct sched_entity **pse)
  404{
  405	int se_depth, pse_depth;
  406
  407	/*
  408	 * preemption test can be made between sibling entities who are in the
  409	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
  410	 * both tasks until we find their ancestors who are siblings of common
  411	 * parent.
  412	 */
  413
  414	/* First walk up until both entities are at same depth */
  415	se_depth = (*se)->depth;
  416	pse_depth = (*pse)->depth;
  417
  418	while (se_depth > pse_depth) {
  419		se_depth--;
  420		*se = parent_entity(*se);
  421	}
  422
  423	while (pse_depth > se_depth) {
  424		pse_depth--;
  425		*pse = parent_entity(*pse);
  426	}
  427
  428	while (!is_same_group(*se, *pse)) {
  429		*se = parent_entity(*se);
  430		*pse = parent_entity(*pse);
  431	}
  432}
  433
  434#else	/* !CONFIG_FAIR_GROUP_SCHED */
  435
  436#define for_each_sched_entity(se) \
  437		for (; se; se = NULL)
  438
  439static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
  440{
  441	if (path)
  442		strlcpy(path, "(null)", len);
  443}
  444
  445static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
  446{
  447	return true;
  448}
  449
  450static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
  451{
  452}
  453
  454static inline void assert_list_leaf_cfs_rq(struct rq *rq)
  455{
  456}
  457
  458#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
  459		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
  460
  461static inline struct sched_entity *parent_entity(struct sched_entity *se)
  462{
  463	return NULL;
  464}
  465
  466static inline void
  467find_matching_se(struct sched_entity **se, struct sched_entity **pse)
  468{
  469}
  470
  471#endif	/* CONFIG_FAIR_GROUP_SCHED */
  472
  473static __always_inline
  474void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
  475
  476/**************************************************************
  477 * Scheduling class tree data structure manipulation methods:
  478 */
  479
  480static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
  481{
  482	s64 delta = (s64)(vruntime - max_vruntime);
  483	if (delta > 0)
  484		max_vruntime = vruntime;
  485
  486	return max_vruntime;
  487}
  488
  489static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
  490{
  491	s64 delta = (s64)(vruntime - min_vruntime);
  492	if (delta < 0)
  493		min_vruntime = vruntime;
  494
  495	return min_vruntime;
  496}
  497
  498static inline bool entity_before(struct sched_entity *a,
  499				struct sched_entity *b)
  500{
  501	return (s64)(a->vruntime - b->vruntime) < 0;
  502}
  503
  504#define __node_2_se(node) \
  505	rb_entry((node), struct sched_entity, run_node)
  506
  507static void update_min_vruntime(struct cfs_rq *cfs_rq)
  508{
  509	struct sched_entity *curr = cfs_rq->curr;
  510	struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);
  511
  512	u64 vruntime = cfs_rq->min_vruntime;
  513
  514	if (curr) {
  515		if (curr->on_rq)
  516			vruntime = curr->vruntime;
  517		else
  518			curr = NULL;
  519	}
  520
  521	if (leftmost) { /* non-empty tree */
  522		struct sched_entity *se = __node_2_se(leftmost);
  523
  524		if (!curr)
  525			vruntime = se->vruntime;
  526		else
  527			vruntime = min_vruntime(vruntime, se->vruntime);
  528	}
  529
  530	/* ensure we never gain time by being placed backwards. */
  531	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
  532#ifndef CONFIG_64BIT
  533	smp_wmb();
  534	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
  535#endif
  536}
  537
  538static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
  539{
  540	return entity_before(__node_2_se(a), __node_2_se(b));
  541}
  542
  543/*
  544 * Enqueue an entity into the rb-tree:
  545 */
  546static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
  547{
  548	rb_add_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less);
  549}
  550
  551static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
  552{
  553	rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
  554}
  555
  556struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
  557{
  558	struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);
  559
  560	if (!left)
  561		return NULL;
  562
  563	return __node_2_se(left);
  564}
  565
  566static struct sched_entity *__pick_next_entity(struct sched_entity *se)
  567{
  568	struct rb_node *next = rb_next(&se->run_node);
  569
  570	if (!next)
  571		return NULL;
  572
  573	return __node_2_se(next);
  574}
  575
  576#ifdef CONFIG_SCHED_DEBUG
  577struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  578{
  579	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
  580
  581	if (!last)
  582		return NULL;
  583
  584	return __node_2_se(last);
  585}
  586
  587/**************************************************************
  588 * Scheduling class statistics methods:
  589 */
  590
  591int sched_update_scaling(void)
  592{
  593	unsigned int factor = get_update_sysctl_factor();
  594
  595	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
  596					sysctl_sched_min_granularity);
  597
  598#define WRT_SYSCTL(name) \
  599	(normalized_sysctl_##name = sysctl_##name / (factor))
  600	WRT_SYSCTL(sched_min_granularity);
  601	WRT_SYSCTL(sched_latency);
  602	WRT_SYSCTL(sched_wakeup_granularity);
  603#undef WRT_SYSCTL
  604
  605	return 0;
  606}
  607#endif
  608
  609/*
  610 * delta /= w
  611 */
  612static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
  613{
  614	if (unlikely(se->load.weight != NICE_0_LOAD))
  615		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
  616
  617	return delta;
  618}
  619
  620/*
  621 * The idea is to set a period in which each task runs once.
  622 *
  623 * When there are too many tasks (sched_nr_latency) we have to stretch
  624 * this period because otherwise the slices get too small.
  625 *
  626 * p = (nr <= nl) ? l : l*nr/nl
  627 */
  628static u64 __sched_period(unsigned long nr_running)
  629{
  630	if (unlikely(nr_running > sched_nr_latency))
  631		return nr_running * sysctl_sched_min_granularity;
  632	else
  633		return sysctl_sched_latency;
  634}
  635
  636/*
  637 * We calculate the wall-time slice from the period by taking a part
  638 * proportional to the weight.
  639 *
  640 * s = p*P[w/rw]
  641 */
  642static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
  643{
  644	unsigned int nr_running = cfs_rq->nr_running;
  645	u64 slice;
  646
  647	if (sched_feat(ALT_PERIOD))
  648		nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
  649
  650	slice = __sched_period(nr_running + !se->on_rq);
  651
  652	for_each_sched_entity(se) {
  653		struct load_weight *load;
  654		struct load_weight lw;
  655
  656		cfs_rq = cfs_rq_of(se);
  657		load = &cfs_rq->load;
  658
  659		if (unlikely(!se->on_rq)) {
  660			lw = cfs_rq->load;
  661
  662			update_load_add(&lw, se->load.weight);
  663			load = &lw;
  664		}
  665		slice = __calc_delta(slice, se->load.weight, load);
  666	}
  667
  668	if (sched_feat(BASE_SLICE))
  669		slice = max(slice, (u64)sysctl_sched_min_granularity);
  670
  671	return slice;
  672}
  673
  674/*
  675 * We calculate the vruntime slice of a to-be-inserted task.
  676 *
  677 * vs = s/w
  678 */
  679static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
  680{
  681	return calc_delta_fair(sched_slice(cfs_rq, se), se);
  682}
  683
  684#include "pelt.h"
  685#ifdef CONFIG_SMP
  686
  687static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
  688static unsigned long task_h_load(struct task_struct *p);
  689static unsigned long capacity_of(int cpu);
  690
  691/* Give new sched_entity start runnable values to heavy its load in infant time */
  692void init_entity_runnable_average(struct sched_entity *se)
  693{
  694	struct sched_avg *sa = &se->avg;
  695
  696	memset(sa, 0, sizeof(*sa));
  697
  698	/*
  699	 * Tasks are initialized with full load to be seen as heavy tasks until
  700	 * they get a chance to stabilize to their real load level.
  701	 * Group entities are initialized with zero load to reflect the fact that
  702	 * nothing has been attached to the task group yet.
  703	 */
  704	if (entity_is_task(se))
  705		sa->load_avg = scale_load_down(se->load.weight);
  706
  707	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
  708}
  709
  710static void attach_entity_cfs_rq(struct sched_entity *se);
  711
  712/*
  713 * With new tasks being created, their initial util_avgs are extrapolated
  714 * based on the cfs_rq's current util_avg:
  715 *
  716 *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
  717 *
  718 * However, in many cases, the above util_avg does not give a desired
  719 * value. Moreover, the sum of the util_avgs may be divergent, such
  720 * as when the series is a harmonic series.
  721 *
  722 * To solve this problem, we also cap the util_avg of successive tasks to
  723 * only 1/2 of the left utilization budget:
  724 *
  725 *   util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
  726 *
  727 * where n denotes the nth task and cpu_scale the CPU capacity.
  728 *
  729 * For example, for a CPU with 1024 of capacity, a simplest series from
  730 * the beginning would be like:
  731 *
  732 *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
  733 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
  734 *
  735 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
  736 * if util_avg > util_avg_cap.
  737 */
  738void post_init_entity_util_avg(struct task_struct *p)
  739{
  740	struct sched_entity *se = &p->se;
  741	struct cfs_rq *cfs_rq = cfs_rq_of(se);
  742	struct sched_avg *sa = &se->avg;
  743	long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
  744	long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
  745
  746	if (cap > 0) {
  747		if (cfs_rq->avg.util_avg != 0) {
  748			sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
  749			sa->util_avg /= (cfs_rq->avg.load_avg + 1);
  750
  751			if (sa->util_avg > cap)
  752				sa->util_avg = cap;
  753		} else {
  754			sa->util_avg = cap;
  755		}
  756	}
  757
  758	sa->runnable_avg = sa->util_avg;
  759
  760	if (p->sched_class != &fair_sched_class) {
  761		/*
  762		 * For !fair tasks do:
  763		 *
  764		update_cfs_rq_load_avg(now, cfs_rq);
  765		attach_entity_load_avg(cfs_rq, se);
  766		switched_from_fair(rq, p);
  767		 *
  768		 * such that the next switched_to_fair() has the
  769		 * expected state.
  770		 */
  771		se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
  772		return;
  773	}
  774
  775	attach_entity_cfs_rq(se);
  776}
  777
  778#else /* !CONFIG_SMP */
  779void init_entity_runnable_average(struct sched_entity *se)
  780{
  781}
  782void post_init_entity_util_avg(struct task_struct *p)
  783{
  784}
  785static void update_tg_load_avg(struct cfs_rq *cfs_rq)
  786{
  787}
  788#endif /* CONFIG_SMP */
  789
  790/*
  791 * Update the current task's runtime statistics.
  792 */
  793static void update_curr(struct cfs_rq *cfs_rq)
  794{
  795	struct sched_entity *curr = cfs_rq->curr;
  796	u64 now = rq_clock_task(rq_of(cfs_rq));
  797	u64 delta_exec;
  798
  799	if (unlikely(!curr))
  800		return;
  801
  802	delta_exec = now - curr->exec_start;
  803	if (unlikely((s64)delta_exec <= 0))
  804		return;
  805
  806	curr->exec_start = now;
  807
  808	schedstat_set(curr->statistics.exec_max,
  809		      max(delta_exec, curr->statistics.exec_max));
  810
  811	curr->sum_exec_runtime += delta_exec;
  812	schedstat_add(cfs_rq->exec_clock, delta_exec);
  813
  814	curr->vruntime += calc_delta_fair(delta_exec, curr);
  815	update_min_vruntime(cfs_rq);
  816
  817	if (entity_is_task(curr)) {
  818		struct task_struct *curtask = task_of(curr);
  819
  820		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
  821		cgroup_account_cputime(curtask, delta_exec);
  822		account_group_exec_runtime(curtask, delta_exec);
  823	}
  824
  825	account_cfs_rq_runtime(cfs_rq, delta_exec);
  826}
  827
  828static void update_curr_fair(struct rq *rq)
  829{
  830	update_curr(cfs_rq_of(&rq->curr->se));
  831}
  832
  833static inline void
  834update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
  835{
  836	u64 wait_start, prev_wait_start;
  837
  838	if (!schedstat_enabled())
  839		return;
  840
  841	wait_start = rq_clock(rq_of(cfs_rq));
  842	prev_wait_start = schedstat_val(se->statistics.wait_start);
  843
  844	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
  845	    likely(wait_start > prev_wait_start))
  846		wait_start -= prev_wait_start;
  847
  848	__schedstat_set(se->statistics.wait_start, wait_start);
  849}
  850
  851static inline void
  852update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
  853{
  854	struct task_struct *p;
  855	u64 delta;
  856
  857	if (!schedstat_enabled())
  858		return;
  859
  860	/*
  861	 * When the sched_schedstat changes from 0 to 1, some sched se
  862	 * maybe already in the runqueue, the se->statistics.wait_start
  863	 * will be 0.So it will let the delta wrong. We need to avoid this
  864	 * scenario.
  865	 */
  866	if (unlikely(!schedstat_val(se->statistics.wait_start)))
  867		return;
  868
  869	delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
  870
  871	if (entity_is_task(se)) {
  872		p = task_of(se);
  873		if (task_on_rq_migrating(p)) {
  874			/*
  875			 * Preserve migrating task's wait time so wait_start
  876			 * time stamp can be adjusted to accumulate wait time
  877			 * prior to migration.
  878			 */
  879			__schedstat_set(se->statistics.wait_start, delta);
  880			return;
  881		}
  882		trace_sched_stat_wait(p, delta);
  883	}
  884
  885	__schedstat_set(se->statistics.wait_max,
  886		      max(schedstat_val(se->statistics.wait_max), delta));
  887	__schedstat_inc(se->statistics.wait_count);
  888	__schedstat_add(se->statistics.wait_sum, delta);
  889	__schedstat_set(se->statistics.wait_start, 0);
  890}
  891
  892static inline void
  893update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
  894{
  895	struct task_struct *tsk = NULL;
  896	u64 sleep_start, block_start;
  897
  898	if (!schedstat_enabled())
  899		return;
  900
  901	sleep_start = schedstat_val(se->statistics.sleep_start);
  902	block_start = schedstat_val(se->statistics.block_start);
  903
  904	if (entity_is_task(se))
  905		tsk = task_of(se);
  906
  907	if (sleep_start) {
  908		u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
  909
  910		if ((s64)delta < 0)
  911			delta = 0;
  912
  913		if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
  914			__schedstat_set(se->statistics.sleep_max, delta);
  915
  916		__schedstat_set(se->statistics.sleep_start, 0);
  917		__schedstat_add(se->statistics.sum_sleep_runtime, delta);
  918
  919		if (tsk) {
  920			account_scheduler_latency(tsk, delta >> 10, 1);
  921			trace_sched_stat_sleep(tsk, delta);
  922		}
  923	}
  924	if (block_start) {
  925		u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
  926
  927		if ((s64)delta < 0)
  928			delta = 0;
  929
  930		if (unlikely(delta > schedstat_val(se->statistics.block_max)))
  931			__schedstat_set(se->statistics.block_max, delta);
  932
  933		__schedstat_set(se->statistics.block_start, 0);
  934		__schedstat_add(se->statistics.sum_sleep_runtime, delta);
  935
  936		if (tsk) {
  937			if (tsk->in_iowait) {
  938				__schedstat_add(se->statistics.iowait_sum, delta);
  939				__schedstat_inc(se->statistics.iowait_count);
  940				trace_sched_stat_iowait(tsk, delta);
  941			}
  942
  943			trace_sched_stat_blocked(tsk, delta);
  944
  945			/*
  946			 * Blocking time is in units of nanosecs, so shift by
  947			 * 20 to get a milliseconds-range estimation of the
  948			 * amount of time that the task spent sleeping:
  949			 */
  950			if (unlikely(prof_on == SLEEP_PROFILING)) {
  951				profile_hits(SLEEP_PROFILING,
  952						(void *)get_wchan(tsk),
  953						delta >> 20);
  954			}
  955			account_scheduler_latency(tsk, delta >> 10, 0);
  956		}
  957	}
  958}
  959
  960/*
  961 * Task is being enqueued - update stats:
  962 */
  963static inline void
  964update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
  965{
  966	if (!schedstat_enabled())
  967		return;
  968
  969	/*
  970	 * Are we enqueueing a waiting task? (for current tasks
  971	 * a dequeue/enqueue event is a NOP)
  972	 */
  973	if (se != cfs_rq->curr)
  974		update_stats_wait_start(cfs_rq, se);
  975
  976	if (flags & ENQUEUE_WAKEUP)
  977		update_stats_enqueue_sleeper(cfs_rq, se);
  978}
  979
  980static inline void
  981update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
  982{
  983
  984	if (!schedstat_enabled())
  985		return;
  986
  987	/*
  988	 * Mark the end of the wait period if dequeueing a
  989	 * waiting task:
  990	 */
  991	if (se != cfs_rq->curr)
  992		update_stats_wait_end(cfs_rq, se);
  993
  994	if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
  995		struct task_struct *tsk = task_of(se);
  996		unsigned int state;
  997
  998		/* XXX racy against TTWU */
  999		state = READ_ONCE(tsk->__state);
 1000		if (state & TASK_INTERRUPTIBLE)
 1001			__schedstat_set(se->statistics.sleep_start,
 1002				      rq_clock(rq_of(cfs_rq)));
 1003		if (state & TASK_UNINTERRUPTIBLE)
 1004			__schedstat_set(se->statistics.block_start,
 1005				      rq_clock(rq_of(cfs_rq)));
 1006	}
 1007}
 1008
 1009/*
 1010 * We are picking a new current task - update its stats:
 1011 */
 1012static inline void
 1013update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 1014{
 1015	/*
 1016	 * We are starting a new run period:
 1017	 */
 1018	se->exec_start = rq_clock_task(rq_of(cfs_rq));
 1019}
 1020
 1021/**************************************************
 1022 * Scheduling class queueing methods:
 1023 */
 1024
 1025#ifdef CONFIG_NUMA_BALANCING
 1026/*
 1027 * Approximate time to scan a full NUMA task in ms. The task scan period is
 1028 * calculated based on the tasks virtual memory size and
 1029 * numa_balancing_scan_size.
 1030 */
 1031unsigned int sysctl_numa_balancing_scan_period_min = 1000;
 1032unsigned int sysctl_numa_balancing_scan_period_max = 60000;
 1033
 1034/* Portion of address space to scan in MB */
 1035unsigned int sysctl_numa_balancing_scan_size = 256;
 1036
 1037/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
 1038unsigned int sysctl_numa_balancing_scan_delay = 1000;
 1039
 1040struct numa_group {
 1041	refcount_t refcount;
 1042
 1043	spinlock_t lock; /* nr_tasks, tasks */
 1044	int nr_tasks;
 1045	pid_t gid;
 1046	int active_nodes;
 1047
 1048	struct rcu_head rcu;
 1049	unsigned long total_faults;
 1050	unsigned long max_faults_cpu;
 1051	/*
 1052	 * Faults_cpu is used to decide whether memory should move
 1053	 * towards the CPU. As a consequence, these stats are weighted
 1054	 * more by CPU use than by memory faults.
 1055	 */
 1056	unsigned long *faults_cpu;
 1057	unsigned long faults[];
 1058};
 1059
 1060/*
 1061 * For functions that can be called in multiple contexts that permit reading
 1062 * ->numa_group (see struct task_struct for locking rules).
 1063 */
 1064static struct numa_group *deref_task_numa_group(struct task_struct *p)
 1065{
 1066	return rcu_dereference_check(p->numa_group, p == current ||
 1067		(lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));
 1068}
 1069
 1070static struct numa_group *deref_curr_numa_group(struct task_struct *p)
 1071{
 1072	return rcu_dereference_protected(p->numa_group, p == current);
 1073}
 1074
 1075static inline unsigned long group_faults_priv(struct numa_group *ng);
 1076static inline unsigned long group_faults_shared(struct numa_group *ng);
 1077
 1078static unsigned int task_nr_scan_windows(struct task_struct *p)
 1079{
 1080	unsigned long rss = 0;
 1081	unsigned long nr_scan_pages;
 1082
 1083	/*
 1084	 * Calculations based on RSS as non-present and empty pages are skipped
 1085	 * by the PTE scanner and NUMA hinting faults should be trapped based
 1086	 * on resident pages
 1087	 */
 1088	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
 1089	rss = get_mm_rss(p->mm);
 1090	if (!rss)
 1091		rss = nr_scan_pages;
 1092
 1093	rss = round_up(rss, nr_scan_pages);
 1094	return rss / nr_scan_pages;
 1095}
 1096
 1097/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
 1098#define MAX_SCAN_WINDOW 2560
 1099
 1100static unsigned int task_scan_min(struct task_struct *p)
 1101{
 1102	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
 1103	unsigned int scan, floor;
 1104	unsigned int windows = 1;
 1105
 1106	if (scan_size < MAX_SCAN_WINDOW)
 1107		windows = MAX_SCAN_WINDOW / scan_size;
 1108	floor = 1000 / windows;
 1109
 1110	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
 1111	return max_t(unsigned int, floor, scan);
 1112}
 1113
 1114static unsigned int task_scan_start(struct task_struct *p)
 1115{
 1116	unsigned long smin = task_scan_min(p);
 1117	unsigned long period = smin;
 1118	struct numa_group *ng;
 1119
 1120	/* Scale the maximum scan period with the amount of shared memory. */
 1121	rcu_read_lock();
 1122	ng = rcu_dereference(p->numa_group);
 1123	if (ng) {
 1124		unsigned long shared = group_faults_shared(ng);
 1125		unsigned long private = group_faults_priv(ng);
 1126
 1127		period *= refcount_read(&ng->refcount);
 1128		period *= shared + 1;
 1129		period /= private + shared + 1;
 1130	}
 1131	rcu_read_unlock();
 1132
 1133	return max(smin, period);
 1134}
 1135
 1136static unsigned int task_scan_max(struct task_struct *p)
 1137{
 1138	unsigned long smin = task_scan_min(p);
 1139	unsigned long smax;
 1140	struct numa_group *ng;
 1141
 1142	/* Watch for min being lower than max due to floor calculations */
 1143	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
 1144
 1145	/* Scale the maximum scan period with the amount of shared memory. */
 1146	ng = deref_curr_numa_group(p);
 1147	if (ng) {
 1148		unsigned long shared = group_faults_shared(ng);
 1149		unsigned long private = group_faults_priv(ng);
 1150		unsigned long period = smax;
 1151
 1152		period *= refcount_read(&ng->refcount);
 1153		period *= shared + 1;
 1154		period /= private + shared + 1;
 1155
 1156		smax = max(smax, period);
 1157	}
 1158
 1159	return max(smin, smax);
 1160}
 1161
 1162static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
 1163{
 1164	rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
 1165	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
 1166}
 1167
 1168static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
 1169{
 1170	rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
 1171	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
 1172}
 1173
 1174/* Shared or private faults. */
 1175#define NR_NUMA_HINT_FAULT_TYPES 2
 1176
 1177/* Memory and CPU locality */
 1178#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
 1179
 1180/* Averaged statistics, and temporary buffers. */
 1181#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
 1182
 1183pid_t task_numa_group_id(struct task_struct *p)
 1184{
 1185	struct numa_group *ng;
 1186	pid_t gid = 0;
 1187
 1188	rcu_read_lock();
 1189	ng = rcu_dereference(p->numa_group);
 1190	if (ng)
 1191		gid = ng->gid;
 1192	rcu_read_unlock();
 1193
 1194	return gid;
 1195}
 1196
 1197/*
 1198 * The averaged statistics, shared & private, memory & CPU,
 1199 * occupy the first half of the array. The second half of the
 1200 * array is for current counters, which are averaged into the
 1201 * first set by task_numa_placement.
 1202 */
 1203static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
 1204{
 1205	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
 1206}
 1207
 1208static inline unsigned long task_faults(struct task_struct *p, int nid)
 1209{
 1210	if (!p->numa_faults)
 1211		return 0;
 1212
 1213	return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
 1214		p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
 1215}
 1216
 1217static inline unsigned long group_faults(struct task_struct *p, int nid)
 1218{
 1219	struct numa_group *ng = deref_task_numa_group(p);
 1220
 1221	if (!ng)
 1222		return 0;
 1223
 1224	return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
 1225		ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
 1226}
 1227
 1228static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
 1229{
 1230	return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
 1231		group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
 1232}
 1233
 1234static inline unsigned long group_faults_priv(struct numa_group *ng)
 1235{
 1236	unsigned long faults = 0;
 1237	int node;
 1238
 1239	for_each_online_node(node) {
 1240		faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
 1241	}
 1242
 1243	return faults;
 1244}
 1245
 1246static inline unsigned long group_faults_shared(struct numa_group *ng)
 1247{
 1248	unsigned long faults = 0;
 1249	int node;
 1250
 1251	for_each_online_node(node) {
 1252		faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
 1253	}
 1254
 1255	return faults;
 1256}
 1257
 1258/*
 1259 * A node triggering more than 1/3 as many NUMA faults as the maximum is
 1260 * considered part of a numa group's pseudo-interleaving set. Migrations
 1261 * between these nodes are slowed down, to allow things to settle down.
 1262 */
 1263#define ACTIVE_NODE_FRACTION 3
 1264
 1265static bool numa_is_active_node(int nid, struct numa_group *ng)
 1266{
 1267	return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
 1268}
 1269
 1270/* Handle placement on systems where not all nodes are directly connected. */
 1271static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
 1272					int maxdist, bool task)
 1273{
 1274	unsigned long score = 0;
 1275	int node;
 1276
 1277	/*
 1278	 * All nodes are directly connected, and the same distance
 1279	 * from each other. No need for fancy placement algorithms.
 1280	 */
 1281	if (sched_numa_topology_type == NUMA_DIRECT)
 1282		return 0;
 1283
 1284	/*
 1285	 * This code is called for each node, introducing N^2 complexity,
 1286	 * which should be ok given the number of nodes rarely exceeds 8.
 1287	 */
 1288	for_each_online_node(node) {
 1289		unsigned long faults;
 1290		int dist = node_distance(nid, node);
 1291
 1292		/*
 1293		 * The furthest away nodes in the system are not interesting
 1294		 * for placement; nid was already counted.
 1295		 */
 1296		if (dist == sched_max_numa_distance || node == nid)
 1297			continue;
 1298
 1299		/*
 1300		 * On systems with a backplane NUMA topology, compare groups
 1301		 * of nodes, and move tasks towards the group with the most
 1302		 * memory accesses. When comparing two nodes at distance
 1303		 * "hoplimit", only nodes closer by than "hoplimit" are part
 1304		 * of each group. Skip other nodes.
 1305		 */
 1306		if (sched_numa_topology_type == NUMA_BACKPLANE &&
 1307					dist >= maxdist)
 1308			continue;
 1309
 1310		/* Add up the faults from nearby nodes. */
 1311		if (task)
 1312			faults = task_faults(p, node);
 1313		else
 1314			faults = group_faults(p, node);
 1315
 1316		/*
 1317		 * On systems with a glueless mesh NUMA topology, there are
 1318		 * no fixed "groups of nodes". Instead, nodes that are not
 1319		 * directly connected bounce traffic through intermediate
 1320		 * nodes; a numa_group can occupy any set of nodes.
 1321		 * The further away a node is, the less the faults count.
 1322		 * This seems to result in good task placement.
 1323		 */
 1324		if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
 1325			faults *= (sched_max_numa_distance - dist);
 1326			faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
 1327		}
 1328
 1329		score += faults;
 1330	}
 1331
 1332	return score;
 1333}
 1334
 1335/*
 1336 * These return the fraction of accesses done by a particular task, or
 1337 * task group, on a particular numa node.  The group weight is given a
 1338 * larger multiplier, in order to group tasks together that are almost
 1339 * evenly spread out between numa nodes.
 1340 */
 1341static inline unsigned long task_weight(struct task_struct *p, int nid,
 1342					int dist)
 1343{
 1344	unsigned long faults, total_faults;
 1345
 1346	if (!p->numa_faults)
 1347		return 0;
 1348
 1349	total_faults = p->total_numa_faults;
 1350
 1351	if (!total_faults)
 1352		return 0;
 1353
 1354	faults = task_faults(p, nid);
 1355	faults += score_nearby_nodes(p, nid, dist, true);
 1356
 1357	return 1000 * faults / total_faults;
 1358}
 1359
 1360static inline unsigned long group_weight(struct task_struct *p, int nid,
 1361					 int dist)
 1362{
 1363	struct numa_group *ng = deref_task_numa_group(p);
 1364	unsigned long faults, total_faults;
 1365
 1366	if (!ng)
 1367		return 0;
 1368
 1369	total_faults = ng->total_faults;
 1370
 1371	if (!total_faults)
 1372		return 0;
 1373
 1374	faults = group_faults(p, nid);
 1375	faults += score_nearby_nodes(p, nid, dist, false);
 1376
 1377	return 1000 * faults / total_faults;
 1378}
 1379
 1380bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 1381				int src_nid, int dst_cpu)
 1382{
 1383	struct numa_group *ng = deref_curr_numa_group(p);
 1384	int dst_nid = cpu_to_node(dst_cpu);
 1385	int last_cpupid, this_cpupid;
 1386
 1387	this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
 1388	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
 1389
 1390	/*
 1391	 * Allow first faults or private faults to migrate immediately early in
 1392	 * the lifetime of a task. The magic number 4 is based on waiting for
 1393	 * two full passes of the "multi-stage node selection" test that is
 1394	 * executed below.
 1395	 */
 1396	if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
 1397	    (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
 1398		return true;
 1399
 1400	/*
 1401	 * Multi-stage node selection is used in conjunction with a periodic
 1402	 * migration fault to build a temporal task<->page relation. By using
 1403	 * a two-stage filter we remove short/unlikely relations.
 1404	 *
 1405	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
 1406	 * a task's usage of a particular page (n_p) per total usage of this
 1407	 * page (n_t) (in a given time-span) to a probability.
 1408	 *
 1409	 * Our periodic faults will sample this probability and getting the
 1410	 * same result twice in a row, given these samples are fully
 1411	 * independent, is then given by P(n)^2, provided our sample period
 1412	 * is sufficiently short compared to the usage pattern.
 1413	 *
 1414	 * This quadric squishes small probabilities, making it less likely we
 1415	 * act on an unlikely task<->page relation.
 1416	 */
 1417	if (!cpupid_pid_unset(last_cpupid) &&
 1418				cpupid_to_nid(last_cpupid) != dst_nid)
 1419		return false;
 1420
 1421	/* Always allow migrate on private faults */
 1422	if (cpupid_match_pid(p, last_cpupid))
 1423		return true;
 1424
 1425	/* A shared fault, but p->numa_group has not been set up yet. */
 1426	if (!ng)
 1427		return true;
 1428
 1429	/*
 1430	 * Destination node is much more heavily used than the source
 1431	 * node? Allow migration.
 1432	 */
 1433	if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
 1434					ACTIVE_NODE_FRACTION)
 1435		return true;
 1436
 1437	/*
 1438	 * Distribute memory according to CPU & memory use on each node,
 1439	 * with 3/4 hysteresis to avoid unnecessary memory migrations:
 1440	 *
 1441	 * faults_cpu(dst)   3   faults_cpu(src)
 1442	 * --------------- * - > ---------------
 1443	 * faults_mem(dst)   4   faults_mem(src)
 1444	 */
 1445	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
 1446	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
 1447}
 1448
 1449/*
 1450 * 'numa_type' describes the node at the moment of load balancing.
 1451 */
 1452enum numa_type {
 1453	/* The node has spare capacity that can be used to run more tasks.  */
 1454	node_has_spare = 0,
 1455	/*
 1456	 * The node is fully used and the tasks don't compete for more CPU
 1457	 * cycles. Nevertheless, some tasks might wait before running.
 1458	 */
 1459	node_fully_busy,
 1460	/*
 1461	 * The node is overloaded and can't provide expected CPU cycles to all
 1462	 * tasks.
 1463	 */
 1464	node_overloaded
 1465};
 1466
 1467/* Cached statistics for all CPUs within a node */
 1468struct numa_stats {
 1469	unsigned long load;
 1470	unsigned long runnable;
 1471	unsigned long util;
 1472	/* Total compute capacity of CPUs on a node */
 1473	unsigned long compute_capacity;
 1474	unsigned int nr_running;
 1475	unsigned int weight;
 1476	enum numa_type node_type;
 1477	int idle_cpu;
 1478};
 1479
 1480static inline bool is_core_idle(int cpu)
 1481{
 1482#ifdef CONFIG_SCHED_SMT
 1483	int sibling;
 1484
 1485	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
 1486		if (cpu == sibling)
 1487			continue;
 1488
 1489		if (!idle_cpu(sibling))
 1490			return false;
 1491	}
 1492#endif
 1493
 1494	return true;
 1495}
 1496
 1497struct task_numa_env {
 1498	struct task_struct *p;
 1499
 1500	int src_cpu, src_nid;
 1501	int dst_cpu, dst_nid;
 1502
 1503	struct numa_stats src_stats, dst_stats;
 1504
 1505	int imbalance_pct;
 1506	int dist;
 1507
 1508	struct task_struct *best_task;
 1509	long best_imp;
 1510	int best_cpu;
 1511};
 1512
 1513static unsigned long cpu_load(struct rq *rq);
 1514static unsigned long cpu_runnable(struct rq *rq);
 1515static unsigned long cpu_util(int cpu);
 1516static inline long adjust_numa_imbalance(int imbalance,
 1517					int dst_running, int dst_weight);
 1518
 1519static inline enum
 1520numa_type numa_classify(unsigned int imbalance_pct,
 1521			 struct numa_stats *ns)
 1522{
 1523	if ((ns->nr_running > ns->weight) &&
 1524	    (((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) ||
 1525	     ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100))))
 1526		return node_overloaded;
 1527
 1528	if ((ns->nr_running < ns->weight) ||
 1529	    (((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) &&
 1530	     ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100))))
 1531		return node_has_spare;
 1532
 1533	return node_fully_busy;
 1534}
 1535
 1536#ifdef CONFIG_SCHED_SMT
 1537/* Forward declarations of select_idle_sibling helpers */
 1538static inline bool test_idle_cores(int cpu, bool def);
 1539static inline int numa_idle_core(int idle_core, int cpu)
 1540{
 1541	if (!static_branch_likely(&sched_smt_present) ||
 1542	    idle_core >= 0 || !test_idle_cores(cpu, false))
 1543		return idle_core;
 1544
 1545	/*
 1546	 * Prefer cores instead of packing HT siblings
 1547	 * and triggering future load balancing.
 1548	 */
 1549	if (is_core_idle(cpu))
 1550		idle_core = cpu;
 1551
 1552	return idle_core;
 1553}
 1554#else
 1555static inline int numa_idle_core(int idle_core, int cpu)
 1556{
 1557	return idle_core;
 1558}
 1559#endif
 1560
 1561/*
 1562 * Gather all necessary information to make NUMA balancing placement
 1563 * decisions that are compatible with standard load balancer. This
 1564 * borrows code and logic from update_sg_lb_stats but sharing a
 1565 * common implementation is impractical.
 1566 */
 1567static void update_numa_stats(struct task_numa_env *env,
 1568			      struct numa_stats *ns, int nid,
 1569			      bool find_idle)
 1570{
 1571	int cpu, idle_core = -1;
 1572
 1573	memset(ns, 0, sizeof(*ns));
 1574	ns->idle_cpu = -1;
 1575
 1576	rcu_read_lock();
 1577	for_each_cpu(cpu, cpumask_of_node(nid)) {
 1578		struct rq *rq = cpu_rq(cpu);
 1579
 1580		ns->load += cpu_load(rq);
 1581		ns->runnable += cpu_runnable(rq);
 1582		ns->util += cpu_util(cpu);
 1583		ns->nr_running += rq->cfs.h_nr_running;
 1584		ns->compute_capacity += capacity_of(cpu);
 1585
 1586		if (find_idle && !rq->nr_running && idle_cpu(cpu)) {
 1587			if (READ_ONCE(rq->numa_migrate_on) ||
 1588			    !cpumask_test_cpu(cpu, env->p->cpus_ptr))
 1589				continue;
 1590
 1591			if (ns->idle_cpu == -1)
 1592				ns->idle_cpu = cpu;
 1593
 1594			idle_core = numa_idle_core(idle_core, cpu);
 1595		}
 1596	}
 1597	rcu_read_unlock();
 1598
 1599	ns->weight = cpumask_weight(cpumask_of_node(nid));
 1600
 1601	ns->node_type = numa_classify(env->imbalance_pct, ns);
 1602
 1603	if (idle_core >= 0)
 1604		ns->idle_cpu = idle_core;
 1605}
 1606
 1607static void task_numa_assign(struct task_numa_env *env,
 1608			     struct task_struct *p, long imp)
 1609{
 1610	struct rq *rq = cpu_rq(env->dst_cpu);
 1611
 1612	/* Check if run-queue part of active NUMA balance. */
 1613	if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
 1614		int cpu;
 1615		int start = env->dst_cpu;
 1616
 1617		/* Find alternative idle CPU. */
 1618		for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start) {
 1619			if (cpu == env->best_cpu || !idle_cpu(cpu) ||
 1620			    !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
 1621				continue;
 1622			}
 1623
 1624			env->dst_cpu = cpu;
 1625			rq = cpu_rq(env->dst_cpu);
 1626			if (!xchg(&rq->numa_migrate_on, 1))
 1627				goto assign;
 1628		}
 1629
 1630		/* Failed to find an alternative idle CPU */
 1631		return;
 1632	}
 1633
 1634assign:
 1635	/*
 1636	 * Clear previous best_cpu/rq numa-migrate flag, since task now
 1637	 * found a better CPU to move/swap.
 1638	 */
 1639	if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) {
 1640		rq = cpu_rq(env->best_cpu);
 1641		WRITE_ONCE(rq->numa_migrate_on, 0);
 1642	}
 1643
 1644	if (env->best_task)
 1645		put_task_struct(env->best_task);
 1646	if (p)
 1647		get_task_struct(p);
 1648
 1649	env->best_task = p;
 1650	env->best_imp = imp;
 1651	env->best_cpu = env->dst_cpu;
 1652}
 1653
 1654static bool load_too_imbalanced(long src_load, long dst_load,
 1655				struct task_numa_env *env)
 1656{
 1657	long imb, old_imb;
 1658	long orig_src_load, orig_dst_load;
 1659	long src_capacity, dst_capacity;
 1660
 1661	/*
 1662	 * The load is corrected for the CPU capacity available on each node.
 1663	 *
 1664	 * src_load        dst_load
 1665	 * ------------ vs ---------
 1666	 * src_capacity    dst_capacity
 1667	 */
 1668	src_capacity = env->src_stats.compute_capacity;
 1669	dst_capacity = env->dst_stats.compute_capacity;
 1670
 1671	imb = abs(dst_load * src_capacity - src_load * dst_capacity);
 1672
 1673	orig_src_load = env->src_stats.load;
 1674	orig_dst_load = env->dst_stats.load;
 1675
 1676	old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
 1677
 1678	/* Would this change make things worse? */
 1679	return (imb > old_imb);
 1680}
 1681
 1682/*
 1683 * Maximum NUMA importance can be 1998 (2*999);
 1684 * SMALLIMP @ 30 would be close to 1998/64.
 1685 * Used to deter task migration.
 1686 */
 1687#define SMALLIMP	30
 1688
 1689/*
 1690 * This checks if the overall compute and NUMA accesses of the system would
 1691 * be improved if the source tasks was migrated to the target dst_cpu taking
 1692 * into account that it might be best if task running on the dst_cpu should
 1693 * be exchanged with the source task
 1694 */
 1695static bool task_numa_compare(struct task_numa_env *env,
 1696			      long taskimp, long groupimp, bool maymove)
 1697{
 1698	struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
 1699	struct rq *dst_rq = cpu_rq(env->dst_cpu);
 1700	long imp = p_ng ? groupimp : taskimp;
 1701	struct task_struct *cur;
 1702	long src_load, dst_load;
 1703	int dist = env->dist;
 1704	long moveimp = imp;
 1705	long load;
 1706	bool stopsearch = false;
 1707
 1708	if (READ_ONCE(dst_rq->numa_migrate_on))
 1709		return false;
 1710
 1711	rcu_read_lock();
 1712	cur = rcu_dereference(dst_rq->curr);
 1713	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
 1714		cur = NULL;
 1715
 1716	/*
 1717	 * Because we have preemption enabled we can get migrated around and
 1718	 * end try selecting ourselves (current == env->p) as a swap candidate.
 1719	 */
 1720	if (cur == env->p) {
 1721		stopsearch = true;
 1722		goto unlock;
 1723	}
 1724
 1725	if (!cur) {
 1726		if (maymove && moveimp >= env->best_imp)
 1727			goto assign;
 1728		else
 1729			goto unlock;
 1730	}
 1731
 1732	/* Skip this swap candidate if cannot move to the source cpu. */
 1733	if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
 1734		goto unlock;
 1735
 1736	/*
 1737	 * Skip this swap candidate if it is not moving to its preferred
 1738	 * node and the best task is.
 1739	 */
 1740	if (env->best_task &&
 1741	    env->best_task->numa_preferred_nid == env->src_nid &&
 1742	    cur->numa_preferred_nid != env->src_nid) {
 1743		goto unlock;
 1744	}
 1745
 1746	/*
 1747	 * "imp" is the fault differential for the source task between the
 1748	 * source and destination node. Calculate the total differential for
 1749	 * the source task and potential destination task. The more negative
 1750	 * the value is, the more remote accesses that would be expected to
 1751	 * be incurred if the tasks were swapped.
 1752	 *
 1753	 * If dst and source tasks are in the same NUMA group, or not
 1754	 * in any group then look only at task weights.
 1755	 */
 1756	cur_ng = rcu_dereference(cur->numa_group);
 1757	if (cur_ng == p_ng) {
 1758		imp = taskimp + task_weight(cur, env->src_nid, dist) -
 1759		      task_weight(cur, env->dst_nid, dist);
 1760		/*
 1761		 * Add some hysteresis to prevent swapping the
 1762		 * tasks within a group over tiny differences.
 1763		 */
 1764		if (cur_ng)
 1765			imp -= imp / 16;
 1766	} else {
 1767		/*
 1768		 * Compare the group weights. If a task is all by itself
 1769		 * (not part of a group), use the task weight instead.
 1770		 */
 1771		if (cur_ng && p_ng)
 1772			imp += group_weight(cur, env->src_nid, dist) -
 1773			       group_weight(cur, env->dst_nid, dist);
 1774		else
 1775			imp += task_weight(cur, env->src_nid, dist) -
 1776			       task_weight(cur, env->dst_nid, dist);
 1777	}
 1778
 1779	/* Discourage picking a task already on its preferred node */
 1780	if (cur->numa_preferred_nid == env->dst_nid)
 1781		imp -= imp / 16;
 1782
 1783	/*
 1784	 * Encourage picking a task that moves to its preferred node.
 1785	 * This potentially makes imp larger than it's maximum of
 1786	 * 1998 (see SMALLIMP and task_weight for why) but in this
 1787	 * case, it does not matter.
 1788	 */
 1789	if (cur->numa_preferred_nid == env->src_nid)
 1790		imp += imp / 8;
 1791
 1792	if (maymove && moveimp > imp && moveimp > env->best_imp) {
 1793		imp = moveimp;
 1794		cur = NULL;
 1795		goto assign;
 1796	}
 1797
 1798	/*
 1799	 * Prefer swapping with a task moving to its preferred node over a
 1800	 * task that is not.
 1801	 */
 1802	if (env->best_task && cur->numa_preferred_nid == env->src_nid &&
 1803	    env->best_task->numa_preferred_nid != env->src_nid) {
 1804		goto assign;
 1805	}
 1806
 1807	/*
 1808	 * If the NUMA importance is less than SMALLIMP,
 1809	 * task migration might only result in ping pong
 1810	 * of tasks and also hurt performance due to cache
 1811	 * misses.
 1812	 */
 1813	if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
 1814		goto unlock;
 1815
 1816	/*
 1817	 * In the overloaded case, try and keep the load balanced.
 1818	 */
 1819	load = task_h_load(env->p) - task_h_load(cur);
 1820	if (!load)
 1821		goto assign;
 1822
 1823	dst_load = env->dst_stats.load + load;
 1824	src_load = env->src_stats.load - load;
 1825
 1826	if (load_too_imbalanced(src_load, dst_load, env))
 1827		goto unlock;
 1828
 1829assign:
 1830	/* Evaluate an idle CPU for a task numa move. */
 1831	if (!cur) {
 1832		int cpu = env->dst_stats.idle_cpu;
 1833
 1834		/* Nothing cached so current CPU went idle since the search. */
 1835		if (cpu < 0)
 1836			cpu = env->dst_cpu;
 1837
 1838		/*
 1839		 * If the CPU is no longer truly idle and the previous best CPU
 1840		 * is, keep using it.
 1841		 */
 1842		if (!idle_cpu(cpu) && env->best_cpu >= 0 &&
 1843		    idle_cpu(env->best_cpu)) {
 1844			cpu = env->best_cpu;
 1845		}
 1846
 1847		env->dst_cpu = cpu;
 1848	}
 1849
 1850	task_numa_assign(env, cur, imp);
 1851
 1852	/*
 1853	 * If a move to idle is allowed because there is capacity or load
 1854	 * balance improves then stop the search. While a better swap
 1855	 * candidate may exist, a search is not free.
 1856	 */
 1857	if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu))
 1858		stopsearch = true;
 1859
 1860	/*
 1861	 * If a swap candidate must be identified and the current best task
 1862	 * moves its preferred node then stop the search.
 1863	 */
 1864	if (!maymove && env->best_task &&
 1865	    env->best_task->numa_preferred_nid == env->src_nid) {
 1866		stopsearch = true;
 1867	}
 1868unlock:
 1869	rcu_read_unlock();
 1870
 1871	return stopsearch;
 1872}
 1873
 1874static void task_numa_find_cpu(struct task_numa_env *env,
 1875				long taskimp, long groupimp)
 1876{
 1877	bool maymove = false;
 1878	int cpu;
 1879
 1880	/*
 1881	 * If dst node has spare capacity, then check if there is an
 1882	 * imbalance that would be overruled by the load balancer.
 1883	 */
 1884	if (env->dst_stats.node_type == node_has_spare) {
 1885		unsigned int imbalance;
 1886		int src_running, dst_running;
 1887
 1888		/*
 1889		 * Would movement cause an imbalance? Note that if src has
 1890		 * more running tasks that the imbalance is ignored as the
 1891		 * move improves the imbalance from the perspective of the
 1892		 * CPU load balancer.
 1893		 * */
 1894		src_running = env->src_stats.nr_running - 1;
 1895		dst_running = env->dst_stats.nr_running + 1;
 1896		imbalance = max(0, dst_running - src_running);
 1897		imbalance = adjust_numa_imbalance(imbalance, dst_running,
 1898							env->dst_stats.weight);
 1899
 1900		/* Use idle CPU if there is no imbalance */
 1901		if (!imbalance) {
 1902			maymove = true;
 1903			if (env->dst_stats.idle_cpu >= 0) {
 1904				env->dst_cpu = env->dst_stats.idle_cpu;
 1905				task_numa_assign(env, NULL, 0);
 1906				return;
 1907			}
 1908		}
 1909	} else {
 1910		long src_load, dst_load, load;
 1911		/*
 1912		 * If the improvement from just moving env->p direction is better
 1913		 * than swapping tasks around, check if a move is possible.
 1914		 */
 1915		load = task_h_load(env->p);
 1916		dst_load = env->dst_stats.load + load;
 1917		src_load = env->src_stats.load - load;
 1918		maymove = !load_too_imbalanced(src_load, dst_load, env);
 1919	}
 1920
 1921	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
 1922		/* Skip this CPU if the source task cannot migrate */
 1923		if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
 1924			continue;
 1925
 1926		env->dst_cpu = cpu;
 1927		if (task_numa_compare(env, taskimp, groupimp, maymove))
 1928			break;
 1929	}
 1930}
 1931
 1932static int task_numa_migrate(struct task_struct *p)
 1933{
 1934	struct task_numa_env env = {
 1935		.p = p,
 1936
 1937		.src_cpu = task_cpu(p),
 1938		.src_nid = task_node(p),
 1939
 1940		.imbalance_pct = 112,
 1941
 1942		.best_task = NULL,
 1943		.best_imp = 0,
 1944		.best_cpu = -1,
 1945	};
 1946	unsigned long taskweight, groupweight;
 1947	struct sched_domain *sd;
 1948	long taskimp, groupimp;
 1949	struct numa_group *ng;
 1950	struct rq *best_rq;
 1951	int nid, ret, dist;
 1952
 1953	/*
 1954	 * Pick the lowest SD_NUMA domain, as that would have the smallest
 1955	 * imbalance and would be the first to start moving tasks about.
 1956	 *
 1957	 * And we want to avoid any moving of tasks about, as that would create
 1958	 * random movement of tasks -- counter the numa conditions we're trying
 1959	 * to satisfy here.
 1960	 */
 1961	rcu_read_lock();
 1962	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
 1963	if (sd)
 1964		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
 1965	rcu_read_unlock();
 1966
 1967	/*
 1968	 * Cpusets can break the scheduler domain tree into smaller
 1969	 * balance domains, some of which do not cross NUMA boundaries.
 1970	 * Tasks that are "trapped" in such domains cannot be migrated
 1971	 * elsewhere, so there is no point in (re)trying.
 1972	 */
 1973	if (unlikely(!sd)) {
 1974		sched_setnuma(p, task_node(p));
 1975		return -EINVAL;
 1976	}
 1977
 1978	env.dst_nid = p->numa_preferred_nid;
 1979	dist = env.dist = node_distance(env.src_nid, env.dst_nid);
 1980	taskweight = task_weight(p, env.src_nid, dist);
 1981	groupweight = group_weight(p, env.src_nid, dist);
 1982	update_numa_stats(&env, &env.src_stats, env.src_nid, false);
 1983	taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
 1984	groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
 1985	update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
 1986
 1987	/* Try to find a spot on the preferred nid. */
 1988	task_numa_find_cpu(&env, taskimp, groupimp);
 1989
 1990	/*
 1991	 * Look at other nodes in these cases:
 1992	 * - there is no space available on the preferred_nid
 1993	 * - the task is part of a numa_group that is interleaved across
 1994	 *   multiple NUMA nodes; in order to better consolidate the group,
 1995	 *   we need to check other locations.
 1996	 */
 1997	ng = deref_curr_numa_group(p);
 1998	if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
 1999		for_each_online_node(nid) {
 2000			if (nid == env.src_nid || nid == p->numa_preferred_nid)
 2001				continue;
 2002
 2003			dist = node_distance(env.src_nid, env.dst_nid);
 2004			if (sched_numa_topology_type == NUMA_BACKPLANE &&
 2005						dist != env.dist) {
 2006				taskweight = task_weight(p, env.src_nid, dist);
 2007				groupweight = group_weight(p, env.src_nid, dist);
 2008			}
 2009
 2010			/* Only consider nodes where both task and groups benefit */
 2011			taskimp = task_weight(p, nid, dist) - taskweight;
 2012			groupimp = group_weight(p, nid, dist) - groupweight;
 2013			if (taskimp < 0 && groupimp < 0)
 2014				continue;
 2015
 2016			env.dist = dist;
 2017			env.dst_nid = nid;
 2018			update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
 2019			task_numa_find_cpu(&env, taskimp, groupimp);
 2020		}
 2021	}
 2022
 2023	/*
 2024	 * If the task is part of a workload that spans multiple NUMA nodes,
 2025	 * and is migrating into one of the workload's active nodes, remember
 2026	 * this node as the task's preferred numa node, so the workload can
 2027	 * settle down.
 2028	 * A task that migrated to a second choice node will be better off
 2029	 * trying for a better one later. Do not set the preferred node here.
 2030	 */
 2031	if (ng) {
 2032		if (env.best_cpu == -1)
 2033			nid = env.src_nid;
 2034		else
 2035			nid = cpu_to_node(env.best_cpu);
 2036
 2037		if (nid != p->numa_preferred_nid)
 2038			sched_setnuma(p, nid);
 2039	}
 2040
 2041	/* No better CPU than the current one was found. */
 2042	if (env.best_cpu == -1) {
 2043		trace_sched_stick_numa(p, env.src_cpu, NULL, -1);
 2044		return -EAGAIN;
 2045	}
 2046
 2047	best_rq = cpu_rq(env.best_cpu);
 2048	if (env.best_task == NULL) {
 2049		ret = migrate_task_to(p, env.best_cpu);
 2050		WRITE_ONCE(best_rq->numa_migrate_on, 0);
 2051		if (ret != 0)
 2052			trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu);
 2053		return ret;
 2054	}
 2055
 2056	ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
 2057	WRITE_ONCE(best_rq->numa_migrate_on, 0);
 2058
 2059	if (ret != 0)
 2060		trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu);
 2061	put_task_struct(env.best_task);
 2062	return ret;
 2063}
 2064
 2065/* Attempt to migrate a task to a CPU on the preferred node. */
 2066static void numa_migrate_preferred(struct task_struct *p)
 2067{
 2068	unsigned long interval = HZ;
 2069
 2070	/* This task has no NUMA fault statistics yet */
 2071	if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
 2072		return;
 2073
 2074	/* Periodically retry migrating the task to the preferred node */
 2075	interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
 2076	p->numa_migrate_retry = jiffies + interval;
 2077
 2078	/* Success if task is already running on preferred CPU */
 2079	if (task_node(p) == p->numa_preferred_nid)
 2080		return;
 2081
 2082	/* Otherwise, try migrate to a CPU on the preferred node */
 2083	task_numa_migrate(p);
 2084}
 2085
 2086/*
 2087 * Find out how many nodes on the workload is actively running on. Do this by
 2088 * tracking the nodes from which NUMA hinting faults are triggered. This can
 2089 * be different from the set of nodes where the workload's memory is currently
 2090 * located.
 2091 */
 2092static void numa_group_count_active_nodes(struct numa_group *numa_group)
 2093{
 2094	unsigned long faults, max_faults = 0;
 2095	int nid, active_nodes = 0;
 2096
 2097	for_each_online_node(nid) {
 2098		faults = group_faults_cpu(numa_group, nid);
 2099		if (faults > max_faults)
 2100			max_faults = faults;
 2101	}
 2102
 2103	for_each_online_node(nid) {
 2104		faults = group_faults_cpu(numa_group, nid);
 2105		if (faults * ACTIVE_NODE_FRACTION > max_faults)
 2106			active_nodes++;
 2107	}
 2108
 2109	numa_group->max_faults_cpu = max_faults;
 2110	numa_group->active_nodes = active_nodes;
 2111}
 2112
 2113/*
 2114 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
 2115 * increments. The more local the fault statistics are, the higher the scan
 2116 * period will be for the next scan window. If local/(local+remote) ratio is
 2117 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
 2118 * the scan period will decrease. Aim for 70% local accesses.
 2119 */
 2120#define NUMA_PERIOD_SLOTS 10
 2121#define NUMA_PERIOD_THRESHOLD 7
 2122
 2123/*
 2124 * Increase the scan period (slow down scanning) if the majority of
 2125 * our memory is already on our local node, or if the majority of
 2126 * the page accesses are shared with other processes.
 2127 * Otherwise, decrease the scan period.
 2128 */
 2129static void update_task_scan_period(struct task_struct *p,
 2130			unsigned long shared, unsigned long private)
 2131{
 2132	unsigned int period_slot;
 2133	int lr_ratio, ps_ratio;
 2134	int diff;
 2135
 2136	unsigned long remote = p->numa_faults_locality[0];
 2137	unsigned long local = p->numa_faults_locality[1];
 2138
 2139	/*
 2140	 * If there were no record hinting faults then either the task is
 2141	 * completely idle or all activity is areas that are not of interest
 2142	 * to automatic numa balancing. Related to that, if there were failed
 2143	 * migration then it implies we are migrating too quickly or the local
 2144	 * node is overloaded. In either case, scan slower
 2145	 */
 2146	if (local + shared == 0 || p->numa_faults_locality[2]) {
 2147		p->numa_scan_period = min(p->numa_scan_period_max,
 2148			p->numa_scan_period << 1);
 2149
 2150		p->mm->numa_next_scan = jiffies +
 2151			msecs_to_jiffies(p->numa_scan_period);
 2152
 2153		return;
 2154	}
 2155
 2156	/*
 2157	 * Prepare to scale scan period relative to the current period.
 2158	 *	 == NUMA_PERIOD_THRESHOLD scan period stays the same
 2159	 *       <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
 2160	 *	 >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
 2161	 */
 2162	period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
 2163	lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
 2164	ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);
 2165
 2166	if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
 2167		/*
 2168		 * Most memory accesses are local. There is no need to
 2169		 * do fast NUMA scanning, since memory is already local.
 2170		 */
 2171		int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
 2172		if (!slot)
 2173			slot = 1;
 2174		diff = slot * period_slot;
 2175	} else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
 2176		/*
 2177		 * Most memory accesses are shared with other tasks.
 2178		 * There is no point in continuing fast NUMA scanning,
 2179		 * since other tasks may just move the memory elsewhere.
 2180		 */
 2181		int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
 2182		if (!slot)
 2183			slot = 1;
 2184		diff = slot * period_slot;
 2185	} else {
 2186		/*
 2187		 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
 2188		 * yet they are not on the local NUMA node. Speed up
 2189		 * NUMA scanning to get the memory moved over.
 2190		 */
 2191		int ratio = max(lr_ratio, ps_ratio);
 2192		diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
 2193	}
 2194
 2195	p->numa_scan_period = clamp(p->numa_scan_period + diff,
 2196			task_scan_min(p), task_scan_max(p));
 2197	memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
 2198}
 2199
 2200/*
 2201 * Get the fraction of time the task has been running since the last
 2202 * NUMA placement cycle. The scheduler keeps similar statistics, but
 2203 * decays those on a 32ms period, which is orders of magnitude off
 2204 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
 2205 * stats only if the task is so new there are no NUMA statistics yet.
 2206 */
 2207static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
 2208{
 2209	u64 runtime, delta, now;
 2210	/* Use the start of this time slice to avoid calculations. */
 2211	now = p->se.exec_start;
 2212	runtime = p->se.sum_exec_runtime;
 2213
 2214	if (p->last_task_numa_placement) {
 2215		delta = runtime - p->last_sum_exec_runtime;
 2216		*period = now - p->last_task_numa_placement;
 2217
 2218		/* Avoid time going backwards, prevent potential divide error: */
 2219		if (unlikely((s64)*period < 0))
 2220			*period = 0;
 2221	} else {
 2222		delta = p->se.avg.load_sum;
 2223		*period = LOAD_AVG_MAX;
 2224	}
 2225
 2226	p->last_sum_exec_runtime = runtime;
 2227	p->last_task_numa_placement = now;
 2228
 2229	return delta;
 2230}
 2231
 2232/*
 2233 * Determine the preferred nid for a task in a numa_group. This needs to
 2234 * be done in a way that produces consistent results with group_weight,
 2235 * otherwise workloads might not converge.
 2236 */
 2237static int preferred_group_nid(struct task_struct *p, int nid)
 2238{
 2239	nodemask_t nodes;
 2240	int dist;
 2241
 2242	/* Direct connections between all NUMA nodes. */
 2243	if (sched_numa_topology_type == NUMA_DIRECT)
 2244		return nid;
 2245
 2246	/*
 2247	 * On a system with glueless mesh NUMA topology, group_weight
 2248	 * scores nodes according to the number of NUMA hinting faults on
 2249	 * both the node itself, and on nearby nodes.
 2250	 */
 2251	if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
 2252		unsigned long score, max_score = 0;
 2253		int node, max_node = nid;
 2254
 2255		dist = sched_max_numa_distance;
 2256
 2257		for_each_online_node(node) {
 2258			score = group_weight(p, node, dist);
 2259			if (score > max_score) {
 2260				max_score = score;
 2261				max_node = node;
 2262			}
 2263		}
 2264		return max_node;
 2265	}
 2266
 2267	/*
 2268	 * Finding the preferred nid in a system with NUMA backplane
 2269	 * interconnect topology is more involved. The goal is to locate
 2270	 * tasks from numa_groups near each other in the system, and
 2271	 * untangle workloads from different sides of the system. This requires
 2272	 * searching down the hierarchy of node groups, recursively searching
 2273	 * inside the highest scoring group of nodes. The nodemask tricks
 2274	 * keep the complexity of the search down.
 2275	 */
 2276	nodes = node_online_map;
 2277	for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
 2278		unsigned long max_faults = 0;
 2279		nodemask_t max_group = NODE_MASK_NONE;
 2280		int a, b;
 2281
 2282		/* Are there nodes at this distance from each other? */
 2283		if (!find_numa_distance(dist))
 2284			continue;
 2285
 2286		for_each_node_mask(a, nodes) {
 2287			unsigned long faults = 0;
 2288			nodemask_t this_group;
 2289			nodes_clear(this_group);
 2290
 2291			/* Sum group's NUMA faults; includes a==b case. */
 2292			for_each_node_mask(b, nodes) {
 2293				if (node_distance(a, b) < dist) {
 2294					faults += group_faults(p, b);
 2295					node_set(b, this_group);
 2296					node_clear(b, nodes);
 2297				}
 2298			}
 2299
 2300			/* Remember the top group. */
 2301			if (faults > max_faults) {
 2302				max_faults = faults;
 2303				max_group = this_group;
 2304				/*
 2305				 * subtle: at the smallest distance there is
 2306				 * just one node left in each "group", the
 2307				 * winner is the preferred nid.
 2308				 */
 2309				nid = a;
 2310			}
 2311		}
 2312		/* Next round, evaluate the nodes within max_group. */
 2313		if (!max_faults)
 2314			break;
 2315		nodes = max_group;
 2316	}
 2317	return nid;
 2318}
 2319
 2320static void task_numa_placement(struct task_struct *p)
 2321{
 2322	int seq, nid, max_nid = NUMA_NO_NODE;
 2323	unsigned long max_faults = 0;
 2324	unsigned long fault_types[2] = { 0, 0 };
 2325	unsigned long total_faults;
 2326	u64 runtime, period;
 2327	spinlock_t *group_lock = NULL;
 2328	struct numa_group *ng;
 2329
 2330	/*
 2331	 * The p->mm->numa_scan_seq field gets updated without
 2332	 * exclusive access. Use READ_ONCE() here to ensure
 2333	 * that the field is read in a single access:
 2334	 */
 2335	seq = READ_ONCE(p->mm->numa_scan_seq);
 2336	if (p->numa_scan_seq == seq)
 2337		return;
 2338	p->numa_scan_seq = seq;
 2339	p->numa_scan_period_max = task_scan_max(p);
 2340
 2341	total_faults = p->numa_faults_locality[0] +
 2342		       p->numa_faults_locality[1];
 2343	runtime = numa_get_avg_runtime(p, &period);
 2344
 2345	/* If the task is part of a group prevent parallel updates to group stats */
 2346	ng = deref_curr_numa_group(p);
 2347	if (ng) {
 2348		group_lock = &ng->lock;
 2349		spin_lock_irq(group_lock);
 2350	}
 2351
 2352	/* Find the node with the highest number of faults */
 2353	for_each_online_node(nid) {
 2354		/* Keep track of the offsets in numa_faults array */
 2355		int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
 2356		unsigned long faults = 0, group_faults = 0;
 2357		int priv;
 2358
 2359		for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
 2360			long diff, f_diff, f_weight;
 2361
 2362			mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
 2363			membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
 2364			cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
 2365			cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
 2366
 2367			/* Decay existing window, copy faults since last scan */
 2368			diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
 2369			fault_types[priv] += p->numa_faults[membuf_idx];
 2370			p->numa_faults[membuf_idx] = 0;
 2371
 2372			/*
 2373			 * Normalize the faults_from, so all tasks in a group
 2374			 * count according to CPU use, instead of by the raw
 2375			 * number of faults. Tasks with little runtime have
 2376			 * little over-all impact on throughput, and thus their
 2377			 * faults are less important.
 2378			 */
 2379			f_weight = div64_u64(runtime << 16, period + 1);
 2380			f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
 2381				   (total_faults + 1);
 2382			f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
 2383			p->numa_faults[cpubuf_idx] = 0;
 2384
 2385			p->numa_faults[mem_idx] += diff;
 2386			p->numa_faults[cpu_idx] += f_diff;
 2387			faults += p->numa_faults[mem_idx];
 2388			p->total_numa_faults += diff;
 2389			if (ng) {
 2390				/*
 2391				 * safe because we can only change our own group
 2392				 *
 2393				 * mem_idx represents the offset for a given
 2394				 * nid and priv in a specific region because it
 2395				 * is at the beginning of the numa_faults array.
 2396				 */
 2397				ng->faults[mem_idx] += diff;
 2398				ng->faults_cpu[mem_idx] += f_diff;
 2399				ng->total_faults += diff;
 2400				group_faults += ng->faults[mem_idx];
 2401			}
 2402		}
 2403
 2404		if (!ng) {
 2405			if (faults > max_faults) {
 2406				max_faults = faults;
 2407				max_nid = nid;
 2408			}
 2409		} else if (group_faults > max_faults) {
 2410			max_faults = group_faults;
 2411			max_nid = nid;
 2412		}
 2413	}
 2414
 2415	if (ng) {
 2416		numa_group_count_active_nodes(ng);
 2417		spin_unlock_irq(group_lock);
 2418		max_nid = preferred_group_nid(p, max_nid);
 2419	}
 2420
 2421	if (max_faults) {
 2422		/* Set the new preferred node */
 2423		if (max_nid != p->numa_preferred_nid)
 2424			sched_setnuma(p, max_nid);
 2425	}
 2426
 2427	update_task_scan_period(p, fault_types[0], fault_types[1]);
 2428}
 2429
 2430static inline int get_numa_group(struct numa_group *grp)
 2431{
 2432	return refcount_inc_not_zero(&grp->refcount);
 2433}
 2434
 2435static inline void put_numa_group(struct numa_group *grp)
 2436{
 2437	if (refcount_dec_and_test(&grp->refcount))
 2438		kfree_rcu(grp, rcu);
 2439}
 2440
 2441static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 2442			int *priv)
 2443{
 2444	struct numa_group *grp, *my_grp;
 2445	struct task_struct *tsk;
 2446	bool join = false;
 2447	int cpu = cpupid_to_cpu(cpupid);
 2448	int i;
 2449
 2450	if (unlikely(!deref_curr_numa_group(p))) {
 2451		unsigned int size = sizeof(struct numa_group) +
 2452				    4*nr_node_ids*sizeof(unsigned long);
 2453
 2454		grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
 2455		if (!grp)
 2456			return;
 2457
 2458		refcount_set(&grp->refcount, 1);
 2459		grp->active_nodes = 1;
 2460		grp->max_faults_cpu = 0;
 2461		spin_lock_init(&grp->lock);
 2462		grp->gid = p->pid;
 2463		/* Second half of the array tracks nids where faults happen */
 2464		grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
 2465						nr_node_ids;
 2466
 2467		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
 2468			grp->faults[i] = p->numa_faults[i];
 2469
 2470		grp->total_faults = p->total_numa_faults;
 2471
 2472		grp->nr_tasks++;
 2473		rcu_assign_pointer(p->numa_group, grp);
 2474	}
 2475
 2476	rcu_read_lock();
 2477	tsk = READ_ONCE(cpu_rq(cpu)->curr);
 2478
 2479	if (!cpupid_match_pid(tsk, cpupid))
 2480		goto no_join;
 2481
 2482	grp = rcu_dereference(tsk->numa_group);
 2483	if (!grp)
 2484		goto no_join;
 2485
 2486	my_grp = deref_curr_numa_group(p);
 2487	if (grp == my_grp)
 2488		goto no_join;
 2489
 2490	/*
 2491	 * Only join the other group if its bigger; if we're the bigger group,
 2492	 * the other task will join us.
 2493	 */
 2494	if (my_grp->nr_tasks > grp->nr_tasks)
 2495		goto no_join;
 2496
 2497	/*
 2498	 * Tie-break on the grp address.
 2499	 */
 2500	if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
 2501		goto no_join;
 2502
 2503	/* Always join threads in the same process. */
 2504	if (tsk->mm == current->mm)
 2505		join = true;
 2506
 2507	/* Simple filter to avoid false positives due to PID collisions */
 2508	if (flags & TNF_SHARED)
 2509		join = true;
 2510
 2511	/* Update priv based on whether false sharing was detected */
 2512	*priv = !join;
 2513
 2514	if (join && !get_numa_group(grp))
 2515		goto no_join;
 2516
 2517	rcu_read_unlock();
 2518
 2519	if (!join)
 2520		return;
 2521
 2522	BUG_ON(irqs_disabled());
 2523	double_lock_irq(&my_grp->lock, &grp->lock);
 2524
 2525	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
 2526		my_grp->faults[i] -= p->numa_faults[i];
 2527		grp->faults[i] += p->numa_faults[i];
 2528	}
 2529	my_grp->total_faults -= p->total_numa_faults;
 2530	grp->total_faults += p->total_numa_faults;
 2531
 2532	my_grp->nr_tasks--;
 2533	grp->nr_tasks++;
 2534
 2535	spin_unlock(&my_grp->lock);
 2536	spin_unlock_irq(&grp->lock);
 2537
 2538	rcu_assign_pointer(p->numa_group, grp);
 2539
 2540	put_numa_group(my_grp);
 2541	return;
 2542
 2543no_join:
 2544	rcu_read_unlock();
 2545	return;
 2546}
 2547
 2548/*
 2549 * Get rid of NUMA statistics associated with a task (either current or dead).
 2550 * If @final is set, the task is dead and has reached refcount zero, so we can
 2551 * safely free all relevant data structures. Otherwise, there might be
 2552 * concurrent reads from places like load balancing and procfs, and we should
 2553 * reset the data back to default state without freeing ->numa_faults.
 2554 */
 2555void task_numa_free(struct task_struct *p, bool final)
 2556{
 2557	/* safe: p either is current or is being freed by current */
 2558	struct numa_group *grp = rcu_dereference_raw(p->numa_group);
 2559	unsigned long *numa_faults = p->numa_faults;
 2560	unsigned long flags;
 2561	int i;
 2562
 2563	if (!numa_faults)
 2564		return;
 2565
 2566	if (grp) {
 2567		spin_lock_irqsave(&grp->lock, flags);
 2568		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
 2569			grp->faults[i] -= p->numa_faults[i];
 2570		grp->total_faults -= p->total_numa_faults;
 2571
 2572		grp->nr_tasks--;
 2573		spin_unlock_irqrestore(&grp->lock, flags);
 2574		RCU_INIT_POINTER(p->numa_group, NULL);
 2575		put_numa_group(grp);
 2576	}
 2577
 2578	if (final) {
 2579		p->numa_faults = NULL;
 2580		kfree(numa_faults);
 2581	} else {
 2582		p->total_numa_faults = 0;
 2583		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
 2584			numa_faults[i] = 0;
 2585	}
 2586}
 2587
 2588/*
 2589 * Got a PROT_NONE fault for a page on @node.
 2590 */
 2591void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 2592{
 2593	struct task_struct *p = current;
 2594	bool migrated = flags & TNF_MIGRATED;
 2595	int cpu_node = task_node(current);
 2596	int local = !!(flags & TNF_FAULT_LOCAL);
 2597	struct numa_group *ng;
 2598	int priv;
 2599
 2600	if (!static_branch_likely(&sched_numa_balancing))
 2601		return;
 2602
 2603	/* for example, ksmd faulting in a user's mm */
 2604	if (!p->mm)
 2605		return;
 2606
 2607	/* Allocate buffer to track faults on a per-node basis */
 2608	if (unlikely(!p->numa_faults)) {
 2609		int size = sizeof(*p->numa_faults) *
 2610			   NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
 2611
 2612		p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
 2613		if (!p->numa_faults)
 2614			return;
 2615
 2616		p->total_numa_faults = 0;
 2617		memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
 2618	}
 2619
 2620	/*
 2621	 * First accesses are treated as private, otherwise consider accesses
 2622	 * to be private if the accessing pid has not changed
 2623	 */
 2624	if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
 2625		priv = 1;
 2626	} else {
 2627		priv = cpupid_match_pid(p, last_cpupid);
 2628		if (!priv && !(flags & TNF_NO_GROUP))
 2629			task_numa_group(p, last_cpupid, flags, &priv);
 2630	}
 2631
 2632	/*
 2633	 * If a workload spans multiple NUMA nodes, a shared fault that
 2634	 * occurs wholly within the set of nodes that the workload is
 2635	 * actively using should be counted as local. This allows the
 2636	 * scan rate to slow down when a workload has settled down.
 2637	 */
 2638	ng = deref_curr_numa_group(p);
 2639	if (!priv && !local && ng && ng->active_nodes > 1 &&
 2640				numa_is_active_node(cpu_node, ng) &&
 2641				numa_is_active_node(mem_node, ng))
 2642		local = 1;
 2643
 2644	/*
 2645	 * Retry to migrate task to preferred node periodically, in case it
 2646	 * previously failed, or the scheduler moved us.
 2647	 */
 2648	if (time_after(jiffies, p->numa_migrate_retry)) {
 2649		task_numa_placement(p);
 2650		numa_migrate_preferred(p);
 2651	}
 2652
 2653	if (migrated)
 2654		p->numa_pages_migrated += pages;
 2655	if (flags & TNF_MIGRATE_FAIL)
 2656		p->numa_faults_locality[2] += pages;
 2657
 2658	p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
 2659	p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
 2660	p->numa_faults_locality[local] += pages;
 2661}
 2662
 2663static void reset_ptenuma_scan(struct task_struct *p)
 2664{
 2665	/*
 2666	 * We only did a read acquisition of the mmap sem, so
 2667	 * p->mm->numa_scan_seq is written to without exclusive access
 2668	 * and the update is not guaranteed to be atomic. That's not
 2669	 * much of an issue though, since this is just used for
 2670	 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
 2671	 * expensive, to avoid any form of compiler optimizations:
 2672	 */
 2673	WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
 2674	p->mm->numa_scan_offset = 0;
 2675}
 2676
 2677/*
 2678 * The expensive part of numa migration is done from task_work context.
 2679 * Triggered from task_tick_numa().
 2680 */
 2681static void task_numa_work(struct callback_head *work)
 2682{
 2683	unsigned long migrate, next_scan, now = jiffies;
 2684	struct task_struct *p = current;
 2685	struct mm_struct *mm = p->mm;
 2686	u64 runtime = p->se.sum_exec_runtime;
 2687	struct vm_area_struct *vma;
 2688	unsigned long start, end;
 2689	unsigned long nr_pte_updates = 0;
 2690	long pages, virtpages;
 2691
 2692	SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
 2693
 2694	work->next = work;
 2695	/*
 2696	 * Who cares about NUMA placement when they're dying.
 2697	 *
 2698	 * NOTE: make sure not to dereference p->mm before this check,
 2699	 * exit_task_work() happens _after_ exit_mm() so we could be called
 2700	 * without p->mm even though we still had it when we enqueued this
 2701	 * work.
 2702	 */
 2703	if (p->flags & PF_EXITING)
 2704		return;
 2705
 2706	if (!mm->numa_next_scan) {
 2707		mm->numa_next_scan = now +
 2708			msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
 2709	}
 2710
 2711	/*
 2712	 * Enforce maximal scan/migration frequency..
 2713	 */
 2714	migrate = mm->numa_next_scan;
 2715	if (time_before(now, migrate))
 2716		return;
 2717
 2718	if (p->numa_scan_period == 0) {
 2719		p->numa_scan_period_max = task_scan_max(p);
 2720		p->numa_scan_period = task_scan_start(p);
 2721	}
 2722
 2723	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
 2724	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
 2725		return;
 2726
 2727	/*
 2728	 * Delay this task enough that another task of this mm will likely win
 2729	 * the next time around.
 2730	 */
 2731	p->node_stamp += 2 * TICK_NSEC;
 2732
 2733	start = mm->numa_scan_offset;
 2734	pages = sysctl_numa_balancing_scan_size;
 2735	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
 2736	virtpages = pages * 8;	   /* Scan up to this much virtual space */
 2737	if (!pages)
 2738		return;
 2739
 2740
 2741	if (!mmap_read_trylock(mm))
 2742		return;
 2743	vma = find_vma(mm, start);
 2744	if (!vma) {
 2745		reset_ptenuma_scan(p);
 2746		start = 0;
 2747		vma = mm->mmap;
 2748	}
 2749	for (; vma; vma = vma->vm_next) {
 2750		if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
 2751			is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
 2752			continue;
 2753		}
 2754
 2755		/*
 2756		 * Shared library pages mapped by multiple processes are not
 2757		 * migrated as it is expected they are cache replicated. Avoid
 2758		 * hinting faults in read-only file-backed mappings or the vdso
 2759		 * as migrating the pages will be of marginal benefit.
 2760		 */
 2761		if (!vma->vm_mm ||
 2762		    (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
 2763			continue;
 2764
 2765		/*
 2766		 * Skip inaccessible VMAs to avoid any confusion between
 2767		 * PROT_NONE and NUMA hinting ptes
 2768		 */
 2769		if (!vma_is_accessible(vma))
 2770			continue;
 2771
 2772		do {
 2773			start = max(start, vma->vm_start);
 2774			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
 2775			end = min(end, vma->vm_end);
 2776			nr_pte_updates = change_prot_numa(vma, start, end);
 2777
 2778			/*
 2779			 * Try to scan sysctl_numa_balancing_size worth of
 2780			 * hpages that have at least one present PTE that
 2781			 * is not already pte-numa. If the VMA contains
 2782			 * areas that are unused or already full of prot_numa
 2783			 * PTEs, scan up to virtpages, to skip through those
 2784			 * areas faster.
 2785			 */
 2786			if (nr_pte_updates)
 2787				pages -= (end - start) >> PAGE_SHIFT;
 2788			virtpages -= (end - start) >> PAGE_SHIFT;
 2789
 2790			start = end;
 2791			if (pages <= 0 || virtpages <= 0)
 2792				goto out;
 2793
 2794			cond_resched();
 2795		} while (end != vma->vm_end);
 2796	}
 2797
 2798out:
 2799	/*
 2800	 * It is possible to reach the end of the VMA list but the last few
 2801	 * VMAs are not guaranteed to the vma_migratable. If they are not, we
 2802	 * would find the !migratable VMA on the next scan but not reset the
 2803	 * scanner to the start so check it now.
 2804	 */
 2805	if (vma)
 2806		mm->numa_scan_offset = start;
 2807	else
 2808		reset_ptenuma_scan(p);
 2809	mmap_read_unlock(mm);
 2810
 2811	/*
 2812	 * Make sure tasks use at least 32x as much time to run other code
 2813	 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
 2814	 * Usually update_task_scan_period slows down scanning enough; on an
 2815	 * overloaded system we need to limit overhead on a per task basis.
 2816	 */
 2817	if (unlikely(p->se.sum_exec_runtime != runtime)) {
 2818		u64 diff = p->se.sum_exec_runtime - runtime;
 2819		p->node_stamp += 32 * diff;
 2820	}
 2821}
 2822
 2823void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
 2824{
 2825	int mm_users = 0;
 2826	struct mm_struct *mm = p->mm;
 2827
 2828	if (mm) {
 2829		mm_users = atomic_read(&mm->mm_users);
 2830		if (mm_users == 1) {
 2831			mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
 2832			mm->numa_scan_seq = 0;
 2833		}
 2834	}
 2835	p->node_stamp			= 0;
 2836	p->numa_scan_seq		= mm ? mm->numa_scan_seq : 0;
 2837	p->numa_scan_period		= sysctl_numa_balancing_scan_delay;
 2838	/* Protect against double add, see task_tick_numa and task_numa_work */
 2839	p->numa_work.next		= &p->numa_work;
 2840	p->numa_faults			= NULL;
 2841	RCU_INIT_POINTER(p->numa_group, NULL);
 2842	p->last_task_numa_placement	= 0;
 2843	p->last_sum_exec_runtime	= 0;
 2844
 2845	init_task_work(&p->numa_work, task_numa_work);
 2846
 2847	/* New address space, reset the preferred nid */
 2848	if (!(clone_flags & CLONE_VM)) {
 2849		p->numa_preferred_nid = NUMA_NO_NODE;
 2850		return;
 2851	}
 2852
 2853	/*
 2854	 * New thread, keep existing numa_preferred_nid which should be copied
 2855	 * already by arch_dup_task_struct but stagger when scans start.
 2856	 */
 2857	if (mm) {
 2858		unsigned int delay;
 2859
 2860		delay = min_t(unsigned int, task_scan_max(current),
 2861			current->numa_scan_period * mm_users * NSEC_PER_MSEC);
 2862		delay += 2 * TICK_NSEC;
 2863		p->node_stamp = delay;
 2864	}
 2865}
 2866
 2867/*
 2868 * Drive the periodic memory faults..
 2869 */
 2870static void task_tick_numa(struct rq *rq, struct task_struct *curr)
 2871{
 2872	struct callback_head *work = &curr->numa_work;
 2873	u64 period, now;
 2874
 2875	/*
 2876	 * We don't care about NUMA placement if we don't have memory.
 2877	 */
 2878	if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
 2879		return;
 2880
 2881	/*
 2882	 * Using runtime rather than walltime has the dual advantage that
 2883	 * we (mostly) drive the selection from busy threads and that the
 2884	 * task needs to have done some actual work before we bother with
 2885	 * NUMA placement.
 2886	 */
 2887	now = curr->se.sum_exec_runtime;
 2888	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
 2889
 2890	if (now > curr->node_stamp + period) {
 2891		if (!curr->node_stamp)
 2892			curr->numa_scan_period = task_scan_start(curr);
 2893		curr->node_stamp += period;
 2894
 2895		if (!time_before(jiffies, curr->mm->numa_next_scan))
 2896			task_work_add(curr, work, TWA_RESUME);
 2897	}
 2898}
 2899
 2900static void update_scan_period(struct task_struct *p, int new_cpu)
 2901{
 2902	int src_nid = cpu_to_node(task_cpu(p));
 2903	int dst_nid = cpu_to_node(new_cpu);
 2904
 2905	if (!static_branch_likely(&sched_numa_balancing))
 2906		return;
 2907
 2908	if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
 2909		return;
 2910
 2911	if (src_nid == dst_nid)
 2912		return;
 2913
 2914	/*
 2915	 * Allow resets if faults have been trapped before one scan
 2916	 * has completed. This is most likely due to a new task that
 2917	 * is pulled cross-node due to wakeups or load balancing.
 2918	 */
 2919	if (p->numa_scan_seq) {
 2920		/*
 2921		 * Avoid scan adjustments if moving to the preferred
 2922		 * node or if the task was not previously running on
 2923		 * the preferred node.
 2924		 */
 2925		if (dst_nid == p->numa_preferred_nid ||
 2926		    (p->numa_preferred_nid != NUMA_NO_NODE &&
 2927			src_nid != p->numa_preferred_nid))
 2928			return;
 2929	}
 2930
 2931	p->numa_scan_period = task_scan_start(p);
 2932}
 2933
 2934#else
 2935static void task_tick_numa(struct rq *rq, struct task_struct *curr)
 2936{
 2937}
 2938
 2939static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
 2940{
 2941}
 2942
 2943static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
 2944{
 2945}
 2946
 2947static inline void update_scan_period(struct task_struct *p, int new_cpu)
 2948{
 2949}
 2950
 2951#endif /* CONFIG_NUMA_BALANCING */
 2952
 2953static void
 2954account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 2955{
 2956	update_load_add(&cfs_rq->load, se->load.weight);
 2957#ifdef CONFIG_SMP
 2958	if (entity_is_task(se)) {
 2959		struct rq *rq = rq_of(cfs_rq);
 2960
 2961		account_numa_enqueue(rq, task_of(se));
 2962		list_add(&se->group_node, &rq->cfs_tasks);
 2963	}
 2964#endif
 2965	cfs_rq->nr_running++;
 2966}
 2967
 2968static void
 2969account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 2970{
 2971	update_load_sub(&cfs_rq->load, se->load.weight);
 2972#ifdef CONFIG_SMP
 2973	if (entity_is_task(se)) {
 2974		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
 2975		list_del_init(&se->group_node);
 2976	}
 2977#endif
 2978	cfs_rq->nr_running--;
 2979}
 2980
 2981/*
 2982 * Signed add and clamp on underflow.
 2983 *
 2984 * Explicitly do a load-store to ensure the intermediate value never hits
 2985 * memory. This allows lockless observations without ever seeing the negative
 2986 * values.
 2987 */
 2988#define add_positive(_ptr, _val) do {                           \
 2989	typeof(_ptr) ptr = (_ptr);                              \
 2990	typeof(_val) val = (_val);                              \
 2991	typeof(*ptr) res, var = READ_ONCE(*ptr);                \
 2992								\
 2993	res = var + val;                                        \
 2994								\
 2995	if (val < 0 && res > var)                               \
 2996		res = 0;                                        \
 2997								\
 2998	WRITE_ONCE(*ptr, res);                                  \
 2999} while (0)
 3000
 3001/*
 3002 * Unsigned subtract and clamp on underflow.
 3003 *
 3004 * Explicitly do a load-store to ensure the intermediate value never hits
 3005 * memory. This allows lockless observations without ever seeing the negative
 3006 * values.
 3007 */
 3008#define sub_positive(_ptr, _val) do {				\
 3009	typeof(_ptr) ptr = (_ptr);				\
 3010	typeof(*ptr) val = (_val);				\
 3011	typeof(*ptr) res, var = READ_ONCE(*ptr);		\
 3012	res = var - val;					\
 3013	if (res > var)						\
 3014		res = 0;					\
 3015	WRITE_ONCE(*ptr, res);					\
 3016} while (0)
 3017
 3018/*
 3019 * Remove and clamp on negative, from a local variable.
 3020 *
 3021 * A variant of sub_positive(), which does not use explicit load-store
 3022 * and is thus optimized for local variable updates.
 3023 */
 3024#define lsub_positive(_ptr, _val) do {				\
 3025	typeof(_ptr) ptr = (_ptr);				\
 3026	*ptr -= min_t(typeof(*ptr), *ptr, _val);		\
 3027} while (0)
 3028
 3029#ifdef CONFIG_SMP
 3030static inline void
 3031enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 3032{
 3033	cfs_rq->avg.load_avg += se->avg.load_avg;
 3034	cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
 3035}
 3036
 3037static inline void
 3038dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 3039{
 3040	u32 divider = get_pelt_divider(&se->avg);
 3041	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
 3042	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
 3043}
 3044#else
 3045static inline void
 3046enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
 3047static inline void
 3048dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
 3049#endif
 3050
 3051static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 3052			    unsigned long weight)
 3053{
 3054	if (se->on_rq) {
 3055		/* commit outstanding execution time */
 3056		if (cfs_rq->curr == se)
 3057			update_curr(cfs_rq);
 3058		update_load_sub(&cfs_rq->load, se->load.weight);
 3059	}
 3060	dequeue_load_avg(cfs_rq, se);
 3061
 3062	update_load_set(&se->load, weight);
 3063
 3064#ifdef CONFIG_SMP
 3065	do {
 3066		u32 divider = get_pelt_divider(&se->avg);
 3067
 3068		se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
 3069	} while (0);
 3070#endif
 3071
 3072	enqueue_load_avg(cfs_rq, se);
 3073	if (se->on_rq)
 3074		update_load_add(&cfs_rq->load, se->load.weight);
 3075
 3076}
 3077
 3078void reweight_task(struct task_struct *p, int prio)
 3079{
 3080	struct sched_entity *se = &p->se;
 3081	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 3082	struct load_weight *load = &se->load;
 3083	unsigned long weight = scale_load(sched_prio_to_weight[prio]);
 3084
 3085	reweight_entity(cfs_rq, se, weight);
 3086	load->inv_weight = sched_prio_to_wmult[prio];
 3087}
 3088
 3089#ifdef CONFIG_FAIR_GROUP_SCHED
 3090#ifdef CONFIG_SMP
 3091/*
 3092 * All this does is approximate the hierarchical proportion which includes that
 3093 * global sum we all love to hate.
 3094 *
 3095 * That is, the weight of a group entity, is the proportional share of the
 3096 * group weight based on the group runqueue weights. That is:
 3097 *
 3098 *                     tg->weight * grq->load.weight
 3099 *   ge->load.weight = -----------------------------               (1)
 3100 *                       \Sum grq->load.weight
 3101 *
 3102 * Now, because computing that sum is prohibitively expensive to compute (been
 3103 * there, done that) we approximate it with this average stuff. The average
 3104 * moves slower and therefore the approximation is cheaper and more stable.
 3105 *
 3106 * So instead of the above, we substitute:
 3107 *
 3108 *   grq->load.weight -> grq->avg.load_avg                         (2)
 3109 *
 3110 * which yields the following:
 3111 *
 3112 *                     tg->weight * grq->avg.load_avg
 3113 *   ge->load.weight = ------------------------------              (3)
 3114 *                             tg->load_avg
 3115 *
 3116 * Where: tg->load_avg ~= \Sum grq->avg.load_avg
 3117 *
 3118 * That is shares_avg, and it is right (given the approximation (2)).
 3119 *
 3120 * The problem with it is that because the average is slow -- it was designed
 3121 * to be exactly that of course -- this leads to transients in boundary
 3122 * conditions. In specific, the case where the group was idle and we start the
 3123 * one task. It takes time for our CPU's grq->avg.load_avg to build up,
 3124 * yielding bad latency etc..
 3125 *
 3126 * Now, in that special case (1) reduces to:
 3127 *
 3128 *                     tg->weight * grq->load.weight
 3129 *   ge->load.weight = ----------------------------- = tg->weight   (4)
 3130 *                         grp->load.weight
 3131 *
 3132 * That is, the sum collapses because all other CPUs are idle; the UP scenario.
 3133 *
 3134 * So what we do is modify our approximation (3) to approach (4) in the (near)
 3135 * UP case, like:
 3136 *
 3137 *   ge->load.weight =
 3138 *
 3139 *              tg->weight * grq->load.weight
 3140 *     ---------------------------------------------------         (5)
 3141 *     tg->load_avg - grq->avg.load_avg + grq->load.weight
 3142 *
 3143 * But because grq->load.weight can drop to 0, resulting in a divide by zero,
 3144 * we need to use grq->avg.load_avg as its lower bound, which then gives:
 3145 *
 3146 *
 3147 *                     tg->weight * grq->load.weight
 3148 *   ge->load.weight = -----------------------------		   (6)
 3149 *                             tg_load_avg'
 3150 *
 3151 * Where:
 3152 *
 3153 *   tg_load_avg' = tg->load_avg - grq->avg.load_avg +
 3154 *                  max(grq->load.weight, grq->avg.load_avg)
 3155 *
 3156 * And that is shares_weight and is icky. In the (near) UP case it approaches
 3157 * (4) while in the normal case it approaches (3). It consistently
 3158 * overestimates the ge->load.weight and therefore:
 3159 *
 3160 *   \Sum ge->load.weight >= tg->weight
 3161 *
 3162 * hence icky!
 3163 */
 3164static long calc_group_shares(struct cfs_rq *cfs_rq)
 3165{
 3166	long tg_weight, tg_shares, load, shares;
 3167	struct task_group *tg = cfs_rq->tg;
 3168
 3169	tg_shares = READ_ONCE(tg->shares);
 3170
 3171	load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
 3172
 3173	tg_weight = atomic_long_read(&tg->load_avg);
 3174
 3175	/* Ensure tg_weight >= load */
 3176	tg_weight -= cfs_rq->tg_load_avg_contrib;
 3177	tg_weight += load;
 3178
 3179	shares = (tg_shares * load);
 3180	if (tg_weight)
 3181		shares /= tg_weight;
 3182
 3183	/*
 3184	 * MIN_SHARES has to be unscaled here to support per-CPU partitioning
 3185	 * of a group with small tg->shares value. It is a floor value which is
 3186	 * assigned as a minimum load.weight to the sched_entity representing
 3187	 * the group on a CPU.
 3188	 *
 3189	 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
 3190	 * on an 8-core system with 8 tasks each runnable on one CPU shares has
 3191	 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
 3192	 * case no task is runnable on a CPU MIN_SHARES=2 should be returned
 3193	 * instead of 0.
 3194	 */
 3195	return clamp_t(long, shares, MIN_SHARES, tg_shares);
 3196}
 3197#endif /* CONFIG_SMP */
 3198
 3199static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
 3200
 3201/*
 3202 * Recomputes the group entity based on the current state of its group
 3203 * runqueue.
 3204 */
 3205static void update_cfs_group(struct sched_entity *se)
 3206{
 3207	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
 3208	long shares;
 3209
 3210	if (!gcfs_rq)
 3211		return;
 3212
 3213	if (throttled_hierarchy(gcfs_rq))
 3214		return;
 3215
 3216#ifndef CONFIG_SMP
 3217	shares = READ_ONCE(gcfs_rq->tg->shares);
 3218
 3219	if (likely(se->load.weight == shares))
 3220		return;
 3221#else
 3222	shares   = calc_group_shares(gcfs_rq);
 3223#endif
 3224
 3225	reweight_entity(cfs_rq_of(se), se, shares);
 3226}
 3227
 3228#else /* CONFIG_FAIR_GROUP_SCHED */
 3229static inline void update_cfs_group(struct sched_entity *se)
 3230{
 3231}
 3232#endif /* CONFIG_FAIR_GROUP_SCHED */
 3233
 3234static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 3235{
 3236	struct rq *rq = rq_of(cfs_rq);
 3237
 3238	if (&rq->cfs == cfs_rq) {
 3239		/*
 3240		 * There are a few boundary cases this might miss but it should
 3241		 * get called often enough that that should (hopefully) not be
 3242		 * a real problem.
 3243		 *
 3244		 * It will not get called when we go idle, because the idle
 3245		 * thread is a different class (!fair), nor will the utilization
 3246		 * number include things like RT tasks.
 3247		 *
 3248		 * As is, the util number is not freq-invariant (we'd have to
 3249		 * implement arch_scale_freq_capacity() for that).
 3250		 *
 3251		 * See cpu_util().
 3252		 */
 3253		cpufreq_update_util(rq, flags);
 3254	}
 3255}
 3256
 3257#ifdef CONFIG_SMP
 3258#ifdef CONFIG_FAIR_GROUP_SCHED
 3259/*
 3260 * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
 3261 * immediately before a parent cfs_rq, and cfs_rqs are removed from the list
 3262 * bottom-up, we only have to test whether the cfs_rq before us on the list
 3263 * is our child.
 3264 * If cfs_rq is not on the list, test whether a child needs its to be added to
 3265 * connect a branch to the tree  * (see list_add_leaf_cfs_rq() for details).
 3266 */
 3267static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
 3268{
 3269	struct cfs_rq *prev_cfs_rq;
 3270	struct list_head *prev;
 3271
 3272	if (cfs_rq->on_list) {
 3273		prev = cfs_rq->leaf_cfs_rq_list.prev;
 3274	} else {
 3275		struct rq *rq = rq_of(cfs_rq);
 3276
 3277		prev = rq->tmp_alone_branch;
 3278	}
 3279
 3280	prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
 3281
 3282	return (prev_cfs_rq->tg->parent == cfs_rq->tg);
 3283}
 3284
 3285static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 3286{
 3287	if (cfs_rq->load.weight)
 3288		return false;
 3289
 3290	if (cfs_rq->avg.load_sum)
 3291		return false;
 3292
 3293	if (cfs_rq->avg.util_sum)
 3294		return false;
 3295
 3296	if (cfs_rq->avg.runnable_sum)
 3297		return false;
 3298
 3299	if (child_cfs_rq_on_list(cfs_rq))
 3300		return false;
 3301
 3302	/*
 3303	 * _avg must be null when _sum are null because _avg = _sum / divider
 3304	 * Make sure that rounding and/or propagation of PELT values never
 3305	 * break this.
 3306	 */
 3307	SCHED_WARN_ON(cfs_rq->avg.load_avg ||
 3308		      cfs_rq->avg.util_avg ||
 3309		      cfs_rq->avg.runnable_avg);
 3310
 3311	return true;
 3312}
 3313
 3314/**
 3315 * update_tg_load_avg - update the tg's load avg
 3316 * @cfs_rq: the cfs_rq whose avg changed
 3317 *
 3318 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
 3319 * However, because tg->load_avg is a global value there are performance
 3320 * considerations.
 3321 *
 3322 * In order to avoid having to look at the other cfs_rq's, we use a
 3323 * differential update where we store the last value we propagated. This in
 3324 * turn allows skipping updates if the differential is 'small'.
 3325 *
 3326 * Updating tg's load_avg is necessary before update_cfs_share().
 3327 */
 3328static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
 3329{
 3330	long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
 3331
 3332	/*
 3333	 * No need to update load_avg for root_task_group as it is not used.
 3334	 */
 3335	if (cfs_rq->tg == &root_task_group)
 3336		return;
 3337
 3338	if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
 3339		atomic_long_add(delta, &cfs_rq->tg->load_avg);
 3340		cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
 3341	}
 3342}
 3343
 3344/*
 3345 * Called within set_task_rq() right before setting a task's CPU. The
 3346 * caller only guarantees p->pi_lock is held; no other assumptions,
 3347 * including the state of rq->lock, should be made.
 3348 */
 3349void set_task_rq_fair(struct sched_entity *se,
 3350		      struct cfs_rq *prev, struct cfs_rq *next)
 3351{
 3352	u64 p_last_update_time;
 3353	u64 n_last_update_time;
 3354
 3355	if (!sched_feat(ATTACH_AGE_LOAD))
 3356		return;
 3357
 3358	/*
 3359	 * We are supposed to update the task to "current" time, then its up to
 3360	 * date and ready to go to new CPU/cfs_rq. But we have difficulty in
 3361	 * getting what current time is, so simply throw away the out-of-date
 3362	 * time. This will result in the wakee task is less decayed, but giving
 3363	 * the wakee more load sounds not bad.
 3364	 */
 3365	if (!(se->avg.last_update_time && prev))
 3366		return;
 3367
 3368#ifndef CONFIG_64BIT
 3369	{
 3370		u64 p_last_update_time_copy;
 3371		u64 n_last_update_time_copy;
 3372
 3373		do {
 3374			p_last_update_time_copy = prev->load_last_update_time_copy;
 3375			n_last_update_time_copy = next->load_last_update_time_copy;
 3376
 3377			smp_rmb();
 3378
 3379			p_last_update_time = prev->avg.last_update_time;
 3380			n_last_update_time = next->avg.last_update_time;
 3381
 3382		} while (p_last_update_time != p_last_update_time_copy ||
 3383			 n_last_update_time != n_last_update_time_copy);
 3384	}
 3385#else
 3386	p_last_update_time = prev->avg.last_update_time;
 3387	n_last_update_time = next->avg.last_update_time;
 3388#endif
 3389	__update_load_avg_blocked_se(p_last_update_time, se);
 3390	se->avg.last_update_time = n_last_update_time;
 3391}
 3392
 3393
 3394/*
 3395 * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
 3396 * propagate its contribution. The key to this propagation is the invariant
 3397 * that for each group:
 3398 *
 3399 *   ge->avg == grq->avg						(1)
 3400 *
 3401 * _IFF_ we look at the pure running and runnable sums. Because they
 3402 * represent the very same entity, just at different points in the hierarchy.
 3403 *
 3404 * Per the above update_tg_cfs_util() and update_tg_cfs_runnable() are trivial
 3405 * and simply copies the running/runnable sum over (but still wrong, because
 3406 * the group entity and group rq do not have their PELT windows aligned).
 3407 *
 3408 * However, update_tg_cfs_load() is more complex. So we have:
 3409 *
 3410 *   ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg		(2)
 3411 *
 3412 * And since, like util, the runnable part should be directly transferable,
 3413 * the following would _appear_ to be the straight forward approach:
 3414 *
 3415 *   grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg	(3)
 3416 *
 3417 * And per (1) we have:
 3418 *
 3419 *   ge->avg.runnable_avg == grq->avg.runnable_avg
 3420 *
 3421 * Which gives:
 3422 *
 3423 *                      ge->load.weight * grq->avg.load_avg
 3424 *   ge->avg.load_avg = -----------------------------------		(4)
 3425 *                               grq->load.weight
 3426 *
 3427 * Except that is wrong!
 3428 *
 3429 * Because while for entities historical weight is not important and we
 3430 * really only care about our future and therefore can consider a pure
 3431 * runnable sum, runqueues can NOT do this.
 3432 *
 3433 * We specifically want runqueues to have a load_avg that includes
 3434 * historical weights. Those represent the blocked load, the load we expect
 3435 * to (shortly) return to us. This only works by keeping the weights as
 3436 * integral part of the sum. We therefore cannot decompose as per (3).
 3437 *
 3438 * Another reason this doesn't work is that runnable isn't a 0-sum entity.
 3439 * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
 3440 * rq itself is runnable anywhere between 2/3 and 1 depending on how the
 3441 * runnable section of these tasks overlap (or not). If they were to perfectly
 3442 * align the rq as a whole would be runnable 2/3 of the time. If however we
 3443 * always have at least 1 runnable task, the rq as a whole is always runnable.
 3444 *
 3445 * So we'll have to approximate.. :/
 3446 *
 3447 * Given the constraint:
 3448 *
 3449 *   ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
 3450 *
 3451 * We can construct a rule that adds runnable to a rq by assuming minimal
 3452 * overlap.
 3453 *
 3454 * On removal, we'll assume each task is equally runnable; which yields:
 3455 *
 3456 *   grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
 3457 *
 3458 * XXX: only do this for the part of runnable > running ?
 3459 *
 3460 */
 3461
 3462static inline void
 3463update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 3464{
 3465	long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
 3466	u32 divider;
 3467
 3468	/* Nothing to update */
 3469	if (!delta)
 3470		return;
 3471
 3472	/*
 3473	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
 3474	 * See ___update_load_avg() for details.
 3475	 */
 3476	divider = get_pelt_divider(&cfs_rq->avg);
 3477
 3478	/* Set new sched_entity's utilization */
 3479	se->avg.util_avg = gcfs_rq->avg.util_avg;
 3480	se->avg.util_sum = se->avg.util_avg * divider;
 3481
 3482	/* Update parent cfs_rq utilization */
 3483	add_positive(&cfs_rq->avg.util_avg, delta);
 3484	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
 3485}
 3486
 3487static inline void
 3488update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 3489{
 3490	long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
 3491	u32 divider;
 3492
 3493	/* Nothing to update */
 3494	if (!delta)
 3495		return;
 3496
 3497	/*
 3498	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
 3499	 * See ___update_load_avg() for details.
 3500	 */
 3501	divider = get_pelt_divider(&cfs_rq->avg);
 3502
 3503	/* Set new sched_entity's runnable */
 3504	se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
 3505	se->avg.runnable_sum = se->avg.runnable_avg * divider;
 3506
 3507	/* Update parent cfs_rq runnable */
 3508	add_positive(&cfs_rq->avg.runnable_avg, delta);
 3509	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
 3510}
 3511
 3512static inline void
 3513update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 3514{
 3515	long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
 3516	unsigned long load_avg;
 3517	u64 load_sum = 0;
 3518	u32 divider;
 3519
 3520	if (!runnable_sum)
 3521		return;
 3522
 3523	gcfs_rq->prop_runnable_sum = 0;
 3524
 3525	/*
 3526	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
 3527	 * See ___update_load_avg() for details.
 3528	 */
 3529	divider = get_pelt_divider(&cfs_rq->avg);
 3530
 3531	if (runnable_sum >= 0) {
 3532		/*
 3533		 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
 3534		 * the CPU is saturated running == runnable.
 3535		 */
 3536		runnable_sum += se->avg.load_sum;
 3537		runnable_sum = min_t(long, runnable_sum, divider);
 3538	} else {
 3539		/*
 3540		 * Estimate the new unweighted runnable_sum of the gcfs_rq by
 3541		 * assuming all tasks are equally runnable.
 3542		 */
 3543		if (scale_load_down(gcfs_rq->load.weight)) {
 3544			load_sum = div_s64(gcfs_rq->avg.load_sum,
 3545				scale_load_down(gcfs_rq->load.weight));
 3546		}
 3547
 3548		/* But make sure to not inflate se's runnable */
 3549		runnable_sum = min(se->avg.load_sum, load_sum);
 3550	}
 3551
 3552	/*
 3553	 * runnable_sum can't be lower than running_sum
 3554	 * Rescale running sum to be in the same range as runnable sum
 3555	 * running_sum is in [0 : LOAD_AVG_MAX <<  SCHED_CAPACITY_SHIFT]
 3556	 * runnable_sum is in [0 : LOAD_AVG_MAX]
 3557	 */
 3558	running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
 3559	runnable_sum = max(runnable_sum, running_sum);
 3560
 3561	load_sum = (s64)se_weight(se) * runnable_sum;
 3562	load_avg = div_s64(load_sum, divider);
 3563
 3564	se->avg.load_sum = runnable_sum;
 3565
 3566	delta = load_avg - se->avg.load_avg;
 3567	if (!delta)
 3568		return;
 3569
 3570	se->avg.load_avg = load_avg;
 3571
 3572	add_positive(&cfs_rq->avg.load_avg, delta);
 3573	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
 3574}
 3575
 3576static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
 3577{
 3578	cfs_rq->propagate = 1;
 3579	cfs_rq->prop_runnable_sum += runnable_sum;
 3580}
 3581
 3582/* Update task and its cfs_rq load average */
 3583static inline int propagate_entity_load_avg(struct sched_entity *se)
 3584{
 3585	struct cfs_rq *cfs_rq, *gcfs_rq;
 3586
 3587	if (entity_is_task(se))
 3588		return 0;
 3589
 3590	gcfs_rq = group_cfs_rq(se);
 3591	if (!gcfs_rq->propagate)
 3592		return 0;
 3593
 3594	gcfs_rq->propagate = 0;
 3595
 3596	cfs_rq = cfs_rq_of(se);
 3597
 3598	add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
 3599
 3600	update_tg_cfs_util(cfs_rq, se, gcfs_rq);
 3601	update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
 3602	update_tg_cfs_load(cfs_rq, se, gcfs_rq);
 3603
 3604	trace_pelt_cfs_tp(cfs_rq);
 3605	trace_pelt_se_tp(se);
 3606
 3607	return 1;
 3608}
 3609
 3610/*
 3611 * Check if we need to update the load and the utilization of a blocked
 3612 * group_entity:
 3613 */
 3614static inline bool skip_blocked_update(struct sched_entity *se)
 3615{
 3616	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
 3617
 3618	/*
 3619	 * If sched_entity still have not zero load or utilization, we have to
 3620	 * decay it:
 3621	 */
 3622	if (se->avg.load_avg || se->avg.util_avg)
 3623		return false;
 3624
 3625	/*
 3626	 * If there is a pending propagation, we have to update the load and
 3627	 * the utilization of the sched_entity:
 3628	 */
 3629	if (gcfs_rq->propagate)
 3630		return false;
 3631
 3632	/*
 3633	 * Otherwise, the load and the utilization of the sched_entity is
 3634	 * already zero and there is no pending propagation, so it will be a
 3635	 * waste of time to try to decay it:
 3636	 */
 3637	return true;
 3638}
 3639
 3640#else /* CONFIG_FAIR_GROUP_SCHED */
 3641
 3642static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
 3643
 3644static inline int propagate_entity_load_avg(struct sched_entity *se)
 3645{
 3646	return 0;
 3647}
 3648
 3649static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
 3650
 3651#endif /* CONFIG_FAIR_GROUP_SCHED */
 3652
 3653/**
 3654 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
 3655 * @now: current time, as per cfs_rq_clock_pelt()
 3656 * @cfs_rq: cfs_rq to update
 3657 *
 3658 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
 3659 * avg. The immediate corollary is that all (fair) tasks must be attached, see
 3660 * post_init_entity_util_avg().
 3661 *
 3662 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
 3663 *
 3664 * Returns true if the load decayed or we removed load.
 3665 *
 3666 * Since both these conditions indicate a changed cfs_rq->avg.load we should
 3667 * call update_tg_load_avg() when this function returns true.
 3668 */
 3669static inline int
 3670update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 3671{
 3672	unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0;
 3673	struct sched_avg *sa = &cfs_rq->avg;
 3674	int decayed = 0;
 3675
 3676	if (cfs_rq->removed.nr) {
 3677		unsigned long r;
 3678		u32 divider = get_pelt_divider(&cfs_rq->avg);
 3679
 3680		raw_spin_lock(&cfs_rq->removed.lock);
 3681		swap(cfs_rq->removed.util_avg, removed_util);
 3682		swap(cfs_rq->removed.load_avg, removed_load);
 3683		swap(cfs_rq->removed.runnable_avg, removed_runnable);
 3684		cfs_rq->removed.nr = 0;
 3685		raw_spin_unlock(&cfs_rq->removed.lock);
 3686
 3687		r = removed_load;
 3688		sub_positive(&sa->load_avg, r);
 3689		sa->load_sum = sa->load_avg * divider;
 3690
 3691		r = removed_util;
 3692		sub_positive(&sa->util_avg, r);
 3693		sa->util_sum = sa->util_avg * divider;
 3694
 3695		r = removed_runnable;
 3696		sub_positive(&sa->runnable_avg, r);
 3697		sa->runnable_sum = sa->runnable_avg * divider;
 3698
 3699		/*
 3700		 * removed_runnable is the unweighted version of removed_load so we
 3701		 * can use it to estimate removed_load_sum.
 3702		 */
 3703		add_tg_cfs_propagate(cfs_rq,
 3704			-(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);
 3705
 3706		decayed = 1;
 3707	}
 3708
 3709	decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
 3710
 3711#ifndef CONFIG_64BIT
 3712	smp_wmb();
 3713	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 3714#endif
 3715
 3716	return decayed;
 3717}
 3718
 3719/**
 3720 * attach_entity_load_avg - attach this entity to its cfs_rq load avg
 3721 * @cfs_rq: cfs_rq to attach to
 3722 * @se: sched_entity to attach
 3723 *
 3724 * Must call update_cfs_rq_load_avg() before this, since we rely on
 3725 * cfs_rq->avg.last_update_time being current.
 3726 */
 3727static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 3728{
 3729	/*
 3730	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
 3731	 * See ___update_load_avg() for details.
 3732	 */
 3733	u32 divider = get_pelt_divider(&cfs_rq->avg);
 3734
 3735	/*
 3736	 * When we attach the @se to the @cfs_rq, we must align the decay
 3737	 * window because without that, really weird and wonderful things can
 3738	 * happen.
 3739	 *
 3740	 * XXX illustrate
 3741	 */
 3742	se->avg.last_update_time = cfs_rq->avg.last_update_time;
 3743	se->avg.period_contrib = cfs_rq->avg.period_contrib;
 3744
 3745	/*
 3746	 * Hell(o) Nasty stuff.. we need to recompute _sum based on the new
 3747	 * period_contrib. This isn't strictly correct, but since we're
 3748	 * entirely outside of the PELT hierarchy, nobody cares if we truncate
 3749	 * _sum a little.
 3750	 */
 3751	se->avg.util_sum = se->avg.util_avg * divider;
 3752
 3753	se->avg.runnable_sum = se->avg.runnable_avg * divider;
 3754
 3755	se->avg.load_sum = divider;
 3756	if (se_weight(se)) {
 3757		se->avg.load_sum =
 3758			div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
 3759	}
 3760
 3761	enqueue_load_avg(cfs_rq, se);
 3762	cfs_rq->avg.util_avg += se->avg.util_avg;
 3763	cfs_rq->avg.util_sum += se->avg.util_sum;
 3764	cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
 3765	cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
 3766
 3767	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
 3768
 3769	cfs_rq_util_change(cfs_rq, 0);
 3770
 3771	trace_pelt_cfs_tp(cfs_rq);
 3772}
 3773
 3774/**
 3775 * detach_entity_load_avg - detach this entity from its cfs_rq load avg
 3776 * @cfs_rq: cfs_rq to detach from
 3777 * @se: sched_entity to detach
 3778 *
 3779 * Must call update_cfs_rq_load_avg() before this, since we rely on
 3780 * cfs_rq->avg.last_update_time being current.
 3781 */
 3782static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 3783{
 3784	/*
 3785	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
 3786	 * See ___update_load_avg() for details.
 3787	 */
 3788	u32 divider = get_pelt_divider(&cfs_rq->avg);
 3789
 3790	dequeue_load_avg(cfs_rq, se);
 3791	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
 3792	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
 3793	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
 3794	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
 3795
 3796	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 3797
 3798	cfs_rq_util_change(cfs_rq, 0);
 3799
 3800	trace_pelt_cfs_tp(cfs_rq);
 3801}
 3802
 3803/*
 3804 * Optional action to be done while updating the load average
 3805 */
 3806#define UPDATE_TG	0x1
 3807#define SKIP_AGE_LOAD	0x2
 3808#define DO_ATTACH	0x4
 3809
 3810/* Update task and its cfs_rq load average */
 3811static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 3812{
 3813	u64 now = cfs_rq_clock_pelt(cfs_rq);
 3814	int decayed;
 3815
 3816	/*
 3817	 * Track task load average for carrying it to new CPU after migrated, and
 3818	 * track group sched_entity load average for task_h_load calc in migration
 3819	 */
 3820	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
 3821		__update_load_avg_se(now, cfs_rq, se);
 3822
 3823	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
 3824	decayed |= propagate_entity_load_avg(se);
 3825
 3826	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
 3827
 3828		/*
 3829		 * DO_ATTACH means we're here from enqueue_entity().
 3830		 * !last_update_time means we've passed through
 3831		 * migrate_task_rq_fair() indicating we migrated.
 3832		 *
 3833		 * IOW we're enqueueing a task on a new CPU.
 3834		 */
 3835		attach_entity_load_avg(cfs_rq, se);
 3836		update_tg_load_avg(cfs_rq);
 3837
 3838	} else if (decayed) {
 3839		cfs_rq_util_change(cfs_rq, 0);
 3840
 3841		if (flags & UPDATE_TG)
 3842			update_tg_load_avg(cfs_rq);
 3843	}
 3844}
 3845
 3846#ifndef CONFIG_64BIT
 3847static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
 3848{
 3849	u64 last_update_time_copy;
 3850	u64 last_update_time;
 3851
 3852	do {
 3853		last_update_time_copy = cfs_rq->load_last_update_time_copy;
 3854		smp_rmb();
 3855		last_update_time = cfs_rq->avg.last_update_time;
 3856	} while (last_update_time != last_update_time_copy);
 3857
 3858	return last_update_time;
 3859}
 3860#else
 3861static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
 3862{
 3863	return cfs_rq->avg.last_update_time;
 3864}
 3865#endif
 3866
 3867/*
 3868 * Synchronize entity load avg of dequeued entity without locking
 3869 * the previous rq.
 3870 */
 3871static void sync_entity_load_avg(struct sched_entity *se)
 3872{
 3873	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 3874	u64 last_update_time;
 3875
 3876	last_update_time = cfs_rq_last_update_time(cfs_rq);
 3877	__update_load_avg_blocked_se(last_update_time, se);
 3878}
 3879
 3880/*
 3881 * Task first catches up with cfs_rq, and then subtract
 3882 * itself from the cfs_rq (task must be off the queue now).
 3883 */
 3884static void remove_entity_load_avg(struct sched_entity *se)
 3885{
 3886	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 3887	unsigned long flags;
 3888
 3889	/*
 3890	 * tasks cannot exit without having gone through wake_up_new_task() ->
 3891	 * post_init_entity_util_avg() which will have added things to the
 3892	 * cfs_rq, so we can remove unconditionally.
 3893	 */
 3894
 3895	sync_entity_load_avg(se);
 3896
 3897	raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
 3898	++cfs_rq->removed.nr;
 3899	cfs_rq->removed.util_avg	+= se->avg.util_avg;
 3900	cfs_rq->removed.load_avg	+= se->avg.load_avg;
 3901	cfs_rq->removed.runnable_avg	+= se->avg.runnable_avg;
 3902	raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
 3903}
 3904
 3905static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq)
 3906{
 3907	return cfs_rq->avg.runnable_avg;
 3908}
 3909
 3910static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
 3911{
 3912	return cfs_rq->avg.load_avg;
 3913}
 3914
 3915static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
 3916
 3917static inline unsigned long task_util(struct task_struct *p)
 3918{
 3919	return READ_ONCE(p->se.avg.util_avg);
 3920}
 3921
 3922static inline unsigned long _task_util_est(struct task_struct *p)
 3923{
 3924	struct util_est ue = READ_ONCE(p->se.avg.util_est);
 3925
 3926	return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
 3927}
 3928
 3929static inline unsigned long task_util_est(struct task_struct *p)
 3930{
 3931	return max(task_util(p), _task_util_est(p));
 3932}
 3933
 3934#ifdef CONFIG_UCLAMP_TASK
 3935static inline unsigned long uclamp_task_util(struct task_struct *p)
 3936{
 3937	return clamp(task_util_est(p),
 3938		     uclamp_eff_value(p, UCLAMP_MIN),
 3939		     uclamp_eff_value(p, UCLAMP_MAX));
 3940}
 3941#else
 3942static inline unsigned long uclamp_task_util(struct task_struct *p)
 3943{
 3944	return task_util_est(p);
 3945}
 3946#endif
 3947
 3948static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
 3949				    struct task_struct *p)
 3950{
 3951	unsigned int enqueued;
 3952
 3953	if (!sched_feat(UTIL_EST))
 3954		return;
 3955
 3956	/* Update root cfs_rq's estimated utilization */
 3957	enqueued  = cfs_rq->avg.util_est.enqueued;
 3958	enqueued += _task_util_est(p);
 3959	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
 3960
 3961	trace_sched_util_est_cfs_tp(cfs_rq);
 3962}
 3963
 3964static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
 3965				    struct task_struct *p)
 3966{
 3967	unsigned int enqueued;
 3968
 3969	if (!sched_feat(UTIL_EST))
 3970		return;
 3971
 3972	/* Update root cfs_rq's estimated utilization */
 3973	enqueued  = cfs_rq->avg.util_est.enqueued;
 3974	enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
 3975	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
 3976
 3977	trace_sched_util_est_cfs_tp(cfs_rq);
 3978}
 3979
 3980#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
 3981
 3982/*
 3983 * Check if a (signed) value is within a specified (unsigned) margin,
 3984 * based on the observation that:
 3985 *
 3986 *     abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
 3987 *
 3988 * NOTE: this only works when value + margin < INT_MAX.
 3989 */
 3990static inline bool within_margin(int value, int margin)
 3991{
 3992	return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
 3993}
 3994
 3995static inline void util_est_update(struct cfs_rq *cfs_rq,
 3996				   struct task_struct *p,
 3997				   bool task_sleep)
 3998{
 3999	long last_ewma_diff, last_enqueued_diff;
 4000	struct util_est ue;
 4001
 4002	if (!sched_feat(UTIL_EST))
 4003		return;
 4004
 4005	/*
 4006	 * Skip update of task's estimated utilization when the task has not
 4007	 * yet completed an activation, e.g. being migrated.
 4008	 */
 4009	if (!task_sleep)
 4010		return;
 4011
 4012	/*
 4013	 * If the PELT values haven't changed since enqueue time,
 4014	 * skip the util_est update.
 4015	 */
 4016	ue = p->se.avg.util_est;
 4017	if (ue.enqueued & UTIL_AVG_UNCHANGED)
 4018		return;
 4019
 4020	last_enqueued_diff = ue.enqueued;
 4021
 4022	/*
 4023	 * Reset EWMA on utilization increases, the moving average is used only
 4024	 * to smooth utilization decreases.
 4025	 */
 4026	ue.enqueued = task_util(p);
 4027	if (sched_feat(UTIL_EST_FASTUP)) {
 4028		if (ue.ewma < ue.enqueued) {
 4029			ue.ewma = ue.enqueued;
 4030			goto done;
 4031		}
 4032	}
 4033
 4034	/*
 4035	 * Skip update of task's estimated utilization when its members are
 4036	 * already ~1% close to its last activation value.
 4037	 */
 4038	last_ewma_diff = ue.enqueued - ue.ewma;
 4039	last_enqueued_diff -= ue.enqueued;
 4040	if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
 4041		if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
 4042			goto done;
 4043
 4044		return;
 4045	}
 4046
 4047	/*
 4048	 * To avoid overestimation of actual task utilization, skip updates if
 4049	 * we cannot grant there is idle time in this CPU.
 4050	 */
 4051	if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
 4052		return;
 4053
 4054	/*
 4055	 * Update Task's estimated utilization
 4056	 *
 4057	 * When *p completes an activation we can consolidate another sample
 4058	 * of the task size. This is done by storing the current PELT value
 4059	 * as ue.enqueued and by using this value to update the Exponential
 4060	 * Weighted Moving Average (EWMA):
 4061	 *
 4062	 *  ewma(t) = w *  task_util(p) + (1-w) * ewma(t-1)
 4063	 *          = w *  task_util(p) +         ewma(t-1)  - w * ewma(t-1)
 4064	 *          = w * (task_util(p) -         ewma(t-1)) +     ewma(t-1)
 4065	 *          = w * (      last_ewma_diff            ) +     ewma(t-1)
 4066	 *          = w * (last_ewma_diff  +  ewma(t-1) / w)
 4067	 *
 4068	 * Where 'w' is the weight of new samples, which is configured to be
 4069	 * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT)
 4070	 */
 4071	ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
 4072	ue.ewma  += last_ewma_diff;
 4073	ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
 4074done:
 4075	ue.enqueued |= UTIL_AVG_UNCHANGED;
 4076	WRITE_ONCE(p->se.avg.util_est, ue);
 4077
 4078	trace_sched_util_est_se_tp(&p->se);
 4079}
 4080
 4081static inline int task_fits_capacity(struct task_struct *p, long capacity)
 4082{
 4083	return fits_capacity(uclamp_task_util(p), capacity);
 4084}
 4085
 4086static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 4087{
 4088	if (!static_branch_unlikely(&sched_asym_cpucapacity))
 4089		return;
 4090
 4091	if (!p || p->nr_cpus_allowed == 1) {
 4092		rq->misfit_task_load = 0;
 4093		return;
 4094	}
 4095
 4096	if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
 4097		rq->misfit_task_load = 0;
 4098		return;
 4099	}
 4100
 4101	/*
 4102	 * Make sure that misfit_task_load will not be null even if
 4103	 * task_h_load() returns 0.
 4104	 */
 4105	rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
 4106}
 4107
 4108#else /* CONFIG_SMP */
 4109
 4110static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 4111{
 4112	return true;
 4113}
 4114
 4115#define UPDATE_TG	0x0
 4116#define SKIP_AGE_LOAD	0x0
 4117#define DO_ATTACH	0x0
 4118
 4119static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
 4120{
 4121	cfs_rq_util_change(cfs_rq, 0);
 4122}
 4123
 4124static inline void remove_entity_load_avg(struct sched_entity *se) {}
 4125
 4126static inline void
 4127attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 4128static inline void
 4129detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 4130
 4131static inline int newidle_balance(struct rq *rq, struct rq_flags *rf)
 4132{
 4133	return 0;
 4134}
 4135
 4136static inline void
 4137util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
 4138
 4139static inline void
 4140util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
 4141
 4142static inline void
 4143util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
 4144		bool task_sleep) {}
 4145static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
 4146
 4147#endif /* CONFIG_SMP */
 4148
 4149static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 4150{
 4151#ifdef CONFIG_SCHED_DEBUG
 4152	s64 d = se->vruntime - cfs_rq->min_vruntime;
 4153
 4154	if (d < 0)
 4155		d = -d;
 4156
 4157	if (d > 3*sysctl_sched_latency)
 4158		schedstat_inc(cfs_rq->nr_spread_over);
 4159#endif
 4160}
 4161
 4162static void
 4163place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 4164{
 4165	u64 vruntime = cfs_rq->min_vruntime;
 4166
 4167	/*
 4168	 * The 'current' period is already promised to the current tasks,
 4169	 * however the extra weight of the new task will slow them down a
 4170	 * little, place the new task so that it fits in the slot that
 4171	 * stays open at the end.
 4172	 */
 4173	if (initial && sched_feat(START_DEBIT))
 4174		vruntime += sched_vslice(cfs_rq, se);
 4175
 4176	/* sleeps up to a single latency don't count. */
 4177	if (!initial) {
 4178		unsigned long thresh = sysctl_sched_latency;
 4179
 4180		/*
 4181		 * Halve their sleep time's effect, to allow
 4182		 * for a gentler effect of sleepers:
 4183		 */
 4184		if (sched_feat(GENTLE_FAIR_SLEEPERS))
 4185			thresh >>= 1;
 4186
 4187		vruntime -= thresh;
 4188	}
 4189
 4190	/* ensure we never gain time by being placed backwards. */
 4191	se->vruntime = max_vruntime(se->vruntime, vruntime);
 4192}
 4193
 4194static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
 4195
 4196static inline void check_schedstat_required(void)
 4197{
 4198#ifdef CONFIG_SCHEDSTATS
 4199	if (schedstat_enabled())
 4200		return;
 4201
 4202	/* Force schedstat enabled if a dependent tracepoint is active */
 4203	if (trace_sched_stat_wait_enabled()    ||
 4204			trace_sched_stat_sleep_enabled()   ||
 4205			trace_sched_stat_iowait_enabled()  ||
 4206			trace_sched_stat_blocked_enabled() ||
 4207			trace_sched_stat_runtime_enabled())  {
 4208		printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
 4209			     "stat_blocked and stat_runtime require the "
 4210			     "kernel parameter schedstats=enable or "
 4211			     "kernel.sched_schedstats=1\n");
 4212	}
 4213#endif
 4214}
 4215
 4216static inline bool cfs_bandwidth_used(void);
 4217
 4218/*
 4219 * MIGRATION
 4220 *
 4221 *	dequeue
 4222 *	  update_curr()
 4223 *	    update_min_vruntime()
 4224 *	  vruntime -= min_vruntime
 4225 *
 4226 *	enqueue
 4227 *	  update_curr()
 4228 *	    update_min_vruntime()
 4229 *	  vruntime += min_vruntime
 4230 *
 4231 * this way the vruntime transition between RQs is done when both
 4232 * min_vruntime are up-to-date.
 4233 *
 4234 * WAKEUP (remote)
 4235 *
 4236 *	->migrate_task_rq_fair() (p->state == TASK_WAKING)
 4237 *	  vruntime -= min_vruntime
 4238 *
 4239 *	enqueue
 4240 *	  update_curr()
 4241 *	    update_min_vruntime()
 4242 *	  vruntime += min_vruntime
 4243 *
 4244 * this way we don't have the most up-to-date min_vruntime on the originating
 4245 * CPU and an up-to-date min_vruntime on the destination CPU.
 4246 */
 4247
 4248static void
 4249enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 4250{
 4251	bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
 4252	bool curr = cfs_rq->curr == se;
 4253
 4254	/*
 4255	 * If we're the current task, we must renormalise before calling
 4256	 * update_curr().
 4257	 */
 4258	if (renorm && curr)
 4259		se->vruntime += cfs_rq->min_vruntime;
 4260
 4261	update_curr(cfs_rq);
 4262
 4263	/*
 4264	 * Otherwise, renormalise after, such that we're placed at the current
 4265	 * moment in time, instead of some random moment in the past. Being
 4266	 * placed in the past could significantly boost this task to the
 4267	 * fairness detriment of existing tasks.
 4268	 */
 4269	if (renorm && !curr)
 4270		se->vruntime += cfs_rq->min_vruntime;
 4271
 4272	/*
 4273	 * When enqueuing a sched_entity, we must:
 4274	 *   - Update loads to have both entity and cfs_rq synced with now.
 4275	 *   - Add its load to cfs_rq->runnable_avg
 4276	 *   - For group_entity, update its weight to reflect the new share of
 4277	 *     its group cfs_rq
 4278	 *   - Add its new weight to cfs_rq->load.weight
 4279	 */
 4280	update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
 4281	se_update_runnable(se);
 4282	update_cfs_group(se);
 4283	account_entity_enqueue(cfs_rq, se);
 4284
 4285	if (flags & ENQUEUE_WAKEUP)
 4286		place_entity(cfs_rq, se, 0);
 4287
 4288	check_schedstat_required();
 4289	update_stats_enqueue(cfs_rq, se, flags);
 4290	check_spread(cfs_rq, se);
 4291	if (!curr)
 4292		__enqueue_entity(cfs_rq, se);
 4293	se->on_rq = 1;
 4294
 4295	/*
 4296	 * When bandwidth control is enabled, cfs might have been removed
 4297	 * because of a parent been throttled but cfs->nr_running > 1. Try to
 4298	 * add it unconditionally.
 4299	 */
 4300	if (cfs_rq->nr_running == 1 || cfs_bandwidth_used())
 4301		list_add_leaf_cfs_rq(cfs_rq);
 4302
 4303	if (cfs_rq->nr_running == 1)
 4304		check_enqueue_throttle(cfs_rq);
 4305}
 4306
 4307static void __clear_buddies_last(struct sched_entity *se)
 4308{
 4309	for_each_sched_entity(se) {
 4310		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 4311		if (cfs_rq->last != se)
 4312			break;
 4313
 4314		cfs_rq->last = NULL;
 4315	}
 4316}
 4317
 4318static void __clear_buddies_next(struct sched_entity *se)
 4319{
 4320	for_each_sched_entity(se) {
 4321		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 4322		if (cfs_rq->next != se)
 4323			break;
 4324
 4325		cfs_rq->next = NULL;
 4326	}
 4327}
 4328
 4329static void __clear_buddies_skip(struct sched_entity *se)
 4330{
 4331	for_each_sched_entity(se) {
 4332		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 4333		if (cfs_rq->skip != se)
 4334			break;
 4335
 4336		cfs_rq->skip = NULL;
 4337	}
 4338}
 4339
 4340static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 4341{
 4342	if (cfs_rq->last == se)
 4343		__clear_buddies_last(se);
 4344
 4345	if (cfs_rq->next == se)
 4346		__clear_buddies_next(se);
 4347
 4348	if (cfs_rq->skip == se)
 4349		__clear_buddies_skip(se);
 4350}
 4351
 4352static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 4353
 4354static void
 4355dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 4356{
 4357	/*
 4358	 * Update run-time statistics of the 'current'.
 4359	 */
 4360	update_curr(cfs_rq);
 4361
 4362	/*
 4363	 * When dequeuing a sched_entity, we must:
 4364	 *   - Update loads to have both entity and cfs_rq synced with now.
 4365	 *   - Subtract its load from the cfs_rq->runnable_avg.
 4366	 *   - Subtract its previous weight from cfs_rq->load.weight.
 4367	 *   - For group entity, update its weight to reflect the new share
 4368	 *     of its group cfs_rq.
 4369	 */
 4370	update_load_avg(cfs_rq, se, UPDATE_TG);
 4371	se_update_runnable(se);
 4372
 4373	update_stats_dequeue(cfs_rq, se, flags);
 4374
 4375	clear_buddies(cfs_rq, se);
 4376
 4377	if (se != cfs_rq->curr)
 4378		__dequeue_entity(cfs_rq, se);
 4379	se->on_rq = 0;
 4380	account_entity_dequeue(cfs_rq, se);
 4381
 4382	/*
 4383	 * Normalize after update_curr(); which will also have moved
 4384	 * min_vruntime if @se is the one holding it back. But before doing
 4385	 * update_min_vruntime() again, which will discount @se's position and
 4386	 * can move min_vruntime forward still more.
 4387	 */
 4388	if (!(flags & DEQUEUE_SLEEP))
 4389		se->vruntime -= cfs_rq->min_vruntime;
 4390
 4391	/* return excess runtime on last dequeue */
 4392	return_cfs_rq_runtime(cfs_rq);
 4393
 4394	update_cfs_group(se);
 4395
 4396	/*
 4397	 * Now advance min_vruntime if @se was the entity holding it back,
 4398	 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
 4399	 * put back on, and if we advance min_vruntime, we'll be placed back
 4400	 * further than we started -- ie. we'll be penalized.
 4401	 */
 4402	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
 4403		update_min_vruntime(cfs_rq);
 4404}
 4405
 4406/*
 4407 * Preempt the current task with a newly woken task if needed:
 4408 */
 4409static void
 4410check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 4411{
 4412	unsigned long ideal_runtime, delta_exec;
 4413	struct sched_entity *se;
 4414	s64 delta;
 4415
 4416	ideal_runtime = sched_slice(cfs_rq, curr);
 4417	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
 4418	if (delta_exec > ideal_runtime) {
 4419		resched_curr(rq_of(cfs_rq));
 4420		/*
 4421		 * The current task ran long enough, ensure it doesn't get
 4422		 * re-elected due to buddy favours.
 4423		 */
 4424		clear_buddies(cfs_rq, curr);
 4425		return;
 4426	}
 4427
 4428	/*
 4429	 * Ensure that a task that missed wakeup preemption by a
 4430	 * narrow margin doesn't have to wait for a full slice.
 4431	 * This also mitigates buddy induced latencies under load.
 4432	 */
 4433	if (delta_exec < sysctl_sched_min_granularity)
 4434		return;
 4435
 4436	se = __pick_first_entity(cfs_rq);
 4437	delta = curr->vruntime - se->vruntime;
 4438
 4439	if (delta < 0)
 4440		return;
 4441
 4442	if (delta > ideal_runtime)
 4443		resched_curr(rq_of(cfs_rq));
 4444}
 4445
 4446static void
 4447set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 4448{
 4449	clear_buddies(cfs_rq, se);
 4450
 4451	/* 'current' is not kept within the tree. */
 4452	if (se->on_rq) {
 4453		/*
 4454		 * Any task has to be enqueued before it get to execute on
 4455		 * a CPU. So account for the time it spent waiting on the
 4456		 * runqueue.
 4457		 */
 4458		update_stats_wait_end(cfs_rq, se);
 4459		__dequeue_entity(cfs_rq, se);
 4460		update_load_avg(cfs_rq, se, UPDATE_TG);
 4461	}
 4462
 4463	update_stats_curr_start(cfs_rq, se);
 4464	cfs_rq->curr = se;
 4465
 4466	/*
 4467	 * Track our maximum slice length, if the CPU's load is at
 4468	 * least twice that of our own weight (i.e. dont track it
 4469	 * when there are only lesser-weight tasks around):
 4470	 */
 4471	if (schedstat_enabled() &&
 4472	    rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
 4473		schedstat_set(se->statistics.slice_max,
 4474			max((u64)schedstat_val(se->statistics.slice_max),
 4475			    se->sum_exec_runtime - se->prev_sum_exec_runtime));
 4476	}
 4477
 4478	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 4479}
 4480
 4481static int
 4482wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 4483
 4484/*
 4485 * Pick the next process, keeping these things in mind, in this order:
 4486 * 1) keep things fair between processes/task groups
 4487 * 2) pick the "next" process, since someone really wants that to run
 4488 * 3) pick the "last" process, for cache locality
 4489 * 4) do not run the "skip" process, if something else is available
 4490 */
 4491static struct sched_entity *
 4492pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 4493{
 4494	struct sched_entity *left = __pick_first_entity(cfs_rq);
 4495	struct sched_entity *se;
 4496
 4497	/*
 4498	 * If curr is set we have to see if its left of the leftmost entity
 4499	 * still in the tree, provided there was anything in the tree at all.
 4500	 */
 4501	if (!left || (curr && entity_before(curr, left)))
 4502		left = curr;
 4503
 4504	se = left; /* ideally we run the leftmost entity */
 4505
 4506	/*
 4507	 * Avoid running the skip buddy, if running something else can
 4508	 * be done without getting too unfair.
 4509	 */
 4510	if (cfs_rq->skip && cfs_rq->skip == se) {
 4511		struct sched_entity *second;
 4512
 4513		if (se == curr) {
 4514			second = __pick_first_entity(cfs_rq);
 4515		} else {
 4516			second = __pick_next_entity(se);
 4517			if (!second || (curr && entity_before(curr, second)))
 4518				second = curr;
 4519		}
 4520
 4521		if (second && wakeup_preempt_entity(second, left) < 1)
 4522			se = second;
 4523	}
 4524
 4525	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) {
 4526		/*
 4527		 * Someone really wants this to run. If it's not unfair, run it.
 4528		 */
 4529		se = cfs_rq->next;
 4530	} else if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) {
 4531		/*
 4532		 * Prefer last buddy, try to return the CPU to a preempted task.
 4533		 */
 4534		se = cfs_rq->last;
 4535	}
 4536
 4537	return se;
 4538}
 4539
 4540static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 4541
 4542static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 4543{
 4544	/*
 4545	 * If still on the runqueue then deactivate_task()
 4546	 * was not called and update_curr() has to be done:
 4547	 */
 4548	if (prev->on_rq)
 4549		update_curr(cfs_rq);
 4550
 4551	/* throttle cfs_rqs exceeding runtime */
 4552	check_cfs_rq_runtime(cfs_rq);
 4553
 4554	check_spread(cfs_rq, prev);
 4555
 4556	if (prev->on_rq) {
 4557		update_stats_wait_start(cfs_rq, prev);
 4558		/* Put 'current' back into the tree. */
 4559		__enqueue_entity(cfs_rq, prev);
 4560		/* in !on_rq case, update occurred at dequeue */
 4561		update_load_avg(cfs_rq, prev, 0);
 4562	}
 4563	cfs_rq->curr = NULL;
 4564}
 4565
 4566static void
 4567entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 4568{
 4569	/*
 4570	 * Update run-time statistics of the 'current'.
 4571	 */
 4572	update_curr(cfs_rq);
 4573
 4574	/*
 4575	 * Ensure that runnable average is periodically updated.
 4576	 */
 4577	update_load_avg(cfs_rq, curr, UPDATE_TG);
 4578	update_cfs_group(curr);
 4579
 4580#ifdef CONFIG_SCHED_HRTICK
 4581	/*
 4582	 * queued ticks are scheduled to match the slice, so don't bother
 4583	 * validating it and just reschedule.
 4584	 */
 4585	if (queued) {
 4586		resched_curr(rq_of(cfs_rq));
 4587		return;
 4588	}
 4589	/*
 4590	 * don't let the period tick interfere with the hrtick preemption
 4591	 */
 4592	if (!sched_feat(DOUBLE_TICK) &&
 4593			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
 4594		return;
 4595#endif
 4596
 4597	if (cfs_rq->nr_running > 1)
 4598		check_preempt_tick(cfs_rq, curr);
 4599}
 4600
 4601
 4602/**************************************************
 4603 * CFS bandwidth control machinery
 4604 */
 4605
 4606#ifdef CONFIG_CFS_BANDWIDTH
 4607
 4608#ifdef CONFIG_JUMP_LABEL
 4609static struct static_key __cfs_bandwidth_used;
 4610
 4611static inline bool cfs_bandwidth_used(void)
 4612{
 4613	return static_key_false(&__cfs_bandwidth_used);
 4614}
 4615
 4616void cfs_bandwidth_usage_inc(void)
 4617{
 4618	static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
 4619}
 4620
 4621void cfs_bandwidth_usage_dec(void)
 4622{
 4623	static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
 4624}
 4625#else /* CONFIG_JUMP_LABEL */
 4626static bool cfs_bandwidth_used(void)
 4627{
 4628	return true;
 4629}
 4630
 4631void cfs_bandwidth_usage_inc(void) {}
 4632void cfs_bandwidth_usage_dec(void) {}
 4633#endif /* CONFIG_JUMP_LABEL */
 4634
 4635/*
 4636 * default period for cfs group bandwidth.
 4637 * default: 0.1s, units: nanoseconds
 4638 */
 4639static inline u64 default_cfs_period(void)
 4640{
 4641	return 100000000ULL;
 4642}
 4643
 4644static inline u64 sched_cfs_bandwidth_slice(void)
 4645{
 4646	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
 4647}
 4648
 4649/*
 4650 * Replenish runtime according to assigned quota. We use sched_clock_cpu
 4651 * directly instead of rq->clock to avoid adding additional synchronization
 4652 * around rq->lock.
 4653 *
 4654 * requires cfs_b->lock
 4655 */
 4656void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
 4657{
 4658	if (unlikely(cfs_b->quota == RUNTIME_INF))
 4659		return;
 4660
 4661	cfs_b->runtime += cfs_b->quota;
 4662	cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
 4663}
 4664
 4665static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
 4666{
 4667	return &tg->cfs_bandwidth;
 4668}
 4669
 4670/* returns 0 on failure to allocate runtime */
 4671static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b,
 4672				   struct cfs_rq *cfs_rq, u64 target_runtime)
 4673{
 4674	u64 min_amount, amount = 0;
 4675
 4676	lockdep_assert_held(&cfs_b->lock);
 4677
 4678	/* note: this is a positive sum as runtime_remaining <= 0 */
 4679	min_amount = target_runtime - cfs_rq->runtime_remaining;
 4680
 4681	if (cfs_b->quota == RUNTIME_INF)
 4682		amount = min_amount;
 4683	else {
 4684		start_cfs_bandwidth(cfs_b);
 4685
 4686		if (cfs_b->runtime > 0) {
 4687			amount = min(cfs_b->runtime, min_amount);
 4688			cfs_b->runtime -= amount;
 4689			cfs_b->idle = 0;
 4690		}
 4691	}
 4692
 4693	cfs_rq->runtime_remaining += amount;
 4694
 4695	return cfs_rq->runtime_remaining > 0;
 4696}
 4697
 4698/* returns 0 on failure to allocate runtime */
 4699static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 4700{
 4701	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 4702	int ret;
 4703
 4704	raw_spin_lock(&cfs_b->lock);
 4705	ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice());
 4706	raw_spin_unlock(&cfs_b->lock);
 4707
 4708	return ret;
 4709}
 4710
 4711static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 4712{
 4713	/* dock delta_exec before expiring quota (as it could span periods) */
 4714	cfs_rq->runtime_remaining -= delta_exec;
 4715
 4716	if (likely(cfs_rq->runtime_remaining > 0))
 4717		return;
 4718
 4719	if (cfs_rq->throttled)
 4720		return;
 4721	/*
 4722	 * if we're unable to extend our runtime we resched so that the active
 4723	 * hierarchy can be throttled
 4724	 */
 4725	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
 4726		resched_curr(rq_of(cfs_rq));
 4727}
 4728
 4729static __always_inline
 4730void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 4731{
 4732	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
 4733		return;
 4734
 4735	__account_cfs_rq_runtime(cfs_rq, delta_exec);
 4736}
 4737
 4738static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 4739{
 4740	return cfs_bandwidth_used() && cfs_rq->throttled;
 4741}
 4742
 4743/* check whether cfs_rq, or any parent, is throttled */
 4744static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
 4745{
 4746	return cfs_bandwidth_used() && cfs_rq->throttle_count;
 4747}
 4748
 4749/*
 4750 * Ensure that neither of the group entities corresponding to src_cpu or
 4751 * dest_cpu are members of a throttled hierarchy when performing group
 4752 * load-balance operations.
 4753 */
 4754static inline int throttled_lb_pair(struct task_group *tg,
 4755				    int src_cpu, int dest_cpu)
 4756{
 4757	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
 4758
 4759	src_cfs_rq = tg->cfs_rq[src_cpu];
 4760	dest_cfs_rq = tg->cfs_rq[dest_cpu];
 4761
 4762	return throttled_hierarchy(src_cfs_rq) ||
 4763	       throttled_hierarchy(dest_cfs_rq);
 4764}
 4765
 4766static int tg_unthrottle_up(struct task_group *tg, void *data)
 4767{
 4768	struct rq *rq = data;
 4769	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 4770
 4771	cfs_rq->throttle_count--;
 4772	if (!cfs_rq->throttle_count) {
 4773		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
 4774					     cfs_rq->throttled_clock_task;
 4775
 4776		/* Add cfs_rq with load or one or more already running entities to the list */
 4777		if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
 4778			list_add_leaf_cfs_rq(cfs_rq);
 4779	}
 4780
 4781	return 0;
 4782}
 4783
 4784static int tg_throttle_down(struct task_group *tg, void *data)
 4785{
 4786	struct rq *rq = data;
 4787	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 4788
 4789	/* group is entering throttled state, stop time */
 4790	if (!cfs_rq->throttle_count) {
 4791		cfs_rq->throttled_clock_task = rq_clock_task(rq);
 4792		list_del_leaf_cfs_rq(cfs_rq);
 4793	}
 4794	cfs_rq->throttle_count++;
 4795
 4796	return 0;
 4797}
 4798
 4799static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 4800{
 4801	struct rq *rq = rq_of(cfs_rq);
 4802	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 4803	struct sched_entity *se;
 4804	long task_delta, idle_task_delta, dequeue = 1;
 4805
 4806	raw_spin_lock(&cfs_b->lock);
 4807	/* This will start the period timer if necessary */
 4808	if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) {
 4809		/*
 4810		 * We have raced with bandwidth becoming available, and if we
 4811		 * actually throttled the timer might not unthrottle us for an
 4812		 * entire period. We additionally needed to make sure that any
 4813		 * subsequent check_cfs_rq_runtime calls agree not to throttle
 4814		 * us, as we may commit to do cfs put_prev+pick_next, so we ask
 4815		 * for 1ns of runtime rather than just check cfs_b.
 4816		 */
 4817		dequeue = 0;
 4818	} else {
 4819		list_add_tail_rcu(&cfs_rq->throttled_list,
 4820				  &cfs_b->throttled_cfs_rq);
 4821	}
 4822	raw_spin_unlock(&cfs_b->lock);
 4823
 4824	if (!dequeue)
 4825		return false;  /* Throttle no longer required. */
 4826
 4827	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
 4828
 4829	/* freeze hierarchy runnable averages while throttled */
 4830	rcu_read_lock();
 4831	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
 4832	rcu_read_unlock();
 4833
 4834	task_delta = cfs_rq->h_nr_running;
 4835	idle_task_delta = cfs_rq->idle_h_nr_running;
 4836	for_each_sched_entity(se) {
 4837		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 4838		/* throttled entity or throttle-on-deactivate */
 4839		if (!se->on_rq)
 4840			goto done;
 4841
 4842		dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
 4843
 4844		qcfs_rq->h_nr_running -= task_delta;
 4845		qcfs_rq->idle_h_nr_running -= idle_task_delta;
 4846
 4847		if (qcfs_rq->load.weight) {
 4848			/* Avoid re-evaluating load for this entity: */
 4849			se = parent_entity(se);
 4850			break;
 4851		}
 4852	}
 4853
 4854	for_each_sched_entity(se) {
 4855		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 4856		/* throttled entity or throttle-on-deactivate */
 4857		if (!se->on_rq)
 4858			goto done;
 4859
 4860		update_load_avg(qcfs_rq, se, 0);
 4861		se_update_runnable(se);
 4862
 4863		qcfs_rq->h_nr_running -= task_delta;
 4864		qcfs_rq->idle_h_nr_running -= idle_task_delta;
 4865	}
 4866
 4867	/* At this point se is NULL and we are at root level*/
 4868	sub_nr_running(rq, task_delta);
 4869
 4870done:
 4871	/*
 4872	 * Note: distribution will already see us throttled via the
 4873	 * throttled-list.  rq->lock protects completion.
 4874	 */
 4875	cfs_rq->throttled = 1;
 4876	cfs_rq->throttled_clock = rq_clock(rq);
 4877	return true;
 4878}
 4879
 4880void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 4881{
 4882	struct rq *rq = rq_of(cfs_rq);
 4883	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 4884	struct sched_entity *se;
 4885	long task_delta, idle_task_delta;
 4886
 4887	se = cfs_rq->tg->se[cpu_of(rq)];
 4888
 4889	cfs_rq->throttled = 0;
 4890
 4891	update_rq_clock(rq);
 4892
 4893	raw_spin_lock(&cfs_b->lock);
 4894	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
 4895	list_del_rcu(&cfs_rq->throttled_list);
 4896	raw_spin_unlock(&cfs_b->lock);
 4897
 4898	/* update hierarchical throttle state */
 4899	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
 4900
 4901	/* Nothing to run but something to decay (on_list)? Complete the branch */
 4902	if (!cfs_rq->load.weight) {
 4903		if (cfs_rq->on_list)
 4904			goto unthrottle_throttle;
 4905		return;
 4906	}
 4907
 4908	task_delta = cfs_rq->h_nr_running;
 4909	idle_task_delta = cfs_rq->idle_h_nr_running;
 4910	for_each_sched_entity(se) {
 4911		if (se->on_rq)
 4912			break;
 4913		cfs_rq = cfs_rq_of(se);
 4914		enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
 4915
 4916		cfs_rq->h_nr_running += task_delta;
 4917		cfs_rq->idle_h_nr_running += idle_task_delta;
 4918
 4919		/* end evaluation on encountering a throttled cfs_rq */
 4920		if (cfs_rq_throttled(cfs_rq))
 4921			goto unthrottle_throttle;
 4922	}
 4923
 4924	for_each_sched_entity(se) {
 4925		cfs_rq = cfs_rq_of(se);
 4926
 4927		update_load_avg(cfs_rq, se, UPDATE_TG);
 4928		se_update_runnable(se);
 4929
 4930		cfs_rq->h_nr_running += task_delta;
 4931		cfs_rq->idle_h_nr_running += idle_task_delta;
 4932
 4933
 4934		/* end evaluation on encountering a throttled cfs_rq */
 4935		if (cfs_rq_throttled(cfs_rq))
 4936			goto unthrottle_throttle;
 4937
 4938		/*
 4939		 * One parent has been throttled and cfs_rq removed from the
 4940		 * list. Add it back to not break the leaf list.
 4941		 */
 4942		if (throttled_hierarchy(cfs_rq))
 4943			list_add_leaf_cfs_rq(cfs_rq);
 4944	}
 4945
 4946	/* At this point se is NULL and we are at root level*/
 4947	add_nr_running(rq, task_delta);
 4948
 4949unthrottle_throttle:
 4950	/*
 4951	 * The cfs_rq_throttled() breaks in the above iteration can result in
 4952	 * incomplete leaf list maintenance, resulting in triggering the
 4953	 * assertion below.
 4954	 */
 4955	for_each_sched_entity(se) {
 4956		cfs_rq = cfs_rq_of(se);
 4957
 4958		if (list_add_leaf_cfs_rq(cfs_rq))
 4959			break;
 4960	}
 4961
 4962	assert_list_leaf_cfs_rq(rq);
 4963
 4964	/* Determine whether we need to wake up potentially idle CPU: */
 4965	if (rq->curr == rq->idle && rq->cfs.nr_running)
 4966		resched_curr(rq);
 4967}
 4968
 4969static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
 4970{
 4971	struct cfs_rq *cfs_rq;
 4972	u64 runtime, remaining = 1;
 4973
 4974	rcu_read_lock();
 4975	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
 4976				throttled_list) {
 4977		struct rq *rq = rq_of(cfs_rq);
 4978		struct rq_flags rf;
 4979
 4980		rq_lock_irqsave(rq, &rf);
 4981		if (!cfs_rq_throttled(cfs_rq))
 4982			goto next;
 4983
 4984		/* By the above check, this should never be true */
 4985		SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
 4986
 4987		raw_spin_lock(&cfs_b->lock);
 4988		runtime = -cfs_rq->runtime_remaining + 1;
 4989		if (runtime > cfs_b->runtime)
 4990			runtime = cfs_b->runtime;
 4991		cfs_b->runtime -= runtime;
 4992		remaining = cfs_b->runtime;
 4993		raw_spin_unlock(&cfs_b->lock);
 4994
 4995		cfs_rq->runtime_remaining += runtime;
 4996
 4997		/* we check whether we're throttled above */
 4998		if (cfs_rq->runtime_remaining > 0)
 4999			unthrottle_cfs_rq(cfs_rq);
 5000
 5001next:
 5002		rq_unlock_irqrestore(rq, &rf);
 5003
 5004		if (!remaining)
 5005			break;
 5006	}
 5007	rcu_read_unlock();
 5008}
 5009
 5010/*
 5011 * Responsible for refilling a task_group's bandwidth and unthrottling its
 5012 * cfs_rqs as appropriate. If there has been no activity within the last
 5013 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
 5014 * used to track this state.
 5015 */
 5016static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
 5017{
 5018	int throttled;
 5019
 5020	/* no need to continue the timer with no bandwidth constraint */
 5021	if (cfs_b->quota == RUNTIME_INF)
 5022		goto out_deactivate;
 5023
 5024	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
 5025	cfs_b->nr_periods += overrun;
 5026
 5027	/* Refill extra burst quota even if cfs_b->idle */
 5028	__refill_cfs_bandwidth_runtime(cfs_b);
 5029
 5030	/*
 5031	 * idle depends on !throttled (for the case of a large deficit), and if
 5032	 * we're going inactive then everything else can be deferred
 5033	 */
 5034	if (cfs_b->idle && !throttled)
 5035		goto out_deactivate;
 5036
 5037	if (!throttled) {
 5038		/* mark as potentially idle for the upcoming period */
 5039		cfs_b->idle = 1;
 5040		return 0;
 5041	}
 5042
 5043	/* account preceding periods in which throttling occurred */
 5044	cfs_b->nr_throttled += overrun;
 5045
 5046	/*
 5047	 * This check is repeated as we release cfs_b->lock while we unthrottle.
 5048	 */
 5049	while (throttled && cfs_b->runtime > 0) {
 5050		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 5051		/* we can't nest cfs_b->lock while distributing bandwidth */
 5052		distribute_cfs_runtime(cfs_b);
 5053		raw_spin_lock_irqsave(&cfs_b->lock, flags);
 5054
 5055		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
 5056	}
 5057
 5058	/*
 5059	 * While we are ensured activity in the period following an
 5060	 * unthrottle, this also covers the case in which the new bandwidth is
 5061	 * insufficient to cover the existing bandwidth deficit.  (Forcing the
 5062	 * timer to remain active while there are any throttled entities.)
 5063	 */
 5064	cfs_b->idle = 0;
 5065
 5066	return 0;
 5067
 5068out_deactivate:
 5069	return 1;
 5070}
 5071
 5072/* a cfs_rq won't donate quota below this amount */
 5073static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
 5074/* minimum remaining period time to redistribute slack quota */
 5075static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
 5076/* how long we wait to gather additional slack before distributing */
 5077static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
 5078
 5079/*
 5080 * Are we near the end of the current quota period?
 5081 *
 5082 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
 5083 * hrtimer base being cleared by hrtimer_start. In the case of
 5084 * migrate_hrtimers, base is never cleared, so we are fine.
 5085 */
 5086static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
 5087{
 5088	struct hrtimer *refresh_timer = &cfs_b->period_timer;
 5089	s64 remaining;
 5090
 5091	/* if the call-back is running a quota refresh is already occurring */
 5092	if (hrtimer_callback_running(refresh_timer))
 5093		return 1;
 5094
 5095	/* is a quota refresh about to occur? */
 5096	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
 5097	if (remaining < (s64)min_expire)
 5098		return 1;
 5099
 5100	return 0;
 5101}
 5102
 5103static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
 5104{
 5105	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
 5106
 5107	/* if there's a quota refresh soon don't bother with slack */
 5108	if (runtime_refresh_within(cfs_b, min_left))
 5109		return;
 5110
 5111	/* don't push forwards an existing deferred unthrottle */
 5112	if (cfs_b->slack_started)
 5113		return;
 5114	cfs_b->slack_started = true;
 5115
 5116	hrtimer_start(&cfs_b->slack_timer,
 5117			ns_to_ktime(cfs_bandwidth_slack_period),
 5118			HRTIMER_MODE_REL);
 5119}
 5120
 5121/* we know any runtime found here is valid as update_curr() precedes return */
 5122static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 5123{
 5124	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 5125	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
 5126
 5127	if (slack_runtime <= 0)
 5128		return;
 5129
 5130	raw_spin_lock(&cfs_b->lock);
 5131	if (cfs_b->quota != RUNTIME_INF) {
 5132		cfs_b->runtime += slack_runtime;
 5133
 5134		/* we are under rq->lock, defer unthrottling using a timer */
 5135		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
 5136		    !list_empty(&cfs_b->throttled_cfs_rq))
 5137			start_cfs_slack_bandwidth(cfs_b);
 5138	}
 5139	raw_spin_unlock(&cfs_b->lock);
 5140
 5141	/* even if it's not valid for return we don't want to try again */
 5142	cfs_rq->runtime_remaining -= slack_runtime;
 5143}
 5144
 5145static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 5146{
 5147	if (!cfs_bandwidth_used())
 5148		return;
 5149
 5150	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
 5151		return;
 5152
 5153	__return_cfs_rq_runtime(cfs_rq);
 5154}
 5155
 5156/*
 5157 * This is done with a timer (instead of inline with bandwidth return) since
 5158 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
 5159 */
 5160static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 5161{
 5162	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
 5163	unsigned long flags;
 5164
 5165	/* confirm we're still not at a refresh boundary */
 5166	raw_spin_lock_irqsave(&cfs_b->lock, flags);
 5167	cfs_b->slack_started = false;
 5168
 5169	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
 5170		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 5171		return;
 5172	}
 5173
 5174	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
 5175		runtime = cfs_b->runtime;
 5176
 5177	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 5178
 5179	if (!runtime)
 5180		return;
 5181
 5182	distribute_cfs_runtime(cfs_b);
 5183}
 5184
 5185/*
 5186 * When a group wakes up we want to make sure that its quota is not already
 5187 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
 5188 * runtime as update_curr() throttling can not trigger until it's on-rq.
 5189 */
 5190static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 5191{
 5192	if (!cfs_bandwidth_used())
 5193		return;
 5194
 5195	/* an active group must be handled by the update_curr()->put() path */
 5196	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
 5197		return;
 5198
 5199	/* ensure the group is not already throttled */
 5200	if (cfs_rq_throttled(cfs_rq))
 5201		return;
 5202
 5203	/* update runtime allocation */
 5204	account_cfs_rq_runtime(cfs_rq, 0);
 5205	if (cfs_rq->runtime_remaining <= 0)
 5206		throttle_cfs_rq(cfs_rq);
 5207}
 5208
 5209static void sync_throttle(struct task_group *tg, int cpu)
 5210{
 5211	struct cfs_rq *pcfs_rq, *cfs_rq;
 5212
 5213	if (!cfs_bandwidth_used())
 5214		return;
 5215
 5216	if (!tg->parent)
 5217		return;
 5218
 5219	cfs_rq = tg->cfs_rq[cpu];
 5220	pcfs_rq = tg->parent->cfs_rq[cpu];
 5221
 5222	cfs_rq->throttle_count = pcfs_rq->throttle_count;
 5223	cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
 5224}
 5225
 5226/* conditionally throttle active cfs_rq's from put_prev_entity() */
 5227static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 5228{
 5229	if (!cfs_bandwidth_used())
 5230		return false;
 5231
 5232	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
 5233		return false;
 5234
 5235	/*
 5236	 * it's possible for a throttled entity to be forced into a running
 5237	 * state (e.g. set_curr_task), in this case we're finished.
 5238	 */
 5239	if (cfs_rq_throttled(cfs_rq))
 5240		return true;
 5241
 5242	return throttle_cfs_rq(cfs_rq);
 5243}
 5244
 5245static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
 5246{
 5247	struct cfs_bandwidth *cfs_b =
 5248		container_of(timer, struct cfs_bandwidth, slack_timer);
 5249
 5250	do_sched_cfs_slack_timer(cfs_b);
 5251
 5252	return HRTIMER_NORESTART;
 5253}
 5254
 5255extern const u64 max_cfs_quota_period;
 5256
 5257static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 5258{
 5259	struct cfs_bandwidth *cfs_b =
 5260		container_of(timer, struct cfs_bandwidth, period_timer);
 5261	unsigned long flags;
 5262	int overrun;
 5263	int idle = 0;
 5264	int count = 0;
 5265
 5266	raw_spin_lock_irqsave(&cfs_b->lock, flags);
 5267	for (;;) {
 5268		overrun = hrtimer_forward_now(timer, cfs_b->period);
 5269		if (!overrun)
 5270			break;
 5271
 5272		idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
 5273
 5274		if (++count > 3) {
 5275			u64 new, old = ktime_to_ns(cfs_b->period);
 5276
 5277			/*
 5278			 * Grow period by a factor of 2 to avoid losing precision.
 5279			 * Precision loss in the quota/period ratio can cause __cfs_schedulable
 5280			 * to fail.
 5281			 */
 5282			new = old * 2;
 5283			if (new < max_cfs_quota_period) {
 5284				cfs_b->period = ns_to_ktime(new);
 5285				cfs_b->quota *= 2;
 5286				cfs_b->burst *= 2;
 5287
 5288				pr_warn_ratelimited(
 5289	"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
 5290					smp_processor_id(),
 5291					div_u64(new, NSEC_PER_USEC),
 5292					div_u64(cfs_b->quota, NSEC_PER_USEC));
 5293			} else {
 5294				pr_warn_ratelimited(
 5295	"cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
 5296					smp_processor_id(),
 5297					div_u64(old, NSEC_PER_USEC),
 5298					div_u64(cfs_b->quota, NSEC_PER_USEC));
 5299			}
 5300
 5301			/* reset count so we don't come right back in here */
 5302			count = 0;
 5303		}
 5304	}
 5305	if (idle)
 5306		cfs_b->period_active = 0;
 5307	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 5308
 5309	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
 5310}
 5311
 5312void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 5313{
 5314	raw_spin_lock_init(&cfs_b->lock);
 5315	cfs_b->runtime = 0;
 5316	cfs_b->quota = RUNTIME_INF;
 5317	cfs_b->period = ns_to_ktime(default_cfs_period());
 5318	cfs_b->burst = 0;
 5319
 5320	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
 5321	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 5322	cfs_b->period_timer.function = sched_cfs_period_timer;
 5323	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 5324	cfs_b->slack_timer.function = sched_cfs_slack_timer;
 5325	cfs_b->slack_started = false;
 5326}
 5327
 5328static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 5329{
 5330	cfs_rq->runtime_enabled = 0;
 5331	INIT_LIST_HEAD(&cfs_rq->throttled_list);
 5332}
 5333
 5334void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 5335{
 5336	lockdep_assert_held(&cfs_b->lock);
 5337
 5338	if (cfs_b->period_active)
 5339		return;
 5340
 5341	cfs_b->period_active = 1;
 5342	hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
 5343	hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
 5344}
 5345
 5346static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 5347{
 5348	/* init_cfs_bandwidth() was not called */
 5349	if (!cfs_b->throttled_cfs_rq.next)
 5350		return;
 5351
 5352	hrtimer_cancel(&cfs_b->period_timer);
 5353	hrtimer_cancel(&cfs_b->slack_timer);
 5354}
 5355
 5356/*
 5357 * Both these CPU hotplug callbacks race against unregister_fair_sched_group()
 5358 *
 5359 * The race is harmless, since modifying bandwidth settings of unhooked group
 5360 * bits doesn't do much.
 5361 */
 5362
 5363/* cpu online callback */
 5364static void __maybe_unused update_runtime_enabled(struct rq *rq)
 5365{
 5366	struct task_group *tg;
 5367
 5368	lockdep_assert_rq_held(rq);
 5369
 5370	rcu_read_lock();
 5371	list_for_each_entry_rcu(tg, &task_groups, list) {
 5372		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
 5373		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 5374
 5375		raw_spin_lock(&cfs_b->lock);
 5376		cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
 5377		raw_spin_unlock(&cfs_b->lock);
 5378	}
 5379	rcu_read_unlock();
 5380}
 5381
 5382/* cpu offline callback */
 5383static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 5384{
 5385	struct task_group *tg;
 5386
 5387	lockdep_assert_rq_held(rq);
 5388
 5389	rcu_read_lock();
 5390	list_for_each_entry_rcu(tg, &task_groups, list) {
 5391		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 5392
 5393		if (!cfs_rq->runtime_enabled)
 5394			continue;
 5395
 5396		/*
 5397		 * clock_task is not advancing so we just need to make sure
 5398		 * there's some valid quota amount
 5399		 */
 5400		cfs_rq->runtime_remaining = 1;
 5401		/*
 5402		 * Offline rq is schedulable till CPU is completely disabled
 5403		 * in take_cpu_down(), so we prevent new cfs throttling here.
 5404		 */
 5405		cfs_rq->runtime_enabled = 0;
 5406
 5407		if (cfs_rq_throttled(cfs_rq))
 5408			unthrottle_cfs_rq(cfs_rq);
 5409	}
 5410	rcu_read_unlock();
 5411}
 5412
 5413#else /* CONFIG_CFS_BANDWIDTH */
 5414
 5415static inline bool cfs_bandwidth_used(void)
 5416{
 5417	return false;
 5418}
 5419
 5420static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 5421static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
 5422static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
 5423static inline void sync_throttle(struct task_group *tg, int cpu) {}
 5424static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 5425
 5426static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 5427{
 5428	return 0;
 5429}
 5430
 5431static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
 5432{
 5433	return 0;
 5434}
 5435
 5436static inline int throttled_lb_pair(struct task_group *tg,
 5437				    int src_cpu, int dest_cpu)
 5438{
 5439	return 0;
 5440}
 5441
 5442void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
 5443
 5444#ifdef CONFIG_FAIR_GROUP_SCHED
 5445static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 5446#endif
 5447
 5448static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
 5449{
 5450	return NULL;
 5451}
 5452static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
 5453static inline void update_runtime_enabled(struct rq *rq) {}
 5454static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 5455
 5456#endif /* CONFIG_CFS_BANDWIDTH */
 5457
 5458/**************************************************
 5459 * CFS operations on tasks:
 5460 */
 5461
 5462#ifdef CONFIG_SCHED_HRTICK
 5463static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 5464{
 5465	struct sched_entity *se = &p->se;
 5466	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 5467
 5468	SCHED_WARN_ON(task_rq(p) != rq);
 5469
 5470	if (rq->cfs.h_nr_running > 1) {
 5471		u64 slice = sched_slice(cfs_rq, se);
 5472		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
 5473		s64 delta = slice - ran;
 5474
 5475		if (delta < 0) {
 5476			if (task_current(rq, p))
 5477				resched_curr(rq);
 5478			return;
 5479		}
 5480		hrtick_start(rq, delta);
 5481	}
 5482}
 5483
 5484/*
 5485 * called from enqueue/dequeue and updates the hrtick when the
 5486 * current task is from our class and nr_running is low enough
 5487 * to matter.
 5488 */
 5489static void hrtick_update(struct rq *rq)
 5490{
 5491	struct task_struct *curr = rq->curr;
 5492
 5493	if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class)
 5494		return;
 5495
 5496	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
 5497		hrtick_start_fair(rq, curr);
 5498}
 5499#else /* !CONFIG_SCHED_HRTICK */
 5500static inline void
 5501hrtick_start_fair(struct rq *rq, struct task_struct *p)
 5502{
 5503}
 5504
 5505static inline void hrtick_update(struct rq *rq)
 5506{
 5507}
 5508#endif
 5509
 5510#ifdef CONFIG_SMP
 5511static inline unsigned long cpu_util(int cpu);
 5512
 5513static inline bool cpu_overutilized(int cpu)
 5514{
 5515	return !fits_capacity(cpu_util(cpu), capacity_of(cpu));
 5516}
 5517
 5518static inline void update_overutilized_status(struct rq *rq)
 5519{
 5520	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
 5521		WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
 5522		trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
 5523	}
 5524}
 5525#else
 5526static inline void update_overutilized_status(struct rq *rq) { }
 5527#endif
 5528
 5529/* Runqueue only has SCHED_IDLE tasks enqueued */
 5530static int sched_idle_rq(struct rq *rq)
 5531{
 5532	return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
 5533			rq->nr_running);
 5534}
 5535
 5536#ifdef CONFIG_SMP
 5537static int sched_idle_cpu(int cpu)
 5538{
 5539	return sched_idle_rq(cpu_rq(cpu));
 5540}
 5541#endif
 5542
 5543/*
 5544 * The enqueue_task method is called before nr_running is
 5545 * increased. Here we update the fair scheduling stats and
 5546 * then put the task into the rbtree:
 5547 */
 5548static void
 5549enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 5550{
 5551	struct cfs_rq *cfs_rq;
 5552	struct sched_entity *se = &p->se;
 5553	int idle_h_nr_running = task_has_idle_policy(p);
 5554	int task_new = !(flags & ENQUEUE_WAKEUP);
 5555
 5556	/*
 5557	 * The code below (indirectly) updates schedutil which looks at
 5558	 * the cfs_rq utilization to select a frequency.
 5559	 * Let's add the task's estimated utilization to the cfs_rq's
 5560	 * estimated utilization, before we update schedutil.
 5561	 */
 5562	util_est_enqueue(&rq->cfs, p);
 5563
 5564	/*
 5565	 * If in_iowait is set, the code below may not trigger any cpufreq
 5566	 * utilization updates, so do it here explicitly with the IOWAIT flag
 5567	 * passed.
 5568	 */
 5569	if (p->in_iowait)
 5570		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 5571
 5572	for_each_sched_entity(se) {
 5573		if (se->on_rq)
 5574			break;
 5575		cfs_rq = cfs_rq_of(se);
 5576		enqueue_entity(cfs_rq, se, flags);
 5577
 5578		cfs_rq->h_nr_running++;
 5579		cfs_rq->idle_h_nr_running += idle_h_nr_running;
 5580
 5581		/* end evaluation on encountering a throttled cfs_rq */
 5582		if (cfs_rq_throttled(cfs_rq))
 5583			goto enqueue_throttle;
 5584
 5585		flags = ENQUEUE_WAKEUP;
 5586	}
 5587
 5588	for_each_sched_entity(se) {
 5589		cfs_rq = cfs_rq_of(se);
 5590
 5591		update_load_avg(cfs_rq, se, UPDATE_TG);
 5592		se_update_runnable(se);
 5593		update_cfs_group(se);
 5594
 5595		cfs_rq->h_nr_running++;
 5596		cfs_rq->idle_h_nr_running += idle_h_nr_running;
 5597
 5598		/* end evaluation on encountering a throttled cfs_rq */
 5599		if (cfs_rq_throttled(cfs_rq))
 5600			goto enqueue_throttle;
 5601
 5602               /*
 5603                * One parent has been throttled and cfs_rq removed from the
 5604                * list. Add it back to not break the leaf list.
 5605                */
 5606               if (throttled_hierarchy(cfs_rq))
 5607                       list_add_leaf_cfs_rq(cfs_rq);
 5608	}
 5609
 5610	/* At this point se is NULL and we are at root level*/
 5611	add_nr_running(rq, 1);
 5612
 5613	/*
 5614	 * Since new tasks are assigned an initial util_avg equal to
 5615	 * half of the spare capacity of their CPU, tiny tasks have the
 5616	 * ability to cross the overutilized threshold, which will
 5617	 * result in the load balancer ruining all the task placement
 5618	 * done by EAS. As a way to mitigate that effect, do not account
 5619	 * for the first enqueue operation of new tasks during the
 5620	 * overutilized flag detection.
 5621	 *
 5622	 * A better way of solving this problem would be to wait for
 5623	 * the PELT signals of tasks to converge before taking them
 5624	 * into account, but that is not straightforward to implement,
 5625	 * and the following generally works well enough in practice.
 5626	 */
 5627	if (!task_new)
 5628		update_overutilized_status(rq);
 5629
 5630enqueue_throttle:
 5631	if (cfs_bandwidth_used()) {
 5632		/*
 5633		 * When bandwidth control is enabled; the cfs_rq_throttled()
 5634		 * breaks in the above iteration can result in incomplete
 5635		 * leaf list maintenance, resulting in triggering the assertion
 5636		 * below.
 5637		 */
 5638		for_each_sched_entity(se) {
 5639			cfs_rq = cfs_rq_of(se);
 5640
 5641			if (list_add_leaf_cfs_rq(cfs_rq))
 5642				break;
 5643		}
 5644	}
 5645
 5646	assert_list_leaf_cfs_rq(rq);
 5647
 5648	hrtick_update(rq);
 5649}
 5650
 5651static void set_next_buddy(struct sched_entity *se);
 5652
 5653/*
 5654 * The dequeue_task method is called before nr_running is
 5655 * decreased. We remove the task from the rbtree and
 5656 * update the fair scheduling stats:
 5657 */
 5658static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 5659{
 5660	struct cfs_rq *cfs_rq;
 5661	struct sched_entity *se = &p->se;
 5662	int task_sleep = flags & DEQUEUE_SLEEP;
 5663	int idle_h_nr_running = task_has_idle_policy(p);
 5664	bool was_sched_idle = sched_idle_rq(rq);
 5665
 5666	util_est_dequeue(&rq->cfs, p);
 5667
 5668	for_each_sched_entity(se) {
 5669		cfs_rq = cfs_rq_of(se);
 5670		dequeue_entity(cfs_rq, se, flags);
 5671
 5672		cfs_rq->h_nr_running--;
 5673		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 5674
 5675		/* end evaluation on encountering a throttled cfs_rq */
 5676		if (cfs_rq_throttled(cfs_rq))
 5677			goto dequeue_throttle;
 5678
 5679		/* Don't dequeue parent if it has other entities besides us */
 5680		if (cfs_rq->load.weight) {
 5681			/* Avoid re-evaluating load for this entity: */
 5682			se = parent_entity(se);
 5683			/*
 5684			 * Bias pick_next to pick a task from this cfs_rq, as
 5685			 * p is sleeping when it is within its sched_slice.
 5686			 */
 5687			if (task_sleep && se && !throttled_hierarchy(cfs_rq))
 5688				set_next_buddy(se);
 5689			break;
 5690		}
 5691		flags |= DEQUEUE_SLEEP;
 5692	}
 5693
 5694	for_each_sched_entity(se) {
 5695		cfs_rq = cfs_rq_of(se);
 5696
 5697		update_load_avg(cfs_rq, se, UPDATE_TG);
 5698		se_update_runnable(se);
 5699		update_cfs_group(se);
 5700
 5701		cfs_rq->h_nr_running--;
 5702		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 5703
 5704		/* end evaluation on encountering a throttled cfs_rq */
 5705		if (cfs_rq_throttled(cfs_rq))
 5706			goto dequeue_throttle;
 5707
 5708	}
 5709
 5710	/* At this point se is NULL and we are at root level*/
 5711	sub_nr_running(rq, 1);
 5712
 5713	/* balance early to pull high priority tasks */
 5714	if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
 5715		rq->next_balance = jiffies;
 5716
 5717dequeue_throttle:
 5718	util_est_update(&rq->cfs, p, task_sleep);
 5719	hrtick_update(rq);
 5720}
 5721
 5722#ifdef CONFIG_SMP
 5723
 5724/* Working cpumask for: load_balance, load_balance_newidle. */
 5725DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
 5726DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
 5727
 5728#ifdef CONFIG_NO_HZ_COMMON
 5729
 5730static struct {
 5731	cpumask_var_t idle_cpus_mask;
 5732	atomic_t nr_cpus;
 5733	int has_blocked;		/* Idle CPUS has blocked load */
 5734	unsigned long next_balance;     /* in jiffy units */
 5735	unsigned long next_blocked;	/* Next update of blocked load in jiffies */
 5736} nohz ____cacheline_aligned;
 5737
 5738#endif /* CONFIG_NO_HZ_COMMON */
 5739
 5740static unsigned long cpu_load(struct rq *rq)
 5741{
 5742	return cfs_rq_load_avg(&rq->cfs);
 5743}
 5744
 5745/*
 5746 * cpu_load_without - compute CPU load without any contributions from *p
 5747 * @cpu: the CPU which load is requested
 5748 * @p: the task which load should be discounted
 5749 *
 5750 * The load of a CPU is defined by the load of tasks currently enqueued on that
 5751 * CPU as well as tasks which are currently sleeping after an execution on that
 5752 * CPU.
 5753 *
 5754 * This method returns the load of the specified CPU by discounting the load of
 5755 * the specified task, whenever the task is currently contributing to the CPU
 5756 * load.
 5757 */
 5758static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
 5759{
 5760	struct cfs_rq *cfs_rq;
 5761	unsigned int load;
 5762
 5763	/* Task has no contribution or is new */
 5764	if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
 5765		return cpu_load(rq);
 5766
 5767	cfs_rq = &rq->cfs;
 5768	load = READ_ONCE(cfs_rq->avg.load_avg);
 5769
 5770	/* Discount task's util from CPU's util */
 5771	lsub_positive(&load, task_h_load(p));
 5772
 5773	return load;
 5774}
 5775
 5776static unsigned long cpu_runnable(struct rq *rq)
 5777{
 5778	return cfs_rq_runnable_avg(&rq->cfs);
 5779}
 5780
 5781static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
 5782{
 5783	struct cfs_rq *cfs_rq;
 5784	unsigned int runnable;
 5785
 5786	/* Task has no contribution or is new */
 5787	if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
 5788		return cpu_runnable(rq);
 5789
 5790	cfs_rq = &rq->cfs;
 5791	runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
 5792
 5793	/* Discount task's runnable from CPU's runnable */
 5794	lsub_positive(&runnable, p->se.avg.runnable_avg);
 5795
 5796	return runnable;
 5797}
 5798
 5799static unsigned long capacity_of(int cpu)
 5800{
 5801	return cpu_rq(cpu)->cpu_capacity;
 5802}
 5803
 5804static void record_wakee(struct task_struct *p)
 5805{
 5806	/*
 5807	 * Only decay a single time; tasks that have less then 1 wakeup per
 5808	 * jiffy will not have built up many flips.
 5809	 */
 5810	if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
 5811		current->wakee_flips >>= 1;
 5812		current->wakee_flip_decay_ts = jiffies;
 5813	}
 5814
 5815	if (current->last_wakee != p) {
 5816		current->last_wakee = p;
 5817		current->wakee_flips++;
 5818	}
 5819}
 5820
 5821/*
 5822 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
 5823 *
 5824 * A waker of many should wake a different task than the one last awakened
 5825 * at a frequency roughly N times higher than one of its wakees.
 5826 *
 5827 * In order to determine whether we should let the load spread vs consolidating
 5828 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
 5829 * partner, and a factor of lls_size higher frequency in the other.
 5830 *
 5831 * With both conditions met, we can be relatively sure that the relationship is
 5832 * non-monogamous, with partner count exceeding socket size.
 5833 *
 5834 * Waker/wakee being client/server, worker/dispatcher, interrupt source or
 5835 * whatever is irrelevant, spread criteria is apparent partner count exceeds
 5836 * socket size.
 5837 */
 5838static int wake_wide(struct task_struct *p)
 5839{
 5840	unsigned int master = current->wakee_flips;
 5841	unsigned int slave = p->wakee_flips;
 5842	int factor = __this_cpu_read(sd_llc_size);
 5843
 5844	if (master < slave)
 5845		swap(master, slave);
 5846	if (slave < factor || master < slave * factor)
 5847		return 0;
 5848	return 1;
 5849}
 5850
 5851/*
 5852 * The purpose of wake_affine() is to quickly determine on which CPU we can run
 5853 * soonest. For the purpose of speed we only consider the waking and previous
 5854 * CPU.
 5855 *
 5856 * wake_affine_idle() - only considers 'now', it check if the waking CPU is
 5857 *			cache-affine and is (or	will be) idle.
 5858 *
 5859 * wake_affine_weight() - considers the weight to reflect the average
 5860 *			  scheduling latency of the CPUs. This seems to work
 5861 *			  for the overloaded case.
 5862 */
 5863static int
 5864wake_affine_idle(int this_cpu, int prev_cpu, int sync)
 5865{
 5866	/*
 5867	 * If this_cpu is idle, it implies the wakeup is from interrupt
 5868	 * context. Only allow the move if cache is shared. Otherwise an
 5869	 * interrupt intensive workload could force all tasks onto one
 5870	 * node depending on the IO topology or IRQ affinity settings.
 5871	 *
 5872	 * If the prev_cpu is idle and cache affine then avoid a migration.
 5873	 * There is no guarantee that the cache hot data from an interrupt
 5874	 * is more important than cache hot data on the prev_cpu and from
 5875	 * a cpufreq perspective, it's better to have higher utilisation
 5876	 * on one CPU.
 5877	 */
 5878	if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
 5879		return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
 5880
 5881	if (sync && cpu_rq(this_cpu)->nr_running == 1)
 5882		return this_cpu;
 5883
 5884	if (available_idle_cpu(prev_cpu))
 5885		return prev_cpu;
 5886
 5887	return nr_cpumask_bits;
 5888}
 5889
 5890static int
 5891wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 5892		   int this_cpu, int prev_cpu, int sync)
 5893{
 5894	s64 this_eff_load, prev_eff_load;
 5895	unsigned long task_load;
 5896
 5897	this_eff_load = cpu_load(cpu_rq(this_cpu));
 5898
 5899	if (sync) {
 5900		unsigned long current_load = task_h_load(current);
 5901
 5902		if (current_load > this_eff_load)
 5903			return this_cpu;
 5904
 5905		this_eff_load -= current_load;
 5906	}
 5907
 5908	task_load = task_h_load(p);
 5909
 5910	this_eff_load += task_load;
 5911	if (sched_feat(WA_BIAS))
 5912		this_eff_load *= 100;
 5913	this_eff_load *= capacity_of(prev_cpu);
 5914
 5915	prev_eff_load = cpu_load(cpu_rq(prev_cpu));
 5916	prev_eff_load -= task_load;
 5917	if (sched_feat(WA_BIAS))
 5918		prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
 5919	prev_eff_load *= capacity_of(this_cpu);
 5920
 5921	/*
 5922	 * If sync, adjust the weight of prev_eff_load such that if
 5923	 * prev_eff == this_eff that select_idle_sibling() will consider
 5924	 * stacking the wakee on top of the waker if no other CPU is
 5925	 * idle.
 5926	 */
 5927	if (sync)
 5928		prev_eff_load += 1;
 5929
 5930	return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
 5931}
 5932
 5933static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 5934		       int this_cpu, int prev_cpu, int sync)
 5935{
 5936	int target = nr_cpumask_bits;
 5937
 5938	if (sched_feat(WA_IDLE))
 5939		target = wake_affine_idle(this_cpu, prev_cpu, sync);
 5940
 5941	if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
 5942		target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
 5943
 5944	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
 5945	if (target == nr_cpumask_bits)
 5946		return prev_cpu;
 5947
 5948	schedstat_inc(sd->ttwu_move_affine);
 5949	schedstat_inc(p->se.statistics.nr_wakeups_affine);
 5950	return target;
 5951}
 5952
 5953static struct sched_group *
 5954find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
 5955
 5956/*
 5957 * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
 5958 */
 5959static int
 5960find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 5961{
 5962	unsigned long load, min_load = ULONG_MAX;
 5963	unsigned int min_exit_latency = UINT_MAX;
 5964	u64 latest_idle_timestamp = 0;
 5965	int least_loaded_cpu = this_cpu;
 5966	int shallowest_idle_cpu = -1;
 5967	int i;
 5968
 5969	/* Check if we have any choice: */
 5970	if (group->group_weight == 1)
 5971		return cpumask_first(sched_group_span(group));
 5972
 5973	/* Traverse only the allowed CPUs */
 5974	for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
 5975		struct rq *rq = cpu_rq(i);
 5976
 5977		if (!sched_core_cookie_match(rq, p))
 5978			continue;
 5979
 5980		if (sched_idle_cpu(i))
 5981			return i;
 5982
 5983		if (available_idle_cpu(i)) {
 5984			struct cpuidle_state *idle = idle_get_state(rq);
 5985			if (idle && idle->exit_latency < min_exit_latency) {
 5986				/*
 5987				 * We give priority to a CPU whose idle state
 5988				 * has the smallest exit latency irrespective
 5989				 * of any idle timestamp.
 5990				 */
 5991				min_exit_latency = idle->exit_latency;
 5992				latest_idle_timestamp = rq->idle_stamp;
 5993				shallowest_idle_cpu = i;
 5994			} else if ((!idle || idle->exit_latency == min_exit_latency) &&
 5995				   rq->idle_stamp > latest_idle_timestamp) {
 5996				/*
 5997				 * If equal or no active idle state, then
 5998				 * the most recently idled CPU might have
 5999				 * a warmer cache.
 6000				 */
 6001				latest_idle_timestamp = rq->idle_stamp;
 6002				shallowest_idle_cpu = i;
 6003			}
 6004		} else if (shallowest_idle_cpu == -1) {
 6005			load = cpu_load(cpu_rq(i));
 6006			if (load < min_load) {
 6007				min_load = load;
 6008				least_loaded_cpu = i;
 6009			}
 6010		}
 6011	}
 6012
 6013	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 6014}
 6015
 6016static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
 6017				  int cpu, int prev_cpu, int sd_flag)
 6018{
 6019	int new_cpu = cpu;
 6020
 6021	if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
 6022		return prev_cpu;
 6023
 6024	/*
 6025	 * We need task's util for cpu_util_without, sync it up to
 6026	 * prev_cpu's last_update_time.
 6027	 */
 6028	if (!(sd_flag & SD_BALANCE_FORK))
 6029		sync_entity_load_avg(&p->se);
 6030
 6031	while (sd) {
 6032		struct sched_group *group;
 6033		struct sched_domain *tmp;
 6034		int weight;
 6035
 6036		if (!(sd->flags & sd_flag)) {
 6037			sd = sd->child;
 6038			continue;
 6039		}
 6040
 6041		group = find_idlest_group(sd, p, cpu);
 6042		if (!group) {
 6043			sd = sd->child;
 6044			continue;
 6045		}
 6046
 6047		new_cpu = find_idlest_group_cpu(group, p, cpu);
 6048		if (new_cpu == cpu) {
 6049			/* Now try balancing at a lower domain level of 'cpu': */
 6050			sd = sd->child;
 6051			continue;
 6052		}
 6053
 6054		/* Now try balancing at a lower domain level of 'new_cpu': */
 6055		cpu = new_cpu;
 6056		weight = sd->span_weight;
 6057		sd = NULL;
 6058		for_each_domain(cpu, tmp) {
 6059			if (weight <= tmp->span_weight)
 6060				break;
 6061			if (tmp->flags & sd_flag)
 6062				sd = tmp;
 6063		}
 6064	}
 6065
 6066	return new_cpu;
 6067}
 6068
 6069static inline int __select_idle_cpu(int cpu, struct task_struct *p)
 6070{
 6071	if ((available_idle_cpu(cpu) || sched_idle_cpu(cpu)) &&
 6072	    sched_cpu_cookie_match(cpu_rq(cpu), p))
 6073		return cpu;
 6074
 6075	return -1;
 6076}
 6077
 6078#ifdef CONFIG_SCHED_SMT
 6079DEFINE_STATIC_KEY_FALSE(sched_smt_present);
 6080EXPORT_SYMBOL_GPL(sched_smt_present);
 6081
 6082static inline void set_idle_cores(int cpu, int val)
 6083{
 6084	struct sched_domain_shared *sds;
 6085
 6086	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
 6087	if (sds)
 6088		WRITE_ONCE(sds->has_idle_cores, val);
 6089}
 6090
 6091static inline bool test_idle_cores(int cpu, bool def)
 6092{
 6093	struct sched_domain_shared *sds;
 6094
 6095	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
 6096	if (sds)
 6097		return READ_ONCE(sds->has_idle_cores);
 6098
 6099	return def;
 6100}
 6101
 6102/*
 6103 * Scans the local SMT mask to see if the entire core is idle, and records this
 6104 * information in sd_llc_shared->has_idle_cores.
 6105 *
 6106 * Since SMT siblings share all cache levels, inspecting this limited remote
 6107 * state should be fairly cheap.
 6108 */
 6109void __update_idle_core(struct rq *rq)
 6110{
 6111	int core = cpu_of(rq);
 6112	int cpu;
 6113
 6114	rcu_read_lock();
 6115	if (test_idle_cores(core, true))
 6116		goto unlock;
 6117
 6118	for_each_cpu(cpu, cpu_smt_mask(core)) {
 6119		if (cpu == core)
 6120			continue;
 6121
 6122		if (!available_idle_cpu(cpu))
 6123			goto unlock;
 6124	}
 6125
 6126	set_idle_cores(core, 1);
 6127unlock:
 6128	rcu_read_unlock();
 6129}
 6130
 6131/*
 6132 * Scan the entire LLC domain for idle cores; this dynamically switches off if
 6133 * there are no idle cores left in the system; tracked through
 6134 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
 6135 */
 6136static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
 6137{
 6138	bool idle = true;
 6139	int cpu;
 6140
 6141	if (!static_branch_likely(&sched_smt_present))
 6142		return __select_idle_cpu(core, p);
 6143
 6144	for_each_cpu(cpu, cpu_smt_mask(core)) {
 6145		if (!available_idle_cpu(cpu)) {
 6146			idle = false;
 6147			if (*idle_cpu == -1) {
 6148				if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
 6149					*idle_cpu = cpu;
 6150					break;
 6151				}
 6152				continue;
 6153			}
 6154			break;
 6155		}
 6156		if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr))
 6157			*idle_cpu = cpu;
 6158	}
 6159
 6160	if (idle)
 6161		return core;
 6162
 6163	cpumask_andnot(cpus, cpus, cpu_smt_mask(core));
 6164	return -1;
 6165}
 6166
 6167/*
 6168 * Scan the local SMT mask for idle CPUs.
 6169 */
 6170static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
 6171{
 6172	int cpu;
 6173
 6174	for_each_cpu(cpu, cpu_smt_mask(target)) {
 6175		if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
 6176		    !cpumask_test_cpu(cpu, sched_domain_span(sd)))
 6177			continue;
 6178		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
 6179			return cpu;
 6180	}
 6181
 6182	return -1;
 6183}
 6184
 6185#else /* CONFIG_SCHED_SMT */
 6186
 6187static inline void set_idle_cores(int cpu, int val)
 6188{
 6189}
 6190
 6191static inline bool test_idle_cores(int cpu, bool def)
 6192{
 6193	return def;
 6194}
 6195
 6196static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
 6197{
 6198	return __select_idle_cpu(core, p);
 6199}
 6200
 6201static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
 6202{
 6203	return -1;
 6204}
 6205
 6206#endif /* CONFIG_SCHED_SMT */
 6207
 6208/*
 6209 * Scan the LLC domain for idle CPUs; this is dynamically regulated by
 6210 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
 6211 * average idle time for this rq (as found in rq->avg_idle).
 6212 */
 6213static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
 6214{
 6215	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 6216	int i, cpu, idle_cpu = -1, nr = INT_MAX;
 6217	struct rq *this_rq = this_rq();
 6218	int this = smp_processor_id();
 6219	struct sched_domain *this_sd;
 6220	u64 time = 0;
 6221
 6222	this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
 6223	if (!this_sd)
 6224		return -1;
 6225
 6226	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 6227
 6228	if (sched_feat(SIS_PROP) && !has_idle_core) {
 6229		u64 avg_cost, avg_idle, span_avg;
 6230		unsigned long now = jiffies;
 6231
 6232		/*
 6233		 * If we're busy, the assumption that the last idle period
 6234		 * predicts the future is flawed; age away the remaining
 6235		 * predicted idle time.
 6236		 */
 6237		if (unlikely(this_rq->wake_stamp < now)) {
 6238			while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) {
 6239				this_rq->wake_stamp++;
 6240				this_rq->wake_avg_idle >>= 1;
 6241			}
 6242		}
 6243
 6244		avg_idle = this_rq->wake_avg_idle;
 6245		avg_cost = this_sd->avg_scan_cost + 1;
 6246
 6247		span_avg = sd->span_weight * avg_idle;
 6248		if (span_avg > 4*avg_cost)
 6249			nr = div_u64(span_avg, avg_cost);
 6250		else
 6251			nr = 4;
 6252
 6253		time = cpu_clock(this);
 6254	}
 6255
 6256	for_each_cpu_wrap(cpu, cpus, target) {
 6257		if (has_idle_core) {
 6258			i = select_idle_core(p, cpu, cpus, &idle_cpu);
 6259			if ((unsigned int)i < nr_cpumask_bits)
 6260				return i;
 6261
 6262		} else {
 6263			if (!--nr)
 6264				return -1;
 6265			idle_cpu = __select_idle_cpu(cpu, p);
 6266			if ((unsigned int)idle_cpu < nr_cpumask_bits)
 6267				break;
 6268		}
 6269	}
 6270
 6271	if (has_idle_core)
 6272		set_idle_cores(target, false);
 6273
 6274	if (sched_feat(SIS_PROP) && !has_idle_core) {
 6275		time = cpu_clock(this) - time;
 6276
 6277		/*
 6278		 * Account for the scan cost of wakeups against the average
 6279		 * idle time.
 6280		 */
 6281		this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time);
 6282
 6283		update_avg(&this_sd->avg_scan_cost, time);
 6284	}
 6285
 6286	return idle_cpu;
 6287}
 6288
 6289/*
 6290 * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
 6291 * the task fits. If no CPU is big enough, but there are idle ones, try to
 6292 * maximize capacity.
 6293 */
 6294static int
 6295select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 6296{
 6297	unsigned long task_util, best_cap = 0;
 6298	int cpu, best_cpu = -1;
 6299	struct cpumask *cpus;
 6300
 6301	cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 6302	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 6303
 6304	task_util = uclamp_task_util(p);
 6305
 6306	for_each_cpu_wrap(cpu, cpus, target) {
 6307		unsigned long cpu_cap = capacity_of(cpu);
 6308
 6309		if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
 6310			continue;
 6311		if (fits_capacity(task_util, cpu_cap))
 6312			return cpu;
 6313
 6314		if (cpu_cap > best_cap) {
 6315			best_cap = cpu_cap;
 6316			best_cpu = cpu;
 6317		}
 6318	}
 6319
 6320	return best_cpu;
 6321}
 6322
 6323static inline bool asym_fits_capacity(int task_util, int cpu)
 6324{
 6325	if (static_branch_unlikely(&sched_asym_cpucapacity))
 6326		return fits_capacity(task_util, capacity_of(cpu));
 6327
 6328	return true;
 6329}
 6330
 6331/*
 6332 * Try and locate an idle core/thread in the LLC cache domain.
 6333 */
 6334static int select_idle_sibling(struct task_struct *p, int prev, int target)
 6335{
 6336	bool has_idle_core = false;
 6337	struct sched_domain *sd;
 6338	unsigned long task_util;
 6339	int i, recent_used_cpu;
 6340
 6341	/*
 6342	 * On asymmetric system, update task utilization because we will check
 6343	 * that the task fits with cpu's capacity.
 6344	 */
 6345	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
 6346		sync_entity_load_avg(&p->se);
 6347		task_util = uclamp_task_util(p);
 6348	}
 6349
 6350	/*
 6351	 * per-cpu select_idle_mask usage
 6352	 */
 6353	lockdep_assert_irqs_disabled();
 6354
 6355	if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
 6356	    asym_fits_capacity(task_util, target))
 6357		return target;
 6358
 6359	/*
 6360	 * If the previous CPU is cache affine and idle, don't be stupid:
 6361	 */
 6362	if (prev != target && cpus_share_cache(prev, target) &&
 6363	    (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
 6364	    asym_fits_capacity(task_util, prev))
 6365		return prev;
 6366
 6367	/*
 6368	 * Allow a per-cpu kthread to stack with the wakee if the
 6369	 * kworker thread and the tasks previous CPUs are the same.
 6370	 * The assumption is that the wakee queued work for the
 6371	 * per-cpu kthread that is now complete and the wakeup is
 6372	 * essentially a sync wakeup. An obvious example of this
 6373	 * pattern is IO completions.
 6374	 */
 6375	if (is_per_cpu_kthread(current) &&
 6376	    prev == smp_processor_id() &&
 6377	    this_rq()->nr_running <= 1) {
 6378		return prev;
 6379	}
 6380
 6381	/* Check a recently used CPU as a potential idle candidate: */
 6382	recent_used_cpu = p->recent_used_cpu;
 6383	if (recent_used_cpu != prev &&
 6384	    recent_used_cpu != target &&
 6385	    cpus_share_cache(recent_used_cpu, target) &&
 6386	    (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
 6387	    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
 6388	    asym_fits_capacity(task_util, recent_used_cpu)) {
 6389		/*
 6390		 * Replace recent_used_cpu with prev as it is a potential
 6391		 * candidate for the next wake:
 6392		 */
 6393		p->recent_used_cpu = prev;
 6394		return recent_used_cpu;
 6395	}
 6396
 6397	/*
 6398	 * For asymmetric CPU capacity systems, our domain of interest is
 6399	 * sd_asym_cpucapacity rather than sd_llc.
 6400	 */
 6401	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
 6402		sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
 6403		/*
 6404		 * On an asymmetric CPU capacity system where an exclusive
 6405		 * cpuset defines a symmetric island (i.e. one unique
 6406		 * capacity_orig value through the cpuset), the key will be set
 6407		 * but the CPUs within that cpuset will not have a domain with
 6408		 * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
 6409		 * capacity path.
 6410		 */
 6411		if (sd) {
 6412			i = select_idle_capacity(p, sd, target);
 6413			return ((unsigned)i < nr_cpumask_bits) ? i : target;
 6414		}
 6415	}
 6416
 6417	sd = rcu_dereference(per_cpu(sd_llc, target));
 6418	if (!sd)
 6419		return target;
 6420
 6421	if (sched_smt_active()) {
 6422		has_idle_core = test_idle_cores(target, false);
 6423
 6424		if (!has_idle_core && cpus_share_cache(prev, target)) {
 6425			i = select_idle_smt(p, sd, prev);
 6426			if ((unsigned int)i < nr_cpumask_bits)
 6427				return i;
 6428		}
 6429	}
 6430
 6431	i = select_idle_cpu(p, sd, has_idle_core, target);
 6432	if ((unsigned)i < nr_cpumask_bits)
 6433		return i;
 6434
 6435	return target;
 6436}
 6437
 6438/**
 6439 * cpu_util - Estimates the amount of capacity of a CPU used by CFS tasks.
 6440 * @cpu: the CPU to get the utilization of
 6441 *
 6442 * The unit of the return value must be the one of capacity so we can compare
 6443 * the utilization with the capacity of the CPU that is available for CFS task
 6444 * (ie cpu_capacity).
 6445 *
 6446 * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
 6447 * recent utilization of currently non-runnable tasks on a CPU. It represents
 6448 * the amount of utilization of a CPU in the range [0..capacity_orig] where
 6449 * capacity_orig is the cpu_capacity available at the highest frequency
 6450 * (arch_scale_freq_capacity()).
 6451 * The utilization of a CPU converges towards a sum equal to or less than the
 6452 * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
 6453 * the running time on this CPU scaled by capacity_curr.
 6454 *
 6455 * The estimated utilization of a CPU is defined to be the maximum between its
 6456 * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks
 6457 * currently RUNNABLE on that CPU.
 6458 * This allows to properly represent the expected utilization of a CPU which
 6459 * has just got a big task running since a long sleep period. At the same time
 6460 * however it preserves the benefits of the "blocked utilization" in
 6461 * describing the potential for other tasks waking up on the same CPU.
 6462 *
 6463 * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
 6464 * higher than capacity_orig because of unfortunate rounding in
 6465 * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
 6466 * the average stabilizes with the new running time. We need to check that the
 6467 * utilization stays within the range of [0..capacity_orig] and cap it if
 6468 * necessary. Without utilization capping, a group could be seen as overloaded
 6469 * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
 6470 * available capacity. We allow utilization to overshoot capacity_curr (but not
 6471 * capacity_orig) as it useful for predicting the capacity required after task
 6472 * migrations (scheduler-driven DVFS).
 6473 *
 6474 * Return: the (estimated) utilization for the specified CPU
 6475 */
 6476static inline unsigned long cpu_util(int cpu)
 6477{
 6478	struct cfs_rq *cfs_rq;
 6479	unsigned int util;
 6480
 6481	cfs_rq = &cpu_rq(cpu)->cfs;
 6482	util = READ_ONCE(cfs_rq->avg.util_avg);
 6483
 6484	if (sched_feat(UTIL_EST))
 6485		util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
 6486
 6487	return min_t(unsigned long, util, capacity_orig_of(cpu));
 6488}
 6489
 6490/*
 6491 * cpu_util_without: compute cpu utilization without any contributions from *p
 6492 * @cpu: the CPU which utilization is requested
 6493 * @p: the task which utilization should be discounted
 6494 *
 6495 * The utilization of a CPU is defined by the utilization of tasks currently
 6496 * enqueued on that CPU as well as tasks which are currently sleeping after an
 6497 * execution on that CPU.
 6498 *
 6499 * This method returns the utilization of the specified CPU by discounting the
 6500 * utilization of the specified task, whenever the task is currently
 6501 * contributing to the CPU utilization.
 6502 */
 6503static unsigned long cpu_util_without(int cpu, struct task_struct *p)
 6504{
 6505	struct cfs_rq *cfs_rq;
 6506	unsigned int util;
 6507
 6508	/* Task has no contribution or is new */
 6509	if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
 6510		return cpu_util(cpu);
 6511
 6512	cfs_rq = &cpu_rq(cpu)->cfs;
 6513	util = READ_ONCE(cfs_rq->avg.util_avg);
 6514
 6515	/* Discount task's util from CPU's util */
 6516	lsub_positive(&util, task_util(p));
 6517
 6518	/*
 6519	 * Covered cases:
 6520	 *
 6521	 * a) if *p is the only task sleeping on this CPU, then:
 6522	 *      cpu_util (== task_util) > util_est (== 0)
 6523	 *    and thus we return:
 6524	 *      cpu_util_without = (cpu_util - task_util) = 0
 6525	 *
 6526	 * b) if other tasks are SLEEPING on this CPU, which is now exiting
 6527	 *    IDLE, then:
 6528	 *      cpu_util >= task_util
 6529	 *      cpu_util > util_est (== 0)
 6530	 *    and thus we discount *p's blocked utilization to return:
 6531	 *      cpu_util_without = (cpu_util - task_util) >= 0
 6532	 *
 6533	 * c) if other tasks are RUNNABLE on that CPU and
 6534	 *      util_est > cpu_util
 6535	 *    then we use util_est since it returns a more restrictive
 6536	 *    estimation of the spare capacity on that CPU, by just
 6537	 *    considering the expected utilization of tasks already
 6538	 *    runnable on that CPU.
 6539	 *
 6540	 * Cases a) and b) are covered by the above code, while case c) is
 6541	 * covered by the following code when estimated utilization is
 6542	 * enabled.
 6543	 */
 6544	if (sched_feat(UTIL_EST)) {
 6545		unsigned int estimated =
 6546			READ_ONCE(cfs_rq->avg.util_est.enqueued);
 6547
 6548		/*
 6549		 * Despite the following checks we still have a small window
 6550		 * for a possible race, when an execl's select_task_rq_fair()
 6551		 * races with LB's detach_task():
 6552		 *
 6553		 *   detach_task()
 6554		 *     p->on_rq = TASK_ON_RQ_MIGRATING;
 6555		 *     ---------------------------------- A
 6556		 *     deactivate_task()                   \
 6557		 *       dequeue_task()                     + RaceTime
 6558		 *         util_est_dequeue()              /
 6559		 *     ---------------------------------- B
 6560		 *
 6561		 * The additional check on "current == p" it's required to
 6562		 * properly fix the execl regression and it helps in further
 6563		 * reducing the chances for the above race.
 6564		 */
 6565		if (unlikely(task_on_rq_queued(p) || current == p))
 6566			lsub_positive(&estimated, _task_util_est(p));
 6567
 6568		util = max(util, estimated);
 6569	}
 6570
 6571	/*
 6572	 * Utilization (estimated) can exceed the CPU capacity, thus let's
 6573	 * clamp to the maximum CPU capacity to ensure consistency with
 6574	 * the cpu_util call.
 6575	 */
 6576	return min_t(unsigned long, util, capacity_orig_of(cpu));
 6577}
 6578
 6579/*
 6580 * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
 6581 * to @dst_cpu.
 6582 */
 6583static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
 6584{
 6585	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
 6586	unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg);
 6587
 6588	/*
 6589	 * If @p migrates from @cpu to another, remove its contribution. Or,
 6590	 * if @p migrates from another CPU to @cpu, add its contribution. In
 6591	 * the other cases, @cpu is not impacted by the migration, so the
 6592	 * util_avg should already be correct.
 6593	 */
 6594	if (task_cpu(p) == cpu && dst_cpu != cpu)
 6595		lsub_positive(&util, task_util(p));
 6596	else if (task_cpu(p) != cpu && dst_cpu == cpu)
 6597		util += task_util(p);
 6598
 6599	if (sched_feat(UTIL_EST)) {
 6600		util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
 6601
 6602		/*
 6603		 * During wake-up, the task isn't enqueued yet and doesn't
 6604		 * appear in the cfs_rq->avg.util_est.enqueued of any rq,
 6605		 * so just add it (if needed) to "simulate" what will be
 6606		 * cpu_util() after the task has been enqueued.
 6607		 */
 6608		if (dst_cpu == cpu)
 6609			util_est += _task_util_est(p);
 6610
 6611		util = max(util, util_est);
 6612	}
 6613
 6614	return min(util, capacity_orig_of(cpu));
 6615}
 6616
 6617/*
 6618 * compute_energy(): Estimates the energy that @pd would consume if @p was
 6619 * migrated to @dst_cpu. compute_energy() predicts what will be the utilization
 6620 * landscape of @pd's CPUs after the task migration, and uses the Energy Model
 6621 * to compute what would be the energy if we decided to actually migrate that
 6622 * task.
 6623 */
 6624static long
 6625compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
 6626{
 6627	struct cpumask *pd_mask = perf_domain_span(pd);
 6628	unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask));
 6629	unsigned long max_util = 0, sum_util = 0;
 6630	unsigned long _cpu_cap = cpu_cap;
 6631	int cpu;
 6632
 6633	_cpu_cap -= arch_scale_thermal_pressure(cpumask_first(pd_mask));
 6634
 6635	/*
 6636	 * The capacity state of CPUs of the current rd can be driven by CPUs
 6637	 * of another rd if they belong to the same pd. So, account for the
 6638	 * utilization of these CPUs too by masking pd with cpu_online_mask
 6639	 * instead of the rd span.
 6640	 *
 6641	 * If an entire pd is outside of the current rd, it will not appear in
 6642	 * its pd list and will not be accounted by compute_energy().
 6643	 */
 6644	for_each_cpu_and(cpu, pd_mask, cpu_online_mask) {
 6645		unsigned long util_freq = cpu_util_next(cpu, p, dst_cpu);
 6646		unsigned long cpu_util, util_running = util_freq;
 6647		struct task_struct *tsk = NULL;
 6648
 6649		/*
 6650		 * When @p is placed on @cpu:
 6651		 *
 6652		 * util_running = max(cpu_util, cpu_util_est) +
 6653		 *		  max(task_util, _task_util_est)
 6654		 *
 6655		 * while cpu_util_next is: max(cpu_util + task_util,
 6656		 *			       cpu_util_est + _task_util_est)
 6657		 */
 6658		if (cpu == dst_cpu) {
 6659			tsk = p;
 6660			util_running =
 6661				cpu_util_next(cpu, p, -1) + task_util_est(p);
 6662		}
 6663
 6664		/*
 6665		 * Busy time computation: utilization clamping is not
 6666		 * required since the ratio (sum_util / cpu_capacity)
 6667		 * is already enough to scale the EM reported power
 6668		 * consumption at the (eventually clamped) cpu_capacity.
 6669		 */
 6670		cpu_util = effective_cpu_util(cpu, util_running, cpu_cap,
 6671					      ENERGY_UTIL, NULL);
 6672
 6673		sum_util += min(cpu_util, _cpu_cap);
 6674
 6675		/*
 6676		 * Performance domain frequency: utilization clamping
 6677		 * must be considered since it affects the selection
 6678		 * of the performance domain frequency.
 6679		 * NOTE: in case RT tasks are running, by default the
 6680		 * FREQUENCY_UTIL's utilization can be max OPP.
 6681		 */
 6682		cpu_util = effective_cpu_util(cpu, util_freq, cpu_cap,
 6683					      FREQUENCY_UTIL, tsk);
 6684		max_util = max(max_util, min(cpu_util, _cpu_cap));
 6685	}
 6686
 6687	return em_cpu_energy(pd->em_pd, max_util, sum_util, _cpu_cap);
 6688}
 6689
 6690/*
 6691 * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
 6692 * waking task. find_energy_efficient_cpu() looks for the CPU with maximum
 6693 * spare capacity in each performance domain and uses it as a potential
 6694 * candidate to execute the task. Then, it uses the Energy Model to figure
 6695 * out which of the CPU candidates is the most energy-efficient.
 6696 *
 6697 * The rationale for this heuristic is as follows. In a performance domain,
 6698 * all the most energy efficient CPU candidates (according to the Energy
 6699 * Model) are those for which we'll request a low frequency. When there are
 6700 * several CPUs for which the frequency request will be the same, we don't
 6701 * have enough data to break the tie between them, because the Energy Model
 6702 * only includes active power costs. With this model, if we assume that
 6703 * frequency requests follow utilization (e.g. using schedutil), the CPU with
 6704 * the maximum spare capacity in a performance domain is guaranteed to be among
 6705 * the best candidates of the performance domain.
 6706 *
 6707 * In practice, it could be preferable from an energy standpoint to pack
 6708 * small tasks on a CPU in order to let other CPUs go in deeper idle states,
 6709 * but that could also hurt our chances to go cluster idle, and we have no
 6710 * ways to tell with the current Energy Model if this is actually a good
 6711 * idea or not. So, find_energy_efficient_cpu() basically favors
 6712 * cluster-packing, and spreading inside a cluster. That should at least be
 6713 * a good thing for latency, and this is consistent with the idea that most
 6714 * of the energy savings of EAS come from the asymmetry of the system, and
 6715 * not so much from breaking the tie between identical CPUs. That's also the
 6716 * reason why EAS is enabled in the topology code only for systems where
 6717 * SD_ASYM_CPUCAPACITY is set.
 6718 *
 6719 * NOTE: Forkees are not accepted in the energy-aware wake-up path because
 6720 * they don't have any useful utilization data yet and it's not possible to
 6721 * forecast their impact on energy consumption. Consequently, they will be
 6722 * placed by find_idlest_cpu() on the least loaded CPU, which might turn out
 6723 * to be energy-inefficient in some use-cases. The alternative would be to
 6724 * bias new tasks towards specific types of CPUs first, or to try to infer
 6725 * their util_avg from the parent task, but those heuristics could hurt
 6726 * other use-cases too. So, until someone finds a better way to solve this,
 6727 * let's keep things simple by re-using the existing slow path.
 6728 */
 6729static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 6730{
 6731	unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
 6732	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
 6733	int cpu, best_energy_cpu = prev_cpu, target = -1;
 6734	unsigned long cpu_cap, util, base_energy = 0;
 6735	struct sched_domain *sd;
 6736	struct perf_domain *pd;
 6737
 6738	rcu_read_lock();
 6739	pd = rcu_dereference(rd->pd);
 6740	if (!pd || READ_ONCE(rd->overutilized))
 6741		goto unlock;
 6742
 6743	/*
 6744	 * Energy-aware wake-up happens on the lowest sched_domain starting
 6745	 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
 6746	 */
 6747	sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
 6748	while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
 6749		sd = sd->parent;
 6750	if (!sd)
 6751		goto unlock;
 6752
 6753	target = prev_cpu;
 6754
 6755	sync_entity_load_avg(&p->se);
 6756	if (!task_util_est(p))
 6757		goto unlock;
 6758
 6759	for (; pd; pd = pd->next) {
 6760		unsigned long cur_delta, spare_cap, max_spare_cap = 0;
 6761		bool compute_prev_delta = false;
 6762		unsigned long base_energy_pd;
 6763		int max_spare_cap_cpu = -1;
 6764
 6765		for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
 6766			if (!cpumask_test_cpu(cpu, p->cpus_ptr))
 6767				continue;
 6768
 6769			util = cpu_util_next(cpu, p, cpu);
 6770			cpu_cap = capacity_of(cpu);
 6771			spare_cap = cpu_cap;
 6772			lsub_positive(&spare_cap, util);
 6773
 6774			/*
 6775			 * Skip CPUs that cannot satisfy the capacity request.
 6776			 * IOW, placing the task there would make the CPU
 6777			 * overutilized. Take uclamp into account to see how
 6778			 * much capacity we can get out of the CPU; this is
 6779			 * aligned with sched_cpu_util().
 6780			 */
 6781			util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
 6782			if (!fits_capacity(util, cpu_cap))
 6783				continue;
 6784
 6785			if (cpu == prev_cpu) {
 6786				/* Always use prev_cpu as a candidate. */
 6787				compute_prev_delta = true;
 6788			} else if (spare_cap > max_spare_cap) {
 6789				/*
 6790				 * Find the CPU with the maximum spare capacity
 6791				 * in the performance domain.
 6792				 */
 6793				max_spare_cap = spare_cap;
 6794				max_spare_cap_cpu = cpu;
 6795			}
 6796		}
 6797
 6798		if (max_spare_cap_cpu < 0 && !compute_prev_delta)
 6799			continue;
 6800
 6801		/* Compute the 'base' energy of the pd, without @p */
 6802		base_energy_pd = compute_energy(p, -1, pd);
 6803		base_energy += base_energy_pd;
 6804
 6805		/* Evaluate the energy impact of using prev_cpu. */
 6806		if (compute_prev_delta) {
 6807			prev_delta = compute_energy(p, prev_cpu, pd);
 6808			if (prev_delta < base_energy_pd)
 6809				goto unlock;
 6810			prev_delta -= base_energy_pd;
 6811			best_delta = min(best_delta, prev_delta);
 6812		}
 6813
 6814		/* Evaluate the energy impact of using max_spare_cap_cpu. */
 6815		if (max_spare_cap_cpu >= 0) {
 6816			cur_delta = compute_energy(p, max_spare_cap_cpu, pd);
 6817			if (cur_delta < base_energy_pd)
 6818				goto unlock;
 6819			cur_delta -= base_energy_pd;
 6820			if (cur_delta < best_delta) {
 6821				best_delta = cur_delta;
 6822				best_energy_cpu = max_spare_cap_cpu;
 6823			}
 6824		}
 6825	}
 6826	rcu_read_unlock();
 6827
 6828	/*
 6829	 * Pick the best CPU if prev_cpu cannot be used, or if it saves at
 6830	 * least 6% of the energy used by prev_cpu.
 6831	 */
 6832	if ((prev_delta == ULONG_MAX) ||
 6833	    (prev_delta - best_delta) > ((prev_delta + base_energy) >> 4))
 6834		target = best_energy_cpu;
 6835
 6836	return target;
 6837
 6838unlock:
 6839	rcu_read_unlock();
 6840
 6841	return target;
 6842}
 6843
 6844/*
 6845 * select_task_rq_fair: Select target runqueue for the waking task in domains
 6846 * that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE,
 6847 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
 6848 *
 6849 * Balances load by selecting the idlest CPU in the idlest group, or under
 6850 * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
 6851 *
 6852 * Returns the target CPU number.
 6853 */
 6854static int
 6855select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
 6856{
 6857	int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
 6858	struct sched_domain *tmp, *sd = NULL;
 6859	int cpu = smp_processor_id();
 6860	int new_cpu = prev_cpu;
 6861	int want_affine = 0;
 6862	/* SD_flags and WF_flags share the first nibble */
 6863	int sd_flag = wake_flags & 0xF;
 6864
 6865	/*
 6866	 * required for stable ->cpus_allowed
 6867	 */
 6868	lockdep_assert_held(&p->pi_lock);
 6869	if (wake_flags & WF_TTWU) {
 6870		record_wakee(p);
 6871
 6872		if (sched_energy_enabled()) {
 6873			new_cpu = find_energy_efficient_cpu(p, prev_cpu);
 6874			if (new_cpu >= 0)
 6875				return new_cpu;
 6876			new_cpu = prev_cpu;
 6877		}
 6878
 6879		want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
 6880	}
 6881
 6882	rcu_read_lock();
 6883	for_each_domain(cpu, tmp) {
 6884		/*
 6885		 * If both 'cpu' and 'prev_cpu' are part of this domain,
 6886		 * cpu is a valid SD_WAKE_AFFINE target.
 6887		 */
 6888		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
 6889		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
 6890			if (cpu != prev_cpu)
 6891				new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
 6892
 6893			sd = NULL; /* Prefer wake_affine over balance flags */
 6894			break;
 6895		}
 6896
 6897		if (tmp->flags & sd_flag)
 6898			sd = tmp;
 6899		else if (!want_affine)
 6900			break;
 6901	}
 6902
 6903	if (unlikely(sd)) {
 6904		/* Slow path */
 6905		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
 6906	} else if (wake_flags & WF_TTWU) { /* XXX always ? */
 6907		/* Fast path */
 6908		new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
 6909
 6910		if (want_affine)
 6911			current->recent_used_cpu = cpu;
 6912	}
 6913	rcu_read_unlock();
 6914
 6915	return new_cpu;
 6916}
 6917
 6918static void detach_entity_cfs_rq(struct sched_entity *se);
 6919
 6920/*
 6921 * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
 6922 * cfs_rq_of(p) references at time of call are still valid and identify the
 6923 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
 6924 */
 6925static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
 6926{
 6927	/*
 6928	 * As blocked tasks retain absolute vruntime the migration needs to
 6929	 * deal with this by subtracting the old and adding the new
 6930	 * min_vruntime -- the latter is done by enqueue_entity() when placing
 6931	 * the task on the new runqueue.
 6932	 */
 6933	if (READ_ONCE(p->__state) == TASK_WAKING) {
 6934		struct sched_entity *se = &p->se;
 6935		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 6936		u64 min_vruntime;
 6937
 6938#ifndef CONFIG_64BIT
 6939		u64 min_vruntime_copy;
 6940
 6941		do {
 6942			min_vruntime_copy = cfs_rq->min_vruntime_copy;
 6943			smp_rmb();
 6944			min_vruntime = cfs_rq->min_vruntime;
 6945		} while (min_vruntime != min_vruntime_copy);
 6946#else
 6947		min_vruntime = cfs_rq->min_vruntime;
 6948#endif
 6949
 6950		se->vruntime -= min_vruntime;
 6951	}
 6952
 6953	if (p->on_rq == TASK_ON_RQ_MIGRATING) {
 6954		/*
 6955		 * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
 6956		 * rq->lock and can modify state directly.
 6957		 */
 6958		lockdep_assert_rq_held(task_rq(p));
 6959		detach_entity_cfs_rq(&p->se);
 6960
 6961	} else {
 6962		/*
 6963		 * We are supposed to update the task to "current" time, then
 6964		 * its up to date and ready to go to new CPU/cfs_rq. But we
 6965		 * have difficulty in getting what current time is, so simply
 6966		 * throw away the out-of-date time. This will result in the
 6967		 * wakee task is less decayed, but giving the wakee more load
 6968		 * sounds not bad.
 6969		 */
 6970		remove_entity_load_avg(&p->se);
 6971	}
 6972
 6973	/* Tell new CPU we are migrated */
 6974	p->se.avg.last_update_time = 0;
 6975
 6976	/* We have migrated, no longer consider this task hot */
 6977	p->se.exec_start = 0;
 6978
 6979	update_scan_period(p, new_cpu);
 6980}
 6981
 6982static void task_dead_fair(struct task_struct *p)
 6983{
 6984	remove_entity_load_avg(&p->se);
 6985}
 6986
 6987static int
 6988balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 6989{
 6990	if (rq->nr_running)
 6991		return 1;
 6992
 6993	return newidle_balance(rq, rf) != 0;
 6994}
 6995#endif /* CONFIG_SMP */
 6996
 6997static unsigned long wakeup_gran(struct sched_entity *se)
 6998{
 6999	unsigned long gran = sysctl_sched_wakeup_granularity;
 7000
 7001	/*
 7002	 * Since its curr running now, convert the gran from real-time
 7003	 * to virtual-time in his units.
 7004	 *
 7005	 * By using 'se' instead of 'curr' we penalize light tasks, so
 7006	 * they get preempted easier. That is, if 'se' < 'curr' then
 7007	 * the resulting gran will be larger, therefore penalizing the
 7008	 * lighter, if otoh 'se' > 'curr' then the resulting gran will
 7009	 * be smaller, again penalizing the lighter task.
 7010	 *
 7011	 * This is especially important for buddies when the leftmost
 7012	 * task is higher priority than the buddy.
 7013	 */
 7014	return calc_delta_fair(gran, se);
 7015}
 7016
 7017/*
 7018 * Should 'se' preempt 'curr'.
 7019 *
 7020 *             |s1
 7021 *        |s2
 7022 *   |s3
 7023 *         g
 7024 *      |<--->|c
 7025 *
 7026 *  w(c, s1) = -1
 7027 *  w(c, s2) =  0
 7028 *  w(c, s3) =  1
 7029 *
 7030 */
 7031static int
 7032wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 7033{
 7034	s64 gran, vdiff = curr->vruntime - se->vruntime;
 7035
 7036	if (vdiff <= 0)
 7037		return -1;
 7038
 7039	gran = wakeup_gran(se);
 7040	if (vdiff > gran)
 7041		return 1;
 7042
 7043	return 0;
 7044}
 7045
 7046static void set_last_buddy(struct sched_entity *se)
 7047{
 7048	if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
 7049		return;
 7050
 7051	for_each_sched_entity(se) {
 7052		if (SCHED_WARN_ON(!se->on_rq))
 7053			return;
 7054		cfs_rq_of(se)->last = se;
 7055	}
 7056}
 7057
 7058static void set_next_buddy(struct sched_entity *se)
 7059{
 7060	if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
 7061		return;
 7062
 7063	for_each_sched_entity(se) {
 7064		if (SCHED_WARN_ON(!se->on_rq))
 7065			return;
 7066		cfs_rq_of(se)->next = se;
 7067	}
 7068}
 7069
 7070static void set_skip_buddy(struct sched_entity *se)
 7071{
 7072	for_each_sched_entity(se)
 7073		cfs_rq_of(se)->skip = se;
 7074}
 7075
 7076/*
 7077 * Preempt the current task with a newly woken task if needed:
 7078 */
 7079static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 7080{
 7081	struct task_struct *curr = rq->curr;
 7082	struct sched_entity *se = &curr->se, *pse = &p->se;
 7083	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 7084	int scale = cfs_rq->nr_running >= sched_nr_latency;
 7085	int next_buddy_marked = 0;
 7086
 7087	if (unlikely(se == pse))
 7088		return;
 7089
 7090	/*
 7091	 * This is possible from callers such as attach_tasks(), in which we
 7092	 * unconditionally check_preempt_curr() after an enqueue (which may have
 7093	 * lead to a throttle).  This both saves work and prevents false
 7094	 * next-buddy nomination below.
 7095	 */
 7096	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
 7097		return;
 7098
 7099	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
 7100		set_next_buddy(pse);
 7101		next_buddy_marked = 1;
 7102	}
 7103
 7104	/*
 7105	 * We can come here with TIF_NEED_RESCHED already set from new task
 7106	 * wake up path.
 7107	 *
 7108	 * Note: this also catches the edge-case of curr being in a throttled
 7109	 * group (e.g. via set_curr_task), since update_curr() (in the
 7110	 * enqueue of curr) will have resulted in resched being set.  This
 7111	 * prevents us from potentially nominating it as a false LAST_BUDDY
 7112	 * below.
 7113	 */
 7114	if (test_tsk_need_resched(curr))
 7115		return;
 7116
 7117	/* Idle tasks are by definition preempted by non-idle tasks. */
 7118	if (unlikely(task_has_idle_policy(curr)) &&
 7119	    likely(!task_has_idle_policy(p)))
 7120		goto preempt;
 7121
 7122	/*
 7123	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
 7124	 * is driven by the tick):
 7125	 */
 7126	if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
 7127		return;
 7128
 7129	find_matching_se(&se, &pse);
 7130	update_curr(cfs_rq_of(se));
 7131	BUG_ON(!pse);
 7132	if (wakeup_preempt_entity(se, pse) == 1) {
 7133		/*
 7134		 * Bias pick_next to pick the sched entity that is
 7135		 * triggering this preemption.
 7136		 */
 7137		if (!next_buddy_marked)
 7138			set_next_buddy(pse);
 7139		goto preempt;
 7140	}
 7141
 7142	return;
 7143
 7144preempt:
 7145	resched_curr(rq);
 7146	/*
 7147	 * Only set the backward buddy when the current task is still
 7148	 * on the rq. This can happen when a wakeup gets interleaved
 7149	 * with schedule on the ->pre_schedule() or idle_balance()
 7150	 * point, either of which can * drop the rq lock.
 7151	 *
 7152	 * Also, during early boot the idle thread is in the fair class,
 7153	 * for obvious reasons its a bad idea to schedule back to it.
 7154	 */
 7155	if (unlikely(!se->on_rq || curr == rq->idle))
 7156		return;
 7157
 7158	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
 7159		set_last_buddy(se);
 7160}
 7161
 7162#ifdef CONFIG_SMP
 7163static struct task_struct *pick_task_fair(struct rq *rq)
 7164{
 7165	struct sched_entity *se;
 7166	struct cfs_rq *cfs_rq;
 7167
 7168again:
 7169	cfs_rq = &rq->cfs;
 7170	if (!cfs_rq->nr_running)
 7171		return NULL;
 7172
 7173	do {
 7174		struct sched_entity *curr = cfs_rq->curr;
 7175
 7176		/* When we pick for a remote RQ, we'll not have done put_prev_entity() */
 7177		if (curr) {
 7178			if (curr->on_rq)
 7179				update_curr(cfs_rq);
 7180			else
 7181				curr = NULL;
 7182
 7183			if (unlikely(check_cfs_rq_runtime(cfs_rq)))
 7184				goto again;
 7185		}
 7186
 7187		se = pick_next_entity(cfs_rq, curr);
 7188		cfs_rq = group_cfs_rq(se);
 7189	} while (cfs_rq);
 7190
 7191	return task_of(se);
 7192}
 7193#endif
 7194
 7195struct task_struct *
 7196pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 7197{
 7198	struct cfs_rq *cfs_rq = &rq->cfs;
 7199	struct sched_entity *se;
 7200	struct task_struct *p;
 7201	int new_tasks;
 7202
 7203again:
 7204	if (!sched_fair_runnable(rq))
 7205		goto idle;
 7206
 7207#ifdef CONFIG_FAIR_GROUP_SCHED
 7208	if (!prev || prev->sched_class != &fair_sched_class)
 7209		goto simple;
 7210
 7211	/*
 7212	 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
 7213	 * likely that a next task is from the same cgroup as the current.
 7214	 *
 7215	 * Therefore attempt to avoid putting and setting the entire cgroup
 7216	 * hierarchy, only change the part that actually changes.
 7217	 */
 7218
 7219	do {
 7220		struct sched_entity *curr = cfs_rq->curr;
 7221
 7222		/*
 7223		 * Since we got here without doing put_prev_entity() we also
 7224		 * have to consider cfs_rq->curr. If it is still a runnable
 7225		 * entity, update_curr() will update its vruntime, otherwise
 7226		 * forget we've ever seen it.
 7227		 */
 7228		if (curr) {
 7229			if (curr->on_rq)
 7230				update_curr(cfs_rq);
 7231			else
 7232				curr = NULL;
 7233
 7234			/*
 7235			 * This call to check_cfs_rq_runtime() will do the
 7236			 * throttle and dequeue its entity in the parent(s).
 7237			 * Therefore the nr_running test will indeed
 7238			 * be correct.
 7239			 */
 7240			if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
 7241				cfs_rq = &rq->cfs;
 7242
 7243				if (!cfs_rq->nr_running)
 7244					goto idle;
 7245
 7246				goto simple;
 7247			}
 7248		}
 7249
 7250		se = pick_next_entity(cfs_rq, curr);
 7251		cfs_rq = group_cfs_rq(se);
 7252	} while (cfs_rq);
 7253
 7254	p = task_of(se);
 7255
 7256	/*
 7257	 * Since we haven't yet done put_prev_entity and if the selected task
 7258	 * is a different task than we started out with, try and touch the
 7259	 * least amount of cfs_rqs.
 7260	 */
 7261	if (prev != p) {
 7262		struct sched_entity *pse = &prev->se;
 7263
 7264		while (!(cfs_rq = is_same_group(se, pse))) {
 7265			int se_depth = se->depth;
 7266			int pse_depth = pse->depth;
 7267
 7268			if (se_depth <= pse_depth) {
 7269				put_prev_entity(cfs_rq_of(pse), pse);
 7270				pse = parent_entity(pse);
 7271			}
 7272			if (se_depth >= pse_depth) {
 7273				set_next_entity(cfs_rq_of(se), se);
 7274				se = parent_entity(se);
 7275			}
 7276		}
 7277
 7278		put_prev_entity(cfs_rq, pse);
 7279		set_next_entity(cfs_rq, se);
 7280	}
 7281
 7282	goto done;
 7283simple:
 7284#endif
 7285	if (prev)
 7286		put_prev_task(rq, prev);
 7287
 7288	do {
 7289		se = pick_next_entity(cfs_rq, NULL);
 7290		set_next_entity(cfs_rq, se);
 7291		cfs_rq = group_cfs_rq(se);
 7292	} while (cfs_rq);
 7293
 7294	p = task_of(se);
 7295
 7296done: __maybe_unused;
 7297#ifdef CONFIG_SMP
 7298	/*
 7299	 * Move the next running task to the front of
 7300	 * the list, so our cfs_tasks list becomes MRU
 7301	 * one.
 7302	 */
 7303	list_move(&p->se.group_node, &rq->cfs_tasks);
 7304#endif
 7305
 7306	if (hrtick_enabled_fair(rq))
 7307		hrtick_start_fair(rq, p);
 7308
 7309	update_misfit_status(p, rq);
 7310
 7311	return p;
 7312
 7313idle:
 7314	if (!rf)
 7315		return NULL;
 7316
 7317	new_tasks = newidle_balance(rq, rf);
 7318
 7319	/*
 7320	 * Because newidle_balance() releases (and re-acquires) rq->lock, it is
 7321	 * possible for any higher priority task to appear. In that case we
 7322	 * must re-start the pick_next_entity() loop.
 7323	 */
 7324	if (new_tasks < 0)
 7325		return RETRY_TASK;
 7326
 7327	if (new_tasks > 0)
 7328		goto again;
 7329
 7330	/*
 7331	 * rq is about to be idle, check if we need to update the
 7332	 * lost_idle_time of clock_pelt
 7333	 */
 7334	update_idle_rq_clock_pelt(rq);
 7335
 7336	return NULL;
 7337}
 7338
 7339static struct task_struct *__pick_next_task_fair(struct rq *rq)
 7340{
 7341	return pick_next_task_fair(rq, NULL, NULL);
 7342}
 7343
 7344/*
 7345 * Account for a descheduled task:
 7346 */
 7347static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
 7348{
 7349	struct sched_entity *se = &prev->se;
 7350	struct cfs_rq *cfs_rq;
 7351
 7352	for_each_sched_entity(se) {
 7353		cfs_rq = cfs_rq_of(se);
 7354		put_prev_entity(cfs_rq, se);
 7355	}
 7356}
 7357
 7358/*
 7359 * sched_yield() is very simple
 7360 *
 7361 * The magic of dealing with the ->skip buddy is in pick_next_entity.
 7362 */
 7363static void yield_task_fair(struct rq *rq)
 7364{
 7365	struct task_struct *curr = rq->curr;
 7366	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 7367	struct sched_entity *se = &curr->se;
 7368
 7369	/*
 7370	 * Are we the only task in the tree?
 7371	 */
 7372	if (unlikely(rq->nr_running == 1))
 7373		return;
 7374
 7375	clear_buddies(cfs_rq, se);
 7376
 7377	if (curr->policy != SCHED_BATCH) {
 7378		update_rq_clock(rq);
 7379		/*
 7380		 * Update run-time statistics of the 'current'.
 7381		 */
 7382		update_curr(cfs_rq);
 7383		/*
 7384		 * Tell update_rq_clock() that we've just updated,
 7385		 * so we don't do microscopic update in schedule()
 7386		 * and double the fastpath cost.
 7387		 */
 7388		rq_clock_skip_update(rq);
 7389	}
 7390
 7391	set_skip_buddy(se);
 7392}
 7393
 7394static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
 7395{
 7396	struct sched_entity *se = &p->se;
 7397
 7398	/* throttled hierarchies are not runnable */
 7399	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
 7400		return false;
 7401
 7402	/* Tell the scheduler that we'd really like pse to run next. */
 7403	set_next_buddy(se);
 7404
 7405	yield_task_fair(rq);
 7406
 7407	return true;
 7408}
 7409
 7410#ifdef CONFIG_SMP
 7411/**************************************************
 7412 * Fair scheduling class load-balancing methods.
 7413 *
 7414 * BASICS
 7415 *
 7416 * The purpose of load-balancing is to achieve the same basic fairness the
 7417 * per-CPU scheduler provides, namely provide a proportional amount of compute
 7418 * time to each task. This is expressed in the following equation:
 7419 *
 7420 *   W_i,n/P_i == W_j,n/P_j for all i,j                               (1)
 7421 *
 7422 * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight
 7423 * W_i,0 is defined as:
 7424 *
 7425 *   W_i,0 = \Sum_j w_i,j                                             (2)
 7426 *
 7427 * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight
 7428 * is derived from the nice value as per sched_prio_to_weight[].
 7429 *
 7430 * The weight average is an exponential decay average of the instantaneous
 7431 * weight:
 7432 *
 7433 *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
 7434 *
 7435 * C_i is the compute capacity of CPU i, typically it is the
 7436 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
 7437 * can also include other factors [XXX].
 7438 *
 7439 * To achieve this balance we define a measure of imbalance which follows
 7440 * directly from (1):
 7441 *
 7442 *   imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j }    (4)
 7443 *
 7444 * We them move tasks around to minimize the imbalance. In the continuous
 7445 * function space it is obvious this converges, in the discrete case we get
 7446 * a few fun cases generally called infeasible weight scenarios.
 7447 *
 7448 * [XXX expand on:
 7449 *     - infeasible weights;
 7450 *     - local vs global optima in the discrete case. ]
 7451 *
 7452 *
 7453 * SCHED DOMAINS
 7454 *
 7455 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
 7456 * for all i,j solution, we create a tree of CPUs that follows the hardware
 7457 * topology where each level pairs two lower groups (or better). This results
 7458 * in O(log n) layers. Furthermore we reduce the number of CPUs going up the
 7459 * tree to only the first of the previous level and we decrease the frequency
 7460 * of load-balance at each level inv. proportional to the number of CPUs in
 7461 * the groups.
 7462 *
 7463 * This yields:
 7464 *
 7465 *     log_2 n     1     n
 7466 *   \Sum       { --- * --- * 2^i } = O(n)                            (5)
 7467 *     i = 0      2^i   2^i
 7468 *                               `- size of each group
 7469 *         |         |     `- number of CPUs doing load-balance
 7470 *         |         `- freq
 7471 *         `- sum over all levels
 7472 *
 7473 * Coupled with a limit on how many tasks we can migrate every balance pass,
 7474 * this makes (5) the runtime complexity of the balancer.
 7475 *
 7476 * An important property here is that each CPU is still (indirectly) connected
 7477 * to every other CPU in at most O(log n) steps:
 7478 *
 7479 * The adjacency matrix of the resulting graph is given by:
 7480 *
 7481 *             log_2 n
 7482 *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
 7483 *             k = 0
 7484 *
 7485 * And you'll find that:
 7486 *
 7487 *   A^(log_2 n)_i,j != 0  for all i,j                                (7)
 7488 *
 7489 * Showing there's indeed a path between every CPU in at most O(log n) steps.
 7490 * The task movement gives a factor of O(m), giving a convergence complexity
 7491 * of:
 7492 *
 7493 *   O(nm log n),  n := nr_cpus, m := nr_tasks                        (8)
 7494 *
 7495 *
 7496 * WORK CONSERVING
 7497 *
 7498 * In order to avoid CPUs going idle while there's still work to do, new idle
 7499 * balancing is more aggressive and has the newly idle CPU iterate up the domain
 7500 * tree itself instead of relying on other CPUs to bring it work.
 7501 *
 7502 * This adds some complexity to both (5) and (8) but it reduces the total idle
 7503 * time.
 7504 *
 7505 * [XXX more?]
 7506 *
 7507 *
 7508 * CGROUPS
 7509 *
 7510 * Cgroups make a horror show out of (2), instead of a simple sum we get:
 7511 *
 7512 *                                s_k,i
 7513 *   W_i,0 = \Sum_j \Prod_k w_k * -----                               (9)
 7514 *                                 S_k
 7515 *
 7516 * Where
 7517 *
 7518 *   s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i                 (10)
 7519 *
 7520 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i.
 7521 *
 7522 * The big problem is S_k, its a global sum needed to compute a local (W_i)
 7523 * property.
 7524 *
 7525 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
 7526 *      rewrite all of this once again.]
 7527 */
 7528
 7529static unsigned long __read_mostly max_load_balance_interval = HZ/10;
 7530
 7531enum fbq_type { regular, remote, all };
 7532
 7533/*
 7534 * 'group_type' describes the group of CPUs at the moment of load balancing.
 7535 *
 7536 * The enum is ordered by pulling priority, with the group with lowest priority
 7537 * first so the group_type can simply be compared when selecting the busiest
 7538 * group. See update_sd_pick_busiest().
 7539 */
 7540enum group_type {
 7541	/* The group has spare capacity that can be used to run more tasks.  */
 7542	group_has_spare = 0,
 7543	/*
 7544	 * The group is fully used and the tasks don't compete for more CPU
 7545	 * cycles. Nevertheless, some tasks might wait before running.
 7546	 */
 7547	group_fully_busy,
 7548	/*
 7549	 * SD_ASYM_CPUCAPACITY only: One task doesn't fit with CPU's capacity
 7550	 * and must be migrated to a more powerful CPU.
 7551	 */
 7552	group_misfit_task,
 7553	/*
 7554	 * SD_ASYM_PACKING only: One local CPU with higher capacity is available,
 7555	 * and the task should be migrated to it instead of running on the
 7556	 * current CPU.
 7557	 */
 7558	group_asym_packing,
 7559	/*
 7560	 * The tasks' affinity constraints previously prevented the scheduler
 7561	 * from balancing the load across the system.
 7562	 */
 7563	group_imbalanced,
 7564	/*
 7565	 * The CPU is overloaded and can't provide expected CPU cycles to all
 7566	 * tasks.
 7567	 */
 7568	group_overloaded
 7569};
 7570
 7571enum migration_type {
 7572	migrate_load = 0,
 7573	migrate_util,
 7574	migrate_task,
 7575	migrate_misfit
 7576};
 7577
 7578#define LBF_ALL_PINNED	0x01
 7579#define LBF_NEED_BREAK	0x02
 7580#define LBF_DST_PINNED  0x04
 7581#define LBF_SOME_PINNED	0x08
 7582#define LBF_ACTIVE_LB	0x10
 7583
 7584struct lb_env {
 7585	struct sched_domain	*sd;
 7586
 7587	struct rq		*src_rq;
 7588	int			src_cpu;
 7589
 7590	int			dst_cpu;
 7591	struct rq		*dst_rq;
 7592
 7593	struct cpumask		*dst_grpmask;
 7594	int			new_dst_cpu;
 7595	enum cpu_idle_type	idle;
 7596	long			imbalance;
 7597	/* The set of CPUs under consideration for load-balancing */
 7598	struct cpumask		*cpus;
 7599
 7600	unsigned int		flags;
 7601
 7602	unsigned int		loop;
 7603	unsigned int		loop_break;
 7604	unsigned int		loop_max;
 7605
 7606	enum fbq_type		fbq_type;
 7607	enum migration_type	migration_type;
 7608	struct list_head	tasks;
 7609};
 7610
 7611/*
 7612 * Is this task likely cache-hot:
 7613 */
 7614static int task_hot(struct task_struct *p, struct lb_env *env)
 7615{
 7616	s64 delta;
 7617
 7618	lockdep_assert_rq_held(env->src_rq);
 7619
 7620	if (p->sched_class != &fair_sched_class)
 7621		return 0;
 7622
 7623	if (unlikely(task_has_idle_policy(p)))
 7624		return 0;
 7625
 7626	/* SMT siblings share cache */
 7627	if (env->sd->flags & SD_SHARE_CPUCAPACITY)
 7628		return 0;
 7629
 7630	/*
 7631	 * Buddy candidates are cache hot:
 7632	 */
 7633	if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
 7634			(&p->se == cfs_rq_of(&p->se)->next ||
 7635			 &p->se == cfs_rq_of(&p->se)->last))
 7636		return 1;
 7637
 7638	if (sysctl_sched_migration_cost == -1)
 7639		return 1;
 7640
 7641	/*
 7642	 * Don't migrate task if the task's cookie does not match
 7643	 * with the destination CPU's core cookie.
 7644	 */
 7645	if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p))
 7646		return 1;
 7647
 7648	if (sysctl_sched_migration_cost == 0)
 7649		return 0;
 7650
 7651	delta = rq_clock_task(env->src_rq) - p->se.exec_start;
 7652
 7653	return delta < (s64)sysctl_sched_migration_cost;
 7654}
 7655
 7656#ifdef CONFIG_NUMA_BALANCING
 7657/*
 7658 * Returns 1, if task migration degrades locality
 7659 * Returns 0, if task migration improves locality i.e migration preferred.
 7660 * Returns -1, if task migration is not affected by locality.
 7661 */
 7662static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 7663{
 7664	struct numa_group *numa_group = rcu_dereference(p->numa_group);
 7665	unsigned long src_weight, dst_weight;
 7666	int src_nid, dst_nid, dist;
 7667
 7668	if (!static_branch_likely(&sched_numa_balancing))
 7669		return -1;
 7670
 7671	if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
 7672		return -1;
 7673
 7674	src_nid = cpu_to_node(env->src_cpu);
 7675	dst_nid = cpu_to_node(env->dst_cpu);
 7676
 7677	if (src_nid == dst_nid)
 7678		return -1;
 7679
 7680	/* Migrating away from the preferred node is always bad. */
 7681	if (src_nid == p->numa_preferred_nid) {
 7682		if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
 7683			return 1;
 7684		else
 7685			return -1;
 7686	}
 7687
 7688	/* Encourage migration to the preferred node. */
 7689	if (dst_nid == p->numa_preferred_nid)
 7690		return 0;
 7691
 7692	/* Leaving a core idle is often worse than degrading locality. */
 7693	if (env->idle == CPU_IDLE)
 7694		return -1;
 7695
 7696	dist = node_distance(src_nid, dst_nid);
 7697	if (numa_group) {
 7698		src_weight = group_weight(p, src_nid, dist);
 7699		dst_weight = group_weight(p, dst_nid, dist);
 7700	} else {
 7701		src_weight = task_weight(p, src_nid, dist);
 7702		dst_weight = task_weight(p, dst_nid, dist);
 7703	}
 7704
 7705	return dst_weight < src_weight;
 7706}
 7707
 7708#else
 7709static inline int migrate_degrades_locality(struct task_struct *p,
 7710					     struct lb_env *env)
 7711{
 7712	return -1;
 7713}
 7714#endif
 7715
 7716/*
 7717 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
 7718 */
 7719static
 7720int can_migrate_task(struct task_struct *p, struct lb_env *env)
 7721{
 7722	int tsk_cache_hot;
 7723
 7724	lockdep_assert_rq_held(env->src_rq);
 7725
 7726	/*
 7727	 * We do not migrate tasks that are:
 7728	 * 1) throttled_lb_pair, or
 7729	 * 2) cannot be migrated to this CPU due to cpus_ptr, or
 7730	 * 3) running (obviously), or
 7731	 * 4) are cache-hot on their current CPU.
 7732	 */
 7733	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
 7734		return 0;
 7735
 7736	/* Disregard pcpu kthreads; they are where they need to be. */
 7737	if (kthread_is_per_cpu(p))
 7738		return 0;
 7739
 7740	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
 7741		int cpu;
 7742
 7743		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
 7744
 7745		env->flags |= LBF_SOME_PINNED;
 7746
 7747		/*
 7748		 * Remember if this task can be migrated to any other CPU in
 7749		 * our sched_group. We may want to revisit it if we couldn't
 7750		 * meet load balance goals by pulling other tasks on src_cpu.
 7751		 *
 7752		 * Avoid computing new_dst_cpu
 7753		 * - for NEWLY_IDLE
 7754		 * - if we have already computed one in current iteration
 7755		 * - if it's an active balance
 7756		 */
 7757		if (env->idle == CPU_NEWLY_IDLE ||
 7758		    env->flags & (LBF_DST_PINNED | LBF_ACTIVE_LB))
 7759			return 0;
 7760
 7761		/* Prevent to re-select dst_cpu via env's CPUs: */
 7762		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
 7763			if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
 7764				env->flags |= LBF_DST_PINNED;
 7765				env->new_dst_cpu = cpu;
 7766				break;
 7767			}
 7768		}
 7769
 7770		return 0;
 7771	}
 7772
 7773	/* Record that we found at least one task that could run on dst_cpu */
 7774	env->flags &= ~LBF_ALL_PINNED;
 7775
 7776	if (task_running(env->src_rq, p)) {
 7777		schedstat_inc(p->se.statistics.nr_failed_migrations_running);
 7778		return 0;
 7779	}
 7780
 7781	/*
 7782	 * Aggressive migration if:
 7783	 * 1) active balance
 7784	 * 2) destination numa is preferred
 7785	 * 3) task is cache cold, or
 7786	 * 4) too many balance attempts have failed.
 7787	 */
 7788	if (env->flags & LBF_ACTIVE_LB)
 7789		return 1;
 7790
 7791	tsk_cache_hot = migrate_degrades_locality(p, env);
 7792	if (tsk_cache_hot == -1)
 7793		tsk_cache_hot = task_hot(p, env);
 7794
 7795	if (tsk_cache_hot <= 0 ||
 7796	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
 7797		if (tsk_cache_hot == 1) {
 7798			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
 7799			schedstat_inc(p->se.statistics.nr_forced_migrations);
 7800		}
 7801		return 1;
 7802	}
 7803
 7804	schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
 7805	return 0;
 7806}
 7807
 7808/*
 7809 * detach_task() -- detach the task for the migration specified in env
 7810 */
 7811static void detach_task(struct task_struct *p, struct lb_env *env)
 7812{
 7813	lockdep_assert_rq_held(env->src_rq);
 7814
 7815	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
 7816	set_task_cpu(p, env->dst_cpu);
 7817}
 7818
 7819/*
 7820 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
 7821 * part of active balancing operations within "domain".
 7822 *
 7823 * Returns a task if successful and NULL otherwise.
 7824 */
 7825static struct task_struct *detach_one_task(struct lb_env *env)
 7826{
 7827	struct task_struct *p;
 7828
 7829	lockdep_assert_rq_held(env->src_rq);
 7830
 7831	list_for_each_entry_reverse(p,
 7832			&env->src_rq->cfs_tasks, se.group_node) {
 7833		if (!can_migrate_task(p, env))
 7834			continue;
 7835
 7836		detach_task(p, env);
 7837
 7838		/*
 7839		 * Right now, this is only the second place where
 7840		 * lb_gained[env->idle] is updated (other is detach_tasks)
 7841		 * so we can safely collect stats here rather than
 7842		 * inside detach_tasks().
 7843		 */
 7844		schedstat_inc(env->sd->lb_gained[env->idle]);
 7845		return p;
 7846	}
 7847	return NULL;
 7848}
 7849
 7850static const unsigned int sched_nr_migrate_break = 32;
 7851
 7852/*
 7853 * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
 7854 * busiest_rq, as part of a balancing operation within domain "sd".
 7855 *
 7856 * Returns number of detached tasks if successful and 0 otherwise.
 7857 */
 7858static int detach_tasks(struct lb_env *env)
 7859{
 7860	struct list_head *tasks = &env->src_rq->cfs_tasks;
 7861	unsigned long util, load;
 7862	struct task_struct *p;
 7863	int detached = 0;
 7864
 7865	lockdep_assert_rq_held(env->src_rq);
 7866
 7867	/*
 7868	 * Source run queue has been emptied by another CPU, clear
 7869	 * LBF_ALL_PINNED flag as we will not test any task.
 7870	 */
 7871	if (env->src_rq->nr_running <= 1) {
 7872		env->flags &= ~LBF_ALL_PINNED;
 7873		return 0;
 7874	}
 7875
 7876	if (env->imbalance <= 0)
 7877		return 0;
 7878
 7879	while (!list_empty(tasks)) {
 7880		/*
 7881		 * We don't want to steal all, otherwise we may be treated likewise,
 7882		 * which could at worst lead to a livelock crash.
 7883		 */
 7884		if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
 7885			break;
 7886
 7887		p = list_last_entry(tasks, struct task_struct, se.group_node);
 7888
 7889		env->loop++;
 7890		/* We've more or less seen every task there is, call it quits */
 7891		if (env->loop > env->loop_max)
 7892			break;
 7893
 7894		/* take a breather every nr_migrate tasks */
 7895		if (env->loop > env->loop_break) {
 7896			env->loop_break += sched_nr_migrate_break;
 7897			env->flags |= LBF_NEED_BREAK;
 7898			break;
 7899		}
 7900
 7901		if (!can_migrate_task(p, env))
 7902			goto next;
 7903
 7904		switch (env->migration_type) {
 7905		case migrate_load:
 7906			/*
 7907			 * Depending of the number of CPUs and tasks and the
 7908			 * cgroup hierarchy, task_h_load() can return a null
 7909			 * value. Make sure that env->imbalance decreases
 7910			 * otherwise detach_tasks() will stop only after
 7911			 * detaching up to loop_max tasks.
 7912			 */
 7913			load = max_t(unsigned long, task_h_load(p), 1);
 7914
 7915			if (sched_feat(LB_MIN) &&
 7916			    load < 16 && !env->sd->nr_balance_failed)
 7917				goto next;
 7918
 7919			/*
 7920			 * Make sure that we don't migrate too much load.
 7921			 * Nevertheless, let relax the constraint if
 7922			 * scheduler fails to find a good waiting task to
 7923			 * migrate.
 7924			 */
 7925			if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
 7926				goto next;
 7927
 7928			env->imbalance -= load;
 7929			break;
 7930
 7931		case migrate_util:
 7932			util = task_util_est(p);
 7933
 7934			if (util > env->imbalance)
 7935				goto next;
 7936
 7937			env->imbalance -= util;
 7938			break;
 7939
 7940		case migrate_task:
 7941			env->imbalance--;
 7942			break;
 7943
 7944		case migrate_misfit:
 7945			/* This is not a misfit task */
 7946			if (task_fits_capacity(p, capacity_of(env->src_cpu)))
 7947				goto next;
 7948
 7949			env->imbalance = 0;
 7950			break;
 7951		}
 7952
 7953		detach_task(p, env);
 7954		list_add(&p->se.group_node, &env->tasks);
 7955
 7956		detached++;
 7957
 7958#ifdef CONFIG_PREEMPTION
 7959		/*
 7960		 * NEWIDLE balancing is a source of latency, so preemptible
 7961		 * kernels will stop after the first task is detached to minimize
 7962		 * the critical section.
 7963		 */
 7964		if (env->idle == CPU_NEWLY_IDLE)
 7965			break;
 7966#endif
 7967
 7968		/*
 7969		 * We only want to steal up to the prescribed amount of
 7970		 * load/util/tasks.
 7971		 */
 7972		if (env->imbalance <= 0)
 7973			break;
 7974
 7975		continue;
 7976next:
 7977		list_move(&p->se.group_node, tasks);
 7978	}
 7979
 7980	/*
 7981	 * Right now, this is one of only two places we collect this stat
 7982	 * so we can safely collect detach_one_task() stats here rather
 7983	 * than inside detach_one_task().
 7984	 */
 7985	schedstat_add(env->sd->lb_gained[env->idle], detached);
 7986
 7987	return detached;
 7988}
 7989
 7990/*
 7991 * attach_task() -- attach the task detached by detach_task() to its new rq.
 7992 */
 7993static void attach_task(struct rq *rq, struct task_struct *p)
 7994{
 7995	lockdep_assert_rq_held(rq);
 7996
 7997	BUG_ON(task_rq(p) != rq);
 7998	activate_task(rq, p, ENQUEUE_NOCLOCK);
 7999	check_preempt_curr(rq, p, 0);
 8000}
 8001
 8002/*
 8003 * attach_one_task() -- attaches the task returned from detach_one_task() to
 8004 * its new rq.
 8005 */
 8006static void attach_one_task(struct rq *rq, struct task_struct *p)
 8007{
 8008	struct rq_flags rf;
 8009
 8010	rq_lock(rq, &rf);
 8011	update_rq_clock(rq);
 8012	attach_task(rq, p);
 8013	rq_unlock(rq, &rf);
 8014}
 8015
 8016/*
 8017 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
 8018 * new rq.
 8019 */
 8020static void attach_tasks(struct lb_env *env)
 8021{
 8022	struct list_head *tasks = &env->tasks;
 8023	struct task_struct *p;
 8024	struct rq_flags rf;
 8025
 8026	rq_lock(env->dst_rq, &rf);
 8027	update_rq_clock(env->dst_rq);
 8028
 8029	while (!list_empty(tasks)) {
 8030		p = list_first_entry(tasks, struct task_struct, se.group_node);
 8031		list_del_init(&p->se.group_node);
 8032
 8033		attach_task(env->dst_rq, p);
 8034	}
 8035
 8036	rq_unlock(env->dst_rq, &rf);
 8037}
 8038
 8039#ifdef CONFIG_NO_HZ_COMMON
 8040static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
 8041{
 8042	if (cfs_rq->avg.load_avg)
 8043		return true;
 8044
 8045	if (cfs_rq->avg.util_avg)
 8046		return true;
 8047
 8048	return false;
 8049}
 8050
 8051static inline bool others_have_blocked(struct rq *rq)
 8052{
 8053	if (READ_ONCE(rq->avg_rt.util_avg))
 8054		return true;
 8055
 8056	if (READ_ONCE(rq->avg_dl.util_avg))
 8057		return true;
 8058
 8059	if (thermal_load_avg(rq))
 8060		return true;
 8061
 8062#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 8063	if (READ_ONCE(rq->avg_irq.util_avg))
 8064		return true;
 8065#endif
 8066
 8067	return false;
 8068}
 8069
 8070static inline void update_blocked_load_tick(struct rq *rq)
 8071{
 8072	WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
 8073}
 8074
 8075static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
 8076{
 8077	if (!has_blocked)
 8078		rq->has_blocked_load = 0;
 8079}
 8080#else
 8081static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
 8082static inline bool others_have_blocked(struct rq *rq) { return false; }
 8083static inline void update_blocked_load_tick(struct rq *rq) {}
 8084static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
 8085#endif
 8086
 8087static bool __update_blocked_others(struct rq *rq, bool *done)
 8088{
 8089	const struct sched_class *curr_class;
 8090	u64 now = rq_clock_pelt(rq);
 8091	unsigned long thermal_pressure;
 8092	bool decayed;
 8093
 8094	/*
 8095	 * update_load_avg() can call cpufreq_update_util(). Make sure that RT,
 8096	 * DL and IRQ signals have been updated before updating CFS.
 8097	 */
 8098	curr_class = rq->curr->sched_class;
 8099
 8100	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
 8101
 8102	decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
 8103		  update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
 8104		  update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) |
 8105		  update_irq_load_avg(rq, 0);
 8106
 8107	if (others_have_blocked(rq))
 8108		*done = false;
 8109
 8110	return decayed;
 8111}
 8112
 8113#ifdef CONFIG_FAIR_GROUP_SCHED
 8114
 8115static bool __update_blocked_fair(struct rq *rq, bool *done)
 8116{
 8117	struct cfs_rq *cfs_rq, *pos;
 8118	bool decayed = false;
 8119	int cpu = cpu_of(rq);
 8120
 8121	/*
 8122	 * Iterates the task_group tree in a bottom up fashion, see
 8123	 * list_add_leaf_cfs_rq() for details.
 8124	 */
 8125	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
 8126		struct sched_entity *se;
 8127
 8128		if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
 8129			update_tg_load_avg(cfs_rq);
 8130
 8131			if (cfs_rq == &rq->cfs)
 8132				decayed = true;
 8133		}
 8134
 8135		/* Propagate pending load changes to the parent, if any: */
 8136		se = cfs_rq->tg->se[cpu];
 8137		if (se && !skip_blocked_update(se))
 8138			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
 8139
 8140		/*
 8141		 * There can be a lot of idle CPU cgroups.  Don't let fully
 8142		 * decayed cfs_rqs linger on the list.
 8143		 */
 8144		if (cfs_rq_is_decayed(cfs_rq))
 8145			list_del_leaf_cfs_rq(cfs_rq);
 8146
 8147		/* Don't need periodic decay once load/util_avg are null */
 8148		if (cfs_rq_has_blocked(cfs_rq))
 8149			*done = false;
 8150	}
 8151
 8152	return decayed;
 8153}
 8154
 8155/*
 8156 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
 8157 * This needs to be done in a top-down fashion because the load of a child
 8158 * group is a fraction of its parents load.
 8159 */
 8160static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 8161{
 8162	struct rq *rq = rq_of(cfs_rq);
 8163	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
 8164	unsigned long now = jiffies;
 8165	unsigned long load;
 8166
 8167	if (cfs_rq->last_h_load_update == now)
 8168		return;
 8169
 8170	WRITE_ONCE(cfs_rq->h_load_next, NULL);
 8171	for_each_sched_entity(se) {
 8172		cfs_rq = cfs_rq_of(se);
 8173		WRITE_ONCE(cfs_rq->h_load_next, se);
 8174		if (cfs_rq->last_h_load_update == now)
 8175			break;
 8176	}
 8177
 8178	if (!se) {
 8179		cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
 8180		cfs_rq->last_h_load_update = now;
 8181	}
 8182
 8183	while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
 8184		load = cfs_rq->h_load;
 8185		load = div64_ul(load * se->avg.load_avg,
 8186			cfs_rq_load_avg(cfs_rq) + 1);
 8187		cfs_rq = group_cfs_rq(se);
 8188		cfs_rq->h_load = load;
 8189		cfs_rq->last_h_load_update = now;
 8190	}
 8191}
 8192
 8193static unsigned long task_h_load(struct task_struct *p)
 8194{
 8195	struct cfs_rq *cfs_rq = task_cfs_rq(p);
 8196
 8197	update_cfs_rq_h_load(cfs_rq);
 8198	return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
 8199			cfs_rq_load_avg(cfs_rq) + 1);
 8200}
 8201#else
 8202static bool __update_blocked_fair(struct rq *rq, bool *done)
 8203{
 8204	struct cfs_rq *cfs_rq = &rq->cfs;
 8205	bool decayed;
 8206
 8207	decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
 8208	if (cfs_rq_has_blocked(cfs_rq))
 8209		*done = false;
 8210
 8211	return decayed;
 8212}
 8213
 8214static unsigned long task_h_load(struct task_struct *p)
 8215{
 8216	return p->se.avg.load_avg;
 8217}
 8218#endif
 8219
 8220static void update_blocked_averages(int cpu)
 8221{
 8222	bool decayed = false, done = true;
 8223	struct rq *rq = cpu_rq(cpu);
 8224	struct rq_flags rf;
 8225
 8226	rq_lock_irqsave(rq, &rf);
 8227	update_blocked_load_tick(rq);
 8228	update_rq_clock(rq);
 8229
 8230	decayed |= __update_blocked_others(rq, &done);
 8231	decayed |= __update_blocked_fair(rq, &done);
 8232
 8233	update_blocked_load_status(rq, !done);
 8234	if (decayed)
 8235		cpufreq_update_util(rq, 0);
 8236	rq_unlock_irqrestore(rq, &rf);
 8237}
 8238
 8239/********** Helpers for find_busiest_group ************************/
 8240
 8241/*
 8242 * sg_lb_stats - stats of a sched_group required for load_balancing
 8243 */
 8244struct sg_lb_stats {
 8245	unsigned long avg_load; /*Avg load across the CPUs of the group */
 8246	unsigned long group_load; /* Total load over the CPUs of the group */
 8247	unsigned long group_capacity;
 8248	unsigned long group_util; /* Total utilization over the CPUs of the group */
 8249	unsigned long group_runnable; /* Total runnable time over the CPUs of the group */
 8250	unsigned int sum_nr_running; /* Nr of tasks running in the group */
 8251	unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
 8252	unsigned int idle_cpus;
 8253	unsigned int group_weight;
 8254	enum group_type group_type;
 8255	unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
 8256	unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
 8257#ifdef CONFIG_NUMA_BALANCING
 8258	unsigned int nr_numa_running;
 8259	unsigned int nr_preferred_running;
 8260#endif
 8261};
 8262
 8263/*
 8264 * sd_lb_stats - Structure to store the statistics of a sched_domain
 8265 *		 during load balancing.
 8266 */
 8267struct sd_lb_stats {
 8268	struct sched_group *busiest;	/* Busiest group in this sd */
 8269	struct sched_group *local;	/* Local group in this sd */
 8270	unsigned long total_load;	/* Total load of all groups in sd */
 8271	unsigned long total_capacity;	/* Total capacity of all groups in sd */
 8272	unsigned long avg_load;	/* Average load across all groups in sd */
 8273	unsigned int prefer_sibling; /* tasks should go to sibling first */
 8274
 8275	struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
 8276	struct sg_lb_stats local_stat;	/* Statistics of the local group */
 8277};
 8278
 8279static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
 8280{
 8281	/*
 8282	 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
 8283	 * local_stat because update_sg_lb_stats() does a full clear/assignment.
 8284	 * We must however set busiest_stat::group_type and
 8285	 * busiest_stat::idle_cpus to the worst busiest group because
 8286	 * update_sd_pick_busiest() reads these before assignment.
 8287	 */
 8288	*sds = (struct sd_lb_stats){
 8289		.busiest = NULL,
 8290		.local = NULL,
 8291		.total_load = 0UL,
 8292		.total_capacity = 0UL,
 8293		.busiest_stat = {
 8294			.idle_cpus = UINT_MAX,
 8295			.group_type = group_has_spare,
 8296		},
 8297	};
 8298}
 8299
 8300static unsigned long scale_rt_capacity(int cpu)
 8301{
 8302	struct rq *rq = cpu_rq(cpu);
 8303	unsigned long max = arch_scale_cpu_capacity(cpu);
 8304	unsigned long used, free;
 8305	unsigned long irq;
 8306
 8307	irq = cpu_util_irq(rq);
 8308
 8309	if (unlikely(irq >= max))
 8310		return 1;
 8311
 8312	/*
 8313	 * avg_rt.util_avg and avg_dl.util_avg track binary signals
 8314	 * (running and not running) with weights 0 and 1024 respectively.
 8315	 * avg_thermal.load_avg tracks thermal pressure and the weighted
 8316	 * average uses the actual delta max capacity(load).
 8317	 */
 8318	used = READ_ONCE(rq->avg_rt.util_avg);
 8319	used += READ_ONCE(rq->avg_dl.util_avg);
 8320	used += thermal_load_avg(rq);
 8321
 8322	if (unlikely(used >= max))
 8323		return 1;
 8324
 8325	free = max - used;
 8326
 8327	return scale_irq_capacity(free, irq, max);
 8328}
 8329
 8330static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 8331{
 8332	unsigned long capacity = scale_rt_capacity(cpu);
 8333	struct sched_group *sdg = sd->groups;
 8334
 8335	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
 8336
 8337	if (!capacity)
 8338		capacity = 1;
 8339
 8340	cpu_rq(cpu)->cpu_capacity = capacity;
 8341	trace_sched_cpu_capacity_tp(cpu_rq(cpu));
 8342
 8343	sdg->sgc->capacity = capacity;
 8344	sdg->sgc->min_capacity = capacity;
 8345	sdg->sgc->max_capacity = capacity;
 8346}
 8347
 8348void update_group_capacity(struct sched_domain *sd, int cpu)
 8349{
 8350	struct sched_domain *child = sd->child;
 8351	struct sched_group *group, *sdg = sd->groups;
 8352	unsigned long capacity, min_capacity, max_capacity;
 8353	unsigned long interval;
 8354
 8355	interval = msecs_to_jiffies(sd->balance_interval);
 8356	interval = clamp(interval, 1UL, max_load_balance_interval);
 8357	sdg->sgc->next_update = jiffies + interval;
 8358
 8359	if (!child) {
 8360		update_cpu_capacity(sd, cpu);
 8361		return;
 8362	}
 8363
 8364	capacity = 0;
 8365	min_capacity = ULONG_MAX;
 8366	max_capacity = 0;
 8367
 8368	if (child->flags & SD_OVERLAP) {
 8369		/*
 8370		 * SD_OVERLAP domains cannot assume that child groups
 8371		 * span the current group.
 8372		 */
 8373
 8374		for_each_cpu(cpu, sched_group_span(sdg)) {
 8375			unsigned long cpu_cap = capacity_of(cpu);
 8376
 8377			capacity += cpu_cap;
 8378			min_capacity = min(cpu_cap, min_capacity);
 8379			max_capacity = max(cpu_cap, max_capacity);
 8380		}
 8381	} else  {
 8382		/*
 8383		 * !SD_OVERLAP domains can assume that child groups
 8384		 * span the current group.
 8385		 */
 8386
 8387		group = child->groups;
 8388		do {
 8389			struct sched_group_capacity *sgc = group->sgc;
 8390
 8391			capacity += sgc->capacity;
 8392			min_capacity = min(sgc->min_capacity, min_capacity);
 8393			max_capacity = max(sgc->max_capacity, max_capacity);
 8394			group = group->next;
 8395		} while (group != child->groups);
 8396	}
 8397
 8398	sdg->sgc->capacity = capacity;
 8399	sdg->sgc->min_capacity = min_capacity;
 8400	sdg->sgc->max_capacity = max_capacity;
 8401}
 8402
 8403/*
 8404 * Check whether the capacity of the rq has been noticeably reduced by side
 8405 * activity. The imbalance_pct is used for the threshold.
 8406 * Return true is the capacity is reduced
 8407 */
 8408static inline int
 8409check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
 8410{
 8411	return ((rq->cpu_capacity * sd->imbalance_pct) <
 8412				(rq->cpu_capacity_orig * 100));
 8413}
 8414
 8415/*
 8416 * Check whether a rq has a misfit task and if it looks like we can actually
 8417 * help that task: we can migrate the task to a CPU of higher capacity, or
 8418 * the task's current CPU is heavily pressured.
 8419 */
 8420static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
 8421{
 8422	return rq->misfit_task_load &&
 8423		(rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
 8424		 check_cpu_capacity(rq, sd));
 8425}
 8426
 8427/*
 8428 * Group imbalance indicates (and tries to solve) the problem where balancing
 8429 * groups is inadequate due to ->cpus_ptr constraints.
 8430 *
 8431 * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
 8432 * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
 8433 * Something like:
 8434 *
 8435 *	{ 0 1 2 3 } { 4 5 6 7 }
 8436 *	        *     * * *
 8437 *
 8438 * If we were to balance group-wise we'd place two tasks in the first group and
 8439 * two tasks in the second group. Clearly this is undesired as it will overload
 8440 * cpu 3 and leave one of the CPUs in the second group unused.
 8441 *
 8442 * The current solution to this issue is detecting the skew in the first group
 8443 * by noticing the lower domain failed to reach balance and had difficulty
 8444 * moving tasks due to affinity constraints.
 8445 *
 8446 * When this is so detected; this group becomes a candidate for busiest; see
 8447 * update_sd_pick_busiest(). And calculate_imbalance() and
 8448 * find_busiest_group() avoid some of the usual balance conditions to allow it
 8449 * to create an effective group imbalance.
 8450 *
 8451 * This is a somewhat tricky proposition since the next run might not find the
 8452 * group imbalance and decide the groups need to be balanced again. A most
 8453 * subtle and fragile situation.
 8454 */
 8455
 8456static inline int sg_imbalanced(struct sched_group *group)
 8457{
 8458	return group->sgc->imbalance;
 8459}
 8460
 8461/*
 8462 * group_has_capacity returns true if the group has spare capacity that could
 8463 * be used by some tasks.
 8464 * We consider that a group has spare capacity if the  * number of task is
 8465 * smaller than the number of CPUs or if the utilization is lower than the
 8466 * available capacity for CFS tasks.
 8467 * For the latter, we use a threshold to stabilize the state, to take into
 8468 * account the variance of the tasks' load and to return true if the available
 8469 * capacity in meaningful for the load balancer.
 8470 * As an example, an available capacity of 1% can appear but it doesn't make
 8471 * any benefit for the load balance.
 8472 */
 8473static inline bool
 8474group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
 8475{
 8476	if (sgs->sum_nr_running < sgs->group_weight)
 8477		return true;
 8478
 8479	if ((sgs->group_capacity * imbalance_pct) <
 8480			(sgs->group_runnable * 100))
 8481		return false;
 8482
 8483	if ((sgs->group_capacity * 100) >
 8484			(sgs->group_util * imbalance_pct))
 8485		return true;
 8486
 8487	return false;
 8488}
 8489
 8490/*
 8491 *  group_is_overloaded returns true if the group has more tasks than it can
 8492 *  handle.
 8493 *  group_is_overloaded is not equals to !group_has_capacity because a group
 8494 *  with the exact right number of tasks, has no more spare capacity but is not
 8495 *  overloaded so both group_has_capacity and group_is_overloaded return
 8496 *  false.
 8497 */
 8498static inline bool
 8499group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
 8500{
 8501	if (sgs->sum_nr_running <= sgs->group_weight)
 8502		return false;
 8503
 8504	if ((sgs->group_capacity * 100) <
 8505			(sgs->group_util * imbalance_pct))
 8506		return true;
 8507
 8508	if ((sgs->group_capacity * imbalance_pct) <
 8509			(sgs->group_runnable * 100))
 8510		return true;
 8511
 8512	return false;
 8513}
 8514
 8515static inline enum
 8516group_type group_classify(unsigned int imbalance_pct,
 8517			  struct sched_group *group,
 8518			  struct sg_lb_stats *sgs)
 8519{
 8520	if (group_is_overloaded(imbalance_pct, sgs))
 8521		return group_overloaded;
 8522
 8523	if (sg_imbalanced(group))
 8524		return group_imbalanced;
 8525
 8526	if (sgs->group_asym_packing)
 8527		return group_asym_packing;
 8528
 8529	if (sgs->group_misfit_task_load)
 8530		return group_misfit_task;
 8531
 8532	if (!group_has_capacity(imbalance_pct, sgs))
 8533		return group_fully_busy;
 8534
 8535	return group_has_spare;
 8536}
 8537
 8538/**
 8539 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
 8540 * @env: The load balancing environment.
 8541 * @group: sched_group whose statistics are to be updated.
 8542 * @sgs: variable to hold the statistics for this group.
 8543 * @sg_status: Holds flag indicating the status of the sched_group
 8544 */
 8545static inline void update_sg_lb_stats(struct lb_env *env,
 8546				      struct sched_group *group,
 8547				      struct sg_lb_stats *sgs,
 8548				      int *sg_status)
 8549{
 8550	int i, nr_running, local_group;
 8551
 8552	memset(sgs, 0, sizeof(*sgs));
 8553
 8554	local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
 8555
 8556	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 8557		struct rq *rq = cpu_rq(i);
 8558
 8559		sgs->group_load += cpu_load(rq);
 8560		sgs->group_util += cpu_util(i);
 8561		sgs->group_runnable += cpu_runnable(rq);
 8562		sgs->sum_h_nr_running += rq->cfs.h_nr_running;
 8563
 8564		nr_running = rq->nr_running;
 8565		sgs->sum_nr_running += nr_running;
 8566
 8567		if (nr_running > 1)
 8568			*sg_status |= SG_OVERLOAD;
 8569
 8570		if (cpu_overutilized(i))
 8571			*sg_status |= SG_OVERUTILIZED;
 8572
 8573#ifdef CONFIG_NUMA_BALANCING
 8574		sgs->nr_numa_running += rq->nr_numa_running;
 8575		sgs->nr_preferred_running += rq->nr_preferred_running;
 8576#endif
 8577		/*
 8578		 * No need to call idle_cpu() if nr_running is not 0
 8579		 */
 8580		if (!nr_running && idle_cpu(i)) {
 8581			sgs->idle_cpus++;
 8582			/* Idle cpu can't have misfit task */
 8583			continue;
 8584		}
 8585
 8586		if (local_group)
 8587			continue;
 8588
 8589		/* Check for a misfit task on the cpu */
 8590		if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
 8591		    sgs->group_misfit_task_load < rq->misfit_task_load) {
 8592			sgs->group_misfit_task_load = rq->misfit_task_load;
 8593			*sg_status |= SG_OVERLOAD;
 8594		}
 8595	}
 8596
 8597	/* Check if dst CPU is idle and preferred to this group */
 8598	if (env->sd->flags & SD_ASYM_PACKING &&
 8599	    env->idle != CPU_NOT_IDLE &&
 8600	    sgs->sum_h_nr_running &&
 8601	    sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) {
 8602		sgs->group_asym_packing = 1;
 8603	}
 8604
 8605	sgs->group_capacity = group->sgc->capacity;
 8606
 8607	sgs->group_weight = group->group_weight;
 8608
 8609	sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
 8610
 8611	/* Computing avg_load makes sense only when group is overloaded */
 8612	if (sgs->group_type == group_overloaded)
 8613		sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
 8614				sgs->group_capacity;
 8615}
 8616
 8617/**
 8618 * update_sd_pick_busiest - return 1 on busiest group
 8619 * @env: The load balancing environment.
 8620 * @sds: sched_domain statistics
 8621 * @sg: sched_group candidate to be checked for being the busiest
 8622 * @sgs: sched_group statistics
 8623 *
 8624 * Determine if @sg is a busier group than the previously selected
 8625 * busiest group.
 8626 *
 8627 * Return: %true if @sg is a busier group than the previously selected
 8628 * busiest group. %false otherwise.
 8629 */
 8630static bool update_sd_pick_busiest(struct lb_env *env,
 8631				   struct sd_lb_stats *sds,
 8632				   struct sched_group *sg,
 8633				   struct sg_lb_stats *sgs)
 8634{
 8635	struct sg_lb_stats *busiest = &sds->busiest_stat;
 8636
 8637	/* Make sure that there is at least one task to pull */
 8638	if (!sgs->sum_h_nr_running)
 8639		return false;
 8640
 8641	/*
 8642	 * Don't try to pull misfit tasks we can't help.
 8643	 * We can use max_capacity here as reduction in capacity on some
 8644	 * CPUs in the group should either be possible to resolve
 8645	 * internally or be covered by avg_load imbalance (eventually).
 8646	 */
 8647	if (sgs->group_type == group_misfit_task &&
 8648	    (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
 8649	     sds->local_stat.group_type != group_has_spare))
 8650		return false;
 8651
 8652	if (sgs->group_type > busiest->group_type)
 8653		return true;
 8654
 8655	if (sgs->group_type < busiest->group_type)
 8656		return false;
 8657
 8658	/*
 8659	 * The candidate and the current busiest group are the same type of
 8660	 * group. Let check which one is the busiest according to the type.
 8661	 */
 8662
 8663	switch (sgs->group_type) {
 8664	case group_overloaded:
 8665		/* Select the overloaded group with highest avg_load. */
 8666		if (sgs->avg_load <= busiest->avg_load)
 8667			return false;
 8668		break;
 8669
 8670	case group_imbalanced:
 8671		/*
 8672		 * Select the 1st imbalanced group as we don't have any way to
 8673		 * choose one more than another.
 8674		 */
 8675		return false;
 8676
 8677	case group_asym_packing:
 8678		/* Prefer to move from lowest priority CPU's work */
 8679		if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
 8680			return false;
 8681		break;
 8682
 8683	case group_misfit_task:
 8684		/*
 8685		 * If we have more than one misfit sg go with the biggest
 8686		 * misfit.
 8687		 */
 8688		if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
 8689			return false;
 8690		break;
 8691
 8692	case group_fully_busy:
 8693		/*
 8694		 * Select the fully busy group with highest avg_load. In
 8695		 * theory, there is no need to pull task from such kind of
 8696		 * group because tasks have all compute capacity that they need
 8697		 * but we can still improve the overall throughput by reducing
 8698		 * contention when accessing shared HW resources.
 8699		 *
 8700		 * XXX for now avg_load is not computed and always 0 so we
 8701		 * select the 1st one.
 8702		 */
 8703		if (sgs->avg_load <= busiest->avg_load)
 8704			return false;
 8705		break;
 8706
 8707	case group_has_spare:
 8708		/*
 8709		 * Select not overloaded group with lowest number of idle cpus
 8710		 * and highest number of running tasks. We could also compare
 8711		 * the spare capacity which is more stable but it can end up
 8712		 * that the group has less spare capacity but finally more idle
 8713		 * CPUs which means less opportunity to pull tasks.
 8714		 */
 8715		if (sgs->idle_cpus > busiest->idle_cpus)
 8716			return false;
 8717		else if ((sgs->idle_cpus == busiest->idle_cpus) &&
 8718			 (sgs->sum_nr_running <= busiest->sum_nr_running))
 8719			return false;
 8720
 8721		break;
 8722	}
 8723
 8724	/*
 8725	 * Candidate sg has no more than one task per CPU and has higher
 8726	 * per-CPU capacity. Migrating tasks to less capable CPUs may harm
 8727	 * throughput. Maximize throughput, power/energy consequences are not
 8728	 * considered.
 8729	 */
 8730	if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
 8731	    (sgs->group_type <= group_fully_busy) &&
 8732	    (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu))))
 8733		return false;
 8734
 8735	return true;
 8736}
 8737
 8738#ifdef CONFIG_NUMA_BALANCING
 8739static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
 8740{
 8741	if (sgs->sum_h_nr_running > sgs->nr_numa_running)
 8742		return regular;
 8743	if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
 8744		return remote;
 8745	return all;
 8746}
 8747
 8748static inline enum fbq_type fbq_classify_rq(struct rq *rq)
 8749{
 8750	if (rq->nr_running > rq->nr_numa_running)
 8751		return regular;
 8752	if (rq->nr_running > rq->nr_preferred_running)
 8753		return remote;
 8754	return all;
 8755}
 8756#else
 8757static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
 8758{
 8759	return all;
 8760}
 8761
 8762static inline enum fbq_type fbq_classify_rq(struct rq *rq)
 8763{
 8764	return regular;
 8765}
 8766#endif /* CONFIG_NUMA_BALANCING */
 8767
 8768
 8769struct sg_lb_stats;
 8770
 8771/*
 8772 * task_running_on_cpu - return 1 if @p is running on @cpu.
 8773 */
 8774
 8775static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
 8776{
 8777	/* Task has no contribution or is new */
 8778	if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
 8779		return 0;
 8780
 8781	if (task_on_rq_queued(p))
 8782		return 1;
 8783
 8784	return 0;
 8785}
 8786
 8787/**
 8788 * idle_cpu_without - would a given CPU be idle without p ?
 8789 * @cpu: the processor on which idleness is tested.
 8790 * @p: task which should be ignored.
 8791 *
 8792 * Return: 1 if the CPU would be idle. 0 otherwise.
 8793 */
 8794static int idle_cpu_without(int cpu, struct task_struct *p)
 8795{
 8796	struct rq *rq = cpu_rq(cpu);
 8797
 8798	if (rq->curr != rq->idle && rq->curr != p)
 8799		return 0;
 8800
 8801	/*
 8802	 * rq->nr_running can't be used but an updated version without the
 8803	 * impact of p on cpu must be used instead. The updated nr_running
 8804	 * be computed and tested before calling idle_cpu_without().
 8805	 */
 8806
 8807#ifdef CONFIG_SMP
 8808	if (rq->ttwu_pending)
 8809		return 0;
 8810#endif
 8811
 8812	return 1;
 8813}
 8814
 8815/*
 8816 * update_sg_wakeup_stats - Update sched_group's statistics for wakeup.
 8817 * @sd: The sched_domain level to look for idlest group.
 8818 * @group: sched_group whose statistics are to be updated.
 8819 * @sgs: variable to hold the statistics for this group.
 8820 * @p: The task for which we look for the idlest group/CPU.
 8821 */
 8822static inline void update_sg_wakeup_stats(struct sched_domain *sd,
 8823					  struct sched_group *group,
 8824					  struct sg_lb_stats *sgs,
 8825					  struct task_struct *p)
 8826{
 8827	int i, nr_running;
 8828
 8829	memset(sgs, 0, sizeof(*sgs));
 8830
 8831	for_each_cpu(i, sched_group_span(group)) {
 8832		struct rq *rq = cpu_rq(i);
 8833		unsigned int local;
 8834
 8835		sgs->group_load += cpu_load_without(rq, p);
 8836		sgs->group_util += cpu_util_without(i, p);
 8837		sgs->group_runnable += cpu_runnable_without(rq, p);
 8838		local = task_running_on_cpu(i, p);
 8839		sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
 8840
 8841		nr_running = rq->nr_running - local;
 8842		sgs->sum_nr_running += nr_running;
 8843
 8844		/*
 8845		 * No need to call idle_cpu_without() if nr_running is not 0
 8846		 */
 8847		if (!nr_running && idle_cpu_without(i, p))
 8848			sgs->idle_cpus++;
 8849
 8850	}
 8851
 8852	/* Check if task fits in the group */
 8853	if (sd->flags & SD_ASYM_CPUCAPACITY &&
 8854	    !task_fits_capacity(p, group->sgc->max_capacity)) {
 8855		sgs->group_misfit_task_load = 1;
 8856	}
 8857
 8858	sgs->group_capacity = group->sgc->capacity;
 8859
 8860	sgs->group_weight = group->group_weight;
 8861
 8862	sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
 8863
 8864	/*
 8865	 * Computing avg_load makes sense only when group is fully busy or
 8866	 * overloaded
 8867	 */
 8868	if (sgs->group_type == group_fully_busy ||
 8869		sgs->group_type == group_overloaded)
 8870		sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
 8871				sgs->group_capacity;
 8872}
 8873
 8874static bool update_pick_idlest(struct sched_group *idlest,
 8875			       struct sg_lb_stats *idlest_sgs,
 8876			       struct sched_group *group,
 8877			       struct sg_lb_stats *sgs)
 8878{
 8879	if (sgs->group_type < idlest_sgs->group_type)
 8880		return true;
 8881
 8882	if (sgs->group_type > idlest_sgs->group_type)
 8883		return false;
 8884
 8885	/*
 8886	 * The candidate and the current idlest group are the same type of
 8887	 * group. Let check which one is the idlest according to the type.
 8888	 */
 8889
 8890	switch (sgs->group_type) {
 8891	case group_overloaded:
 8892	case group_fully_busy:
 8893		/* Select the group with lowest avg_load. */
 8894		if (idlest_sgs->avg_load <= sgs->avg_load)
 8895			return false;
 8896		break;
 8897
 8898	case group_imbalanced:
 8899	case group_asym_packing:
 8900		/* Those types are not used in the slow wakeup path */
 8901		return false;
 8902
 8903	case group_misfit_task:
 8904		/* Select group with the highest max capacity */
 8905		if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
 8906			return false;
 8907		break;
 8908
 8909	case group_has_spare:
 8910		/* Select group with most idle CPUs */
 8911		if (idlest_sgs->idle_cpus > sgs->idle_cpus)
 8912			return false;
 8913
 8914		/* Select group with lowest group_util */
 8915		if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
 8916			idlest_sgs->group_util <= sgs->group_util)
 8917			return false;
 8918
 8919		break;
 8920	}
 8921
 8922	return true;
 8923}
 8924
 8925/*
 8926 * Allow a NUMA imbalance if busy CPUs is less than 25% of the domain.
 8927 * This is an approximation as the number of running tasks may not be
 8928 * related to the number of busy CPUs due to sched_setaffinity.
 8929 */
 8930static inline bool allow_numa_imbalance(int dst_running, int dst_weight)
 8931{
 8932	return (dst_running < (dst_weight >> 2));
 8933}
 8934
 8935/*
 8936 * find_idlest_group() finds and returns the least busy CPU group within the
 8937 * domain.
 8938 *
 8939 * Assumes p is allowed on at least one CPU in sd.
 8940 */
 8941static struct sched_group *
 8942find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 8943{
 8944	struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
 8945	struct sg_lb_stats local_sgs, tmp_sgs;
 8946	struct sg_lb_stats *sgs;
 8947	unsigned long imbalance;
 8948	struct sg_lb_stats idlest_sgs = {
 8949			.avg_load = UINT_MAX,
 8950			.group_type = group_overloaded,
 8951	};
 8952
 8953	do {
 8954		int local_group;
 8955
 8956		/* Skip over this group if it has no CPUs allowed */
 8957		if (!cpumask_intersects(sched_group_span(group),
 8958					p->cpus_ptr))
 8959			continue;
 8960
 8961		/* Skip over this group if no cookie matched */
 8962		if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group))
 8963			continue;
 8964
 8965		local_group = cpumask_test_cpu(this_cpu,
 8966					       sched_group_span(group));
 8967
 8968		if (local_group) {
 8969			sgs = &local_sgs;
 8970			local = group;
 8971		} else {
 8972			sgs = &tmp_sgs;
 8973		}
 8974
 8975		update_sg_wakeup_stats(sd, group, sgs, p);
 8976
 8977		if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
 8978			idlest = group;
 8979			idlest_sgs = *sgs;
 8980		}
 8981
 8982	} while (group = group->next, group != sd->groups);
 8983
 8984
 8985	/* There is no idlest group to push tasks to */
 8986	if (!idlest)
 8987		return NULL;
 8988
 8989	/* The local group has been skipped because of CPU affinity */
 8990	if (!local)
 8991		return idlest;
 8992
 8993	/*
 8994	 * If the local group is idler than the selected idlest group
 8995	 * don't try and push the task.
 8996	 */
 8997	if (local_sgs.group_type < idlest_sgs.group_type)
 8998		return NULL;
 8999
 9000	/*
 9001	 * If the local group is busier than the selected idlest group
 9002	 * try and push the task.
 9003	 */
 9004	if (local_sgs.group_type > idlest_sgs.group_type)
 9005		return idlest;
 9006
 9007	switch (local_sgs.group_type) {
 9008	case group_overloaded:
 9009	case group_fully_busy:
 9010
 9011		/* Calculate allowed imbalance based on load */
 9012		imbalance = scale_load_down(NICE_0_LOAD) *
 9013				(sd->imbalance_pct-100) / 100;
 9014
 9015		/*
 9016		 * When comparing groups across NUMA domains, it's possible for
 9017		 * the local domain to be very lightly loaded relative to the
 9018		 * remote domains but "imbalance" skews the comparison making
 9019		 * remote CPUs look much more favourable. When considering
 9020		 * cross-domain, add imbalance to the load on the remote node
 9021		 * and consider staying local.
 9022		 */
 9023
 9024		if ((sd->flags & SD_NUMA) &&
 9025		    ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load))
 9026			return NULL;
 9027
 9028		/*
 9029		 * If the local group is less loaded than the selected
 9030		 * idlest group don't try and push any tasks.
 9031		 */
 9032		if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance))
 9033			return NULL;
 9034
 9035		if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load)
 9036			return NULL;
 9037		break;
 9038
 9039	case group_imbalanced:
 9040	case group_asym_packing:
 9041		/* Those type are not used in the slow wakeup path */
 9042		return NULL;
 9043
 9044	case group_misfit_task:
 9045		/* Select group with the highest max capacity */
 9046		if (local->sgc->max_capacity >= idlest->sgc->max_capacity)
 9047			return NULL;
 9048		break;
 9049
 9050	case group_has_spare:
 9051		if (sd->flags & SD_NUMA) {
 9052#ifdef CONFIG_NUMA_BALANCING
 9053			int idlest_cpu;
 9054			/*
 9055			 * If there is spare capacity at NUMA, try to select
 9056			 * the preferred node
 9057			 */
 9058			if (cpu_to_node(this_cpu) == p->numa_preferred_nid)
 9059				return NULL;
 9060
 9061			idlest_cpu = cpumask_first(sched_group_span(idlest));
 9062			if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid)
 9063				return idlest;
 9064#endif
 9065			/*
 9066			 * Otherwise, keep the task on this node to stay close
 9067			 * its wakeup source and improve locality. If there is
 9068			 * a real need of migration, periodic load balance will
 9069			 * take care of it.
 9070			 */
 9071			if (allow_numa_imbalance(local_sgs.sum_nr_running, sd->span_weight))
 9072				return NULL;
 9073		}
 9074
 9075		/*
 9076		 * Select group with highest number of idle CPUs. We could also
 9077		 * compare the utilization which is more stable but it can end
 9078		 * up that the group has less spare capacity but finally more
 9079		 * idle CPUs which means more opportunity to run task.
 9080		 */
 9081		if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus)
 9082			return NULL;
 9083		break;
 9084	}
 9085
 9086	return idlest;
 9087}
 9088
 9089/**
 9090 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
 9091 * @env: The load balancing environment.
 9092 * @sds: variable to hold the statistics for this sched_domain.
 9093 */
 9094
 9095static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 9096{
 9097	struct sched_domain *child = env->sd->child;
 9098	struct sched_group *sg = env->sd->groups;
 9099	struct sg_lb_stats *local = &sds->local_stat;
 9100	struct sg_lb_stats tmp_sgs;
 9101	int sg_status = 0;
 9102
 9103	do {
 9104		struct sg_lb_stats *sgs = &tmp_sgs;
 9105		int local_group;
 9106
 9107		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
 9108		if (local_group) {
 9109			sds->local = sg;
 9110			sgs = local;
 9111
 9112			if (env->idle != CPU_NEWLY_IDLE ||
 9113			    time_after_eq(jiffies, sg->sgc->next_update))
 9114				update_group_capacity(env->sd, env->dst_cpu);
 9115		}
 9116
 9117		update_sg_lb_stats(env, sg, sgs, &sg_status);
 9118
 9119		if (local_group)
 9120			goto next_group;
 9121
 9122
 9123		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
 9124			sds->busiest = sg;
 9125			sds->busiest_stat = *sgs;
 9126		}
 9127
 9128next_group:
 9129		/* Now, start updating sd_lb_stats */
 9130		sds->total_load += sgs->group_load;
 9131		sds->total_capacity += sgs->group_capacity;
 9132
 9133		sg = sg->next;
 9134	} while (sg != env->sd->groups);
 9135
 9136	/* Tag domain that child domain prefers tasks go to siblings first */
 9137	sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
 9138
 9139
 9140	if (env->sd->flags & SD_NUMA)
 9141		env->fbq_type = fbq_classify_group(&sds->busiest_stat);
 9142
 9143	if (!env->sd->parent) {
 9144		struct root_domain *rd = env->dst_rq->rd;
 9145
 9146		/* update overload indicator if we are at root domain */
 9147		WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
 9148
 9149		/* Update over-utilization (tipping point, U >= 0) indicator */
 9150		WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
 9151		trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
 9152	} else if (sg_status & SG_OVERUTILIZED) {
 9153		struct root_domain *rd = env->dst_rq->rd;
 9154
 9155		WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
 9156		trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
 9157	}
 9158}
 9159
 9160#define NUMA_IMBALANCE_MIN 2
 9161
 9162static inline long adjust_numa_imbalance(int imbalance,
 9163				int dst_running, int dst_weight)
 9164{
 9165	if (!allow_numa_imbalance(dst_running, dst_weight))
 9166		return imbalance;
 9167
 9168	/*
 9169	 * Allow a small imbalance based on a simple pair of communicating
 9170	 * tasks that remain local when the destination is lightly loaded.
 9171	 */
 9172	if (imbalance <= NUMA_IMBALANCE_MIN)
 9173		return 0;
 9174
 9175	return imbalance;
 9176}
 9177
 9178/**
 9179 * calculate_imbalance - Calculate the amount of imbalance present within the
 9180 *			 groups of a given sched_domain during load balance.
 9181 * @env: load balance environment
 9182 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
 9183 */
 9184static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 9185{
 9186	struct sg_lb_stats *local, *busiest;
 9187
 9188	local = &sds->local_stat;
 9189	busiest = &sds->busiest_stat;
 9190
 9191	if (busiest->group_type == group_misfit_task) {
 9192		/* Set imbalance to allow misfit tasks to be balanced. */
 9193		env->migration_type = migrate_misfit;
 9194		env->imbalance = 1;
 9195		return;
 9196	}
 9197
 9198	if (busiest->group_type == group_asym_packing) {
 9199		/*
 9200		 * In case of asym capacity, we will try to migrate all load to
 9201		 * the preferred CPU.
 9202		 */
 9203		env->migration_type = migrate_task;
 9204		env->imbalance = busiest->sum_h_nr_running;
 9205		return;
 9206	}
 9207
 9208	if (busiest->group_type == group_imbalanced) {
 9209		/*
 9210		 * In the group_imb case we cannot rely on group-wide averages
 9211		 * to ensure CPU-load equilibrium, try to move any task to fix
 9212		 * the imbalance. The next load balance will take care of
 9213		 * balancing back the system.
 9214		 */
 9215		env->migration_type = migrate_task;
 9216		env->imbalance = 1;
 9217		return;
 9218	}
 9219
 9220	/*
 9221	 * Try to use spare capacity of local group without overloading it or
 9222	 * emptying busiest.
 9223	 */
 9224	if (local->group_type == group_has_spare) {
 9225		if ((busiest->group_type > group_fully_busy) &&
 9226		    !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
 9227			/*
 9228			 * If busiest is overloaded, try to fill spare
 9229			 * capacity. This might end up creating spare capacity
 9230			 * in busiest or busiest still being overloaded but
 9231			 * there is no simple way to directly compute the
 9232			 * amount of load to migrate in order to balance the
 9233			 * system.
 9234			 */
 9235			env->migration_type = migrate_util;
 9236			env->imbalance = max(local->group_capacity, local->group_util) -
 9237					 local->group_util;
 9238
 9239			/*
 9240			 * In some cases, the group's utilization is max or even
 9241			 * higher than capacity because of migrations but the
 9242			 * local CPU is (newly) idle. There is at least one
 9243			 * waiting task in this overloaded busiest group. Let's
 9244			 * try to pull it.
 9245			 */
 9246			if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
 9247				env->migration_type = migrate_task;
 9248				env->imbalance = 1;
 9249			}
 9250
 9251			return;
 9252		}
 9253
 9254		if (busiest->group_weight == 1 || sds->prefer_sibling) {
 9255			unsigned int nr_diff = busiest->sum_nr_running;
 9256			/*
 9257			 * When prefer sibling, evenly spread running tasks on
 9258			 * groups.
 9259			 */
 9260			env->migration_type = migrate_task;
 9261			lsub_positive(&nr_diff, local->sum_nr_running);
 9262			env->imbalance = nr_diff >> 1;
 9263		} else {
 9264
 9265			/*
 9266			 * If there is no overload, we just want to even the number of
 9267			 * idle cpus.
 9268			 */
 9269			env->migration_type = migrate_task;
 9270			env->imbalance = max_t(long, 0, (local->idle_cpus -
 9271						 busiest->idle_cpus) >> 1);
 9272		}
 9273
 9274		/* Consider allowing a small imbalance between NUMA groups */
 9275		if (env->sd->flags & SD_NUMA) {
 9276			env->imbalance = adjust_numa_imbalance(env->imbalance,
 9277				busiest->sum_nr_running, busiest->group_weight);
 9278		}
 9279
 9280		return;
 9281	}
 9282
 9283	/*
 9284	 * Local is fully busy but has to take more load to relieve the
 9285	 * busiest group
 9286	 */
 9287	if (local->group_type < group_overloaded) {
 9288		/*
 9289		 * Local will become overloaded so the avg_load metrics are
 9290		 * finally needed.
 9291		 */
 9292
 9293		local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
 9294				  local->group_capacity;
 9295
 9296		sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
 9297				sds->total_capacity;
 9298		/*
 9299		 * If the local group is more loaded than the selected
 9300		 * busiest group don't try to pull any tasks.
 9301		 */
 9302		if (local->avg_load >= busiest->avg_load) {
 9303			env->imbalance = 0;
 9304			return;
 9305		}
 9306	}
 9307
 9308	/*
 9309	 * Both group are or will become overloaded and we're trying to get all
 9310	 * the CPUs to the average_load, so we don't want to push ourselves
 9311	 * above the average load, nor do we wish to reduce the max loaded CPU
 9312	 * below the average load. At the same time, we also don't want to
 9313	 * reduce the group load below the group capacity. Thus we look for
 9314	 * the minimum possible imbalance.
 9315	 */
 9316	env->migration_type = migrate_load;
 9317	env->imbalance = min(
 9318		(busiest->avg_load - sds->avg_load) * busiest->group_capacity,
 9319		(sds->avg_load - local->avg_load) * local->group_capacity
 9320	) / SCHED_CAPACITY_SCALE;
 9321}
 9322
 9323/******* find_busiest_group() helpers end here *********************/
 9324
 9325/*
 9326 * Decision matrix according to the local and busiest group type:
 9327 *
 9328 * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
 9329 * has_spare        nr_idle   balanced   N/A    N/A  balanced   balanced
 9330 * fully_busy       nr_idle   nr_idle    N/A    N/A  balanced   balanced
 9331 * misfit_task      force     N/A        N/A    N/A  force      force
 9332 * asym_packing     force     force      N/A    N/A  force      force
 9333 * imbalanced       force     force      N/A    N/A  force      force
 9334 * overloaded       force     force      N/A    N/A  force      avg_load
 9335 *
 9336 * N/A :      Not Applicable because already filtered while updating
 9337 *            statistics.
 9338 * balanced : The system is balanced for these 2 groups.
 9339 * force :    Calculate the imbalance as load migration is probably needed.
 9340 * avg_load : Only if imbalance is significant enough.
 9341 * nr_idle :  dst_cpu is not busy and the number of idle CPUs is quite
 9342 *            different in groups.
 9343 */
 9344
 9345/**
 9346 * find_busiest_group - Returns the busiest group within the sched_domain
 9347 * if there is an imbalance.
 9348 *
 9349 * Also calculates the amount of runnable load which should be moved
 9350 * to restore balance.
 9351 *
 9352 * @env: The load balancing environment.
 9353 *
 9354 * Return:	- The busiest group if imbalance exists.
 9355 */
 9356static struct sched_group *find_busiest_group(struct lb_env *env)
 9357{
 9358	struct sg_lb_stats *local, *busiest;
 9359	struct sd_lb_stats sds;
 9360
 9361	init_sd_lb_stats(&sds);
 9362
 9363	/*
 9364	 * Compute the various statistics relevant for load balancing at
 9365	 * this level.
 9366	 */
 9367	update_sd_lb_stats(env, &sds);
 9368
 9369	if (sched_energy_enabled()) {
 9370		struct root_domain *rd = env->dst_rq->rd;
 9371
 9372		if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
 9373			goto out_balanced;
 9374	}
 9375
 9376	local = &sds.local_stat;
 9377	busiest = &sds.busiest_stat;
 9378
 9379	/* There is no busy sibling group to pull tasks from */
 9380	if (!sds.busiest)
 9381		goto out_balanced;
 9382
 9383	/* Misfit tasks should be dealt with regardless of the avg load */
 9384	if (busiest->group_type == group_misfit_task)
 9385		goto force_balance;
 9386
 9387	/* ASYM feature bypasses nice load balance check */
 9388	if (busiest->group_type == group_asym_packing)
 9389		goto force_balance;
 9390
 9391	/*
 9392	 * If the busiest group is imbalanced the below checks don't
 9393	 * work because they assume all things are equal, which typically
 9394	 * isn't true due to cpus_ptr constraints and the like.
 9395	 */
 9396	if (busiest->group_type == group_imbalanced)
 9397		goto force_balance;
 9398
 9399	/*
 9400	 * If the local group is busier than the selected busiest group
 9401	 * don't try and pull any tasks.
 9402	 */
 9403	if (local->group_type > busiest->group_type)
 9404		goto out_balanced;
 9405
 9406	/*
 9407	 * When groups are overloaded, use the avg_load to ensure fairness
 9408	 * between tasks.
 9409	 */
 9410	if (local->group_type == group_overloaded) {
 9411		/*
 9412		 * If the local group is more loaded than the selected
 9413		 * busiest group don't try to pull any tasks.
 9414		 */
 9415		if (local->avg_load >= busiest->avg_load)
 9416			goto out_balanced;
 9417
 9418		/* XXX broken for overlapping NUMA groups */
 9419		sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) /
 9420				sds.total_capacity;
 9421
 9422		/*
 9423		 * Don't pull any tasks if this group is already above the
 9424		 * domain average load.
 9425		 */
 9426		if (local->avg_load >= sds.avg_load)
 9427			goto out_balanced;
 9428
 9429		/*
 9430		 * If the busiest group is more loaded, use imbalance_pct to be
 9431		 * conservative.
 9432		 */
 9433		if (100 * busiest->avg_load <=
 9434				env->sd->imbalance_pct * local->avg_load)
 9435			goto out_balanced;
 9436	}
 9437
 9438	/* Try to move all excess tasks to child's sibling domain */
 9439	if (sds.prefer_sibling && local->group_type == group_has_spare &&
 9440	    busiest->sum_nr_running > local->sum_nr_running + 1)
 9441		goto force_balance;
 9442
 9443	if (busiest->group_type != group_overloaded) {
 9444		if (env->idle == CPU_NOT_IDLE)
 9445			/*
 9446			 * If the busiest group is not overloaded (and as a
 9447			 * result the local one too) but this CPU is already
 9448			 * busy, let another idle CPU try to pull task.
 9449			 */
 9450			goto out_balanced;
 9451
 9452		if (busiest->group_weight > 1 &&
 9453		    local->idle_cpus <= (busiest->idle_cpus + 1))
 9454			/*
 9455			 * If the busiest group is not overloaded
 9456			 * and there is no imbalance between this and busiest
 9457			 * group wrt idle CPUs, it is balanced. The imbalance
 9458			 * becomes significant if the diff is greater than 1
 9459			 * otherwise we might end up to just move the imbalance
 9460			 * on another group. Of course this applies only if
 9461			 * there is more than 1 CPU per group.
 9462			 */
 9463			goto out_balanced;
 9464
 9465		if (busiest->sum_h_nr_running == 1)
 9466			/*
 9467			 * busiest doesn't have any tasks waiting to run
 9468			 */
 9469			goto out_balanced;
 9470	}
 9471
 9472force_balance:
 9473	/* Looks like there is an imbalance. Compute it */
 9474	calculate_imbalance(env, &sds);
 9475	return env->imbalance ? sds.busiest : NULL;
 9476
 9477out_balanced:
 9478	env->imbalance = 0;
 9479	return NULL;
 9480}
 9481
 9482/*
 9483 * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
 9484 */
 9485static struct rq *find_busiest_queue(struct lb_env *env,
 9486				     struct sched_group *group)
 9487{
 9488	struct rq *busiest = NULL, *rq;
 9489	unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
 9490	unsigned int busiest_nr = 0;
 9491	int i;
 9492
 9493	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 9494		unsigned long capacity, load, util;
 9495		unsigned int nr_running;
 9496		enum fbq_type rt;
 9497
 9498		rq = cpu_rq(i);
 9499		rt = fbq_classify_rq(rq);
 9500
 9501		/*
 9502		 * We classify groups/runqueues into three groups:
 9503		 *  - regular: there are !numa tasks
 9504		 *  - remote:  there are numa tasks that run on the 'wrong' node
 9505		 *  - all:     there is no distinction
 9506		 *
 9507		 * In order to avoid migrating ideally placed numa tasks,
 9508		 * ignore those when there's better options.
 9509		 *
 9510		 * If we ignore the actual busiest queue to migrate another
 9511		 * task, the next balance pass can still reduce the busiest
 9512		 * queue by moving tasks around inside the node.
 9513		 *
 9514		 * If we cannot move enough load due to this classification
 9515		 * the next pass will adjust the group classification and
 9516		 * allow migration of more tasks.
 9517		 *
 9518		 * Both cases only affect the total convergence complexity.
 9519		 */
 9520		if (rt > env->fbq_type)
 9521			continue;
 9522
 9523		nr_running = rq->cfs.h_nr_running;
 9524		if (!nr_running)
 9525			continue;
 9526
 9527		capacity = capacity_of(i);
 9528
 9529		/*
 9530		 * For ASYM_CPUCAPACITY domains, don't pick a CPU that could
 9531		 * eventually lead to active_balancing high->low capacity.
 9532		 * Higher per-CPU capacity is considered better than balancing
 9533		 * average load.
 9534		 */
 9535		if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
 9536		    !capacity_greater(capacity_of(env->dst_cpu), capacity) &&
 9537		    nr_running == 1)
 9538			continue;
 9539
 9540		switch (env->migration_type) {
 9541		case migrate_load:
 9542			/*
 9543			 * When comparing with load imbalance, use cpu_load()
 9544			 * which is not scaled with the CPU capacity.
 9545			 */
 9546			load = cpu_load(rq);
 9547
 9548			if (nr_running == 1 && load > env->imbalance &&
 9549			    !check_cpu_capacity(rq, env->sd))
 9550				break;
 9551
 9552			/*
 9553			 * For the load comparisons with the other CPUs,
 9554			 * consider the cpu_load() scaled with the CPU
 9555			 * capacity, so that the load can be moved away
 9556			 * from the CPU that is potentially running at a
 9557			 * lower capacity.
 9558			 *
 9559			 * Thus we're looking for max(load_i / capacity_i),
 9560			 * crosswise multiplication to rid ourselves of the
 9561			 * division works out to:
 9562			 * load_i * capacity_j > load_j * capacity_i;
 9563			 * where j is our previous maximum.
 9564			 */
 9565			if (load * busiest_capacity > busiest_load * capacity) {
 9566				busiest_load = load;
 9567				busiest_capacity = capacity;
 9568				busiest = rq;
 9569			}
 9570			break;
 9571
 9572		case migrate_util:
 9573			util = cpu_util(cpu_of(rq));
 9574
 9575			/*
 9576			 * Don't try to pull utilization from a CPU with one
 9577			 * running task. Whatever its utilization, we will fail
 9578			 * detach the task.
 9579			 */
 9580			if (nr_running <= 1)
 9581				continue;
 9582
 9583			if (busiest_util < util) {
 9584				busiest_util = util;
 9585				busiest = rq;
 9586			}
 9587			break;
 9588
 9589		case migrate_task:
 9590			if (busiest_nr < nr_running) {
 9591				busiest_nr = nr_running;
 9592				busiest = rq;
 9593			}
 9594			break;
 9595
 9596		case migrate_misfit:
 9597			/*
 9598			 * For ASYM_CPUCAPACITY domains with misfit tasks we
 9599			 * simply seek the "biggest" misfit task.
 9600			 */
 9601			if (rq->misfit_task_load > busiest_load) {
 9602				busiest_load = rq->misfit_task_load;
 9603				busiest = rq;
 9604			}
 9605
 9606			break;
 9607
 9608		}
 9609	}
 9610
 9611	return busiest;
 9612}
 9613
 9614/*
 9615 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
 9616 * so long as it is large enough.
 9617 */
 9618#define MAX_PINNED_INTERVAL	512
 9619
 9620static inline bool
 9621asym_active_balance(struct lb_env *env)
 9622{
 9623	/*
 9624	 * ASYM_PACKING needs to force migrate tasks from busy but
 9625	 * lower priority CPUs in order to pack all tasks in the
 9626	 * highest priority CPUs.
 9627	 */
 9628	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
 9629	       sched_asym_prefer(env->dst_cpu, env->src_cpu);
 9630}
 9631
 9632static inline bool
 9633imbalanced_active_balance(struct lb_env *env)
 9634{
 9635	struct sched_domain *sd = env->sd;
 9636
 9637	/*
 9638	 * The imbalanced case includes the case of pinned tasks preventing a fair
 9639	 * distribution of the load on the system but also the even distribution of the
 9640	 * threads on a system with spare capacity
 9641	 */
 9642	if ((env->migration_type == migrate_task) &&
 9643	    (sd->nr_balance_failed > sd->cache_nice_tries+2))
 9644		return 1;
 9645
 9646	return 0;
 9647}
 9648
 9649static int need_active_balance(struct lb_env *env)
 9650{
 9651	struct sched_domain *sd = env->sd;
 9652
 9653	if (asym_active_balance(env))
 9654		return 1;
 9655
 9656	if (imbalanced_active_balance(env))
 9657		return 1;
 9658
 9659	/*
 9660	 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
 9661	 * It's worth migrating the task if the src_cpu's capacity is reduced
 9662	 * because of other sched_class or IRQs if more capacity stays
 9663	 * available on dst_cpu.
 9664	 */
 9665	if ((env->idle != CPU_NOT_IDLE) &&
 9666	    (env->src_rq->cfs.h_nr_running == 1)) {
 9667		if ((check_cpu_capacity(env->src_rq, sd)) &&
 9668		    (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
 9669			return 1;
 9670	}
 9671
 9672	if (env->migration_type == migrate_misfit)
 9673		return 1;
 9674
 9675	return 0;
 9676}
 9677
 9678static int active_load_balance_cpu_stop(void *data);
 9679
 9680static int should_we_balance(struct lb_env *env)
 9681{
 9682	struct sched_group *sg = env->sd->groups;
 9683	int cpu;
 9684
 9685	/*
 9686	 * Ensure the balancing environment is consistent; can happen
 9687	 * when the softirq triggers 'during' hotplug.
 9688	 */
 9689	if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
 9690		return 0;
 9691
 9692	/*
 9693	 * In the newly idle case, we will allow all the CPUs
 9694	 * to do the newly idle load balance.
 9695	 */
 9696	if (env->idle == CPU_NEWLY_IDLE)
 9697		return 1;
 9698
 9699	/* Try to find first idle CPU */
 9700	for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
 9701		if (!idle_cpu(cpu))
 9702			continue;
 9703
 9704		/* Are we the first idle CPU? */
 9705		return cpu == env->dst_cpu;
 9706	}
 9707
 9708	/* Are we the first CPU of this group ? */
 9709	return group_balance_cpu(sg) == env->dst_cpu;
 9710}
 9711
 9712/*
 9713 * Check this_cpu to ensure it is balanced within domain. Attempt to move
 9714 * tasks if there is an imbalance.
 9715 */
 9716static int load_balance(int this_cpu, struct rq *this_rq,
 9717			struct sched_domain *sd, enum cpu_idle_type idle,
 9718			int *continue_balancing)
 9719{
 9720	int ld_moved, cur_ld_moved, active_balance = 0;
 9721	struct sched_domain *sd_parent = sd->parent;
 9722	struct sched_group *group;
 9723	struct rq *busiest;
 9724	struct rq_flags rf;
 9725	struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
 9726
 9727	struct lb_env env = {
 9728		.sd		= sd,
 9729		.dst_cpu	= this_cpu,
 9730		.dst_rq		= this_rq,
 9731		.dst_grpmask    = sched_group_span(sd->groups),
 9732		.idle		= idle,
 9733		.loop_break	= sched_nr_migrate_break,
 9734		.cpus		= cpus,
 9735		.fbq_type	= all,
 9736		.tasks		= LIST_HEAD_INIT(env.tasks),
 9737	};
 9738
 9739	cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
 9740
 9741	schedstat_inc(sd->lb_count[idle]);
 9742
 9743redo:
 9744	if (!should_we_balance(&env)) {
 9745		*continue_balancing = 0;
 9746		goto out_balanced;
 9747	}
 9748
 9749	group = find_busiest_group(&env);
 9750	if (!group) {
 9751		schedstat_inc(sd->lb_nobusyg[idle]);
 9752		goto out_balanced;
 9753	}
 9754
 9755	busiest = find_busiest_queue(&env, group);
 9756	if (!busiest) {
 9757		schedstat_inc(sd->lb_nobusyq[idle]);
 9758		goto out_balanced;
 9759	}
 9760
 9761	BUG_ON(busiest == env.dst_rq);
 9762
 9763	schedstat_add(sd->lb_imbalance[idle], env.imbalance);
 9764
 9765	env.src_cpu = busiest->cpu;
 9766	env.src_rq = busiest;
 9767
 9768	ld_moved = 0;
 9769	/* Clear this flag as soon as we find a pullable task */
 9770	env.flags |= LBF_ALL_PINNED;
 9771	if (busiest->nr_running > 1) {
 9772		/*
 9773		 * Attempt to move tasks. If find_busiest_group has found
 9774		 * an imbalance but busiest->nr_running <= 1, the group is
 9775		 * still unbalanced. ld_moved simply stays zero, so it is
 9776		 * correctly treated as an imbalance.
 9777		 */
 9778		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
 9779
 9780more_balance:
 9781		rq_lock_irqsave(busiest, &rf);
 9782		update_rq_clock(busiest);
 9783
 9784		/*
 9785		 * cur_ld_moved - load moved in current iteration
 9786		 * ld_moved     - cumulative load moved across iterations
 9787		 */
 9788		cur_ld_moved = detach_tasks(&env);
 9789
 9790		/*
 9791		 * We've detached some tasks from busiest_rq. Every
 9792		 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
 9793		 * unlock busiest->lock, and we are able to be sure
 9794		 * that nobody can manipulate the tasks in parallel.
 9795		 * See task_rq_lock() family for the details.
 9796		 */
 9797
 9798		rq_unlock(busiest, &rf);
 9799
 9800		if (cur_ld_moved) {
 9801			attach_tasks(&env);
 9802			ld_moved += cur_ld_moved;
 9803		}
 9804
 9805		local_irq_restore(rf.flags);
 9806
 9807		if (env.flags & LBF_NEED_BREAK) {
 9808			env.flags &= ~LBF_NEED_BREAK;
 9809			goto more_balance;
 9810		}
 9811
 9812		/*
 9813		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
 9814		 * us and move them to an alternate dst_cpu in our sched_group
 9815		 * where they can run. The upper limit on how many times we
 9816		 * iterate on same src_cpu is dependent on number of CPUs in our
 9817		 * sched_group.
 9818		 *
 9819		 * This changes load balance semantics a bit on who can move
 9820		 * load to a given_cpu. In addition to the given_cpu itself
 9821		 * (or a ilb_cpu acting on its behalf where given_cpu is
 9822		 * nohz-idle), we now have balance_cpu in a position to move
 9823		 * load to given_cpu. In rare situations, this may cause
 9824		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
 9825		 * _independently_ and at _same_ time to move some load to
 9826		 * given_cpu) causing excess load to be moved to given_cpu.
 9827		 * This however should not happen so much in practice and
 9828		 * moreover subsequent load balance cycles should correct the
 9829		 * excess load moved.
 9830		 */
 9831		if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
 9832
 9833			/* Prevent to re-select dst_cpu via env's CPUs */
 9834			__cpumask_clear_cpu(env.dst_cpu, env.cpus);
 9835
 9836			env.dst_rq	 = cpu_rq(env.new_dst_cpu);
 9837			env.dst_cpu	 = env.new_dst_cpu;
 9838			env.flags	&= ~LBF_DST_PINNED;
 9839			env.loop	 = 0;
 9840			env.loop_break	 = sched_nr_migrate_break;
 9841
 9842			/*
 9843			 * Go back to "more_balance" rather than "redo" since we
 9844			 * need to continue with same src_cpu.
 9845			 */
 9846			goto more_balance;
 9847		}
 9848
 9849		/*
 9850		 * We failed to reach balance because of affinity.
 9851		 */
 9852		if (sd_parent) {
 9853			int *group_imbalance = &sd_parent->groups->sgc->imbalance;
 9854
 9855			if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
 9856				*group_imbalance = 1;
 9857		}
 9858
 9859		/* All tasks on this runqueue were pinned by CPU affinity */
 9860		if (unlikely(env.flags & LBF_ALL_PINNED)) {
 9861			__cpumask_clear_cpu(cpu_of(busiest), cpus);
 9862			/*
 9863			 * Attempting to continue load balancing at the current
 9864			 * sched_domain level only makes sense if there are
 9865			 * active CPUs remaining as possible busiest CPUs to
 9866			 * pull load from which are not contained within the
 9867			 * destination group that is receiving any migrated
 9868			 * load.
 9869			 */
 9870			if (!cpumask_subset(cpus, env.dst_grpmask)) {
 9871				env.loop = 0;
 9872				env.loop_break = sched_nr_migrate_break;
 9873				goto redo;
 9874			}
 9875			goto out_all_pinned;
 9876		}
 9877	}
 9878
 9879	if (!ld_moved) {
 9880		schedstat_inc(sd->lb_failed[idle]);
 9881		/*
 9882		 * Increment the failure counter only on periodic balance.
 9883		 * We do not want newidle balance, which can be very
 9884		 * frequent, pollute the failure counter causing
 9885		 * excessive cache_hot migrations and active balances.
 9886		 */
 9887		if (idle != CPU_NEWLY_IDLE)
 9888			sd->nr_balance_failed++;
 9889
 9890		if (need_active_balance(&env)) {
 9891			unsigned long flags;
 9892
 9893			raw_spin_rq_lock_irqsave(busiest, flags);
 9894
 9895			/*
 9896			 * Don't kick the active_load_balance_cpu_stop,
 9897			 * if the curr task on busiest CPU can't be
 9898			 * moved to this_cpu:
 9899			 */
 9900			if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
 9901				raw_spin_rq_unlock_irqrestore(busiest, flags);
 9902				goto out_one_pinned;
 9903			}
 9904
 9905			/* Record that we found at least one task that could run on this_cpu */
 9906			env.flags &= ~LBF_ALL_PINNED;
 9907
 9908			/*
 9909			 * ->active_balance synchronizes accesses to
 9910			 * ->active_balance_work.  Once set, it's cleared
 9911			 * only after active load balance is finished.
 9912			 */
 9913			if (!busiest->active_balance) {
 9914				busiest->active_balance = 1;
 9915				busiest->push_cpu = this_cpu;
 9916				active_balance = 1;
 9917			}
 9918			raw_spin_rq_unlock_irqrestore(busiest, flags);
 9919
 9920			if (active_balance) {
 9921				stop_one_cpu_nowait(cpu_of(busiest),
 9922					active_load_balance_cpu_stop, busiest,
 9923					&busiest->active_balance_work);
 9924			}
 9925		}
 9926	} else {
 9927		sd->nr_balance_failed = 0;
 9928	}
 9929
 9930	if (likely(!active_balance) || need_active_balance(&env)) {
 9931		/* We were unbalanced, so reset the balancing interval */
 9932		sd->balance_interval = sd->min_interval;
 9933	}
 9934
 9935	goto out;
 9936
 9937out_balanced:
 9938	/*
 9939	 * We reach balance although we may have faced some affinity
 9940	 * constraints. Clear the imbalance flag only if other tasks got
 9941	 * a chance to move and fix the imbalance.
 9942	 */
 9943	if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
 9944		int *group_imbalance = &sd_parent->groups->sgc->imbalance;
 9945
 9946		if (*group_imbalance)
 9947			*group_imbalance = 0;
 9948	}
 9949
 9950out_all_pinned:
 9951	/*
 9952	 * We reach balance because all tasks are pinned at this level so
 9953	 * we can't migrate them. Let the imbalance flag set so parent level
 9954	 * can try to migrate them.
 9955	 */
 9956	schedstat_inc(sd->lb_balanced[idle]);
 9957
 9958	sd->nr_balance_failed = 0;
 9959
 9960out_one_pinned:
 9961	ld_moved = 0;
 9962
 9963	/*
 9964	 * newidle_balance() disregards balance intervals, so we could
 9965	 * repeatedly reach this code, which would lead to balance_interval
 9966	 * skyrocketing in a short amount of time. Skip the balance_interval
 9967	 * increase logic to avoid that.
 9968	 */
 9969	if (env.idle == CPU_NEWLY_IDLE)
 9970		goto out;
 9971
 9972	/* tune up the balancing interval */
 9973	if ((env.flags & LBF_ALL_PINNED &&
 9974	     sd->balance_interval < MAX_PINNED_INTERVAL) ||
 9975	    sd->balance_interval < sd->max_interval)
 9976		sd->balance_interval *= 2;
 9977out:
 9978	return ld_moved;
 9979}
 9980
 9981static inline unsigned long
 9982get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
 9983{
 9984	unsigned long interval = sd->balance_interval;
 9985
 9986	if (cpu_busy)
 9987		interval *= sd->busy_factor;
 9988
 9989	/* scale ms to jiffies */
 9990	interval = msecs_to_jiffies(interval);
 9991
 9992	/*
 9993	 * Reduce likelihood of busy balancing at higher domains racing with
 9994	 * balancing at lower domains by preventing their balancing periods
 9995	 * from being multiples of each other.
 9996	 */
 9997	if (cpu_busy)
 9998		interval -= 1;
 9999
10000	interval = clamp(interval, 1UL, max_load_balance_interval);
10001
10002	return interval;
10003}
10004
10005static inline void
10006update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
10007{
10008	unsigned long interval, next;
10009
10010	/* used by idle balance, so cpu_busy = 0 */
10011	interval = get_sd_balance_interval(sd, 0);
10012	next = sd->last_balance + interval;
10013
10014	if (time_after(*next_balance, next))
10015		*next_balance = next;
10016}
10017
10018/*
10019 * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
10020 * running tasks off the busiest CPU onto idle CPUs. It requires at
10021 * least 1 task to be running on each physical CPU where possible, and
10022 * avoids physical / logical imbalances.
10023 */
10024static int active_load_balance_cpu_stop(void *data)
10025{
10026	struct rq *busiest_rq = data;
10027	int busiest_cpu = cpu_of(busiest_rq);
10028	int target_cpu = busiest_rq->push_cpu;
10029	struct rq *target_rq = cpu_rq(target_cpu);
10030	struct sched_domain *sd;
10031	struct task_struct *p = NULL;
10032	struct rq_flags rf;
10033
10034	rq_lock_irq(busiest_rq, &rf);
10035	/*
10036	 * Between queueing the stop-work and running it is a hole in which
10037	 * CPUs can become inactive. We should not move tasks from or to
10038	 * inactive CPUs.
10039	 */
10040	if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
10041		goto out_unlock;
10042
10043	/* Make sure the requested CPU hasn't gone down in the meantime: */
10044	if (unlikely(busiest_cpu != smp_processor_id() ||
10045		     !busiest_rq->active_balance))
10046		goto out_unlock;
10047
10048	/* Is there any task to move? */
10049	if (busiest_rq->nr_running <= 1)
10050		goto out_unlock;
10051
10052	/*
10053	 * This condition is "impossible", if it occurs
10054	 * we need to fix it. Originally reported by
10055	 * Bjorn Helgaas on a 128-CPU setup.
10056	 */
10057	BUG_ON(busiest_rq == target_rq);
10058
10059	/* Search for an sd spanning us and the target CPU. */
10060	rcu_read_lock();
10061	for_each_domain(target_cpu, sd) {
10062		if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
10063			break;
10064	}
10065
10066	if (likely(sd)) {
10067		struct lb_env env = {
10068			.sd		= sd,
10069			.dst_cpu	= target_cpu,
10070			.dst_rq		= target_rq,
10071			.src_cpu	= busiest_rq->cpu,
10072			.src_rq		= busiest_rq,
10073			.idle		= CPU_IDLE,
10074			.flags		= LBF_ACTIVE_LB,
10075		};
10076
10077		schedstat_inc(sd->alb_count);
10078		update_rq_clock(busiest_rq);
10079
10080		p = detach_one_task(&env);
10081		if (p) {
10082			schedstat_inc(sd->alb_pushed);
10083			/* Active balancing done, reset the failure counter. */
10084			sd->nr_balance_failed = 0;
10085		} else {
10086			schedstat_inc(sd->alb_failed);
10087		}
10088	}
10089	rcu_read_unlock();
10090out_unlock:
10091	busiest_rq->active_balance = 0;
10092	rq_unlock(busiest_rq, &rf);
10093
10094	if (p)
10095		attach_one_task(target_rq, p);
10096
10097	local_irq_enable();
10098
10099	return 0;
10100}
10101
10102static DEFINE_SPINLOCK(balancing);
10103
10104/*
10105 * Scale the max load_balance interval with the number of CPUs in the system.
10106 * This trades load-balance latency on larger machines for less cross talk.
10107 */
10108void update_max_interval(void)
10109{
10110	max_load_balance_interval = HZ*num_online_cpus()/10;
10111}
10112
10113/*
10114 * It checks each scheduling domain to see if it is due to be balanced,
10115 * and initiates a balancing operation if so.
10116 *
10117 * Balancing parameters are set up in init_sched_domains.
10118 */
10119static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
10120{
10121	int continue_balancing = 1;
10122	int cpu = rq->cpu;
10123	int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
10124	unsigned long interval;
10125	struct sched_domain *sd;
10126	/* Earliest time when we have to do rebalance again */
10127	unsigned long next_balance = jiffies + 60*HZ;
10128	int update_next_balance = 0;
10129	int need_serialize, need_decay = 0;
10130	u64 max_cost = 0;
10131
10132	rcu_read_lock();
10133	for_each_domain(cpu, sd) {
10134		/*
10135		 * Decay the newidle max times here because this is a regular
10136		 * visit to all the domains. Decay ~1% per second.
10137		 */
10138		if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
10139			sd->max_newidle_lb_cost =
10140				(sd->max_newidle_lb_cost * 253) / 256;
10141			sd->next_decay_max_lb_cost = jiffies + HZ;
10142			need_decay = 1;
10143		}
10144		max_cost += sd->max_newidle_lb_cost;
10145
10146		/*
10147		 * Stop the load balance at this level. There is another
10148		 * CPU in our sched group which is doing load balancing more
10149		 * actively.
10150		 */
10151		if (!continue_balancing) {
10152			if (need_decay)
10153				continue;
10154			break;
10155		}
10156
10157		interval = get_sd_balance_interval(sd, busy);
10158
10159		need_serialize = sd->flags & SD_SERIALIZE;
10160		if (need_serialize) {
10161			if (!spin_trylock(&balancing))
10162				goto out;
10163		}
10164
10165		if (time_after_eq(jiffies, sd->last_balance + interval)) {
10166			if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
10167				/*
10168				 * The LBF_DST_PINNED logic could have changed
10169				 * env->dst_cpu, so we can't know our idle
10170				 * state even if we migrated tasks. Update it.
10171				 */
10172				idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
10173				busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
10174			}
10175			sd->last_balance = jiffies;
10176			interval = get_sd_balance_interval(sd, busy);
10177		}
10178		if (need_serialize)
10179			spin_unlock(&balancing);
10180out:
10181		if (time_after(next_balance, sd->last_balance + interval)) {
10182			next_balance = sd->last_balance + interval;
10183			update_next_balance = 1;
10184		}
10185	}
10186	if (need_decay) {
10187		/*
10188		 * Ensure the rq-wide value also decays but keep it at a
10189		 * reasonable floor to avoid funnies with rq->avg_idle.
10190		 */
10191		rq->max_idle_balance_cost =
10192			max((u64)sysctl_sched_migration_cost, max_cost);
10193	}
10194	rcu_read_unlock();
10195
10196	/*
10197	 * next_balance will be updated only when there is a need.
10198	 * When the cpu is attached to null domain for ex, it will not be
10199	 * updated.
10200	 */
10201	if (likely(update_next_balance))
10202		rq->next_balance = next_balance;
10203
10204}
10205
10206static inline int on_null_domain(struct rq *rq)
10207{
10208	return unlikely(!rcu_dereference_sched(rq->sd));
10209}
10210
10211#ifdef CONFIG_NO_HZ_COMMON
10212/*
10213 * idle load balancing details
10214 * - When one of the busy CPUs notice that there may be an idle rebalancing
10215 *   needed, they will kick the idle load balancer, which then does idle
10216 *   load balancing for all the idle CPUs.
10217 * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED not set
10218 *   anywhere yet.
10219 */
10220
10221static inline int find_new_ilb(void)
10222{
10223	int ilb;
10224
10225	for_each_cpu_and(ilb, nohz.idle_cpus_mask,
10226			      housekeeping_cpumask(HK_FLAG_MISC)) {
10227
10228		if (ilb == smp_processor_id())
10229			continue;
10230
10231		if (idle_cpu(ilb))
10232			return ilb;
10233	}
10234
10235	return nr_cpu_ids;
10236}
10237
10238/*
10239 * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
10240 * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
10241 */
10242static void kick_ilb(unsigned int flags)
10243{
10244	int ilb_cpu;
10245
10246	/*
10247	 * Increase nohz.next_balance only when if full ilb is triggered but
10248	 * not if we only update stats.
10249	 */
10250	if (flags & NOHZ_BALANCE_KICK)
10251		nohz.next_balance = jiffies+1;
10252
10253	ilb_cpu = find_new_ilb();
10254
10255	if (ilb_cpu >= nr_cpu_ids)
10256		return;
10257
10258	/*
10259	 * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
10260	 * the first flag owns it; cleared by nohz_csd_func().
10261	 */
10262	flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
10263	if (flags & NOHZ_KICK_MASK)
10264		return;
10265
10266	/*
10267	 * This way we generate an IPI on the target CPU which
10268	 * is idle. And the softirq performing nohz idle load balance
10269	 * will be run before returning from the IPI.
10270	 */
10271	smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
10272}
10273
10274/*
10275 * Current decision point for kicking the idle load balancer in the presence
10276 * of idle CPUs in the system.
10277 */
10278static void nohz_balancer_kick(struct rq *rq)
10279{
10280	unsigned long now = jiffies;
10281	struct sched_domain_shared *sds;
10282	struct sched_domain *sd;
10283	int nr_busy, i, cpu = rq->cpu;
10284	unsigned int flags = 0;
10285
10286	if (unlikely(rq->idle_balance))
10287		return;
10288
10289	/*
10290	 * We may be recently in ticked or tickless idle mode. At the first
10291	 * busy tick after returning from idle, we will update the busy stats.
10292	 */
10293	nohz_balance_exit_idle(rq);
10294
10295	/*
10296	 * None are in tickless mode and hence no need for NOHZ idle load
10297	 * balancing.
10298	 */
10299	if (likely(!atomic_read(&nohz.nr_cpus)))
10300		return;
10301
10302	if (READ_ONCE(nohz.has_blocked) &&
10303	    time_after(now, READ_ONCE(nohz.next_blocked)))
10304		flags = NOHZ_STATS_KICK;
10305
10306	if (time_before(now, nohz.next_balance))
10307		goto out;
10308
10309	if (rq->nr_running >= 2) {
10310		flags = NOHZ_KICK_MASK;
10311		goto out;
10312	}
10313
10314	rcu_read_lock();
10315
10316	sd = rcu_dereference(rq->sd);
10317	if (sd) {
10318		/*
10319		 * If there's a CFS task and the current CPU has reduced
10320		 * capacity; kick the ILB to see if there's a better CPU to run
10321		 * on.
10322		 */
10323		if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
10324			flags = NOHZ_KICK_MASK;
10325			goto unlock;
10326		}
10327	}
10328
10329	sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
10330	if (sd) {
10331		/*
10332		 * When ASYM_PACKING; see if there's a more preferred CPU
10333		 * currently idle; in which case, kick the ILB to move tasks
10334		 * around.
10335		 */
10336		for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
10337			if (sched_asym_prefer(i, cpu)) {
10338				flags = NOHZ_KICK_MASK;
10339				goto unlock;
10340			}
10341		}
10342	}
10343
10344	sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
10345	if (sd) {
10346		/*
10347		 * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU
10348		 * to run the misfit task on.
10349		 */
10350		if (check_misfit_status(rq, sd)) {
10351			flags = NOHZ_KICK_MASK;
10352			goto unlock;
10353		}
10354
10355		/*
10356		 * For asymmetric systems, we do not want to nicely balance
10357		 * cache use, instead we want to embrace asymmetry and only
10358		 * ensure tasks have enough CPU capacity.
10359		 *
10360		 * Skip the LLC logic because it's not relevant in that case.
10361		 */
10362		goto unlock;
10363	}
10364
10365	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
10366	if (sds) {
10367		/*
10368		 * If there is an imbalance between LLC domains (IOW we could
10369		 * increase the overall cache use), we need some less-loaded LLC
10370		 * domain to pull some load. Likewise, we may need to spread
10371		 * load within the current LLC domain (e.g. packed SMT cores but
10372		 * other CPUs are idle). We can't really know from here how busy
10373		 * the others are - so just get a nohz balance going if it looks
10374		 * like this LLC domain has tasks we could move.
10375		 */
10376		nr_busy = atomic_read(&sds->nr_busy_cpus);
10377		if (nr_busy > 1) {
10378			flags = NOHZ_KICK_MASK;
10379			goto unlock;
10380		}
10381	}
10382unlock:
10383	rcu_read_unlock();
10384out:
10385	if (flags)
10386		kick_ilb(flags);
10387}
10388
10389static void set_cpu_sd_state_busy(int cpu)
10390{
10391	struct sched_domain *sd;
10392
10393	rcu_read_lock();
10394	sd = rcu_dereference(per_cpu(sd_llc, cpu));
10395
10396	if (!sd || !sd->nohz_idle)
10397		goto unlock;
10398	sd->nohz_idle = 0;
10399
10400	atomic_inc(&sd->shared->nr_busy_cpus);
10401unlock:
10402	rcu_read_unlock();
10403}
10404
10405void nohz_balance_exit_idle(struct rq *rq)
10406{
10407	SCHED_WARN_ON(rq != this_rq());
10408
10409	if (likely(!rq->nohz_tick_stopped))
10410		return;
10411
10412	rq->nohz_tick_stopped = 0;
10413	cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
10414	atomic_dec(&nohz.nr_cpus);
10415
10416	set_cpu_sd_state_busy(rq->cpu);
10417}
10418
10419static void set_cpu_sd_state_idle(int cpu)
10420{
10421	struct sched_domain *sd;
10422
10423	rcu_read_lock();
10424	sd = rcu_dereference(per_cpu(sd_llc, cpu));
10425
10426	if (!sd || sd->nohz_idle)
10427		goto unlock;
10428	sd->nohz_idle = 1;
10429
10430	atomic_dec(&sd->shared->nr_busy_cpus);
10431unlock:
10432	rcu_read_unlock();
10433}
10434
10435/*
10436 * This routine will record that the CPU is going idle with tick stopped.
10437 * This info will be used in performing idle load balancing in the future.
10438 */
10439void nohz_balance_enter_idle(int cpu)
10440{
10441	struct rq *rq = cpu_rq(cpu);
10442
10443	SCHED_WARN_ON(cpu != smp_processor_id());
10444
10445	/* If this CPU is going down, then nothing needs to be done: */
10446	if (!cpu_active(cpu))
10447		return;
10448
10449	/* Spare idle load balancing on CPUs that don't want to be disturbed: */
10450	if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
10451		return;
10452
10453	/*
10454	 * Can be set safely without rq->lock held
10455	 * If a clear happens, it will have evaluated last additions because
10456	 * rq->lock is held during the check and the clear
10457	 */
10458	rq->has_blocked_load = 1;
10459
10460	/*
10461	 * The tick is still stopped but load could have been added in the
10462	 * meantime. We set the nohz.has_blocked flag to trig a check of the
10463	 * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear
10464	 * of nohz.has_blocked can only happen after checking the new load
10465	 */
10466	if (rq->nohz_tick_stopped)
10467		goto out;
10468
10469	/* If we're a completely isolated CPU, we don't play: */
10470	if (on_null_domain(rq))
10471		return;
10472
10473	rq->nohz_tick_stopped = 1;
10474
10475	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
10476	atomic_inc(&nohz.nr_cpus);
10477
10478	/*
10479	 * Ensures that if nohz_idle_balance() fails to observe our
10480	 * @idle_cpus_mask store, it must observe the @has_blocked
10481	 * store.
10482	 */
10483	smp_mb__after_atomic();
10484
10485	set_cpu_sd_state_idle(cpu);
10486
10487out:
10488	/*
10489	 * Each time a cpu enter idle, we assume that it has blocked load and
10490	 * enable the periodic update of the load of idle cpus
10491	 */
10492	WRITE_ONCE(nohz.has_blocked, 1);
10493}
10494
10495static bool update_nohz_stats(struct rq *rq)
10496{
10497	unsigned int cpu = rq->cpu;
10498
10499	if (!rq->has_blocked_load)
10500		return false;
10501
10502	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
10503		return false;
10504
10505	if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
10506		return true;
10507
10508	update_blocked_averages(cpu);
10509
10510	return rq->has_blocked_load;
10511}
10512
10513/*
10514 * Internal function that runs load balance for all idle cpus. The load balance
10515 * can be a simple update of blocked load or a complete load balance with
10516 * tasks movement depending of flags.
10517 */
10518static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
10519			       enum cpu_idle_type idle)
10520{
10521	/* Earliest time when we have to do rebalance again */
10522	unsigned long now = jiffies;
10523	unsigned long next_balance = now + 60*HZ;
10524	bool has_blocked_load = false;
10525	int update_next_balance = 0;
10526	int this_cpu = this_rq->cpu;
10527	int balance_cpu;
10528	struct rq *rq;
10529
10530	SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);
10531
10532	/*
10533	 * We assume there will be no idle load after this update and clear
10534	 * the has_blocked flag. If a cpu enters idle in the mean time, it will
10535	 * set the has_blocked flag and trig another update of idle load.
10536	 * Because a cpu that becomes idle, is added to idle_cpus_mask before
10537	 * setting the flag, we are sure to not clear the state and not
10538	 * check the load of an idle cpu.
10539	 */
10540	WRITE_ONCE(nohz.has_blocked, 0);
10541
10542	/*
10543	 * Ensures that if we miss the CPU, we must see the has_blocked
10544	 * store from nohz_balance_enter_idle().
10545	 */
10546	smp_mb();
10547
10548	/*
10549	 * Start with the next CPU after this_cpu so we will end with this_cpu and let a
10550	 * chance for other idle cpu to pull load.
10551	 */
10552	for_each_cpu_wrap(balance_cpu,  nohz.idle_cpus_mask, this_cpu+1) {
10553		if (!idle_cpu(balance_cpu))
10554			continue;
10555
10556		/*
10557		 * If this CPU gets work to do, stop the load balancing
10558		 * work being done for other CPUs. Next load
10559		 * balancing owner will pick it up.
10560		 */
10561		if (need_resched()) {
10562			has_blocked_load = true;
10563			goto abort;
10564		}
10565
10566		rq = cpu_rq(balance_cpu);
10567
10568		has_blocked_load |= update_nohz_stats(rq);
10569
10570		/*
10571		 * If time for next balance is due,
10572		 * do the balance.
10573		 */
10574		if (time_after_eq(jiffies, rq->next_balance)) {
10575			struct rq_flags rf;
10576
10577			rq_lock_irqsave(rq, &rf);
10578			update_rq_clock(rq);
10579			rq_unlock_irqrestore(rq, &rf);
10580
10581			if (flags & NOHZ_BALANCE_KICK)
10582				rebalance_domains(rq, CPU_IDLE);
10583		}
10584
10585		if (time_after(next_balance, rq->next_balance)) {
10586			next_balance = rq->next_balance;
10587			update_next_balance = 1;
10588		}
10589	}
10590
10591	/*
10592	 * next_balance will be updated only when there is a need.
10593	 * When the CPU is attached to null domain for ex, it will not be
10594	 * updated.
10595	 */
10596	if (likely(update_next_balance))
10597		nohz.next_balance = next_balance;
10598
10599	WRITE_ONCE(nohz.next_blocked,
10600		now + msecs_to_jiffies(LOAD_AVG_PERIOD));
10601
10602abort:
10603	/* There is still blocked load, enable periodic update */
10604	if (has_blocked_load)
10605		WRITE_ONCE(nohz.has_blocked, 1);
10606}
10607
10608/*
10609 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
10610 * rebalancing for all the cpus for whom scheduler ticks are stopped.
10611 */
10612static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
10613{
10614	unsigned int flags = this_rq->nohz_idle_balance;
10615
10616	if (!flags)
10617		return false;
10618
10619	this_rq->nohz_idle_balance = 0;
10620
10621	if (idle != CPU_IDLE)
10622		return false;
10623
10624	_nohz_idle_balance(this_rq, flags, idle);
10625
10626	return true;
10627}
10628
10629/*
10630 * Check if we need to run the ILB for updating blocked load before entering
10631 * idle state.
10632 */
10633void nohz_run_idle_balance(int cpu)
10634{
10635	unsigned int flags;
10636
10637	flags = atomic_fetch_andnot(NOHZ_NEWILB_KICK, nohz_flags(cpu));
10638
10639	/*
10640	 * Update the blocked load only if no SCHED_SOFTIRQ is about to happen
10641	 * (ie NOHZ_STATS_KICK set) and will do the same.
10642	 */
10643	if ((flags == NOHZ_NEWILB_KICK) && !need_resched())
10644		_nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK, CPU_IDLE);
10645}
10646
10647static void nohz_newidle_balance(struct rq *this_rq)
10648{
10649	int this_cpu = this_rq->cpu;
10650
10651	/*
10652	 * This CPU doesn't want to be disturbed by scheduler
10653	 * housekeeping
10654	 */
10655	if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED))
10656		return;
10657
10658	/* Will wake up very soon. No time for doing anything else*/
10659	if (this_rq->avg_idle < sysctl_sched_migration_cost)
10660		return;
10661
10662	/* Don't need to update blocked load of idle CPUs*/
10663	if (!READ_ONCE(nohz.has_blocked) ||
10664	    time_before(jiffies, READ_ONCE(nohz.next_blocked)))
10665		return;
10666
10667	/*
10668	 * Set the need to trigger ILB in order to update blocked load
10669	 * before entering idle state.
10670	 */
10671	atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu));
10672}
10673
10674#else /* !CONFIG_NO_HZ_COMMON */
10675static inline void nohz_balancer_kick(struct rq *rq) { }
10676
10677static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
10678{
10679	return false;
10680}
10681
10682static inline void nohz_newidle_balance(struct rq *this_rq) { }
10683#endif /* CONFIG_NO_HZ_COMMON */
10684
10685/*
10686 * newidle_balance is called by schedule() if this_cpu is about to become
10687 * idle. Attempts to pull tasks from other CPUs.
10688 *
10689 * Returns:
10690 *   < 0 - we released the lock and there are !fair tasks present
10691 *     0 - failed, no new tasks
10692 *   > 0 - success, new (fair) tasks present
10693 */
10694static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
10695{
10696	unsigned long next_balance = jiffies + HZ;
10697	int this_cpu = this_rq->cpu;
10698	struct sched_domain *sd;
10699	int pulled_task = 0;
10700	u64 curr_cost = 0;
10701
10702	update_misfit_status(NULL, this_rq);
10703
10704	/*
10705	 * There is a task waiting to run. No need to search for one.
10706	 * Return 0; the task will be enqueued when switching to idle.
10707	 */
10708	if (this_rq->ttwu_pending)
10709		return 0;
10710
10711	/*
10712	 * We must set idle_stamp _before_ calling idle_balance(), such that we
10713	 * measure the duration of idle_balance() as idle time.
10714	 */
10715	this_rq->idle_stamp = rq_clock(this_rq);
10716
10717	/*
10718	 * Do not pull tasks towards !active CPUs...
10719	 */
10720	if (!cpu_active(this_cpu))
10721		return 0;
10722
10723	/*
10724	 * This is OK, because current is on_cpu, which avoids it being picked
10725	 * for load-balance and preemption/IRQs are still disabled avoiding
10726	 * further scheduler activity on it and we're being very careful to
10727	 * re-start the picking loop.
10728	 */
10729	rq_unpin_lock(this_rq, rf);
10730
10731	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
10732	    !READ_ONCE(this_rq->rd->overload)) {
10733
10734		rcu_read_lock();
10735		sd = rcu_dereference_check_sched_domain(this_rq->sd);
10736		if (sd)
10737			update_next_balance(sd, &next_balance);
10738		rcu_read_unlock();
10739
10740		goto out;
10741	}
10742
10743	raw_spin_rq_unlock(this_rq);
10744
10745	update_blocked_averages(this_cpu);
10746	rcu_read_lock();
10747	for_each_domain(this_cpu, sd) {
10748		int continue_balancing = 1;
10749		u64 t0, domain_cost;
10750
10751		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
10752			update_next_balance(sd, &next_balance);
10753			break;
10754		}
10755
10756		if (sd->flags & SD_BALANCE_NEWIDLE) {
10757			t0 = sched_clock_cpu(this_cpu);
10758
10759			pulled_task = load_balance(this_cpu, this_rq,
10760						   sd, CPU_NEWLY_IDLE,
10761						   &continue_balancing);
10762
10763			domain_cost = sched_clock_cpu(this_cpu) - t0;
10764			if (domain_cost > sd->max_newidle_lb_cost)
10765				sd->max_newidle_lb_cost = domain_cost;
10766
10767			curr_cost += domain_cost;
10768		}
10769
10770		update_next_balance(sd, &next_balance);
10771
10772		/*
10773		 * Stop searching for tasks to pull if there are
10774		 * now runnable tasks on this rq.
10775		 */
10776		if (pulled_task || this_rq->nr_running > 0 ||
10777		    this_rq->ttwu_pending)
10778			break;
10779	}
10780	rcu_read_unlock();
10781
10782	raw_spin_rq_lock(this_rq);
10783
10784	if (curr_cost > this_rq->max_idle_balance_cost)
10785		this_rq->max_idle_balance_cost = curr_cost;
10786
10787	/*
10788	 * While browsing the domains, we released the rq lock, a task could
10789	 * have been enqueued in the meantime. Since we're not going idle,
10790	 * pretend we pulled a task.
10791	 */
10792	if (this_rq->cfs.h_nr_running && !pulled_task)
10793		pulled_task = 1;
10794
10795	/* Is there a task of a high priority class? */
10796	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
10797		pulled_task = -1;
10798
10799out:
10800	/* Move the next balance forward */
10801	if (time_after(this_rq->next_balance, next_balance))
10802		this_rq->next_balance = next_balance;
10803
10804	if (pulled_task)
10805		this_rq->idle_stamp = 0;
10806	else
10807		nohz_newidle_balance(this_rq);
10808
10809	rq_repin_lock(this_rq, rf);
10810
10811	return pulled_task;
10812}
10813
10814/*
10815 * run_rebalance_domains is triggered when needed from the scheduler tick.
10816 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
10817 */
10818static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
10819{
10820	struct rq *this_rq = this_rq();
10821	enum cpu_idle_type idle = this_rq->idle_balance ?
10822						CPU_IDLE : CPU_NOT_IDLE;
10823
10824	/*
10825	 * If this CPU has a pending nohz_balance_kick, then do the
10826	 * balancing on behalf of the other idle CPUs whose ticks are
10827	 * stopped. Do nohz_idle_balance *before* rebalance_domains to
10828	 * give the idle CPUs a chance to load balance. Else we may
10829	 * load balance only within the local sched_domain hierarchy
10830	 * and abort nohz_idle_balance altogether if we pull some load.
10831	 */
10832	if (nohz_idle_balance(this_rq, idle))
10833		return;
10834
10835	/* normal load balance */
10836	update_blocked_averages(this_rq->cpu);
10837	rebalance_domains(this_rq, idle);
10838}
10839
10840/*
10841 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
10842 */
10843void trigger_load_balance(struct rq *rq)
10844{
10845	/*
10846	 * Don't need to rebalance while attached to NULL domain or
10847	 * runqueue CPU is not active
10848	 */
10849	if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq))))
10850		return;
10851
10852	if (time_after_eq(jiffies, rq->next_balance))
10853		raise_softirq(SCHED_SOFTIRQ);
10854
10855	nohz_balancer_kick(rq);
10856}
10857
10858static void rq_online_fair(struct rq *rq)
10859{
10860	update_sysctl();
10861
10862	update_runtime_enabled(rq);
10863}
10864
10865static void rq_offline_fair(struct rq *rq)
10866{
10867	update_sysctl();
10868
10869	/* Ensure any throttled groups are reachable by pick_next_task */
10870	unthrottle_offline_cfs_rqs(rq);
10871}
10872
10873#endif /* CONFIG_SMP */
10874
10875#ifdef CONFIG_SCHED_CORE
10876static inline bool
10877__entity_slice_used(struct sched_entity *se, int min_nr_tasks)
10878{
10879	u64 slice = sched_slice(cfs_rq_of(se), se);
10880	u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
10881
10882	return (rtime * min_nr_tasks > slice);
10883}
10884
10885#define MIN_NR_TASKS_DURING_FORCEIDLE	2
10886static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
10887{
10888	if (!sched_core_enabled(rq))
10889		return;
10890
10891	/*
10892	 * If runqueue has only one task which used up its slice and
10893	 * if the sibling is forced idle, then trigger schedule to
10894	 * give forced idle task a chance.
10895	 *
10896	 * sched_slice() considers only this active rq and it gets the
10897	 * whole slice. But during force idle, we have siblings acting
10898	 * like a single runqueue and hence we need to consider runnable
10899	 * tasks on this CPU and the forced idle CPU. Ideally, we should
10900	 * go through the forced idle rq, but that would be a perf hit.
10901	 * We can assume that the forced idle CPU has at least
10902	 * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
10903	 * if we need to give up the CPU.
10904	 */
10905	if (rq->core->core_forceidle && rq->cfs.nr_running == 1 &&
10906	    __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
10907		resched_curr(rq);
10908}
10909
10910/*
10911 * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
10912 */
10913static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle)
10914{
10915	for_each_sched_entity(se) {
10916		struct cfs_rq *cfs_rq = cfs_rq_of(se);
10917
10918		if (forceidle) {
10919			if (cfs_rq->forceidle_seq == fi_seq)
10920				break;
10921			cfs_rq->forceidle_seq = fi_seq;
10922		}
10923
10924		cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime;
10925	}
10926}
10927
10928void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
10929{
10930	struct sched_entity *se = &p->se;
10931
10932	if (p->sched_class != &fair_sched_class)
10933		return;
10934
10935	se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
10936}
10937
10938bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
10939{
10940	struct rq *rq = task_rq(a);
10941	struct sched_entity *sea = &a->se;
10942	struct sched_entity *seb = &b->se;
10943	struct cfs_rq *cfs_rqa;
10944	struct cfs_rq *cfs_rqb;
10945	s64 delta;
10946
10947	SCHED_WARN_ON(task_rq(b)->core != rq->core);
10948
10949#ifdef CONFIG_FAIR_GROUP_SCHED
10950	/*
10951	 * Find an se in the hierarchy for tasks a and b, such that the se's
10952	 * are immediate siblings.
10953	 */
10954	while (sea->cfs_rq->tg != seb->cfs_rq->tg) {
10955		int sea_depth = sea->depth;
10956		int seb_depth = seb->depth;
10957
10958		if (sea_depth >= seb_depth)
10959			sea = parent_entity(sea);
10960		if (sea_depth <= seb_depth)
10961			seb = parent_entity(seb);
10962	}
10963
10964	se_fi_update(sea, rq->core->core_forceidle_seq, in_fi);
10965	se_fi_update(seb, rq->core->core_forceidle_seq, in_fi);
10966
10967	cfs_rqa = sea->cfs_rq;
10968	cfs_rqb = seb->cfs_rq;
10969#else
10970	cfs_rqa = &task_rq(a)->cfs;
10971	cfs_rqb = &task_rq(b)->cfs;
10972#endif
10973
10974	/*
10975	 * Find delta after normalizing se's vruntime with its cfs_rq's
10976	 * min_vruntime_fi, which would have been updated in prior calls
10977	 * to se_fi_update().
10978	 */
10979	delta = (s64)(sea->vruntime - seb->vruntime) +
10980		(s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi);
10981
10982	return delta > 0;
10983}
10984#else
10985static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
10986#endif
10987
10988/*
10989 * scheduler tick hitting a task of our scheduling class.
10990 *
10991 * NOTE: This function can be called remotely by the tick offload that
10992 * goes along full dynticks. Therefore no local assumption can be made
10993 * and everything must be accessed through the @rq and @curr passed in
10994 * parameters.
10995 */
10996static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
10997{
10998	struct cfs_rq *cfs_rq;
10999	struct sched_entity *se = &curr->se;
11000
11001	for_each_sched_entity(se) {
11002		cfs_rq = cfs_rq_of(se);
11003		entity_tick(cfs_rq, se, queued);
11004	}
11005
11006	if (static_branch_unlikely(&sched_numa_balancing))
11007		task_tick_numa(rq, curr);
11008
11009	update_misfit_status(curr, rq);
11010	update_overutilized_status(task_rq(curr));
11011
11012	task_tick_core(rq, curr);
11013}
11014
11015/*
11016 * called on fork with the child task as argument from the parent's context
11017 *  - child not yet on the tasklist
11018 *  - preemption disabled
11019 */
11020static void task_fork_fair(struct task_struct *p)
11021{
11022	struct cfs_rq *cfs_rq;
11023	struct sched_entity *se = &p->se, *curr;
11024	struct rq *rq = this_rq();
11025	struct rq_flags rf;
11026
11027	rq_lock(rq, &rf);
11028	update_rq_clock(rq);
11029
11030	cfs_rq = task_cfs_rq(current);
11031	curr = cfs_rq->curr;
11032	if (curr) {
11033		update_curr(cfs_rq);
11034		se->vruntime = curr->vruntime;
11035	}
11036	place_entity(cfs_rq, se, 1);
11037
11038	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
11039		/*
11040		 * Upon rescheduling, sched_class::put_prev_task() will place
11041		 * 'current' within the tree based on its new key value.
11042		 */
11043		swap(curr->vruntime, se->vruntime);
11044		resched_curr(rq);
11045	}
11046
11047	se->vruntime -= cfs_rq->min_vruntime;
11048	rq_unlock(rq, &rf);
11049}
11050
11051/*
11052 * Priority of the task has changed. Check to see if we preempt
11053 * the current task.
11054 */
11055static void
11056prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
11057{
11058	if (!task_on_rq_queued(p))
11059		return;
11060
11061	if (rq->cfs.nr_running == 1)
11062		return;
11063
11064	/*
11065	 * Reschedule if we are currently running on this runqueue and
11066	 * our priority decreased, or if we are not currently running on
11067	 * this runqueue and our priority is higher than the current's
11068	 */
11069	if (task_current(rq, p)) {
11070		if (p->prio > oldprio)
11071			resched_curr(rq);
11072	} else
11073		check_preempt_curr(rq, p, 0);
11074}
11075
11076static inline bool vruntime_normalized(struct task_struct *p)
11077{
11078	struct sched_entity *se = &p->se;
11079
11080	/*
11081	 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
11082	 * the dequeue_entity(.flags=0) will already have normalized the
11083	 * vruntime.
11084	 */
11085	if (p->on_rq)
11086		return true;
11087
11088	/*
11089	 * When !on_rq, vruntime of the task has usually NOT been normalized.
11090	 * But there are some cases where it has already been normalized:
11091	 *
11092	 * - A forked child which is waiting for being woken up by
11093	 *   wake_up_new_task().
11094	 * - A task which has been woken up by try_to_wake_up() and
11095	 *   waiting for actually being woken up by sched_ttwu_pending().
11096	 */
11097	if (!se->sum_exec_runtime ||
11098	    (READ_ONCE(p->__state) == TASK_WAKING && p->sched_remote_wakeup))
11099		return true;
11100
11101	return false;
11102}
11103
11104#ifdef CONFIG_FAIR_GROUP_SCHED
11105/*
11106 * Propagate the changes of the sched_entity across the tg tree to make it
11107 * visible to the root
11108 */
11109static void propagate_entity_cfs_rq(struct sched_entity *se)
11110{
11111	struct cfs_rq *cfs_rq;
11112
11113	list_add_leaf_cfs_rq(cfs_rq_of(se));
11114
11115	/* Start to propagate at parent */
11116	se = se->parent;
11117
11118	for_each_sched_entity(se) {
11119		cfs_rq = cfs_rq_of(se);
11120
11121		if (!cfs_rq_throttled(cfs_rq)){
11122			update_load_avg(cfs_rq, se, UPDATE_TG);
11123			list_add_leaf_cfs_rq(cfs_rq);
11124			continue;
11125		}
11126
11127		if (list_add_leaf_cfs_rq(cfs_rq))
11128			break;
11129	}
11130}
11131#else
11132static void propagate_entity_cfs_rq(struct sched_entity *se) { }
11133#endif
11134
11135static void detach_entity_cfs_rq(struct sched_entity *se)
11136{
11137	struct cfs_rq *cfs_rq = cfs_rq_of(se);
11138
11139	/* Catch up with the cfs_rq and remove our load when we leave */
11140	update_load_avg(cfs_rq, se, 0);
11141	detach_entity_load_avg(cfs_rq, se);
11142	update_tg_load_avg(cfs_rq);
11143	propagate_entity_cfs_rq(se);
11144}
11145
11146static void attach_entity_cfs_rq(struct sched_entity *se)
11147{
11148	struct cfs_rq *cfs_rq = cfs_rq_of(se);
11149
11150#ifdef CONFIG_FAIR_GROUP_SCHED
11151	/*
11152	 * Since the real-depth could have been changed (only FAIR
11153	 * class maintain depth value), reset depth properly.
11154	 */
11155	se->depth = se->parent ? se->parent->depth + 1 : 0;
11156#endif
11157
11158	/* Synchronize entity with its cfs_rq */
11159	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
11160	attach_entity_load_avg(cfs_rq, se);
11161	update_tg_load_avg(cfs_rq);
11162	propagate_entity_cfs_rq(se);
11163}
11164
11165static void detach_task_cfs_rq(struct task_struct *p)
11166{
11167	struct sched_entity *se = &p->se;
11168	struct cfs_rq *cfs_rq = cfs_rq_of(se);
11169
11170	if (!vruntime_normalized(p)) {
11171		/*
11172		 * Fix up our vruntime so that the current sleep doesn't
11173		 * cause 'unlimited' sleep bonus.
11174		 */
11175		place_entity(cfs_rq, se, 0);
11176		se->vruntime -= cfs_rq->min_vruntime;
11177	}
11178
11179	detach_entity_cfs_rq(se);
11180}
11181
11182static void attach_task_cfs_rq(struct task_struct *p)
11183{
11184	struct sched_entity *se = &p->se;
11185	struct cfs_rq *cfs_rq = cfs_rq_of(se);
11186
11187	attach_entity_cfs_rq(se);
11188
11189	if (!vruntime_normalized(p))
11190		se->vruntime += cfs_rq->min_vruntime;
11191}
11192
11193static void switched_from_fair(struct rq *rq, struct task_struct *p)
11194{
11195	detach_task_cfs_rq(p);
11196}
11197
11198static void switched_to_fair(struct rq *rq, struct task_struct *p)
11199{
11200	attach_task_cfs_rq(p);
11201
11202	if (task_on_rq_queued(p)) {
11203		/*
11204		 * We were most likely switched from sched_rt, so
11205		 * kick off the schedule if running, otherwise just see
11206		 * if we can still preempt the current task.
11207		 */
11208		if (task_current(rq, p))
11209			resched_curr(rq);
11210		else
11211			check_preempt_curr(rq, p, 0);
11212	}
11213}
11214
11215/* Account for a task changing its policy or group.
11216 *
11217 * This routine is mostly called to set cfs_rq->curr field when a task
11218 * migrates between groups/classes.
11219 */
11220static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
11221{
11222	struct sched_entity *se = &p->se;
11223
11224#ifdef CONFIG_SMP
11225	if (task_on_rq_queued(p)) {
11226		/*
11227		 * Move the next running task to the front of the list, so our
11228		 * cfs_tasks list becomes MRU one.
11229		 */
11230		list_move(&se->group_node, &rq->cfs_tasks);
11231	}
11232#endif
11233
11234	for_each_sched_entity(se) {
11235		struct cfs_rq *cfs_rq = cfs_rq_of(se);
11236
11237		set_next_entity(cfs_rq, se);
11238		/* ensure bandwidth has been allocated on our new cfs_rq */
11239		account_cfs_rq_runtime(cfs_rq, 0);
11240	}
11241}
11242
11243void init_cfs_rq(struct cfs_rq *cfs_rq)
11244{
11245	cfs_rq->tasks_timeline = RB_ROOT_CACHED;
11246	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
11247#ifndef CONFIG_64BIT
11248	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
11249#endif
11250#ifdef CONFIG_SMP
11251	raw_spin_lock_init(&cfs_rq->removed.lock);
11252#endif
11253}
11254
11255#ifdef CONFIG_FAIR_GROUP_SCHED
11256static void task_set_group_fair(struct task_struct *p)
11257{
11258	struct sched_entity *se = &p->se;
11259
11260	set_task_rq(p, task_cpu(p));
11261	se->depth = se->parent ? se->parent->depth + 1 : 0;
11262}
11263
11264static void task_move_group_fair(struct task_struct *p)
11265{
11266	detach_task_cfs_rq(p);
11267	set_task_rq(p, task_cpu(p));
11268
11269#ifdef CONFIG_SMP
11270	/* Tell se's cfs_rq has been changed -- migrated */
11271	p->se.avg.last_update_time = 0;
11272#endif
11273	attach_task_cfs_rq(p);
11274}
11275
11276static void task_change_group_fair(struct task_struct *p, int type)
11277{
11278	switch (type) {
11279	case TASK_SET_GROUP:
11280		task_set_group_fair(p);
11281		break;
11282
11283	case TASK_MOVE_GROUP:
11284		task_move_group_fair(p);
11285		break;
11286	}
11287}
11288
11289void free_fair_sched_group(struct task_group *tg)
11290{
11291	int i;
11292
11293	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
11294
11295	for_each_possible_cpu(i) {
11296		if (tg->cfs_rq)
11297			kfree(tg->cfs_rq[i]);
11298		if (tg->se)
11299			kfree(tg->se[i]);
11300	}
11301
11302	kfree(tg->cfs_rq);
11303	kfree(tg->se);
11304}
11305
11306int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
11307{
11308	struct sched_entity *se;
11309	struct cfs_rq *cfs_rq;
11310	int i;
11311
11312	tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
11313	if (!tg->cfs_rq)
11314		goto err;
11315	tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
11316	if (!tg->se)
11317		goto err;
11318
11319	tg->shares = NICE_0_LOAD;
11320
11321	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
11322
11323	for_each_possible_cpu(i) {
11324		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
11325				      GFP_KERNEL, cpu_to_node(i));
11326		if (!cfs_rq)
11327			goto err;
11328
11329		se = kzalloc_node(sizeof(struct sched_entity),
11330				  GFP_KERNEL, cpu_to_node(i));
11331		if (!se)
11332			goto err_free_rq;
11333
11334		init_cfs_rq(cfs_rq);
11335		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
11336		init_entity_runnable_average(se);
11337	}
11338
11339	return 1;
11340
11341err_free_rq:
11342	kfree(cfs_rq);
11343err:
11344	return 0;
11345}
11346
11347void online_fair_sched_group(struct task_group *tg)
11348{
11349	struct sched_entity *se;
11350	struct rq_flags rf;
11351	struct rq *rq;
11352	int i;
11353
11354	for_each_possible_cpu(i) {
11355		rq = cpu_rq(i);
11356		se = tg->se[i];
11357		rq_lock_irq(rq, &rf);
11358		update_rq_clock(rq);
11359		attach_entity_cfs_rq(se);
11360		sync_throttle(tg, i);
11361		rq_unlock_irq(rq, &rf);
11362	}
11363}
11364
11365void unregister_fair_sched_group(struct task_group *tg)
11366{
11367	unsigned long flags;
11368	struct rq *rq;
11369	int cpu;
11370
11371	for_each_possible_cpu(cpu) {
11372		if (tg->se[cpu])
11373			remove_entity_load_avg(tg->se[cpu]);
11374
11375		/*
11376		 * Only empty task groups can be destroyed; so we can speculatively
11377		 * check on_list without danger of it being re-added.
11378		 */
11379		if (!tg->cfs_rq[cpu]->on_list)
11380			continue;
11381
11382		rq = cpu_rq(cpu);
11383
11384		raw_spin_rq_lock_irqsave(rq, flags);
11385		list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
11386		raw_spin_rq_unlock_irqrestore(rq, flags);
11387	}
11388}
11389
11390void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
11391			struct sched_entity *se, int cpu,
11392			struct sched_entity *parent)
11393{
11394	struct rq *rq = cpu_rq(cpu);
11395
11396	cfs_rq->tg = tg;
11397	cfs_rq->rq = rq;
11398	init_cfs_rq_runtime(cfs_rq);
11399
11400	tg->cfs_rq[cpu] = cfs_rq;
11401	tg->se[cpu] = se;
11402
11403	/* se could be NULL for root_task_group */
11404	if (!se)
11405		return;
11406
11407	if (!parent) {
11408		se->cfs_rq = &rq->cfs;
11409		se->depth = 0;
11410	} else {
11411		se->cfs_rq = parent->my_q;
11412		se->depth = parent->depth + 1;
11413	}
11414
11415	se->my_q = cfs_rq;
11416	/* guarantee group entities always have weight */
11417	update_load_set(&se->load, NICE_0_LOAD);
11418	se->parent = parent;
11419}
11420
11421static DEFINE_MUTEX(shares_mutex);
11422
11423int sched_group_set_shares(struct task_group *tg, unsigned long shares)
11424{
11425	int i;
11426
11427	/*
11428	 * We can't change the weight of the root cgroup.
11429	 */
11430	if (!tg->se[0])
11431		return -EINVAL;
11432
11433	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
11434
11435	mutex_lock(&shares_mutex);
11436	if (tg->shares == shares)
11437		goto done;
11438
11439	tg->shares = shares;
11440	for_each_possible_cpu(i) {
11441		struct rq *rq = cpu_rq(i);
11442		struct sched_entity *se = tg->se[i];
11443		struct rq_flags rf;
11444
11445		/* Propagate contribution to hierarchy */
11446		rq_lock_irqsave(rq, &rf);
11447		update_rq_clock(rq);
11448		for_each_sched_entity(se) {
11449			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
11450			update_cfs_group(se);
11451		}
11452		rq_unlock_irqrestore(rq, &rf);
11453	}
11454
11455done:
11456	mutex_unlock(&shares_mutex);
11457	return 0;
11458}
11459#else /* CONFIG_FAIR_GROUP_SCHED */
11460
11461void free_fair_sched_group(struct task_group *tg) { }
11462
11463int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
11464{
11465	return 1;
11466}
11467
11468void online_fair_sched_group(struct task_group *tg) { }
11469
11470void unregister_fair_sched_group(struct task_group *tg) { }
11471
11472#endif /* CONFIG_FAIR_GROUP_SCHED */
11473
11474
11475static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
11476{
11477	struct sched_entity *se = &task->se;
11478	unsigned int rr_interval = 0;
11479
11480	/*
11481	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
11482	 * idle runqueue:
11483	 */
11484	if (rq->cfs.load.weight)
11485		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
11486
11487	return rr_interval;
11488}
11489
11490/*
11491 * All the scheduling class methods:
11492 */
11493DEFINE_SCHED_CLASS(fair) = {
11494
11495	.enqueue_task		= enqueue_task_fair,
11496	.dequeue_task		= dequeue_task_fair,
11497	.yield_task		= yield_task_fair,
11498	.yield_to_task		= yield_to_task_fair,
11499
11500	.check_preempt_curr	= check_preempt_wakeup,
11501
11502	.pick_next_task		= __pick_next_task_fair,
11503	.put_prev_task		= put_prev_task_fair,
11504	.set_next_task          = set_next_task_fair,
11505
11506#ifdef CONFIG_SMP
11507	.balance		= balance_fair,
11508	.pick_task		= pick_task_fair,
11509	.select_task_rq		= select_task_rq_fair,
11510	.migrate_task_rq	= migrate_task_rq_fair,
11511
11512	.rq_online		= rq_online_fair,
11513	.rq_offline		= rq_offline_fair,
11514
11515	.task_dead		= task_dead_fair,
11516	.set_cpus_allowed	= set_cpus_allowed_common,
11517#endif
11518
11519	.task_tick		= task_tick_fair,
11520	.task_fork		= task_fork_fair,
11521
11522	.prio_changed		= prio_changed_fair,
11523	.switched_from		= switched_from_fair,
11524	.switched_to		= switched_to_fair,
11525
11526	.get_rr_interval	= get_rr_interval_fair,
11527
11528	.update_curr		= update_curr_fair,
11529
11530#ifdef CONFIG_FAIR_GROUP_SCHED
11531	.task_change_group	= task_change_group_fair,
11532#endif
11533
11534#ifdef CONFIG_UCLAMP_TASK
11535	.uclamp_enabled		= 1,
11536#endif
11537};
11538
11539#ifdef CONFIG_SCHED_DEBUG
11540void print_cfs_stats(struct seq_file *m, int cpu)
11541{
11542	struct cfs_rq *cfs_rq, *pos;
11543
11544	rcu_read_lock();
11545	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
11546		print_cfs_rq(m, cpu, cfs_rq);
11547	rcu_read_unlock();
11548}
11549
11550#ifdef CONFIG_NUMA_BALANCING
11551void show_numa_stats(struct task_struct *p, struct seq_file *m)
11552{
11553	int node;
11554	unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
11555	struct numa_group *ng;
11556
11557	rcu_read_lock();
11558	ng = rcu_dereference(p->numa_group);
11559	for_each_online_node(node) {
11560		if (p->numa_faults) {
11561			tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
11562			tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
11563		}
11564		if (ng) {
11565			gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)],
11566			gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
11567		}
11568		print_numa_stats(m, node, tsf, tpf, gsf, gpf);
11569	}
11570	rcu_read_unlock();
11571}
11572#endif /* CONFIG_NUMA_BALANCING */
11573#endif /* CONFIG_SCHED_DEBUG */
11574
11575__init void init_sched_fair_class(void)
11576{
11577#ifdef CONFIG_SMP
11578	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
11579
11580#ifdef CONFIG_NO_HZ_COMMON
11581	nohz.next_balance = jiffies;
11582	nohz.next_blocked = jiffies;
11583	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
11584#endif
11585#endif /* SMP */
11586
11587}
11588
11589/*
11590 * Helper functions to facilitate extracting info from tracepoints.
11591 */
11592
11593const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq)
11594{
11595#ifdef CONFIG_SMP
11596	return cfs_rq ? &cfs_rq->avg : NULL;
11597#else
11598	return NULL;
11599#endif
11600}
11601EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_avg);
11602
11603char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len)
11604{
11605	if (!cfs_rq) {
11606		if (str)
11607			strlcpy(str, "(null)", len);
11608		else
11609			return NULL;
11610	}
11611
11612	cfs_rq_tg_path(cfs_rq, str, len);
11613	return str;
11614}
11615EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_path);
11616
11617int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq)
11618{
11619	return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1;
11620}
11621EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_cpu);
11622
11623const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq)
11624{
11625#ifdef CONFIG_SMP
11626	return rq ? &rq->avg_rt : NULL;
11627#else
11628	return NULL;
11629#endif
11630}
11631EXPORT_SYMBOL_GPL(sched_trace_rq_avg_rt);
11632
11633const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq)
11634{
11635#ifdef CONFIG_SMP
11636	return rq ? &rq->avg_dl : NULL;
11637#else
11638	return NULL;
11639#endif
11640}
11641EXPORT_SYMBOL_GPL(sched_trace_rq_avg_dl);
11642
11643const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq)
11644{
11645#if defined(CONFIG_SMP) && defined(CONFIG_HAVE_SCHED_AVG_IRQ)
11646	return rq ? &rq->avg_irq : NULL;
11647#else
11648	return NULL;
11649#endif
11650}
11651EXPORT_SYMBOL_GPL(sched_trace_rq_avg_irq);
11652
11653int sched_trace_rq_cpu(struct rq *rq)
11654{
11655	return rq ? cpu_of(rq) : -1;
11656}
11657EXPORT_SYMBOL_GPL(sched_trace_rq_cpu);
11658
11659int sched_trace_rq_cpu_capacity(struct rq *rq)
11660{
11661	return rq ?
11662#ifdef CONFIG_SMP
11663		rq->cpu_capacity
11664#else
11665		SCHED_CAPACITY_SCALE
11666#endif
11667		: -1;
11668}
11669EXPORT_SYMBOL_GPL(sched_trace_rq_cpu_capacity);
11670
11671const struct cpumask *sched_trace_rd_span(struct root_domain *rd)
11672{
11673#ifdef CONFIG_SMP
11674	return rd ? rd->span : NULL;
11675#else
11676	return NULL;
11677#endif
11678}
11679EXPORT_SYMBOL_GPL(sched_trace_rd_span);
11680
11681int sched_trace_rq_nr_running(struct rq *rq)
11682{
11683        return rq ? rq->nr_running : -1;
11684}
11685EXPORT_SYMBOL_GPL(sched_trace_rq_nr_running);