v4.10.11
 
   1/*
   2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
   3 * policies)
   4 */
   5
   6#include "sched.h"
   7
   8#include <linux/slab.h>
   9#include <linux/irq_work.h>
  10
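    /*
     * Default timeslice for SCHED_RR tasks, in jiffies (RR_TIMESLICE);
     * tunable at run time through the sched_rr_timeslice_ms sysctl.
     */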
  11int sched_rr_timeslice = RR_TIMESLICE;
  12
  13static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
  14
  15struct rt_bandwidth def_rt_bandwidth;
  16
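    /*
     * Replenishment timer: fires once per rt_period, forwards itself over
     * any missed periods, and lets do_sched_rt_period_timer() refill the
     * per-CPU runtime and unthrottle runqueues.  It only re-arms while
     * there is still RT activity left to account.
     */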
  17static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
  18{
  19	struct rt_bandwidth *rt_b =
  20		container_of(timer, struct rt_bandwidth, rt_period_timer);
  21	int idle = 0;
  22	int overrun;
  23
  24	raw_spin_lock(&rt_b->rt_runtime_lock);
  25	for (;;) {
  26		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
  27		if (!overrun)
  28			break;
  29
  30		raw_spin_unlock(&rt_b->rt_runtime_lock);
  31		idle = do_sched_rt_period_timer(rt_b, overrun);
  32		raw_spin_lock(&rt_b->rt_runtime_lock);
  33	}
  34	if (idle)
  35		rt_b->rt_period_active = 0;
  36	raw_spin_unlock(&rt_b->rt_runtime_lock);
  37
  38	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
  39}
  40
  41void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
  42{
  43	rt_b->rt_period = ns_to_ktime(period);
  44	rt_b->rt_runtime = runtime;
  45
  46	raw_spin_lock_init(&rt_b->rt_runtime_lock);
  47
  48	hrtimer_init(&rt_b->rt_period_timer,
  49			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  50	rt_b->rt_period_timer.function = sched_rt_period_timer;
  51}
  52
  53static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
  54{
  55	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
  56		return;
  57
  58	raw_spin_lock(&rt_b->rt_runtime_lock);
  59	if (!rt_b->rt_period_active) {
  60		rt_b->rt_period_active = 1;
  61		/*
  62		 * SCHED_DEADLINE updates the bandwidth, as a runaway
  63		 * RT task alongside a DL task could hog a CPU. But DL does
  64		 * not reset the period. If a deadline task was running
  65		 * without an RT task running, it can cause RT tasks to
  66		 * throttle when they start up. Kick the timer right away
  67		 * to update the period.
  68		 */
  69		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
  70		hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
  71	}
  72	raw_spin_unlock(&rt_b->rt_runtime_lock);
  73}
  74
  75#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
  76static void push_irq_work_func(struct irq_work *work);
  77#endif
  78
  79void init_rt_rq(struct rt_rq *rt_rq)
  80{
  81	struct rt_prio_array *array;
  82	int i;
  83
  84	array = &rt_rq->active;
  85	for (i = 0; i < MAX_RT_PRIO; i++) {
  86		INIT_LIST_HEAD(array->queue + i);
  87		__clear_bit(i, array->bitmap);
  88	}
  89	/* delimiter for bitsearch: */
  90	__set_bit(MAX_RT_PRIO, array->bitmap);
  91
  92#if defined CONFIG_SMP
  93	rt_rq->highest_prio.curr = MAX_RT_PRIO;
  94	rt_rq->highest_prio.next = MAX_RT_PRIO;
  95	rt_rq->rt_nr_migratory = 0;
  96	rt_rq->overloaded = 0;
  97	plist_head_init(&rt_rq->pushable_tasks);
  98
  99#ifdef HAVE_RT_PUSH_IPI
 100	rt_rq->push_flags = 0;
 101	rt_rq->push_cpu = nr_cpu_ids;
 102	raw_spin_lock_init(&rt_rq->push_lock);
 103	init_irq_work(&rt_rq->push_work, push_irq_work_func);
 104#endif
 105#endif /* CONFIG_SMP */
 106	/* We start in dequeued state, because no RT tasks are queued */
 107	rt_rq->rt_queued = 0;
 108
 109	rt_rq->rt_time = 0;
 110	rt_rq->rt_throttled = 0;
 111	rt_rq->rt_runtime = 0;
 112	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
 113}
 114
 115#ifdef CONFIG_RT_GROUP_SCHED
 116static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
 117{
 118	hrtimer_cancel(&rt_b->rt_period_timer);
 119}
 120
 121#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
 122
 123static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 124{
 125#ifdef CONFIG_SCHED_DEBUG
 126	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
 127#endif
 128	return container_of(rt_se, struct task_struct, rt);
 129}
 130
 131static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 132{
 133	return rt_rq->rq;
 134}
 135
 136static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 137{
 138	return rt_se->rt_rq;
 139}
 140
 141static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 142{
 143	struct rt_rq *rt_rq = rt_se->rt_rq;
 144
 145	return rt_rq->rq;
 146}
 147
 148void free_rt_sched_group(struct task_group *tg)
 149{
 150	int i;
 151
 152	if (tg->rt_se)
 153		destroy_rt_bandwidth(&tg->rt_bandwidth);
 154
 155	for_each_possible_cpu(i) {
 156		if (tg->rt_rq)
 157			kfree(tg->rt_rq[i]);
 158		if (tg->rt_se)
 159			kfree(tg->rt_se[i]);
 160	}
 161
 162	kfree(tg->rt_rq);
 163	kfree(tg->rt_se);
 164}
 165
 166void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 167		struct sched_rt_entity *rt_se, int cpu,
 168		struct sched_rt_entity *parent)
 169{
 170	struct rq *rq = cpu_rq(cpu);
 171
 172	rt_rq->highest_prio.curr = MAX_RT_PRIO;
 173	rt_rq->rt_nr_boosted = 0;
 174	rt_rq->rq = rq;
 175	rt_rq->tg = tg;
 176
 177	tg->rt_rq[cpu] = rt_rq;
 178	tg->rt_se[cpu] = rt_se;
 179
 180	if (!rt_se)
 181		return;
 182
 183	if (!parent)
 184		rt_se->rt_rq = &rq->rt;
 185	else
 186		rt_se->rt_rq = parent->my_q;
 187
 188	rt_se->my_q = rt_rq;
 189	rt_se->parent = parent;
 190	INIT_LIST_HEAD(&rt_se->run_list);
 191}
 192
 193int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 194{
 195	struct rt_rq *rt_rq;
 196	struct sched_rt_entity *rt_se;
 197	int i;
 198
 199	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
 200	if (!tg->rt_rq)
 201		goto err;
 202	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
 203	if (!tg->rt_se)
 204		goto err;
 205
 206	init_rt_bandwidth(&tg->rt_bandwidth,
 207			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
 208
 209	for_each_possible_cpu(i) {
 210		rt_rq = kzalloc_node(sizeof(struct rt_rq),
 211				     GFP_KERNEL, cpu_to_node(i));
 212		if (!rt_rq)
 213			goto err;
 214
 215		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
 216				     GFP_KERNEL, cpu_to_node(i));
 217		if (!rt_se)
 218			goto err_free_rq;
 219
 220		init_rt_rq(rt_rq);
 221		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 222		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
 223	}
 224
 225	return 1;
 226
 227err_free_rq:
 228	kfree(rt_rq);
 229err:
 230	return 0;
 231}
 232
 233#else /* CONFIG_RT_GROUP_SCHED */
 234
 235#define rt_entity_is_task(rt_se) (1)
 236
 237static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 238{
 239	return container_of(rt_se, struct task_struct, rt);
 240}
 241
 242static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 243{
 244	return container_of(rt_rq, struct rq, rt);
 245}
 246
 247static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 248{
 249	struct task_struct *p = rt_task_of(rt_se);
 250
 251	return task_rq(p);
 252}
 253
 254static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 255{
 256	struct rq *rq = rq_of_rt_se(rt_se);
 257
 258	return &rq->rt;
 259}
 260
 261void free_rt_sched_group(struct task_group *tg) { }
 262
 263int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 264{
 265	return 1;
 266}
 267#endif /* CONFIG_RT_GROUP_SCHED */
 268
 269#ifdef CONFIG_SMP
 270
 271static void pull_rt_task(struct rq *this_rq);
 272
 273static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 274{
 275	/* Try to pull RT tasks here if we lower this rq's prio */
 276	return rq->rt.highest_prio.curr > prev->prio;
 277}
 278
 279static inline int rt_overloaded(struct rq *rq)
 280{
 281	return atomic_read(&rq->rd->rto_count);
 282}
 283
 284static inline void rt_set_overload(struct rq *rq)
 285{
 286	if (!rq->online)
 287		return;
 288
 289	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
 290	/*
 291	 * Make sure the mask is visible before we set
 292	 * the overload count. That is checked to determine
 293	 * if we should look at the mask. It would be a shame
 294	 * if we looked at the mask, but the mask was not
 295	 * updated yet.
 296	 *
 297	 * Matched by the barrier in pull_rt_task().
 298	 */
 299	smp_wmb();
 300	atomic_inc(&rq->rd->rto_count);
 301}
 302
 303static inline void rt_clear_overload(struct rq *rq)
 304{
 305	if (!rq->online)
 306		return;
 307
 308	/* the order here really doesn't matter */
 309	atomic_dec(&rq->rd->rto_count);
 310	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 311}
 312
 313static void update_rt_migration(struct rt_rq *rt_rq)
 314{
 315	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
 316		if (!rt_rq->overloaded) {
 317			rt_set_overload(rq_of_rt_rq(rt_rq));
 318			rt_rq->overloaded = 1;
 319		}
 320	} else if (rt_rq->overloaded) {
 321		rt_clear_overload(rq_of_rt_rq(rt_rq));
 322		rt_rq->overloaded = 0;
 323	}
 324}
 325
 326static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 327{
 328	struct task_struct *p;
 329
 330	if (!rt_entity_is_task(rt_se))
 331		return;
 332
 333	p = rt_task_of(rt_se);
 334	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 335
 336	rt_rq->rt_nr_total++;
 337	if (tsk_nr_cpus_allowed(p) > 1)
 338		rt_rq->rt_nr_migratory++;
 339
 340	update_rt_migration(rt_rq);
 341}
 342
 343static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 344{
 345	struct task_struct *p;
 346
 347	if (!rt_entity_is_task(rt_se))
 348		return;
 349
 350	p = rt_task_of(rt_se);
 351	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 352
 353	rt_rq->rt_nr_total--;
 354	if (tsk_nr_cpus_allowed(p) > 1)
 355		rt_rq->rt_nr_migratory--;
 356
 357	update_rt_migration(rt_rq);
 358}
 359
 360static inline int has_pushable_tasks(struct rq *rq)
 361{
 362	return !plist_head_empty(&rq->rt.pushable_tasks);
 363}
 364
 365static DEFINE_PER_CPU(struct callback_head, rt_push_head);
 366static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
 367
 368static void push_rt_tasks(struct rq *);
 369static void pull_rt_task(struct rq *);
 370
 371static inline void queue_push_tasks(struct rq *rq)
 372{
 373	if (!has_pushable_tasks(rq))
 374		return;
 375
 376	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
 377}
 378
 379static inline void queue_pull_task(struct rq *rq)
 380{
 381	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
 382}
 383
 384static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 385{
 386	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 387	plist_node_init(&p->pushable_tasks, p->prio);
 388	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
 389
 390	/* Update the highest prio pushable task */
 391	if (p->prio < rq->rt.highest_prio.next)
 392		rq->rt.highest_prio.next = p->prio;
 393}
 394
 395static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 396{
 397	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 398
 399	/* Update the new highest prio pushable task */
 400	if (has_pushable_tasks(rq)) {
 401		p = plist_first_entry(&rq->rt.pushable_tasks,
 402				      struct task_struct, pushable_tasks);
 403		rq->rt.highest_prio.next = p->prio;
 404	} else
 405		rq->rt.highest_prio.next = MAX_RT_PRIO;
 406}
 407
 408#else
 409
 410static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 411{
 412}
 413
 414static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 415{
 416}
 417
 418static inline
 419void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 420{
 421}
 422
 423static inline
 424void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 425{
 426}
 427
 428static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 429{
 430	return false;
 431}
 432
 433static inline void pull_rt_task(struct rq *this_rq)
 434{
 435}
 436
 437static inline void queue_push_tasks(struct rq *rq)
 438{
 439}
 440#endif /* CONFIG_SMP */
 441
 442static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
 443static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
 444
 445static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 446{
 447	return rt_se->on_rq;
 448}
 449
 450#ifdef CONFIG_RT_GROUP_SCHED
 451
 452static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 453{
 454	if (!rt_rq->tg)
 455		return RUNTIME_INF;
 456
 457	return rt_rq->rt_runtime;
 458}
 459
 460static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 461{
 462	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
 463}
 464
 465typedef struct task_group *rt_rq_iter_t;
 466
 467static inline struct task_group *next_task_group(struct task_group *tg)
 468{
 469	do {
 470		tg = list_entry_rcu(tg->list.next,
 471			typeof(struct task_group), list);
 472	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
 473
 474	if (&tg->list == &task_groups)
 475		tg = NULL;
 476
 477	return tg;
 478}
 479
 480#define for_each_rt_rq(rt_rq, iter, rq)					\
 481	for (iter = container_of(&task_groups, typeof(*iter), list);	\
 482		(iter = next_task_group(iter)) &&			\
 483		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
 484
 485#define for_each_sched_rt_entity(rt_se) \
 486	for (; rt_se; rt_se = rt_se->parent)
 487
 488static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 489{
 490	return rt_se->my_q;
 491}
 492
 493static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
 494static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
 495
 496static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 497{
 498	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
 499	struct rq *rq = rq_of_rt_rq(rt_rq);
 500	struct sched_rt_entity *rt_se;
 501
 502	int cpu = cpu_of(rq);
 503
 504	rt_se = rt_rq->tg->rt_se[cpu];
 505
 506	if (rt_rq->rt_nr_running) {
 507		if (!rt_se)
 508			enqueue_top_rt_rq(rt_rq);
 509		else if (!on_rt_rq(rt_se))
 510			enqueue_rt_entity(rt_se, 0);
 511
 512		if (rt_rq->highest_prio.curr < curr->prio)
 513			resched_curr(rq);
 514	}
 515}
 516
 517static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 518{
 519	struct sched_rt_entity *rt_se;
 520	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 521
 522	rt_se = rt_rq->tg->rt_se[cpu];
 523
 524	if (!rt_se)
 525		dequeue_top_rt_rq(rt_rq);
 526	else if (on_rt_rq(rt_se))
 527		dequeue_rt_entity(rt_se, 0);
 528}
 529
 530static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 531{
 532	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
 533}
 534
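    /*
     * An entity counts as "boosted" when priority inheritance has raised a
     * task above its normal priority; boosted entities keep a throttled
     * group runnable (see rt_rq_throttled()).
     */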
 535static int rt_se_boosted(struct sched_rt_entity *rt_se)
 536{
 537	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 538	struct task_struct *p;
 539
 540	if (rt_rq)
 541		return !!rt_rq->rt_nr_boosted;
 542
 543	p = rt_task_of(rt_se);
 544	return p->prio != p->normal_prio;
 545}
 546
 547#ifdef CONFIG_SMP
 548static inline const struct cpumask *sched_rt_period_mask(void)
 549{
 550	return this_rq()->rd->span;
 551}
 552#else
 553static inline const struct cpumask *sched_rt_period_mask(void)
 554{
 555	return cpu_online_mask;
 556}
 557#endif
 558
 559static inline
 560struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 561{
 562	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
 563}
 564
 565static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 566{
 567	return &rt_rq->tg->rt_bandwidth;
 568}
 569
 570#else /* !CONFIG_RT_GROUP_SCHED */
 571
 572static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 573{
 574	return rt_rq->rt_runtime;
 575}
 576
 577static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 578{
 579	return ktime_to_ns(def_rt_bandwidth.rt_period);
 580}
 581
 582typedef struct rt_rq *rt_rq_iter_t;
 583
 584#define for_each_rt_rq(rt_rq, iter, rq) \
 585	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
 586
 587#define for_each_sched_rt_entity(rt_se) \
 588	for (; rt_se; rt_se = NULL)
 589
 590static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 591{
 592	return NULL;
 593}
 594
 595static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 596{
 597	struct rq *rq = rq_of_rt_rq(rt_rq);
 598
 599	if (!rt_rq->rt_nr_running)
 600		return;
 601
 602	enqueue_top_rt_rq(rt_rq);
 603	resched_curr(rq);
 604}
 605
 606static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 607{
 608	dequeue_top_rt_rq(rt_rq);
 609}
 610
 611static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 612{
 613	return rt_rq->rt_throttled;
 614}
 615
 616static inline const struct cpumask *sched_rt_period_mask(void)
 617{
 618	return cpu_online_mask;
 619}
 620
 621static inline
 622struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 623{
 624	return &cpu_rq(cpu)->rt;
 625}
 626
 627static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 628{
 629	return &def_rt_bandwidth;
 630}
 631
 632#endif /* CONFIG_RT_GROUP_SCHED */
 633
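    /*
     * True while the replenishment timer is active or this rt_rq still has
     * runtime left in the current period; callers (e.g. the deadline class)
     * use this to decide whether RT bandwidth is currently being accounted.
     */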
 634bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
 635{
 636	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 637
 638	return (hrtimer_active(&rt_b->rt_period_timer) ||
 639		rt_rq->rt_time < rt_b->rt_runtime);
 640}
 641
 642#ifdef CONFIG_SMP
 643/*
 644 * We ran out of runtime; see if we can borrow some from our neighbours.
 645 */
 646static void do_balance_runtime(struct rt_rq *rt_rq)
 647{
 648	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 649	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
 650	int i, weight;
 651	u64 rt_period;
 652
 653	weight = cpumask_weight(rd->span);
 654
 655	raw_spin_lock(&rt_b->rt_runtime_lock);
 656	rt_period = ktime_to_ns(rt_b->rt_period);
 657	for_each_cpu(i, rd->span) {
 658		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 659		s64 diff;
 660
 661		if (iter == rt_rq)
 662			continue;
 663
 664		raw_spin_lock(&iter->rt_runtime_lock);
 665		/*
 666		 * Either all rqs have inf runtime and there's nothing to steal
 667		 * or __disable_runtime() below sets a specific rq to inf to
 668		 * indicate it's been disabled and disallow stealing.
 669		 */
 670		if (iter->rt_runtime == RUNTIME_INF)
 671			goto next;
 672
 673		/*
 674		 * From runqueues with spare time, take 1/n part of their
 675		 * spare time, but no more than our period.
 676		 */
 677		diff = iter->rt_runtime - iter->rt_time;
 678		if (diff > 0) {
 679			diff = div_u64((u64)diff, weight);
 680			if (rt_rq->rt_runtime + diff > rt_period)
 681				diff = rt_period - rt_rq->rt_runtime;
 682			iter->rt_runtime -= diff;
 683			rt_rq->rt_runtime += diff;
 684			if (rt_rq->rt_runtime == rt_period) {
 685				raw_spin_unlock(&iter->rt_runtime_lock);
 686				break;
 687			}
 688		}
 689next:
 690		raw_spin_unlock(&iter->rt_runtime_lock);
 691	}
 692	raw_spin_unlock(&rt_b->rt_runtime_lock);
 693}
 694
 695/*
 696 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 697 */
 698static void __disable_runtime(struct rq *rq)
 699{
 700	struct root_domain *rd = rq->rd;
 701	rt_rq_iter_t iter;
 702	struct rt_rq *rt_rq;
 703
 704	if (unlikely(!scheduler_running))
 705		return;
 706
 707	for_each_rt_rq(rt_rq, iter, rq) {
 708		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 709		s64 want;
 710		int i;
 711
 712		raw_spin_lock(&rt_b->rt_runtime_lock);
 713		raw_spin_lock(&rt_rq->rt_runtime_lock);
 714		/*
 715		 * Either we're all inf and nobody needs to borrow, or we're
 716		 * already disabled and thus have nothing to do, or we have
 717		 * exactly the right amount of runtime to take out.
 718		 */
 719		if (rt_rq->rt_runtime == RUNTIME_INF ||
 720				rt_rq->rt_runtime == rt_b->rt_runtime)
 721			goto balanced;
 722		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 723
 724		/*
 725		 * Calculate the difference between what we started out with
 726		 * and what we currently have; that's the amount of runtime
 727		 * we lent out and now have to reclaim.
 728		 */
 729		want = rt_b->rt_runtime - rt_rq->rt_runtime;
 730
 731		/*
 732		 * Greedy reclaim, take back as much as we can.
 733		 */
 734		for_each_cpu(i, rd->span) {
 735			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 736			s64 diff;
 737
 738			/*
 739			 * Can't reclaim from ourselves or disabled runqueues.
 740			 */
 741			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 742				continue;
 743
 744			raw_spin_lock(&iter->rt_runtime_lock);
 745			if (want > 0) {
 746				diff = min_t(s64, iter->rt_runtime, want);
 747				iter->rt_runtime -= diff;
 748				want -= diff;
 749			} else {
 750				iter->rt_runtime -= want;
 751				want -= want;
 752			}
 753			raw_spin_unlock(&iter->rt_runtime_lock);
 754
 755			if (!want)
 756				break;
 757		}
 758
 759		raw_spin_lock(&rt_rq->rt_runtime_lock);
 760		/*
 761		 * We cannot be left wanting - that would mean some runtime
 762		 * leaked out of the system.
 763		 */
 764		BUG_ON(want);
 765balanced:
 766		/*
 767		 * Disable all the borrow logic by pretending we have inf
 768		 * runtime - in which case borrowing doesn't make sense.
 769		 */
 770		rt_rq->rt_runtime = RUNTIME_INF;
 771		rt_rq->rt_throttled = 0;
 772		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 773		raw_spin_unlock(&rt_b->rt_runtime_lock);
 774
 775		/* Make rt_rq available for pick_next_task() */
 776		sched_rt_rq_enqueue(rt_rq);
 777	}
 778}
 779
 780static void __enable_runtime(struct rq *rq)
 781{
 782	rt_rq_iter_t iter;
 783	struct rt_rq *rt_rq;
 784
 785	if (unlikely(!scheduler_running))
 786		return;
 787
 788	/*
 789	 * Reset each runqueue's bandwidth settings
 790	 */
 791	for_each_rt_rq(rt_rq, iter, rq) {
 792		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 793
 794		raw_spin_lock(&rt_b->rt_runtime_lock);
 795		raw_spin_lock(&rt_rq->rt_runtime_lock);
 796		rt_rq->rt_runtime = rt_b->rt_runtime;
 797		rt_rq->rt_time = 0;
 798		rt_rq->rt_throttled = 0;
 799		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 800		raw_spin_unlock(&rt_b->rt_runtime_lock);
 801	}
 802}
 803
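    /*
     * Called with rt_rq->rt_runtime_lock held.  When this rt_rq has consumed
     * more than its local runtime, try to borrow from the other CPUs in the
     * domain; a no-op unless the RT_RUNTIME_SHARE feature is enabled.
     */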
 804static void balance_runtime(struct rt_rq *rt_rq)
 805{
 806	if (!sched_feat(RT_RUNTIME_SHARE))
 807		return;
 808
 809	if (rt_rq->rt_time > rt_rq->rt_runtime) {
 810		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 811		do_balance_runtime(rt_rq);
 812		raw_spin_lock(&rt_rq->rt_runtime_lock);
 813	}
 814}
 815#else /* !CONFIG_SMP */
 816static inline void balance_runtime(struct rt_rq *rt_rq) {}
 817#endif /* CONFIG_SMP */
 818
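    /*
     * Timer work proper: for every CPU in the bandwidth's span, subtract the
     * replenished runtime from rt_time, unthrottle runqueues that fit their
     * budget again and re-enqueue them.  Returns 1 when everything was idle
     * (or throttling is disabled) so the timer can stop re-arming.
     */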
 819static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 820{
 821	int i, idle = 1, throttled = 0;
 822	const struct cpumask *span;
 823
 824	span = sched_rt_period_mask();
 825#ifdef CONFIG_RT_GROUP_SCHED
 826	/*
 827	 * FIXME: isolated CPUs should really leave the root task group,
 828	 * whether they are isolcpus or were isolated via cpusets, lest
 829	 * the timer run on a CPU which does not service all runqueues,
 830	 * potentially leaving other CPUs indefinitely throttled.  If
 831	 * isolation is really required, the user will turn the throttle
 832	 * off to kill the perturbations it causes anyway.  Meanwhile,
 833	 * this maintains functionality for boot and/or troubleshooting.
 834	 */
 835	if (rt_b == &root_task_group.rt_bandwidth)
 836		span = cpu_online_mask;
 837#endif
 838	for_each_cpu(i, span) {
 839		int enqueue = 0;
 840		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 841		struct rq *rq = rq_of_rt_rq(rt_rq);
 842
 843		raw_spin_lock(&rq->lock);
 844		if (rt_rq->rt_time) {
 845			u64 runtime;
 846
 847			raw_spin_lock(&rt_rq->rt_runtime_lock);
 848			if (rt_rq->rt_throttled)
 849				balance_runtime(rt_rq);
 850			runtime = rt_rq->rt_runtime;
 851			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
 852			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
 853				rt_rq->rt_throttled = 0;
 854				enqueue = 1;
 855
 856				/*
 857				 * When we're idle and a woken (rt) task is
 858				 * throttled, check_preempt_curr() will set
 859				 * skip_update and the time between the wakeup
 860				 * and this unthrottle will get accounted as
 861				 * 'runtime'.
 862				 */
 863				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
 864					rq_clock_skip_update(rq, false);
 865			}
 866			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 867				idle = 0;
 868			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 869		} else if (rt_rq->rt_nr_running) {
 870			idle = 0;
 871			if (!rt_rq_throttled(rt_rq))
 872				enqueue = 1;
 873		}
 874		if (rt_rq->rt_throttled)
 875			throttled = 1;
 876
 877		if (enqueue)
 878			sched_rt_rq_enqueue(rt_rq);
 879		raw_spin_unlock(&rq->lock);
 880	}
 881
 882	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
 883		return 1;
 884
 885	return idle;
 886}
 887
 888static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 889{
 890#ifdef CONFIG_RT_GROUP_SCHED
 891	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 892
 893	if (rt_rq)
 894		return rt_rq->highest_prio.curr;
 895#endif
 896
 897	return rt_task_of(rt_se)->prio;
 898}
 899
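    /*
     * Called from update_curr_rt() with rt_rq->rt_runtime_lock held: returns
     * 1 after throttling and dequeueing this rt_rq once it has used up its
     * runtime for the current period, 0 if it may keep running.
     */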
 900static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 901{
 902	u64 runtime = sched_rt_runtime(rt_rq);
 903
 904	if (rt_rq->rt_throttled)
 905		return rt_rq_throttled(rt_rq);
 906
 907	if (runtime >= sched_rt_period(rt_rq))
 908		return 0;
 909
 910	balance_runtime(rt_rq);
 911	runtime = sched_rt_runtime(rt_rq);
 912	if (runtime == RUNTIME_INF)
 913		return 0;
 914
 915	if (rt_rq->rt_time > runtime) {
 916		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 917
 918		/*
 919		 * Don't actually throttle groups that have no runtime assigned
 920		 * but accrue some time due to boosting.
 921		 */
 922		if (likely(rt_b->rt_runtime)) {
 923			rt_rq->rt_throttled = 1;
 924			printk_deferred_once("sched: RT throttling activated\n");
 925		} else {
 926			/*
 927			 * In case we did anyway, make it go away,
 928			 * replenishment is a joke, since it will replenish us
 929			 * with exactly 0 ns.
 930			 */
 931			rt_rq->rt_time = 0;
 932		}
 933
 934		if (rt_rq_throttled(rt_rq)) {
 935			sched_rt_rq_dequeue(rt_rq);
 936			return 1;
 937		}
 938	}
 939
 940	return 0;
 941}
 942
 943/*
 944 * Update the current task's runtime statistics. Skip current tasks that
 945 * are not in our scheduling class.
 946 */
 947static void update_curr_rt(struct rq *rq)
 948{
 949	struct task_struct *curr = rq->curr;
 950	struct sched_rt_entity *rt_se = &curr->rt;
 951	u64 delta_exec;
 952
 953	if (curr->sched_class != &rt_sched_class)
 954		return;
 955
 956	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
 957	if (unlikely((s64)delta_exec <= 0))
 958		return;
 959
 960	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
 961	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
 962
 963	schedstat_set(curr->se.statistics.exec_max,
 964		      max(curr->se.statistics.exec_max, delta_exec));
 965
 966	curr->se.sum_exec_runtime += delta_exec;
 967	account_group_exec_runtime(curr, delta_exec);
 968
 969	curr->se.exec_start = rq_clock_task(rq);
 970	cpuacct_charge(curr, delta_exec);
 971
 972	sched_rt_avg_update(rq, delta_exec);
 973
 974	if (!rt_bandwidth_enabled())
 975		return;
 976
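    	/*
    	 * Charge the elapsed time to every rt_rq up the group hierarchy and
    	 * reschedule as soon as one of them exceeds its runtime.
    	 */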
 977	for_each_sched_rt_entity(rt_se) {
 978		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 979
 980		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
 981			raw_spin_lock(&rt_rq->rt_runtime_lock);
 982			rt_rq->rt_time += delta_exec;
 983			if (sched_rt_runtime_exceeded(rt_rq))
 984				resched_curr(rq);
 985			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 986		}
 987	}
 988}
 989
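    /*
     * {dequeue,enqueue}_top_rt_rq() remove or add the root rt_rq's task count
     * from/to rq->nr_running as the RT class as a whole becomes throttled or
     * runnable again.
     */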
 990static void
 991dequeue_top_rt_rq(struct rt_rq *rt_rq)
 992{
 993	struct rq *rq = rq_of_rt_rq(rt_rq);
 994
 995	BUG_ON(&rq->rt != rt_rq);
 996
 997	if (!rt_rq->rt_queued)
 998		return;
 999
1000	BUG_ON(!rq->nr_running);
1001
1002	sub_nr_running(rq, rt_rq->rt_nr_running);
1003	rt_rq->rt_queued = 0;
1004}
1005
1006static void
1007enqueue_top_rt_rq(struct rt_rq *rt_rq)
1008{
1009	struct rq *rq = rq_of_rt_rq(rt_rq);
1010
1011	BUG_ON(&rq->rt != rt_rq);
1012
1013	if (rt_rq->rt_queued)
1014		return;
1015	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1016		return;
1017
1018	add_nr_running(rq, rt_rq->rt_nr_running);
1019	rt_rq->rt_queued = 1;
1020}
1021
1022#if defined CONFIG_SMP
1023
1024static void
1025inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1026{
1027	struct rq *rq = rq_of_rt_rq(rt_rq);
1028
1029#ifdef CONFIG_RT_GROUP_SCHED
1030	/*
1031	 * Change rq's cpupri only if rt_rq is the top queue.
1032	 */
1033	if (&rq->rt != rt_rq)
1034		return;
1035#endif
1036	if (rq->online && prio < prev_prio)
1037		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1038}
1039
1040static void
1041dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1042{
1043	struct rq *rq = rq_of_rt_rq(rt_rq);
1044
1045#ifdef CONFIG_RT_GROUP_SCHED
1046	/*
1047	 * Change rq's cpupri only if rt_rq is the top queue.
1048	 */
1049	if (&rq->rt != rt_rq)
1050		return;
1051#endif
1052	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1053		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1054}
1055
1056#else /* CONFIG_SMP */
1057
1058static inline
1059void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1060static inline
1061void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1062
1063#endif /* CONFIG_SMP */
1064
1065#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1066static void
1067inc_rt_prio(struct rt_rq *rt_rq, int prio)
1068{
1069	int prev_prio = rt_rq->highest_prio.curr;
1070
1071	if (prio < prev_prio)
1072		rt_rq->highest_prio.curr = prio;
1073
1074	inc_rt_prio_smp(rt_rq, prio, prev_prio);
1075}
1076
1077static void
1078dec_rt_prio(struct rt_rq *rt_rq, int prio)
1079{
1080	int prev_prio = rt_rq->highest_prio.curr;
1081
1082	if (rt_rq->rt_nr_running) {
1083
1084		WARN_ON(prio < prev_prio);
1085
1086		/*
1087		 * This may have been our highest task, and therefore
1088		 * we may have some recomputation to do
1089		 */
1090		if (prio == prev_prio) {
1091			struct rt_prio_array *array = &rt_rq->active;
1092
1093			rt_rq->highest_prio.curr =
1094				sched_find_first_bit(array->bitmap);
1095		}
1096
1097	} else
1098		rt_rq->highest_prio.curr = MAX_RT_PRIO;
1099
1100	dec_rt_prio_smp(rt_rq, prio, prev_prio);
1101}
1102
1103#else
1104
1105static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1106static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1107
1108#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1109
1110#ifdef CONFIG_RT_GROUP_SCHED
1111
1112static void
1113inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1114{
1115	if (rt_se_boosted(rt_se))
1116		rt_rq->rt_nr_boosted++;
1117
1118	if (rt_rq->tg)
1119		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1120}
1121
1122static void
1123dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1124{
1125	if (rt_se_boosted(rt_se))
1126		rt_rq->rt_nr_boosted--;
1127
1128	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1129}
1130
1131#else /* CONFIG_RT_GROUP_SCHED */
1132
1133static void
1134inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1135{
1136	start_rt_bandwidth(&def_rt_bandwidth);
1137}
1138
1139static inline
1140void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1141
1142#endif /* CONFIG_RT_GROUP_SCHED */
1143
1144static inline
1145unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1146{
1147	struct rt_rq *group_rq = group_rt_rq(rt_se);
1148
1149	if (group_rq)
1150		return group_rq->rt_nr_running;
1151	else
1152		return 1;
1153}
1154
1155static inline
1156unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1157{
1158	struct rt_rq *group_rq = group_rt_rq(rt_se);
1159	struct task_struct *tsk;
1160
1161	if (group_rq)
1162		return group_rq->rr_nr_running;
1163
1164	tsk = rt_task_of(rt_se);
1165
1166	return (tsk->policy == SCHED_RR) ? 1 : 0;
1167}
1168
1169static inline
1170void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1171{
1172	int prio = rt_se_prio(rt_se);
1173
1174	WARN_ON(!rt_prio(prio));
1175	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1176	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1177
1178	inc_rt_prio(rt_rq, prio);
1179	inc_rt_migration(rt_se, rt_rq);
1180	inc_rt_group(rt_se, rt_rq);
1181}
1182
1183static inline
1184void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1185{
1186	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1187	WARN_ON(!rt_rq->rt_nr_running);
1188	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1189	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1190
1191	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1192	dec_rt_migration(rt_se, rt_rq);
1193	dec_rt_group(rt_se, rt_rq);
1194}
1195
1196/*
1197 * Change rt_se->run_list location unless SAVE && !MOVE
1198 *
1199 * assumes ENQUEUE/DEQUEUE flags match
1200 */
1201static inline bool move_entity(unsigned int flags)
1202{
1203	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1204		return false;
1205
1206	return true;
1207}
1208
1209static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1210{
1211	list_del_init(&rt_se->run_list);
1212
1213	if (list_empty(array->queue + rt_se_prio(rt_se)))
1214		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1215
1216	rt_se->on_list = 0;
1217}
1218
1219static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1220{
1221	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1222	struct rt_prio_array *array = &rt_rq->active;
1223	struct rt_rq *group_rq = group_rt_rq(rt_se);
1224	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1225
1226	/*
1227	 * Don't enqueue the group if it's throttled, or when it is empty.
1228	 * The latter is a consequence of the former when a child group
1229	 * gets throttled and the current group doesn't have any other
1230	 * active members.
1231	 */
1232	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1233		if (rt_se->on_list)
1234			__delist_rt_entity(rt_se, array);
1235		return;
1236	}
1237
1238	if (move_entity(flags)) {
1239		WARN_ON_ONCE(rt_se->on_list);
1240		if (flags & ENQUEUE_HEAD)
1241			list_add(&rt_se->run_list, queue);
1242		else
1243			list_add_tail(&rt_se->run_list, queue);
1244
1245		__set_bit(rt_se_prio(rt_se), array->bitmap);
1246		rt_se->on_list = 1;
1247	}
1248	rt_se->on_rq = 1;
1249
1250	inc_rt_tasks(rt_se, rt_rq);
1251}
1252
1253static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1254{
1255	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1256	struct rt_prio_array *array = &rt_rq->active;
1257
1258	if (move_entity(flags)) {
1259		WARN_ON_ONCE(!rt_se->on_list);
1260		__delist_rt_entity(rt_se, array);
1261	}
1262	rt_se->on_rq = 0;
1263
1264	dec_rt_tasks(rt_se, rt_rq);
1265}
1266
1267/*
1268 * Because the prio of an upper entry depends on the lower
1269 * entries, we must remove entries top-down.
1270 */
1271static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1272{
1273	struct sched_rt_entity *back = NULL;
1274
1275	for_each_sched_rt_entity(rt_se) {
1276		rt_se->back = back;
1277		back = rt_se;
1278	}
1279
1280	dequeue_top_rt_rq(rt_rq_of_se(back));
1281
1282	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1283		if (on_rt_rq(rt_se))
1284			__dequeue_rt_entity(rt_se, flags);
1285	}
1286}
1287
1288static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1289{
1290	struct rq *rq = rq_of_rt_se(rt_se);
1291
1292	dequeue_rt_stack(rt_se, flags);
1293	for_each_sched_rt_entity(rt_se)
1294		__enqueue_rt_entity(rt_se, flags);
1295	enqueue_top_rt_rq(&rq->rt);
1296}
1297
1298static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1299{
1300	struct rq *rq = rq_of_rt_se(rt_se);
1301
1302	dequeue_rt_stack(rt_se, flags);
1303
1304	for_each_sched_rt_entity(rt_se) {
1305		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1306
1307		if (rt_rq && rt_rq->rt_nr_running)
1308			__enqueue_rt_entity(rt_se, flags);
1309	}
1310	enqueue_top_rt_rq(&rq->rt);
1311}
1312
1313/*
1314 * Adding/removing a task to/from a priority array:
1315 */
1316static void
1317enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1318{
1319	struct sched_rt_entity *rt_se = &p->rt;
1320
1321	if (flags & ENQUEUE_WAKEUP)
1322		rt_se->timeout = 0;
1323
1324	enqueue_rt_entity(rt_se, flags);
1325
1326	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
1327		enqueue_pushable_task(rq, p);
1328}
1329
1330static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1331{
1332	struct sched_rt_entity *rt_se = &p->rt;
1333
1334	update_curr_rt(rq);
1335	dequeue_rt_entity(rt_se, flags);
1336
1337	dequeue_pushable_task(rq, p);
1338}
1339
1340/*
1341 * Put the task at the head or the end of the run list without the overhead
1342 * of a dequeue followed by an enqueue.
1343 */
1344static void
1345requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1346{
1347	if (on_rt_rq(rt_se)) {
1348		struct rt_prio_array *array = &rt_rq->active;
1349		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1350
1351		if (head)
1352			list_move(&rt_se->run_list, queue);
1353		else
1354			list_move_tail(&rt_se->run_list, queue);
1355	}
1356}
1357
1358static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1359{
1360	struct sched_rt_entity *rt_se = &p->rt;
1361	struct rt_rq *rt_rq;
1362
1363	for_each_sched_rt_entity(rt_se) {
1364		rt_rq = rt_rq_of_se(rt_se);
1365		requeue_rt_entity(rt_rq, rt_se, head);
1366	}
1367}
1368
1369static void yield_task_rt(struct rq *rq)
1370{
1371	requeue_task_rt(rq, rq->curr, 0);
1372}
1373
1374#ifdef CONFIG_SMP
1375static int find_lowest_rq(struct task_struct *task);
1376
1377static int
1378select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1379{
1380	struct task_struct *curr;
1381	struct rq *rq;
1382
1383	/* For anything but wake ups, just return the task_cpu */
1384	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1385		goto out;
1386
1387	rq = cpu_rq(cpu);
1388
1389	rcu_read_lock();
1390	curr = READ_ONCE(rq->curr); /* unlocked access */
1391
1392	/*
1393	 * If the current task on @p's runqueue is an RT task, then
1394	 * try to see if we can wake this RT task up on another
1395	 * runqueue. Otherwise simply start this RT task
1396	 * on its current runqueue.
1397	 *
1398	 * We want to avoid overloading runqueues. If the woken
1399	 * task is of higher priority, then it will stay on this CPU
1400	 * and the lower prio task should be moved to another CPU.
1401	 * Even though this will probably make the lower prio task
1402	 * lose its cache, we do not want to bounce a higher prio task
1403	 * around just because it gave up its CPU, perhaps for a
1404	 * lock?
1405	 *
1406	 * For equal prio tasks, we just let the scheduler sort it out.
1407	 *
1408	 * Otherwise, just let it ride on the affined RQ and the
1409	 * post-schedule router will push the preempted task away
1410	 *
1411	 * This test is optimistic, if we get it wrong the load-balancer
1412	 * will have to sort it out.
1413	 */
1414	if (curr && unlikely(rt_task(curr)) &&
1415	    (tsk_nr_cpus_allowed(curr) < 2 ||
1416	     curr->prio <= p->prio)) {
1417		int target = find_lowest_rq(p);
1418
1419		/*
1420		 * Don't bother moving it if the destination CPU is
1421		 * not running a lower priority task.
1422		 */
1423		if (target != -1 &&
1424		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
1425			cpu = target;
1426	}
1427	rcu_read_unlock();
1428
1429out:
1430	return cpu;
1431}
1432
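    /*
     * A task woke up with the same priority as the one currently running.
     * If current can be moved elsewhere (per cpupri) and 'p' cannot, requeue
     * 'p' at the head and reschedule so the push logic moves current away
     * and 'p' can run here instead of waiting.
     */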
1433static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1434{
1435	/*
1436	 * Current can't be migrated, useless to reschedule,
1437	 * let's hope p can move out.
1438	 */
1439	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
1440	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1441		return;
1442
1443	/*
1444	 * p is migratable, so let's not schedule it and
1445	 * see if it is pushed or pulled somewhere else.
1446	 */
1447	if (tsk_nr_cpus_allowed(p) != 1
1448	    && cpupri_find(&rq->rd->cpupri, p, NULL))
1449		return;
1450
1451	/*
1452	 * There appear to be other CPUs that can accept
1453	 * current and none to run 'p', so let's reschedule
1454	 * to try and push current away:
1455	 */
1456	requeue_task_rt(rq, p, 1);
1457	resched_curr(rq);
1458}
1459
1460#endif /* CONFIG_SMP */
1461
1462/*
1463 * Preempt the current task with a newly woken task if needed:
1464 */
1465static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1466{
1467	if (p->prio < rq->curr->prio) {
1468		resched_curr(rq);
1469		return;
1470	}
1471
1472#ifdef CONFIG_SMP
1473	/*
1474	 * If:
1475	 *
1476	 * - the newly woken task is of equal priority to the current task
1477	 * - the newly woken task is non-migratable while current is migratable
1478	 * - current will be preempted on the next reschedule
1479	 *
1480	 * we should check to see if current can readily move to a different
1481	 * cpu.  If so, we will reschedule to allow the push logic to try
1482	 * to move current somewhere else, making room for our non-migratable
1483	 * task.
1484	 */
1485	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1486		check_preempt_equal_prio(rq, p);
1487#endif
1488}
1489
1490static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1491						   struct rt_rq *rt_rq)
1492{
1493	struct rt_prio_array *array = &rt_rq->active;
1494	struct sched_rt_entity *next = NULL;
1495	struct list_head *queue;
1496	int idx;
1497
1498	idx = sched_find_first_bit(array->bitmap);
1499	BUG_ON(idx >= MAX_RT_PRIO);
1500
1501	queue = array->queue + idx;
1502	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1503
1504	return next;
1505}
1506
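    /*
     * Walk down the group hierarchy, at each level taking the first entity
     * on the highest-priority non-empty queue, until we reach a task.
     */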
1507static struct task_struct *_pick_next_task_rt(struct rq *rq)
1508{
1509	struct sched_rt_entity *rt_se;
1510	struct task_struct *p;
1511	struct rt_rq *rt_rq  = &rq->rt;
1512
1513	do {
1514		rt_se = pick_next_rt_entity(rq, rt_rq);
1515		BUG_ON(!rt_se);
1516		rt_rq = group_rt_rq(rt_se);
1517	} while (rt_rq);
1518
1519	p = rt_task_of(rt_se);
1520	p->se.exec_start = rq_clock_task(rq);
1521
1522	return p;
1523}
1524
1525static struct task_struct *
1526pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
1527{
1528	struct task_struct *p;
1529	struct rt_rq *rt_rq = &rq->rt;
1530
1531	if (need_pull_rt_task(rq, prev)) {
1532		/*
1533		 * This is OK, because current is on_cpu, which avoids it being
1534		 * picked for load-balance; preemption/IRQs are still
1535		 * disabled, avoiding further scheduler activity on it, and we're
1536		 * being very careful to re-start the picking loop.
1537		 */
1538		lockdep_unpin_lock(&rq->lock, cookie);
1539		pull_rt_task(rq);
1540		lockdep_repin_lock(&rq->lock, cookie);
1541		/*
1542		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1543		 * means a dl or stop task can slip in, in which case we need
1544		 * to re-start task selection.
1545		 */
1546		if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1547			     rq->dl.dl_nr_running))
1548			return RETRY_TASK;
1549	}
1550
1551	/*
1552	 * We may dequeue prev's rt_rq in put_prev_task().
1553	 * So, we update time before rt_nr_running check.
1554	 */
1555	if (prev->sched_class == &rt_sched_class)
1556		update_curr_rt(rq);
1557
1558	if (!rt_rq->rt_queued)
1559		return NULL;
1560
1561	put_prev_task(rq, prev);
1562
1563	p = _pick_next_task_rt(rq);
1564
1565	/* The running task is never eligible for pushing */
1566	dequeue_pushable_task(rq, p);
1567
1568	queue_push_tasks(rq);
1569
1570	return p;
1571}
1572
1573static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1574{
1575	update_curr_rt(rq);
1576
1577	/*
1578	 * The previous task needs to be made eligible for pushing
1579	 * if it is still active
1580	 */
1581	if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1)
1582		enqueue_pushable_task(rq, p);
1583}
1584
1585#ifdef CONFIG_SMP
1586
1587/* Only try algorithms three times */
1588#define RT_MAX_TRIES 3
1589
1590static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1591{
1592	if (!task_running(rq, p) &&
1593	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1594		return 1;
1595	return 0;
1596}
1597
1598/*
1599 * Return the highest-priority pushable task on this rq that can run
1600 * on the given cpu, or NULL if there is none.
1601 */
1602static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1603{
1604	struct plist_head *head = &rq->rt.pushable_tasks;
1605	struct task_struct *p;
1606
1607	if (!has_pushable_tasks(rq))
1608		return NULL;
1609
1610	plist_for_each_entry(p, head, pushable_tasks) {
1611		if (pick_rt_task(rq, p, cpu))
1612			return p;
1613	}
1614
1615	return NULL;
1616}
1617
1618static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1619
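    /*
     * Pick a CPU that 'task' could be pushed to: cpupri provides the mask of
     * CPUs running lower-priority work; prefer the task's previous CPU for
     * cache locality, then the topologically closest candidate, and finally
     * fall back to any CPU in the mask.  Returns -1 if there is none.
     */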
1620static int find_lowest_rq(struct task_struct *task)
1621{
1622	struct sched_domain *sd;
1623	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1624	int this_cpu = smp_processor_id();
1625	int cpu      = task_cpu(task);
1626
1627	/* Make sure the mask is initialized first */
1628	if (unlikely(!lowest_mask))
1629		return -1;
1630
1631	if (tsk_nr_cpus_allowed(task) == 1)
1632		return -1; /* No other targets possible */
1633
1634	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1635		return -1; /* No targets found */
1636
1637	/*
1638	 * At this point we have built a mask of cpus representing the
1639	 * lowest priority tasks in the system.  Now we want to elect
1640	 * the best one based on our affinity and topology.
1641	 *
1642	 * We prioritize the last cpu that the task executed on since
1643	 * it is most likely cache-hot in that location.
1644	 */
1645	if (cpumask_test_cpu(cpu, lowest_mask))
1646		return cpu;
1647
1648	/*
1649	 * Otherwise, we consult the sched_domains span maps to figure
1650	 * out which cpu is logically closest to our hot cache data.
1651	 */
1652	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1653		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1654
1655	rcu_read_lock();
1656	for_each_domain(cpu, sd) {
1657		if (sd->flags & SD_WAKE_AFFINE) {
1658			int best_cpu;
1659
1660			/*
1661			 * "this_cpu" is cheaper to preempt than a
1662			 * remote processor.
1663			 */
1664			if (this_cpu != -1 &&
1665			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1666				rcu_read_unlock();
1667				return this_cpu;
1668			}
1669
1670			best_cpu = cpumask_first_and(lowest_mask,
1671						     sched_domain_span(sd));
1672			if (best_cpu < nr_cpu_ids) {
1673				rcu_read_unlock();
1674				return best_cpu;
1675			}
1676		}
1677	}
1678	rcu_read_unlock();
1679
1680	/*
1681	 * And finally, if there were no matches within the domains
1682	 * just give the caller *something* to work with from the compatible
1683	 * locations.
1684	 */
1685	if (this_cpu != -1)
1686		return this_cpu;
1687
1688	cpu = cpumask_any(lowest_mask);
1689	if (cpu < nr_cpu_ids)
1690		return cpu;
1691	return -1;
1692}
1693
1694/* Will lock the rq it finds */
1695static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1696{
1697	struct rq *lowest_rq = NULL;
1698	int tries;
1699	int cpu;
1700
1701	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1702		cpu = find_lowest_rq(task);
1703
1704		if ((cpu == -1) || (cpu == rq->cpu))
1705			break;
1706
1707		lowest_rq = cpu_rq(cpu);
1708
1709		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1710			/*
1711			 * Target rq has tasks of equal or higher priority,
1712			 * retrying does not release any lock and is unlikely
1713			 * to yield a different result.
1714			 */
1715			lowest_rq = NULL;
1716			break;
1717		}
1718
1719		/* if the prio of this runqueue changed, try again */
1720		if (double_lock_balance(rq, lowest_rq)) {
1721			/*
1722			 * We had to unlock the run queue. In
1723			 * the meantime, the task could have
1724			 * migrated already or had its affinity changed.
1725			 * Also make sure that it wasn't scheduled on its rq.
1726			 */
1727			if (unlikely(task_rq(task) != rq ||
1728				     !cpumask_test_cpu(lowest_rq->cpu,
1729						       tsk_cpus_allowed(task)) ||
1730				     task_running(rq, task) ||
1731				     !rt_task(task) ||
1732				     !task_on_rq_queued(task))) {
1733
1734				double_unlock_balance(rq, lowest_rq);
1735				lowest_rq = NULL;
1736				break;
1737			}
1738		}
1739
1740		/* If this rq is still suitable use it. */
1741		if (lowest_rq->rt.highest_prio.curr > task->prio)
1742			break;
1743
1744		/* try again */
1745		double_unlock_balance(rq, lowest_rq);
1746		lowest_rq = NULL;
1747	}
1748
1749	return lowest_rq;
1750}
1751
1752static struct task_struct *pick_next_pushable_task(struct rq *rq)
1753{
1754	struct task_struct *p;
1755
1756	if (!has_pushable_tasks(rq))
1757		return NULL;
1758
1759	p = plist_first_entry(&rq->rt.pushable_tasks,
1760			      struct task_struct, pushable_tasks);
1761
1762	BUG_ON(rq->cpu != task_cpu(p));
1763	BUG_ON(task_current(rq, p));
1764	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
1765
1766	BUG_ON(!task_on_rq_queued(p));
1767	BUG_ON(!rt_task(p));
1768
1769	return p;
1770}
1771
1772/*
1773 * If the current CPU has more than one RT task, see if the non-running
1774 * task can migrate over to a CPU that is running a task
1775 * of lesser priority.
1776 */
1777static int push_rt_task(struct rq *rq)
1778{
1779	struct task_struct *next_task;
1780	struct rq *lowest_rq;
1781	int ret = 0;
1782
1783	if (!rq->rt.overloaded)
1784		return 0;
1785
1786	next_task = pick_next_pushable_task(rq);
1787	if (!next_task)
1788		return 0;
1789
1790retry:
1791	if (unlikely(next_task == rq->curr)) {
1792		WARN_ON(1);
1793		return 0;
1794	}
1795
1796	/*
1797	 * It's possible that the next_task slipped in with a
1798	 * higher priority than current. If that's the case,
1799	 * just reschedule current.
1800	 */
1801	if (unlikely(next_task->prio < rq->curr->prio)) {
1802		resched_curr(rq);
1803		return 0;
1804	}
1805
1806	/* We might release rq lock */
1807	get_task_struct(next_task);
1808
1809	/* find_lock_lowest_rq locks the rq if found */
1810	lowest_rq = find_lock_lowest_rq(next_task, rq);
1811	if (!lowest_rq) {
1812		struct task_struct *task;
1813		/*
1814		 * find_lock_lowest_rq releases rq->lock
1815		 * so it is possible that next_task has migrated.
1816		 *
1817		 * We need to make sure that the task is still on the same
1818		 * run-queue and is also still the next task eligible for
1819		 * pushing.
1820		 */
1821		task = pick_next_pushable_task(rq);
1822		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1823			/*
1824			 * The task hasn't migrated, and is still the next
1825			 * eligible task, but we failed to find a run-queue
1826			 * to push it to.  Do not retry in this case, since
1827			 * other cpus will pull from us when ready.
1828			 */
1829			goto out;
1830		}
1831
1832		if (!task)
1833			/* No more tasks, just exit */
1834			goto out;
1835
1836		/*
1837		 * Something has shifted, try again.
1838		 */
1839		put_task_struct(next_task);
1840		next_task = task;
1841		goto retry;
1842	}
1843
1844	deactivate_task(rq, next_task, 0);
1845	set_task_cpu(next_task, lowest_rq->cpu);
1846	activate_task(lowest_rq, next_task, 0);
1847	ret = 1;
1848
1849	resched_curr(lowest_rq);
1850
1851	double_unlock_balance(rq, lowest_rq);
1852
1853out:
1854	put_task_struct(next_task);
1855
1856	return ret;
1857}
1858
1859static void push_rt_tasks(struct rq *rq)
1860{
1861	/* push_rt_task() will return true if it moved an RT task */
1862	while (push_rt_task(rq))
1863		;
1864}
1865
1866#ifdef HAVE_RT_PUSH_IPI
1867/*
1868 * The search for the next cpu always starts at rq->cpu and ends
1869 * when we reach rq->cpu again. It will never return rq->cpu.
1870 * This returns the next cpu to check, or nr_cpu_ids if the loop
1871 * is complete.
1872 *
1873 * rq->rt.push_cpu holds the last cpu returned by this function,
1874 * or if this is the first instance, it must hold rq->cpu.
1875 */
1876static int rto_next_cpu(struct rq *rq)
1877{
1878	int prev_cpu = rq->rt.push_cpu;
1879	int cpu;
1880
1881	cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
1882
1883	/*
1884	 * If the previous cpu is less than the rq's CPU, then it already
1885	 * passed the end of the mask, and has started from the beginning.
1886	 * We end if the next CPU is greater than or equal to the rq's CPU.
1887	 */
1888	if (prev_cpu < rq->cpu) {
1889		if (cpu >= rq->cpu)
1890			return nr_cpu_ids;
1891
1892	} else if (cpu >= nr_cpu_ids) {
1893		/*
1894		 * We passed the end of the mask, start at the beginning.
1895		 * If the result is greater or equal to the rq's CPU, then
1896		 * the loop is finished.
1897		 */
1898		cpu = cpumask_first(rq->rd->rto_mask);
1899		if (cpu >= rq->cpu)
1900			return nr_cpu_ids;
1901	}
1902	rq->rt.push_cpu = cpu;
1903
1904	/* Return cpu to let the caller know if the loop is finished or not */
1905	return cpu;
1906}
1907
1908static int find_next_push_cpu(struct rq *rq)
1909{
1910	struct rq *next_rq;
1911	int cpu;
1912
1913	while (1) {
1914		cpu = rto_next_cpu(rq);
1915		if (cpu >= nr_cpu_ids)
1916			break;
1917		next_rq = cpu_rq(cpu);
1918
1919		/* Make sure the next rq can push to this rq */
1920		if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
1921			break;
1922	}
1923
1924	return cpu;
1925}
1926
1927#define RT_PUSH_IPI_EXECUTING		1
1928#define RT_PUSH_IPI_RESTART		2
1929
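    /*
     * Rather than have every CPU that lowers its priority contend on remote
     * rq locks to pull, send an irq_work IPI to the first RT-overloaded CPU
     * asking it to push its tasks; try_to_push_tasks() then forwards the IPI
     * from one overloaded CPU to the next until the scan wraps around.
     */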
1930static void tell_cpu_to_push(struct rq *rq)
1931{
1932	int cpu;
1933
1934	if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1935		raw_spin_lock(&rq->rt.push_lock);
1936		/* Make sure it's still executing */
1937		if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1938			/*
1939			 * Tell the IPI to restart the loop as things have
1940			 * changed since it started.
1941			 */
1942			rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
1943			raw_spin_unlock(&rq->rt.push_lock);
1944			return;
1945		}
1946		raw_spin_unlock(&rq->rt.push_lock);
1947	}
1948
1949	/* When here, there's no IPI going around */
1950
1951	rq->rt.push_cpu = rq->cpu;
1952	cpu = find_next_push_cpu(rq);
1953	if (cpu >= nr_cpu_ids)
1954		return;
1955
1956	rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
1957
1958	irq_work_queue_on(&rq->rt.push_work, cpu);
1959}
1960
1961/* Called from hardirq context */
1962static void try_to_push_tasks(void *arg)
1963{
1964	struct rt_rq *rt_rq = arg;
1965	struct rq *rq, *src_rq;
1966	int this_cpu;
1967	int cpu;
1968
1969	this_cpu = rt_rq->push_cpu;
1970
1971	/* Paranoid check */
1972	BUG_ON(this_cpu != smp_processor_id());
1973
1974	rq = cpu_rq(this_cpu);
1975	src_rq = rq_of_rt_rq(rt_rq);
1976
1977again:
1978	if (has_pushable_tasks(rq)) {
1979		raw_spin_lock(&rq->lock);
1980		push_rt_task(rq);
1981		raw_spin_unlock(&rq->lock);
1982	}
1983
1984	/* Pass the IPI to the next rt overloaded queue */
1985	raw_spin_lock(&rt_rq->push_lock);
1986	/*
1987	 * If the source queue changed since the IPI went out,
1988	 * we need to restart the search from that CPU again.
1989	 */
1990	if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
1991		rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
1992		rt_rq->push_cpu = src_rq->cpu;
1993	}
1994
1995	cpu = find_next_push_cpu(src_rq);
1996
1997	if (cpu >= nr_cpu_ids)
1998		rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
1999	raw_spin_unlock(&rt_rq->push_lock);
2000
2001	if (cpu >= nr_cpu_ids)
2002		return;
2003
2004	/*
2005	 * It is possible that a restart caused this CPU to be
2006	 * chosen again. Don't bother with an IPI, just see if we
2007	 * have more to push.
2008	 */
2009	if (unlikely(cpu == rq->cpu))
2010		goto again;
2011
2012	/* Try the next RT overloaded CPU */
2013	irq_work_queue_on(&rt_rq->push_work, cpu);
2014}
2015
2016static void push_irq_work_func(struct irq_work *work)
2017{
2018	struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
2019
2020	try_to_push_tasks(rt_rq);
2021}
2022#endif /* HAVE_RT_PUSH_IPI */
2023
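    /*
     * This rq may now run something of lower priority: scan the RT-overloaded
     * CPUs and pull over their highest pushable task if it beats what we are
     * about to run (or defer to the push IPI when RT_PUSH_IPI is enabled).
     */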
2024static void pull_rt_task(struct rq *this_rq)
2025{
2026	int this_cpu = this_rq->cpu, cpu;
2027	bool resched = false;
2028	struct task_struct *p;
2029	struct rq *src_rq;
2030
2031	if (likely(!rt_overloaded(this_rq)))
2032		return;
2033
2034	/*
2035	 * Match the barrier from rt_set_overload(); this guarantees that if we
2036	 * see overloaded we must also see the rto_mask bit.
2037	 */
2038	smp_rmb();
2039
2040#ifdef HAVE_RT_PUSH_IPI
2041	if (sched_feat(RT_PUSH_IPI)) {
2042		tell_cpu_to_push(this_rq);
2043		return;
2044	}
2045#endif
2046
2047	for_each_cpu(cpu, this_rq->rd->rto_mask) {
2048		if (this_cpu == cpu)
2049			continue;
2050
2051		src_rq = cpu_rq(cpu);
2052
2053		/*
2054		 * Don't bother taking the src_rq->lock if the next highest
2055		 * task is known to be lower-priority than our current task.
2056		 * This may look racy, but if this value is about to go
2057		 * logically higher, the src_rq will push this task away.
2058		 * And if it's going logically lower, we do not care.
2059		 */
2060		if (src_rq->rt.highest_prio.next >=
2061		    this_rq->rt.highest_prio.curr)
2062			continue;
2063
2064		/*
2065		 * We can potentially drop this_rq's lock in
2066		 * double_lock_balance, and another CPU could
2067		 * alter this_rq
2068		 */
2069		double_lock_balance(this_rq, src_rq);
2070
2071		/*
2072		 * We can only pull a task that is pushable
2073		 * on its rq, and no others.
2074		 */
2075		p = pick_highest_pushable_task(src_rq, this_cpu);
2076
2077		/*
2078		 * Do we have an RT task that preempts
2079		 * the to-be-scheduled task?
2080		 */
2081		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2082			WARN_ON(p == src_rq->curr);
2083			WARN_ON(!task_on_rq_queued(p));
2084
2085			/*
2086			 * There's a chance that p is higher in priority
2087			 * than what's currently running on its cpu.
2088			 * This is just that p is waking up and hasn't
2089			 * had a chance to schedule. We only pull
2090			 * p if it is lower in priority than the
2091			 * current task on the run queue
2092			 */
2093			if (p->prio < src_rq->curr->prio)
2094				goto skip;
2095
2096			resched = true;
2097
2098			deactivate_task(src_rq, p, 0);
2099			set_task_cpu(p, this_cpu);
2100			activate_task(this_rq, p, 0);
2101			/*
2102			 * We continue with the search, just in
2103			 * case there's an even higher prio task
2104			 * in another runqueue. (low likelihood
2105			 * but possible)
2106			 */
2107		}
2108skip:
2109		double_unlock_balance(this_rq, src_rq);
2110	}
2111
2112	if (resched)
2113		resched_curr(this_rq);
2114}
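/*
 * Worked example for the pull heuristics above (priority values assumed;
 * remember that a numerically smaller prio is a higher priority): if
 * this_rq's highest queued prio is 50 and a source runqueue's
 * highest_prio.next is 40, the lockless check passes and we take both
 * locks; pick_highest_pushable_task() may then hand us that prio-40 task,
 * which will preempt our prio-50 task once activated here.  Had the
 * source's .next been 50 or larger, we would have skipped it without
 * touching its lock at all.
 */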
2115
2116/*
2117 * If we are not running and we are not going to reschedule soon, we should
2118 * try to push tasks away now
2119 */
2120static void task_woken_rt(struct rq *rq, struct task_struct *p)
2121{
2122	if (!task_running(rq, p) &&
2123	    !test_tsk_need_resched(rq->curr) &&
2124	    tsk_nr_cpus_allowed(p) > 1 &&
2125	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
2126	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
2127	     rq->curr->prio <= p->prio))
 
 
2128		push_rt_tasks(rq);
2129}
2130
2131/* Assumes rq->lock is held */
2132static void rq_online_rt(struct rq *rq)
2133{
2134	if (rq->rt.overloaded)
2135		rt_set_overload(rq);
2136
2137	__enable_runtime(rq);
2138
2139	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2140}
2141
2142/* Assumes rq->lock is held */
2143static void rq_offline_rt(struct rq *rq)
2144{
2145	if (rq->rt.overloaded)
2146		rt_clear_overload(rq);
2147
2148	__disable_runtime(rq);
2149
2150	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2151}
2152
2153/*
2154 * When switching from the rt queue, we bring ourselves to a position
2155 * where we might want to pull RT tasks from other runqueues.
2156 */
2157static void switched_from_rt(struct rq *rq, struct task_struct *p)
2158{
2159	/*
2160	 * If there are other RT tasks then we will reschedule
2161	 * and the scheduling of the other RT tasks will handle
2162	 * the balancing. But if we are the last RT task
2163	 * we may need to handle the pulling of RT tasks
2164	 * now.
2165	 */
2166	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2167		return;
2168
2169	queue_pull_task(rq);
2170}
2171
2172void __init init_sched_rt_class(void)
2173{
2174	unsigned int i;
2175
2176	for_each_possible_cpu(i) {
2177		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2178					GFP_KERNEL, cpu_to_node(i));
2179	}
2180}
2181#endif /* CONFIG_SMP */
2182
2183/*
2184 * When switching a task to RT, we may overload the runqueue
2185 * with RT tasks. In this case we try to push them off to
2186 * other runqueues.
2187 */
2188static void switched_to_rt(struct rq *rq, struct task_struct *p)
2189{
2190	/*
2191	 * If we are already running, then there's nothing
2192	 * that needs to be done. But if we are not running
2193	 * we may need to preempt the current running task.
2194	 * If that current running task is also an RT task
2195	 * then see if we can move to another run queue.
2196	 */
2197	if (task_on_rq_queued(p) && rq->curr != p) {
2198#ifdef CONFIG_SMP
2199		if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
2200			queue_push_tasks(rq);
2201#endif /* CONFIG_SMP */
2202		if (p->prio < rq->curr->prio)
2203			resched_curr(rq);
2204	}
2205}
2206
2207/*
2208 * Priority of the task has changed. This may cause
2209 * us to initiate a push or pull.
2210 */
2211static void
2212prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2213{
2214	if (!task_on_rq_queued(p))
2215		return;
2216
2217	if (rq->curr == p) {
2218#ifdef CONFIG_SMP
2219		/*
2220		 * If our priority decreases while running, we
2221		 * may need to pull tasks to this runqueue.
2222		 */
2223		if (oldprio < p->prio)
2224			queue_pull_task(rq);
2225
2226		/*
2227		 * If there's a higher priority task waiting to run
2228		 * then reschedule.
2229		 */
2230		if (p->prio > rq->rt.highest_prio.curr)
2231			resched_curr(rq);
2232#else
2233		/* For UP simply resched on drop of prio */
2234		if (oldprio < p->prio)
2235			resched_curr(rq);
2236#endif /* CONFIG_SMP */
2237	} else {
2238		/*
2239		 * This task is not running, but if it is
2240		 * higher in priority than the current running
2241		 * task then reschedule.
2242		 */
2243		if (p->prio < rq->curr->prio)
2244			resched_curr(rq);
2245	}
2246}
2247
 
2248static void watchdog(struct rq *rq, struct task_struct *p)
2249{
2250	unsigned long soft, hard;
2251
2252	/* max may change after cur was read, this will be fixed next tick */
2253	soft = task_rlimit(p, RLIMIT_RTTIME);
2254	hard = task_rlimit_max(p, RLIMIT_RTTIME);
2255
2256	if (soft != RLIM_INFINITY) {
2257		unsigned long next;
2258
2259		if (p->rt.watchdog_stamp != jiffies) {
2260			p->rt.timeout++;
2261			p->rt.watchdog_stamp = jiffies;
2262		}
2263
2264		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2265		if (p->rt.timeout > next)
2266			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
 
 
2267	}
2268}
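/*
 * Worked example for the tick conversion above (HZ and limit values are
 * assumptions): with HZ == 250 one tick is USEC_PER_SEC/HZ == 4000 us, so
 * a soft RLIMIT_RTTIME of 950000 us yields
 * next = DIV_ROUND_UP(950000, 4000) == 238 ticks.  Once p->rt.timeout
 * exceeds that, sched_exp is armed and the posix-cpu-timer path is
 * expected to raise SIGXCPU (and ultimately SIGKILL at the hard limit).
 * A task would typically opt in from userspace along these lines
 * (illustrative snippet, not part of this file):
 *
 *	struct rlimit rl = { .rlim_cur = 950000, .rlim_max = 1000000 };
 *	setrlimit(RLIMIT_RTTIME, &rl);
 */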
 
 
 
2269
2270static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2271{
2272	struct sched_rt_entity *rt_se = &p->rt;
2273
2274	update_curr_rt(rq);
 
2275
2276	watchdog(rq, p);
2277
2278	/*
2279	 * RR tasks need a special form of timeslice management.
2280	 * FIFO tasks have no timeslices.
2281	 */
2282	if (p->policy != SCHED_RR)
2283		return;
2284
2285	if (--p->rt.time_slice)
2286		return;
2287
2288	p->rt.time_slice = sched_rr_timeslice;
2289
2290	/*
2291	 * Requeue to the end of queue if we (and all of our ancestors) are not
2292	 * the only element on the queue
2293	 */
2294	for_each_sched_rt_entity(rt_se) {
2295		if (rt_se->run_list.prev != rt_se->run_list.next) {
2296			requeue_task_rt(rq, p, 0);
2297			resched_curr(rq);
2298			return;
2299		}
2300	}
2301}
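/*
 * Example of the SCHED_RR handling above (HZ assumed to be 1000, in which
 * case RR_TIMESLICE works out to 100 ticks, i.e. about 100 ms):
 * p->rt.time_slice is decremented on every tick and only refilled from
 * sched_rr_timeslice when it hits zero; the task is then moved to the
 * tail of its priority queue, but only if some entity in its hierarchy
 * shares that queue with another entity, since requeueing a lone task
 * would be a no-op.  SCHED_FIFO tasks never get this far and run until
 * they block, yield or are preempted.
 */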
2302
2303static void set_curr_task_rt(struct rq *rq)
2304{
2305	struct task_struct *p = rq->curr;
2306
2307	p->se.exec_start = rq_clock_task(rq);
2308
2309	/* The running task is never eligible for pushing */
2310	dequeue_pushable_task(rq, p);
2311}
2312
2313static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2314{
2315	/*
2316	 * Time slice is 0 for SCHED_FIFO tasks
2317	 */
2318	if (task->policy == SCHED_RR)
2319		return sched_rr_timeslice;
2320	else
2321		return 0;
2322}
2323
2324const struct sched_class rt_sched_class = {
2325	.next			= &fair_sched_class,
2326	.enqueue_task		= enqueue_task_rt,
2327	.dequeue_task		= dequeue_task_rt,
2328	.yield_task		= yield_task_rt,
2329
2330	.check_preempt_curr	= check_preempt_curr_rt,
2331
2332	.pick_next_task		= pick_next_task_rt,
2333	.put_prev_task		= put_prev_task_rt,
 
2334
2335#ifdef CONFIG_SMP
 
2336	.select_task_rq		= select_task_rq_rt,
2337
2338	.set_cpus_allowed       = set_cpus_allowed_common,
2339	.rq_online              = rq_online_rt,
2340	.rq_offline             = rq_offline_rt,
2341	.task_woken		= task_woken_rt,
2342	.switched_from		= switched_from_rt,
2343#endif
2344
2345	.set_curr_task          = set_curr_task_rt,
2346	.task_tick		= task_tick_rt,
2347
2348	.get_rr_interval	= get_rr_interval_rt,
2349
2350	.prio_changed		= prio_changed_rt,
2351	.switched_to		= switched_to_rt,
2352
2353	.update_curr		= update_curr_rt,
2354};
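/*
 * Note on .next above: the scheduling classes form a priority-ordered
 * list (stop and deadline ahead of rt, rt ahead of fair, fair ahead of
 * idle), so a runnable SCHED_FIFO/SCHED_RR task is always picked before
 * any SCHED_NORMAL task on the same runqueue; .next = &fair_sched_class
 * is what places CFS below this class in that walk.
 */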
2355
2356#ifdef CONFIG_SCHED_DEBUG
2357extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2358
2359void print_rt_stats(struct seq_file *m, int cpu)
2360{
2361	rt_rq_iter_t iter;
2362	struct rt_rq *rt_rq;
2363
2364	rcu_read_lock();
2365	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2366		print_rt_rq(m, cpu, rt_rq);
2367	rcu_read_unlock();
2368}
2369#endif /* CONFIG_SCHED_DEBUG */
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
   4 * policies)
   5 */
 
   6#include "sched.h"
   7
   8#include "pelt.h"
 
   9
  10int sched_rr_timeslice = RR_TIMESLICE;
  11int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
  12/* More than 4 hours if BW_SHIFT equals 20. */
  13static const u64 max_rt_runtime = MAX_BW;
  14
  15static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
  16
  17struct rt_bandwidth def_rt_bandwidth;
  18
  19static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
  20{
  21	struct rt_bandwidth *rt_b =
  22		container_of(timer, struct rt_bandwidth, rt_period_timer);
  23	int idle = 0;
  24	int overrun;
  25
  26	raw_spin_lock(&rt_b->rt_runtime_lock);
  27	for (;;) {
  28		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
  29		if (!overrun)
  30			break;
  31
  32		raw_spin_unlock(&rt_b->rt_runtime_lock);
  33		idle = do_sched_rt_period_timer(rt_b, overrun);
  34		raw_spin_lock(&rt_b->rt_runtime_lock);
  35	}
  36	if (idle)
  37		rt_b->rt_period_active = 0;
  38	raw_spin_unlock(&rt_b->rt_runtime_lock);
  39
  40	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
  41}
  42
  43void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
  44{
  45	rt_b->rt_period = ns_to_ktime(period);
  46	rt_b->rt_runtime = runtime;
  47
  48	raw_spin_lock_init(&rt_b->rt_runtime_lock);
  49
  50	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
  51		     HRTIMER_MODE_REL_HARD);
  52	rt_b->rt_period_timer.function = sched_rt_period_timer;
  53}
  54
  55static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
  56{
  57	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
  58		return;
  59
  60	raw_spin_lock(&rt_b->rt_runtime_lock);
  61	if (!rt_b->rt_period_active) {
  62		rt_b->rt_period_active = 1;
  63		/*
  64		 * SCHED_DEADLINE updates the bandwidth, as a run away
  65		 * RT task with a DL task could hog a CPU. But DL does
  66		 * not reset the period. If a deadline task was running
  67		 * without an RT task running, it can cause RT tasks to
  68		 * throttle when they start up. Kick the timer right away
  69		 * to update the period.
  70		 */
  71		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
  72		hrtimer_start_expires(&rt_b->rt_period_timer,
  73				      HRTIMER_MODE_ABS_PINNED_HARD);
  74	}
  75	raw_spin_unlock(&rt_b->rt_runtime_lock);
  76}
  77
  78void init_rt_rq(struct rt_rq *rt_rq)
  79{
  80	struct rt_prio_array *array;
  81	int i;
  82
  83	array = &rt_rq->active;
  84	for (i = 0; i < MAX_RT_PRIO; i++) {
  85		INIT_LIST_HEAD(array->queue + i);
  86		__clear_bit(i, array->bitmap);
  87	}
  88	/* delimiter for bitsearch: */
  89	__set_bit(MAX_RT_PRIO, array->bitmap);
  90
  91#if defined CONFIG_SMP
  92	rt_rq->highest_prio.curr = MAX_RT_PRIO;
  93	rt_rq->highest_prio.next = MAX_RT_PRIO;
  94	rt_rq->rt_nr_migratory = 0;
  95	rt_rq->overloaded = 0;
  96	plist_head_init(&rt_rq->pushable_tasks);
  97#endif /* CONFIG_SMP */
   98	/* We start in dequeued state, because no RT tasks are queued */
  99	rt_rq->rt_queued = 0;
 100
 101	rt_rq->rt_time = 0;
 102	rt_rq->rt_throttled = 0;
 103	rt_rq->rt_runtime = 0;
 104	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
 105}
 106
 107#ifdef CONFIG_RT_GROUP_SCHED
 108static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
 109{
 110	hrtimer_cancel(&rt_b->rt_period_timer);
 111}
 112
 113#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
 114
 115static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 116{
 117#ifdef CONFIG_SCHED_DEBUG
 118	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
 119#endif
 120	return container_of(rt_se, struct task_struct, rt);
 121}
 122
 123static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 124{
 125	return rt_rq->rq;
 126}
 127
 128static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 129{
 130	return rt_se->rt_rq;
 131}
 132
 133static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 134{
 135	struct rt_rq *rt_rq = rt_se->rt_rq;
 136
 137	return rt_rq->rq;
 138}
 139
 140void free_rt_sched_group(struct task_group *tg)
 141{
 142	int i;
 143
 144	if (tg->rt_se)
 145		destroy_rt_bandwidth(&tg->rt_bandwidth);
 146
 147	for_each_possible_cpu(i) {
 148		if (tg->rt_rq)
 149			kfree(tg->rt_rq[i]);
 150		if (tg->rt_se)
 151			kfree(tg->rt_se[i]);
 152	}
 153
 154	kfree(tg->rt_rq);
 155	kfree(tg->rt_se);
 156}
 157
 158void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 159		struct sched_rt_entity *rt_se, int cpu,
 160		struct sched_rt_entity *parent)
 161{
 162	struct rq *rq = cpu_rq(cpu);
 163
 164	rt_rq->highest_prio.curr = MAX_RT_PRIO;
 165	rt_rq->rt_nr_boosted = 0;
 166	rt_rq->rq = rq;
 167	rt_rq->tg = tg;
 168
 169	tg->rt_rq[cpu] = rt_rq;
 170	tg->rt_se[cpu] = rt_se;
 171
 172	if (!rt_se)
 173		return;
 174
 175	if (!parent)
 176		rt_se->rt_rq = &rq->rt;
 177	else
 178		rt_se->rt_rq = parent->my_q;
 179
 180	rt_se->my_q = rt_rq;
 181	rt_se->parent = parent;
 182	INIT_LIST_HEAD(&rt_se->run_list);
 183}
 184
 185int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 186{
 187	struct rt_rq *rt_rq;
 188	struct sched_rt_entity *rt_se;
 189	int i;
 190
 191	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
 192	if (!tg->rt_rq)
 193		goto err;
 194	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
 195	if (!tg->rt_se)
 196		goto err;
 197
 198	init_rt_bandwidth(&tg->rt_bandwidth,
 199			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
 200
 201	for_each_possible_cpu(i) {
 202		rt_rq = kzalloc_node(sizeof(struct rt_rq),
 203				     GFP_KERNEL, cpu_to_node(i));
 204		if (!rt_rq)
 205			goto err;
 206
 207		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
 208				     GFP_KERNEL, cpu_to_node(i));
 209		if (!rt_se)
 210			goto err_free_rq;
 211
 212		init_rt_rq(rt_rq);
 213		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 214		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
 215	}
 216
 217	return 1;
 218
 219err_free_rq:
 220	kfree(rt_rq);
 221err:
 222	return 0;
 223}
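/*
 * A freshly allocated group starts with a period copied from
 * def_rt_bandwidth but zero runtime, so its tasks cannot run as RT until
 * an administrator grants them budget.  With the cgroup-v1 cpu controller
 * (assumed mounted at /sys/fs/cgroup/cpu) that typically looks like:
 *
 *	# mkdir /sys/fs/cgroup/cpu/rtgroup
 *	# echo 1000000 > /sys/fs/cgroup/cpu/rtgroup/cpu.rt_period_us
 *	# echo  300000 > /sys/fs/cgroup/cpu/rtgroup/cpu.rt_runtime_us
 *
 * which reserves 300 ms of RT runtime per 1 s period for the group's
 * rt_rq on each CPU.
 */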
 224
 225#else /* CONFIG_RT_GROUP_SCHED */
 226
 227#define rt_entity_is_task(rt_se) (1)
 228
 229static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 230{
 231	return container_of(rt_se, struct task_struct, rt);
 232}
 233
 234static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 235{
 236	return container_of(rt_rq, struct rq, rt);
 237}
 238
 239static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 240{
 241	struct task_struct *p = rt_task_of(rt_se);
 242
 243	return task_rq(p);
 244}
 245
 246static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 247{
 248	struct rq *rq = rq_of_rt_se(rt_se);
 249
 250	return &rq->rt;
 251}
 252
 253void free_rt_sched_group(struct task_group *tg) { }
 254
 255int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 256{
 257	return 1;
 258}
 259#endif /* CONFIG_RT_GROUP_SCHED */
 260
 261#ifdef CONFIG_SMP
 262
 263static void pull_rt_task(struct rq *this_rq);
 264
 265static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 266{
 267	/* Try to pull RT tasks here if we lower this rq's prio */
 268	return rq->rt.highest_prio.curr > prev->prio;
 269}
 270
 271static inline int rt_overloaded(struct rq *rq)
 272{
 273	return atomic_read(&rq->rd->rto_count);
 274}
 275
 276static inline void rt_set_overload(struct rq *rq)
 277{
 278	if (!rq->online)
 279		return;
 280
 281	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
 282	/*
 283	 * Make sure the mask is visible before we set
 284	 * the overload count. That is checked to determine
 285	 * if we should look at the mask. It would be a shame
 286	 * if we looked at the mask, but the mask was not
 287	 * updated yet.
 288	 *
 289	 * Matched by the barrier in pull_rt_task().
 290	 */
 291	smp_wmb();
 292	atomic_inc(&rq->rd->rto_count);
 293}
 294
 295static inline void rt_clear_overload(struct rq *rq)
 296{
 297	if (!rq->online)
 298		return;
 299
 300	/* the order here really doesn't matter */
 301	atomic_dec(&rq->rd->rto_count);
 302	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 303}
 304
 305static void update_rt_migration(struct rt_rq *rt_rq)
 306{
 307	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
 308		if (!rt_rq->overloaded) {
 309			rt_set_overload(rq_of_rt_rq(rt_rq));
 310			rt_rq->overloaded = 1;
 311		}
 312	} else if (rt_rq->overloaded) {
 313		rt_clear_overload(rq_of_rt_rq(rt_rq));
 314		rt_rq->overloaded = 0;
 315	}
 316}
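/*
 * In words: a runqueue is marked rt-overloaded only when it holds more
 * than one RT task in total and at least one of them is allowed to
 * migrate (nr_cpus_allowed > 1).  A single pinned RT task never sets the
 * overload bit, so other CPUs will not waste pull attempts on it.
 */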
 317
 318static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 319{
 320	struct task_struct *p;
 321
 322	if (!rt_entity_is_task(rt_se))
 323		return;
 324
 325	p = rt_task_of(rt_se);
 326	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 327
 328	rt_rq->rt_nr_total++;
 329	if (p->nr_cpus_allowed > 1)
 330		rt_rq->rt_nr_migratory++;
 331
 332	update_rt_migration(rt_rq);
 333}
 334
 335static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 336{
 337	struct task_struct *p;
 338
 339	if (!rt_entity_is_task(rt_se))
 340		return;
 341
 342	p = rt_task_of(rt_se);
 343	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 344
 345	rt_rq->rt_nr_total--;
 346	if (p->nr_cpus_allowed > 1)
 347		rt_rq->rt_nr_migratory--;
 348
 349	update_rt_migration(rt_rq);
 350}
 351
 352static inline int has_pushable_tasks(struct rq *rq)
 353{
 354	return !plist_head_empty(&rq->rt.pushable_tasks);
 355}
 356
 357static DEFINE_PER_CPU(struct callback_head, rt_push_head);
 358static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
 359
 360static void push_rt_tasks(struct rq *);
 361static void pull_rt_task(struct rq *);
 362
 363static inline void rt_queue_push_tasks(struct rq *rq)
 364{
 365	if (!has_pushable_tasks(rq))
 366		return;
 367
 368	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
 369}
 370
 371static inline void rt_queue_pull_task(struct rq *rq)
 372{
 373	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
 374}
 375
 376static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 377{
 378	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 379	plist_node_init(&p->pushable_tasks, p->prio);
 380	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
 381
 382	/* Update the highest prio pushable task */
 383	if (p->prio < rq->rt.highest_prio.next)
 384		rq->rt.highest_prio.next = p->prio;
 385}
 386
 387static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 388{
 389	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 390
 391	/* Update the new highest prio pushable task */
 392	if (has_pushable_tasks(rq)) {
 393		p = plist_first_entry(&rq->rt.pushable_tasks,
 394				      struct task_struct, pushable_tasks);
 395		rq->rt.highest_prio.next = p->prio;
 396	} else
 397		rq->rt.highest_prio.next = MAX_RT_PRIO;
 398}
 399
 400#else
 401
 402static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 403{
 404}
 405
 406static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 407{
 408}
 409
 410static inline
 411void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 412{
 413}
 414
 415static inline
 416void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 417{
 418}
 419
 420static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 421{
 422	return false;
 423}
 424
 425static inline void pull_rt_task(struct rq *this_rq)
 426{
 427}
 428
 429static inline void rt_queue_push_tasks(struct rq *rq)
 430{
 431}
 432#endif /* CONFIG_SMP */
 433
 434static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
 435static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
 436
 437static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 438{
 439	return rt_se->on_rq;
 440}
 441
 442#ifdef CONFIG_UCLAMP_TASK
 443/*
 444 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 445 * settings.
 446 *
  447 * This check is only important for heterogeneous systems where the uclamp_min
  448 * value is higher than the capacity of a @cpu. For non-heterogeneous systems this
 449 * function will always return true.
 450 *
 451 * The function will return true if the capacity of the @cpu is >= the
 452 * uclamp_min and false otherwise.
 453 *
 454 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 455 * > uclamp_max.
 456 */
 457static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
 458{
 459	unsigned int min_cap;
 460	unsigned int max_cap;
 461	unsigned int cpu_cap;
 462
 463	/* Only heterogeneous systems can benefit from this check */
 464	if (!static_branch_unlikely(&sched_asym_cpucapacity))
 465		return true;
 466
 467	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
 468	max_cap = uclamp_eff_value(p, UCLAMP_MAX);
 469
 470	cpu_cap = capacity_orig_of(cpu);
 471
 472	return cpu_cap >= min(min_cap, max_cap);
 473}
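/*
 * Worked example (capacity and clamp values assumed): on an asymmetric
 * system where a little CPU has capacity_orig 446 and a big CPU 1024, a
 * task with uclamp_min = 512 and uclamp_max = 1024 fits only the big CPU,
 * since min(512, 1024) = 512 > 446.  If uclamp_max were lowered to 300,
 * min(512, 300) = 300 and the little CPU would count as fitting again.
 */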
 474#else
 475static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
 476{
 477	return true;
 478}
 479#endif
 480
 481#ifdef CONFIG_RT_GROUP_SCHED
 482
 483static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 484{
 485	if (!rt_rq->tg)
 486		return RUNTIME_INF;
 487
 488	return rt_rq->rt_runtime;
 489}
 490
 491static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 492{
 493	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
 494}
 495
 496typedef struct task_group *rt_rq_iter_t;
 497
 498static inline struct task_group *next_task_group(struct task_group *tg)
 499{
 500	do {
 501		tg = list_entry_rcu(tg->list.next,
 502			typeof(struct task_group), list);
 503	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
 504
 505	if (&tg->list == &task_groups)
 506		tg = NULL;
 507
 508	return tg;
 509}
 510
 511#define for_each_rt_rq(rt_rq, iter, rq)					\
 512	for (iter = container_of(&task_groups, typeof(*iter), list);	\
 513		(iter = next_task_group(iter)) &&			\
 514		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
 515
 516#define for_each_sched_rt_entity(rt_se) \
 517	for (; rt_se; rt_se = rt_se->parent)
 518
 519static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 520{
 521	return rt_se->my_q;
 522}
 523
 524static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
 525static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
 526
 527static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 528{
 529	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
 530	struct rq *rq = rq_of_rt_rq(rt_rq);
 531	struct sched_rt_entity *rt_se;
 532
 533	int cpu = cpu_of(rq);
 534
 535	rt_se = rt_rq->tg->rt_se[cpu];
 536
 537	if (rt_rq->rt_nr_running) {
 538		if (!rt_se)
 539			enqueue_top_rt_rq(rt_rq);
 540		else if (!on_rt_rq(rt_se))
 541			enqueue_rt_entity(rt_se, 0);
 542
 543		if (rt_rq->highest_prio.curr < curr->prio)
 544			resched_curr(rq);
 545	}
 546}
 547
 548static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 549{
 550	struct sched_rt_entity *rt_se;
 551	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 552
 553	rt_se = rt_rq->tg->rt_se[cpu];
 554
 555	if (!rt_se) {
 556		dequeue_top_rt_rq(rt_rq);
 557		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
 558		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
 559	}
 560	else if (on_rt_rq(rt_se))
 561		dequeue_rt_entity(rt_se, 0);
 562}
 563
 564static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 565{
 566	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
 567}
 568
 569static int rt_se_boosted(struct sched_rt_entity *rt_se)
 570{
 571	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 572	struct task_struct *p;
 573
 574	if (rt_rq)
 575		return !!rt_rq->rt_nr_boosted;
 576
 577	p = rt_task_of(rt_se);
 578	return p->prio != p->normal_prio;
 579}
 580
 581#ifdef CONFIG_SMP
 582static inline const struct cpumask *sched_rt_period_mask(void)
 583{
 584	return this_rq()->rd->span;
 585}
 586#else
 587static inline const struct cpumask *sched_rt_period_mask(void)
 588{
 589	return cpu_online_mask;
 590}
 591#endif
 592
 593static inline
 594struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 595{
 596	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
 597}
 598
 599static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 600{
 601	return &rt_rq->tg->rt_bandwidth;
 602}
 603
 604#else /* !CONFIG_RT_GROUP_SCHED */
 605
 606static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 607{
 608	return rt_rq->rt_runtime;
 609}
 610
 611static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 612{
 613	return ktime_to_ns(def_rt_bandwidth.rt_period);
 614}
 615
 616typedef struct rt_rq *rt_rq_iter_t;
 617
 618#define for_each_rt_rq(rt_rq, iter, rq) \
 619	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
 620
 621#define for_each_sched_rt_entity(rt_se) \
 622	for (; rt_se; rt_se = NULL)
 623
 624static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 625{
 626	return NULL;
 627}
 628
 629static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 630{
 631	struct rq *rq = rq_of_rt_rq(rt_rq);
 632
 633	if (!rt_rq->rt_nr_running)
 634		return;
 635
 636	enqueue_top_rt_rq(rt_rq);
 637	resched_curr(rq);
 638}
 639
 640static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 641{
 642	dequeue_top_rt_rq(rt_rq);
 643}
 644
 645static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 646{
 647	return rt_rq->rt_throttled;
 648}
 649
 650static inline const struct cpumask *sched_rt_period_mask(void)
 651{
 652	return cpu_online_mask;
 653}
 654
 655static inline
 656struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 657{
 658	return &cpu_rq(cpu)->rt;
 659}
 660
 661static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 662{
 663	return &def_rt_bandwidth;
 664}
 665
 666#endif /* CONFIG_RT_GROUP_SCHED */
 667
 668bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
 669{
 670	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 671
 672	return (hrtimer_active(&rt_b->rt_period_timer) ||
 673		rt_rq->rt_time < rt_b->rt_runtime);
 674}
 675
 676#ifdef CONFIG_SMP
 677/*
 678 * We ran out of runtime, see if we can borrow some from our neighbours.
 679 */
 680static void do_balance_runtime(struct rt_rq *rt_rq)
 681{
 682	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 683	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
 684	int i, weight;
 685	u64 rt_period;
 686
 687	weight = cpumask_weight(rd->span);
 688
 689	raw_spin_lock(&rt_b->rt_runtime_lock);
 690	rt_period = ktime_to_ns(rt_b->rt_period);
 691	for_each_cpu(i, rd->span) {
 692		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 693		s64 diff;
 694
 695		if (iter == rt_rq)
 696			continue;
 697
 698		raw_spin_lock(&iter->rt_runtime_lock);
 699		/*
 700		 * Either all rqs have inf runtime and there's nothing to steal
 701		 * or __disable_runtime() below sets a specific rq to inf to
  702		 * indicate it's been disabled and disallow stealing.
 703		 */
 704		if (iter->rt_runtime == RUNTIME_INF)
 705			goto next;
 706
 707		/*
 708		 * From runqueues with spare time, take 1/n part of their
 709		 * spare time, but no more than our period.
 710		 */
 711		diff = iter->rt_runtime - iter->rt_time;
 712		if (diff > 0) {
 713			diff = div_u64((u64)diff, weight);
 714			if (rt_rq->rt_runtime + diff > rt_period)
 715				diff = rt_period - rt_rq->rt_runtime;
 716			iter->rt_runtime -= diff;
 717			rt_rq->rt_runtime += diff;
 718			if (rt_rq->rt_runtime == rt_period) {
 719				raw_spin_unlock(&iter->rt_runtime_lock);
 720				break;
 721			}
 722		}
 723next:
 724		raw_spin_unlock(&iter->rt_runtime_lock);
 725	}
 726	raw_spin_unlock(&rt_b->rt_runtime_lock);
 727}
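/*
 * Worked example of the 1/n borrowing above (values assumed): in a 4-CPU
 * root domain (weight == 4), a neighbour whose rt_runtime exceeds its
 * rt_time by 40 ms contributes diff = 40 ms / 4 == 10 ms, further clamped
 * so that our rt_runtime never grows beyond the period.  A starved
 * runqueue thus nibbles an equal share from every neighbour instead of
 * draining a single one.
 */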
 728
 729/*
  730 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 731 */
 732static void __disable_runtime(struct rq *rq)
 733{
 734	struct root_domain *rd = rq->rd;
 735	rt_rq_iter_t iter;
 736	struct rt_rq *rt_rq;
 737
 738	if (unlikely(!scheduler_running))
 739		return;
 740
 741	for_each_rt_rq(rt_rq, iter, rq) {
 742		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 743		s64 want;
 744		int i;
 745
 746		raw_spin_lock(&rt_b->rt_runtime_lock);
 747		raw_spin_lock(&rt_rq->rt_runtime_lock);
 748		/*
 749		 * Either we're all inf and nobody needs to borrow, or we're
 750		 * already disabled and thus have nothing to do, or we have
 751		 * exactly the right amount of runtime to take out.
 752		 */
 753		if (rt_rq->rt_runtime == RUNTIME_INF ||
 754				rt_rq->rt_runtime == rt_b->rt_runtime)
 755			goto balanced;
 756		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 757
 758		/*
 759		 * Calculate the difference between what we started out with
  760		 * and what we currently have; that's the amount of runtime
  761		 * we lent out and now have to reclaim.
 762		 */
 763		want = rt_b->rt_runtime - rt_rq->rt_runtime;
 764
 765		/*
 766		 * Greedy reclaim, take back as much as we can.
 767		 */
 768		for_each_cpu(i, rd->span) {
 769			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 770			s64 diff;
 771
 772			/*
 773			 * Can't reclaim from ourselves or disabled runqueues.
 774			 */
 775			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 776				continue;
 777
 778			raw_spin_lock(&iter->rt_runtime_lock);
 779			if (want > 0) {
 780				diff = min_t(s64, iter->rt_runtime, want);
 781				iter->rt_runtime -= diff;
 782				want -= diff;
 783			} else {
 784				iter->rt_runtime -= want;
 785				want -= want;
 786			}
 787			raw_spin_unlock(&iter->rt_runtime_lock);
 788
 789			if (!want)
 790				break;
 791		}
 792
 793		raw_spin_lock(&rt_rq->rt_runtime_lock);
 794		/*
 795		 * We cannot be left wanting - that would mean some runtime
 796		 * leaked out of the system.
 797		 */
 798		BUG_ON(want);
 799balanced:
 800		/*
 801		 * Disable all the borrow logic by pretending we have inf
 802		 * runtime - in which case borrowing doesn't make sense.
 803		 */
 804		rt_rq->rt_runtime = RUNTIME_INF;
 805		rt_rq->rt_throttled = 0;
 806		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 807		raw_spin_unlock(&rt_b->rt_runtime_lock);
 808
 809		/* Make rt_rq available for pick_next_task() */
 810		sched_rt_rq_enqueue(rt_rq);
 811	}
 812}
 813
 814static void __enable_runtime(struct rq *rq)
 815{
 816	rt_rq_iter_t iter;
 817	struct rt_rq *rt_rq;
 818
 819	if (unlikely(!scheduler_running))
 820		return;
 821
 822	/*
 823	 * Reset each runqueue's bandwidth settings
 824	 */
 825	for_each_rt_rq(rt_rq, iter, rq) {
 826		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 827
 828		raw_spin_lock(&rt_b->rt_runtime_lock);
 829		raw_spin_lock(&rt_rq->rt_runtime_lock);
 830		rt_rq->rt_runtime = rt_b->rt_runtime;
 831		rt_rq->rt_time = 0;
 832		rt_rq->rt_throttled = 0;
 833		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 834		raw_spin_unlock(&rt_b->rt_runtime_lock);
 835	}
 836}
 837
 838static void balance_runtime(struct rt_rq *rt_rq)
 839{
 840	if (!sched_feat(RT_RUNTIME_SHARE))
 841		return;
 842
 843	if (rt_rq->rt_time > rt_rq->rt_runtime) {
 844		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 845		do_balance_runtime(rt_rq);
 846		raw_spin_lock(&rt_rq->rt_runtime_lock);
 847	}
 848}
 849#else /* !CONFIG_SMP */
 850static inline void balance_runtime(struct rt_rq *rt_rq) {}
 851#endif /* CONFIG_SMP */
 852
 853static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 854{
 855	int i, idle = 1, throttled = 0;
 856	const struct cpumask *span;
 857
 858	span = sched_rt_period_mask();
 859#ifdef CONFIG_RT_GROUP_SCHED
 860	/*
 861	 * FIXME: isolated CPUs should really leave the root task group,
 862	 * whether they are isolcpus or were isolated via cpusets, lest
 863	 * the timer run on a CPU which does not service all runqueues,
 864	 * potentially leaving other CPUs indefinitely throttled.  If
 865	 * isolation is really required, the user will turn the throttle
 866	 * off to kill the perturbations it causes anyway.  Meanwhile,
 867	 * this maintains functionality for boot and/or troubleshooting.
 868	 */
 869	if (rt_b == &root_task_group.rt_bandwidth)
 870		span = cpu_online_mask;
 871#endif
 872	for_each_cpu(i, span) {
 873		int enqueue = 0;
 874		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 875		struct rq *rq = rq_of_rt_rq(rt_rq);
 876		int skip;
 877
 878		/*
 879		 * When span == cpu_online_mask, taking each rq->lock
 880		 * can be time-consuming. Try to avoid it when possible.
 881		 */
 882		raw_spin_lock(&rt_rq->rt_runtime_lock);
 883		if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
 884			rt_rq->rt_runtime = rt_b->rt_runtime;
 885		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
 886		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 887		if (skip)
 888			continue;
 889
 890		raw_spin_lock(&rq->lock);
 891		update_rq_clock(rq);
 892
 893		if (rt_rq->rt_time) {
 894			u64 runtime;
 895
 896			raw_spin_lock(&rt_rq->rt_runtime_lock);
 897			if (rt_rq->rt_throttled)
 898				balance_runtime(rt_rq);
 899			runtime = rt_rq->rt_runtime;
 900			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
 901			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
 902				rt_rq->rt_throttled = 0;
 903				enqueue = 1;
 904
 905				/*
 906				 * When we're idle and a woken (rt) task is
 907				 * throttled check_preempt_curr() will set
 908				 * skip_update and the time between the wakeup
 909				 * and this unthrottle will get accounted as
 910				 * 'runtime'.
 911				 */
 912				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
 913					rq_clock_cancel_skipupdate(rq);
 914			}
 915			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 916				idle = 0;
 917			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 918		} else if (rt_rq->rt_nr_running) {
 919			idle = 0;
 920			if (!rt_rq_throttled(rt_rq))
 921				enqueue = 1;
 922		}
 923		if (rt_rq->rt_throttled)
 924			throttled = 1;
 925
 926		if (enqueue)
 927			sched_rt_rq_enqueue(rt_rq);
 928		raw_spin_unlock(&rq->lock);
 929	}
 930
 931	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
 932		return 1;
 933
 934	return idle;
 935}
 936
 937static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 938{
 939#ifdef CONFIG_RT_GROUP_SCHED
 940	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 941
 942	if (rt_rq)
 943		return rt_rq->highest_prio.curr;
 944#endif
 945
 946	return rt_task_of(rt_se)->prio;
 947}
 948
 949static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 950{
 951	u64 runtime = sched_rt_runtime(rt_rq);
 952
 953	if (rt_rq->rt_throttled)
 954		return rt_rq_throttled(rt_rq);
 955
 956	if (runtime >= sched_rt_period(rt_rq))
 957		return 0;
 958
 959	balance_runtime(rt_rq);
 960	runtime = sched_rt_runtime(rt_rq);
 961	if (runtime == RUNTIME_INF)
 962		return 0;
 963
 964	if (rt_rq->rt_time > runtime) {
 965		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 966
 967		/*
 968		 * Don't actually throttle groups that have no runtime assigned
 969		 * but accrue some time due to boosting.
 970		 */
 971		if (likely(rt_b->rt_runtime)) {
 972			rt_rq->rt_throttled = 1;
 973			printk_deferred_once("sched: RT throttling activated\n");
 974		} else {
 975			/*
 976			 * In case we did anyway, make it go away,
 977			 * replenishment is a joke, since it will replenish us
 978			 * with exactly 0 ns.
 979			 */
 980			rt_rq->rt_time = 0;
 981		}
 982
 983		if (rt_rq_throttled(rt_rq)) {
 984			sched_rt_rq_dequeue(rt_rq);
 985			return 1;
 986		}
 987	}
 988
 989	return 0;
 990}
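/*
 * With the usual default knobs (sched_rt_runtime_us = 950000 and
 * sched_rt_period_us = 1000000; both are tunable), an rt_rq that has
 * consumed more than 950 ms of RT execution within a 1 s period is
 * throttled and dequeued here; do_sched_rt_period_timer() lifts the
 * throttle once the budget has been replenished, which is what leaves
 * roughly 50 ms per second for non-RT work.
 */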
 991
 992/*
 993 * Update the current task's runtime statistics. Skip current tasks that
 994 * are not in our scheduling class.
 995 */
 996static void update_curr_rt(struct rq *rq)
 997{
 998	struct task_struct *curr = rq->curr;
 999	struct sched_rt_entity *rt_se = &curr->rt;
1000	u64 delta_exec;
1001	u64 now;
1002
1003	if (curr->sched_class != &rt_sched_class)
1004		return;
1005
1006	now = rq_clock_task(rq);
1007	delta_exec = now - curr->se.exec_start;
1008	if (unlikely((s64)delta_exec <= 0))
1009		return;
1010
 
 
 
1011	schedstat_set(curr->se.statistics.exec_max,
1012		      max(curr->se.statistics.exec_max, delta_exec));
1013
1014	curr->se.sum_exec_runtime += delta_exec;
1015	account_group_exec_runtime(curr, delta_exec);
1016
1017	curr->se.exec_start = now;
1018	cgroup_account_cputime(curr, delta_exec);
 
 
1019
1020	if (!rt_bandwidth_enabled())
1021		return;
1022
1023	for_each_sched_rt_entity(rt_se) {
1024		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1025
1026		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
1027			raw_spin_lock(&rt_rq->rt_runtime_lock);
1028			rt_rq->rt_time += delta_exec;
1029			if (sched_rt_runtime_exceeded(rt_rq))
1030				resched_curr(rq);
1031			raw_spin_unlock(&rt_rq->rt_runtime_lock);
1032		}
1033	}
1034}
1035
1036static void
1037dequeue_top_rt_rq(struct rt_rq *rt_rq)
1038{
1039	struct rq *rq = rq_of_rt_rq(rt_rq);
1040
1041	BUG_ON(&rq->rt != rt_rq);
1042
1043	if (!rt_rq->rt_queued)
1044		return;
1045
1046	BUG_ON(!rq->nr_running);
1047
1048	sub_nr_running(rq, rt_rq->rt_nr_running);
1049	rt_rq->rt_queued = 0;
1050
1051}
1052
1053static void
1054enqueue_top_rt_rq(struct rt_rq *rt_rq)
1055{
1056	struct rq *rq = rq_of_rt_rq(rt_rq);
1057
1058	BUG_ON(&rq->rt != rt_rq);
1059
1060	if (rt_rq->rt_queued)
1061		return;
1062
1063	if (rt_rq_throttled(rt_rq))
1064		return;
1065
1066	if (rt_rq->rt_nr_running) {
1067		add_nr_running(rq, rt_rq->rt_nr_running);
1068		rt_rq->rt_queued = 1;
1069	}
1070
1071	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1072	cpufreq_update_util(rq, 0);
1073}
1074
1075#if defined CONFIG_SMP
1076
1077static void
1078inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1079{
1080	struct rq *rq = rq_of_rt_rq(rt_rq);
1081
1082#ifdef CONFIG_RT_GROUP_SCHED
1083	/*
1084	 * Change rq's cpupri only if rt_rq is the top queue.
1085	 */
1086	if (&rq->rt != rt_rq)
1087		return;
1088#endif
1089	if (rq->online && prio < prev_prio)
1090		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1091}
1092
1093static void
1094dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1095{
1096	struct rq *rq = rq_of_rt_rq(rt_rq);
1097
1098#ifdef CONFIG_RT_GROUP_SCHED
1099	/*
1100	 * Change rq's cpupri only if rt_rq is the top queue.
1101	 */
1102	if (&rq->rt != rt_rq)
1103		return;
1104#endif
1105	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1106		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1107}
1108
1109#else /* CONFIG_SMP */
1110
1111static inline
1112void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1113static inline
1114void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1115
1116#endif /* CONFIG_SMP */
1117
1118#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1119static void
1120inc_rt_prio(struct rt_rq *rt_rq, int prio)
1121{
1122	int prev_prio = rt_rq->highest_prio.curr;
1123
1124	if (prio < prev_prio)
1125		rt_rq->highest_prio.curr = prio;
1126
1127	inc_rt_prio_smp(rt_rq, prio, prev_prio);
1128}
1129
1130static void
1131dec_rt_prio(struct rt_rq *rt_rq, int prio)
1132{
1133	int prev_prio = rt_rq->highest_prio.curr;
1134
1135	if (rt_rq->rt_nr_running) {
1136
1137		WARN_ON(prio < prev_prio);
1138
1139		/*
1140		 * This may have been our highest task, and therefore
1141		 * we may have some recomputation to do
1142		 */
1143		if (prio == prev_prio) {
1144			struct rt_prio_array *array = &rt_rq->active;
1145
1146			rt_rq->highest_prio.curr =
1147				sched_find_first_bit(array->bitmap);
1148		}
1149
1150	} else
1151		rt_rq->highest_prio.curr = MAX_RT_PRIO;
1152
1153	dec_rt_prio_smp(rt_rq, prio, prev_prio);
1154}
1155
1156#else
1157
1158static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1159static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1160
1161#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1162
1163#ifdef CONFIG_RT_GROUP_SCHED
1164
1165static void
1166inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1167{
1168	if (rt_se_boosted(rt_se))
1169		rt_rq->rt_nr_boosted++;
1170
1171	if (rt_rq->tg)
1172		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1173}
1174
1175static void
1176dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1177{
1178	if (rt_se_boosted(rt_se))
1179		rt_rq->rt_nr_boosted--;
1180
1181	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1182}
1183
1184#else /* CONFIG_RT_GROUP_SCHED */
1185
1186static void
1187inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1188{
1189	start_rt_bandwidth(&def_rt_bandwidth);
1190}
1191
1192static inline
1193void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1194
1195#endif /* CONFIG_RT_GROUP_SCHED */
1196
1197static inline
1198unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1199{
1200	struct rt_rq *group_rq = group_rt_rq(rt_se);
1201
1202	if (group_rq)
1203		return group_rq->rt_nr_running;
1204	else
1205		return 1;
1206}
1207
1208static inline
1209unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1210{
1211	struct rt_rq *group_rq = group_rt_rq(rt_se);
1212	struct task_struct *tsk;
1213
1214	if (group_rq)
1215		return group_rq->rr_nr_running;
1216
1217	tsk = rt_task_of(rt_se);
1218
1219	return (tsk->policy == SCHED_RR) ? 1 : 0;
1220}
1221
1222static inline
1223void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1224{
1225	int prio = rt_se_prio(rt_se);
1226
1227	WARN_ON(!rt_prio(prio));
1228	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1229	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1230
1231	inc_rt_prio(rt_rq, prio);
1232	inc_rt_migration(rt_se, rt_rq);
1233	inc_rt_group(rt_se, rt_rq);
1234}
1235
1236static inline
1237void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1238{
1239	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1240	WARN_ON(!rt_rq->rt_nr_running);
1241	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1242	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1243
1244	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1245	dec_rt_migration(rt_se, rt_rq);
1246	dec_rt_group(rt_se, rt_rq);
1247}
1248
1249/*
1250 * Change rt_se->run_list location unless SAVE && !MOVE
1251 *
1252 * assumes ENQUEUE/DEQUEUE flags match
1253 */
1254static inline bool move_entity(unsigned int flags)
1255{
1256	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1257		return false;
1258
1259	return true;
1260}
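/*
 * Flag semantics in plain terms: DEQUEUE_SAVE without DEQUEUE_MOVE means
 * "take the entity out of the accounting but keep its spot", typically
 * because the task is about to be put straight back after an attribute
 * change; only in that single combination is run_list left untouched.
 * Any other flag combination really repositions the entity.
 */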
1261
1262static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1263{
1264	list_del_init(&rt_se->run_list);
1265
1266	if (list_empty(array->queue + rt_se_prio(rt_se)))
1267		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1268
1269	rt_se->on_list = 0;
1270}
1271
1272static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1273{
1274	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1275	struct rt_prio_array *array = &rt_rq->active;
1276	struct rt_rq *group_rq = group_rt_rq(rt_se);
1277	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1278
1279	/*
 1280	 * Don't enqueue the group if it's throttled, or when empty.
 1281	 * The latter is a consequence of the former when a child group
 1282	 * gets throttled and the current group doesn't have any other
1283	 * active members.
1284	 */
1285	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1286		if (rt_se->on_list)
1287			__delist_rt_entity(rt_se, array);
1288		return;
1289	}
1290
1291	if (move_entity(flags)) {
1292		WARN_ON_ONCE(rt_se->on_list);
1293		if (flags & ENQUEUE_HEAD)
1294			list_add(&rt_se->run_list, queue);
1295		else
1296			list_add_tail(&rt_se->run_list, queue);
1297
1298		__set_bit(rt_se_prio(rt_se), array->bitmap);
1299		rt_se->on_list = 1;
1300	}
1301	rt_se->on_rq = 1;
1302
1303	inc_rt_tasks(rt_se, rt_rq);
1304}
1305
1306static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1307{
1308	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1309	struct rt_prio_array *array = &rt_rq->active;
1310
1311	if (move_entity(flags)) {
1312		WARN_ON_ONCE(!rt_se->on_list);
1313		__delist_rt_entity(rt_se, array);
1314	}
1315	rt_se->on_rq = 0;
1316
1317	dec_rt_tasks(rt_se, rt_rq);
1318}
1319
1320/*
1321 * Because the prio of an upper entry depends on the lower
 1322 * entries, we must remove entries top-down.
1323 */
1324static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1325{
1326	struct sched_rt_entity *back = NULL;
1327
1328	for_each_sched_rt_entity(rt_se) {
1329		rt_se->back = back;
1330		back = rt_se;
1331	}
1332
1333	dequeue_top_rt_rq(rt_rq_of_se(back));
1334
1335	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1336		if (on_rt_rq(rt_se))
1337			__dequeue_rt_entity(rt_se, flags);
1338	}
1339}
1340
1341static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1342{
1343	struct rq *rq = rq_of_rt_se(rt_se);
1344
1345	dequeue_rt_stack(rt_se, flags);
1346	for_each_sched_rt_entity(rt_se)
1347		__enqueue_rt_entity(rt_se, flags);
1348	enqueue_top_rt_rq(&rq->rt);
1349}
1350
1351static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1352{
1353	struct rq *rq = rq_of_rt_se(rt_se);
1354
1355	dequeue_rt_stack(rt_se, flags);
1356
1357	for_each_sched_rt_entity(rt_se) {
1358		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1359
1360		if (rt_rq && rt_rq->rt_nr_running)
1361			__enqueue_rt_entity(rt_se, flags);
1362	}
1363	enqueue_top_rt_rq(&rq->rt);
1364}
1365
1366/*
1367 * Adding/removing a task to/from a priority array:
1368 */
1369static void
1370enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1371{
1372	struct sched_rt_entity *rt_se = &p->rt;
1373
1374	if (flags & ENQUEUE_WAKEUP)
1375		rt_se->timeout = 0;
1376
1377	enqueue_rt_entity(rt_se, flags);
1378
1379	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1380		enqueue_pushable_task(rq, p);
1381}
1382
1383static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1384{
1385	struct sched_rt_entity *rt_se = &p->rt;
1386
1387	update_curr_rt(rq);
1388	dequeue_rt_entity(rt_se, flags);
1389
1390	dequeue_pushable_task(rq, p);
1391}
1392
1393/*
 1394 * Put a task at the head or the end of the run list without the overhead of
1395 * dequeue followed by enqueue.
1396 */
1397static void
1398requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1399{
1400	if (on_rt_rq(rt_se)) {
1401		struct rt_prio_array *array = &rt_rq->active;
1402		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1403
1404		if (head)
1405			list_move(&rt_se->run_list, queue);
1406		else
1407			list_move_tail(&rt_se->run_list, queue);
1408	}
1409}
1410
1411static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1412{
1413	struct sched_rt_entity *rt_se = &p->rt;
1414	struct rt_rq *rt_rq;
1415
1416	for_each_sched_rt_entity(rt_se) {
1417		rt_rq = rt_rq_of_se(rt_se);
1418		requeue_rt_entity(rt_rq, rt_se, head);
1419	}
1420}
1421
1422static void yield_task_rt(struct rq *rq)
1423{
1424	requeue_task_rt(rq, rq->curr, 0);
1425}
1426
1427#ifdef CONFIG_SMP
1428static int find_lowest_rq(struct task_struct *task);
1429
1430static int
1431select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1432{
1433	struct task_struct *curr;
1434	struct rq *rq;
1435	bool test;
1436
1437	/* For anything but wake ups, just return the task_cpu */
1438	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1439		goto out;
1440
1441	rq = cpu_rq(cpu);
1442
1443	rcu_read_lock();
1444	curr = READ_ONCE(rq->curr); /* unlocked access */
1445
1446	/*
1447	 * If the current task on @p's runqueue is an RT task, then
1448	 * try to see if we can wake this RT task up on another
1449	 * runqueue. Otherwise simply start this RT task
1450	 * on its current runqueue.
1451	 *
1452	 * We want to avoid overloading runqueues. If the woken
1453	 * task is a higher priority, then it will stay on this CPU
1454	 * and the lower prio task should be moved to another CPU.
1455	 * Even though this will probably make the lower prio task
1456	 * lose its cache, we do not want to bounce a higher task
1457	 * around just because it gave up its CPU, perhaps for a
1458	 * lock?
1459	 *
1460	 * For equal prio tasks, we just let the scheduler sort it out.
1461	 *
1462	 * Otherwise, just let it ride on the affined RQ and the
1463	 * post-schedule router will push the preempted task away
1464	 *
1465	 * This test is optimistic, if we get it wrong the load-balancer
1466	 * will have to sort it out.
1467	 *
1468	 * We take into account the capacity of the CPU to ensure it fits the
1469	 * requirement of the task - which is only important on heterogeneous
1470	 * systems like big.LITTLE.
1471	 */
1472	test = curr &&
1473	       unlikely(rt_task(curr)) &&
1474	       (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
1475
1476	if (test || !rt_task_fits_capacity(p, cpu)) {
1477		int target = find_lowest_rq(p);
1478
1479		/*
1480		 * Bail out if we were forcing a migration to find a better
1481		 * fitting CPU but our search failed.
1482		 */
1483		if (!test && target != -1 && !rt_task_fits_capacity(p, target))
1484			goto out_unlock;
1485
1486		/*
1487		 * Don't bother moving it if the destination CPU is
1488		 * not running a lower priority task.
1489		 */
1490		if (target != -1 &&
1491		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
1492			cpu = target;
1493	}
1494
1495out_unlock:
1496	rcu_read_unlock();
1497
1498out:
1499	return cpu;
1500}
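/*
 * Example of the wakeup placement above (priorities assumed): if the CPU
 * where @p last ran is currently running a prio-20 RT task and @p wakes
 * at prio-40 (numerically larger, i.e. lower priority), the test fires
 * and find_lowest_rq() looks for a CPU whose top priority is below 40;
 * @p is redirected only if such a CPU exists.  If @p instead wakes at
 * prio-10 it stays where it is and simply preempts the prio-20 task.
 */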
1501
1502static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1503{
1504	/*
1505	 * Current can't be migrated, useless to reschedule,
1506	 * let's hope p can move out.
1507	 */
1508	if (rq->curr->nr_cpus_allowed == 1 ||
1509	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1510		return;
1511
1512	/*
1513	 * p is migratable, so let's not schedule it and
1514	 * see if it is pushed or pulled somewhere else.
1515	 */
1516	if (p->nr_cpus_allowed != 1 &&
1517	    cpupri_find(&rq->rd->cpupri, p, NULL))
1518		return;
1519
1520	/*
1521	 * There appear to be other CPUs that can accept
 1522	 * the current task but none can run 'p', so let's reschedule
1523	 * to try and push the current task away:
1524	 */
1525	requeue_task_rt(rq, p, 1);
1526	resched_curr(rq);
1527}
1528
1529static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1530{
1531	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1532		/*
1533		 * This is OK, because current is on_cpu, which avoids it being
1534		 * picked for load-balance and preemption/IRQs are still
1535		 * disabled avoiding further scheduler activity on it and we've
1536		 * not yet started the picking loop.
1537		 */
1538		rq_unpin_lock(rq, rf);
1539		pull_rt_task(rq);
1540		rq_repin_lock(rq, rf);
1541	}
1542
1543	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1544}
1545#endif /* CONFIG_SMP */
1546
1547/*
1548 * Preempt the current task with a newly woken task if needed:
1549 */
1550static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1551{
1552	if (p->prio < rq->curr->prio) {
1553		resched_curr(rq);
1554		return;
1555	}
1556
1557#ifdef CONFIG_SMP
1558	/*
1559	 * If:
1560	 *
1561	 * - the newly woken task is of equal priority to the current task
1562	 * - the newly woken task is non-migratable while current is migratable
1563	 * - current will be preempted on the next reschedule
1564	 *
1565	 * we should check to see if current can readily move to a different
1566	 * cpu.  If so, we will reschedule to allow the push logic to try
1567	 * to move current somewhere else, making room for our non-migratable
1568	 * task.
1569	 */
1570	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1571		check_preempt_equal_prio(rq, p);
1572#endif
1573}
1574
1575static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1576{
1577	p->se.exec_start = rq_clock_task(rq);
1578
1579	/* The running task is never eligible for pushing */
1580	dequeue_pushable_task(rq, p);
1581
1582	if (!first)
1583		return;
1584
1585	/*
1586	 * If prev task was rt, put_prev_task() has already updated the
 1587	 * utilization. We only care about the case where we start to schedule
 1588	 * an rt task.
1589	 */
1590	if (rq->curr->sched_class != &rt_sched_class)
1591		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1592
1593	rt_queue_push_tasks(rq);
1594}
1595
1596static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1597						   struct rt_rq *rt_rq)
1598{
1599	struct rt_prio_array *array = &rt_rq->active;
1600	struct sched_rt_entity *next = NULL;
1601	struct list_head *queue;
1602	int idx;
1603
1604	idx = sched_find_first_bit(array->bitmap);
1605	BUG_ON(idx >= MAX_RT_PRIO);
1606
1607	queue = array->queue + idx;
1608	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1609
1610	return next;
1611}
1612
1613static struct task_struct *_pick_next_task_rt(struct rq *rq)
1614{
1615	struct sched_rt_entity *rt_se;
 
1616	struct rt_rq *rt_rq  = &rq->rt;
1617
1618	do {
1619		rt_se = pick_next_rt_entity(rq, rt_rq);
1620		BUG_ON(!rt_se);
1621		rt_rq = group_rt_rq(rt_se);
1622	} while (rt_rq);
1623
1624	return rt_task_of(rt_se);
 
 
 
1625}
1626
1627static struct task_struct *pick_next_task_rt(struct rq *rq)
 
1628{
1629	struct task_struct *p;
 
1630
1631	if (!sched_rt_runnable(rq))
1632		return NULL;
1633
 
 
1634	p = _pick_next_task_rt(rq);
1635	set_next_task_rt(rq, p, true);
1636	return p;
1637}
1638
1639static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1640{
1641	update_curr_rt(rq);
1642
1643	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1644
1645	/*
1646	 * The previous task needs to be made eligible for pushing
1647	 * if it is still active
1648	 */
1649	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1650		enqueue_pushable_task(rq, p);
1651}
1652
1653#ifdef CONFIG_SMP
1654
1655/* Only try algorithms three times */
1656#define RT_MAX_TRIES 3
1657
1658static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1659{
1660	if (!task_running(rq, p) &&
1661	    cpumask_test_cpu(cpu, p->cpus_ptr))
1662		return 1;
1663
1664	return 0;
1665}
1666
1667/*
 1668 * Return the highest-priority pushable task on the rq that is suitable to
 1669 * run on the given CPU, or NULL otherwise
1670 */
1671static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1672{
1673	struct plist_head *head = &rq->rt.pushable_tasks;
1674	struct task_struct *p;
1675
1676	if (!has_pushable_tasks(rq))
1677		return NULL;
1678
1679	plist_for_each_entry(p, head, pushable_tasks) {
1680		if (pick_rt_task(rq, p, cpu))
1681			return p;
1682	}
1683
1684	return NULL;
1685}
1686
1687static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1688
1689static int find_lowest_rq(struct task_struct *task)
1690{
1691	struct sched_domain *sd;
1692	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1693	int this_cpu = smp_processor_id();
1694	int cpu      = task_cpu(task);
1695	int ret;
1696
1697	/* Make sure the mask is initialized first */
1698	if (unlikely(!lowest_mask))
1699		return -1;
1700
1701	if (task->nr_cpus_allowed == 1)
1702		return -1; /* No other targets possible */
1703
1704	/*
 1705	 * If we're on an asym system, ensure we consider the different capacities
1706	 * of the CPUs when searching for the lowest_mask.
1707	 */
1708	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
1709
1710		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
1711					  task, lowest_mask,
1712					  rt_task_fits_capacity);
1713	} else {
1714
1715		ret = cpupri_find(&task_rq(task)->rd->cpupri,
1716				  task, lowest_mask);
1717	}
1718
1719	if (!ret)
1720		return -1; /* No targets found */
1721
1722	/*
1723	 * At this point we have built a mask of CPUs representing the
1724	 * lowest priority tasks in the system.  Now we want to elect
1725	 * the best one based on our affinity and topology.
1726	 *
1727	 * We prioritize the last CPU that the task executed on since
1728	 * it is most likely cache-hot in that location.
1729	 */
1730	if (cpumask_test_cpu(cpu, lowest_mask))
1731		return cpu;
1732
1733	/*
1734	 * Otherwise, we consult the sched_domains span maps to figure
1735	 * out which CPU is logically closest to our hot cache data.
1736	 */
1737	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1738		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1739
1740	rcu_read_lock();
1741	for_each_domain(cpu, sd) {
1742		if (sd->flags & SD_WAKE_AFFINE) {
1743			int best_cpu;
1744
1745			/*
1746			 * "this_cpu" is cheaper to preempt than a
1747			 * remote processor.
1748			 */
1749			if (this_cpu != -1 &&
1750			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1751				rcu_read_unlock();
1752				return this_cpu;
1753			}
1754
1755			best_cpu = cpumask_first_and(lowest_mask,
1756						     sched_domain_span(sd));
1757			if (best_cpu < nr_cpu_ids) {
1758				rcu_read_unlock();
1759				return best_cpu;
1760			}
1761		}
1762	}
1763	rcu_read_unlock();
1764
1765	/*
1766	 * And finally, if there were no matches within the domains
1767	 * just give the caller *something* to work with from the compatible
1768	 * locations.
1769	 */
1770	if (this_cpu != -1)
1771		return this_cpu;
1772
1773	cpu = cpumask_any(lowest_mask);
1774	if (cpu < nr_cpu_ids)
1775		return cpu;
1776
1777	return -1;
1778}
1779
1780/* Will lock the rq it finds */
1781static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1782{
1783	struct rq *lowest_rq = NULL;
1784	int tries;
1785	int cpu;
1786
1787	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1788		cpu = find_lowest_rq(task);
1789
1790		if ((cpu == -1) || (cpu == rq->cpu))
1791			break;
1792
1793		lowest_rq = cpu_rq(cpu);
1794
1795		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1796			/*
1797			 * Target rq has tasks of equal or higher priority,
1798			 * retrying does not release any lock and is unlikely
1799			 * to yield a different result.
1800			 */
1801			lowest_rq = NULL;
1802			break;
1803		}
1804
1805		/* if the prio of this runqueue changed, try again */
1806		if (double_lock_balance(rq, lowest_rq)) {
1807			/*
1808			 * We had to unlock the run queue. In
1809			 * the meantime, the task could have
1810			 * migrated already or had its affinity changed.
1811			 * Also make sure that it wasn't scheduled on its rq.
1812			 */
1813			if (unlikely(task_rq(task) != rq ||
1814				     !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
1815				     task_running(rq, task) ||
1816				     !rt_task(task) ||
1817				     !task_on_rq_queued(task))) {
1818
1819				double_unlock_balance(rq, lowest_rq);
1820				lowest_rq = NULL;
1821				break;
1822			}
1823		}
1824
1825		/* If this rq is still suitable use it. */
1826		if (lowest_rq->rt.highest_prio.curr > task->prio)
1827			break;
1828
1829		/* try again */
1830		double_unlock_balance(rq, lowest_rq);
1831		lowest_rq = NULL;
1832	}
1833
1834	return lowest_rq;
1835}
1836
1837static struct task_struct *pick_next_pushable_task(struct rq *rq)
1838{
1839	struct task_struct *p;
1840
1841	if (!has_pushable_tasks(rq))
1842		return NULL;
1843
1844	p = plist_first_entry(&rq->rt.pushable_tasks,
1845			      struct task_struct, pushable_tasks);
1846
1847	BUG_ON(rq->cpu != task_cpu(p));
1848	BUG_ON(task_current(rq, p));
1849	BUG_ON(p->nr_cpus_allowed <= 1);
1850
1851	BUG_ON(!task_on_rq_queued(p));
1852	BUG_ON(!rt_task(p));
1853
1854	return p;
1855}
1856
1857/*
1858 * If the current CPU has more than one RT task, see if the non-running
1859 * task can migrate over to a CPU that is running a task
1860 * of lesser priority.
1861 */
1862static int push_rt_task(struct rq *rq)
1863{
1864	struct task_struct *next_task;
1865	struct rq *lowest_rq;
1866	int ret = 0;
1867
1868	if (!rq->rt.overloaded)
1869		return 0;
1870
1871	next_task = pick_next_pushable_task(rq);
1872	if (!next_task)
1873		return 0;
1874
1875retry:
1876	if (WARN_ON(next_task == rq->curr))
1877		return 0;
1878
1879	/*
1880	 * It's possible that the next_task slipped in with a
1881	 * higher priority than current. If that's the case
1882	 * just reschedule current.
1883	 */
1884	if (unlikely(next_task->prio < rq->curr->prio)) {
1885		resched_curr(rq);
1886		return 0;
1887	}
1888
1889	/* We might release rq lock */
1890	get_task_struct(next_task);
1891
1892	/* find_lock_lowest_rq locks the rq if found */
1893	lowest_rq = find_lock_lowest_rq(next_task, rq);
1894	if (!lowest_rq) {
1895		struct task_struct *task;
1896		/*
1897		 * find_lock_lowest_rq releases rq->lock
1898		 * so it is possible that next_task has migrated.
1899		 *
1900		 * We need to make sure that the task is still on the same
1901		 * run-queue and is also still the next task eligible for
1902		 * pushing.
1903		 */
1904		task = pick_next_pushable_task(rq);
1905		if (task == next_task) {
1906			/*
1907			 * The task hasn't migrated, and is still the next
1908			 * eligible task, but we failed to find a run-queue
1909			 * to push it to.  Do not retry in this case, since
1910			 * other CPUs will pull from us when ready.
1911			 */
1912			goto out;
1913		}
1914
1915		if (!task)
1916			/* No more tasks, just exit */
1917			goto out;
1918
1919		/*
1920		 * Something has shifted, try again.
1921		 */
1922		put_task_struct(next_task);
1923		next_task = task;
1924		goto retry;
1925	}
1926
1927	deactivate_task(rq, next_task, 0);
1928	set_task_cpu(next_task, lowest_rq->cpu);
1929	activate_task(lowest_rq, next_task, 0);
1930	ret = 1;
1931
1932	resched_curr(lowest_rq);
1933
1934	double_unlock_balance(rq, lowest_rq);
1935
1936out:
1937	put_task_struct(next_task);
1938
1939	return ret;
1940}
1941
1942static void push_rt_tasks(struct rq *rq)
1943{
1944	/* push_rt_task will return true if it moved an RT */
1945	while (push_rt_task(rq))
1946		;
1947}
1948
1949#ifdef HAVE_RT_PUSH_IPI
1950
1951/*
1952 * When a high priority task schedules out from a CPU and a lower priority
1953 * task is scheduled in, a check is made to see if there are any RT tasks
1954 * on other CPUs that are waiting to run because a higher priority RT task
1955 * is currently running on its CPU. In this case, the CPU with multiple RT
1956 * tasks queued on it (overloaded) needs to be notified that a CPU has opened
1957 * up that may be able to run one of its non-running queued RT tasks.
1958 *
1959 * All CPUs with overloaded RT tasks need to be notified as there is currently
1960 * no way to know which of these CPUs have the highest priority task waiting
1961 * to run. Instead of trying to take a spinlock on each of these CPUs,
1962 * which has been shown to cause large latency when done on machines with
1963 * many CPUs, an IPI is sent to the CPUs to have them push off the
1964 * overloaded RT tasks waiting to run.
1965 *
1966 * Just sending an IPI to each of the CPUs is also an issue, as on machines
1967 * with a large CPU count this can cause an IPI storm on a CPU, especially
1968 * if it's the only CPU with multiple RT tasks queued, and a large number
1969 * of CPUs scheduling a lower priority task at the same time.
1970 *
1971 * Each root domain has its own irq work function that can iterate over
1972 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
1973 * tasks must be checked whenever one or more CPUs lower their
1974 * priority, there's a single irq work iterator that will try to
1975 * push off RT tasks that are waiting to run.
1976 *
1977 * When a CPU schedules a lower priority task, it will kick off the
1978 * irq work iterator that will jump to each CPU with overloaded RT tasks.
1979 * As it only takes the first CPU that schedules a lower priority task
1980 * to start the process, the rto_loop_start variable is atomically
1981 * test-and-set, and only the winning CPU will try to take the rto_lock.
1982 * This prevents high contention on the lock as the process handles all
1983 * CPUs scheduling lower priority tasks.
1984 *
1985 * All CPUs that are scheduling a lower priority task will increment the
1986 * rto_loop_next variable. This will make sure that the irq work iterator
1987 * checks all RT overloaded CPUs whenever a CPU schedules a new lower
1988 * priority task, even if the iterator is in the middle of a scan.
1989 * Incrementing rto_loop_next will cause the iterator to perform another scan.
1990 *
1991 */
1992static int rto_next_cpu(struct root_domain *rd)
1993{
1994	int next;
1995	int cpu;
1996
1997	/*
1998	 * When starting the IPI RT pushing, the rto_cpu is set to -1,
1999	 * rto_next_cpu() will simply return the first CPU found in
2000	 * the rto_mask.
2001	 *
2002	 * If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
2003	 * will return the next CPU found in the rto_mask.
2004	 *
2005	 * If there are no more CPUs left in the rto_mask, then a check is made
2006	 * against rto_loop and rto_loop_next. rto_loop is only updated with
2007	 * the rto_lock held, but any CPU may increment the rto_loop_next
2008	 * without any locking.
2009	 */
2010	for (;;) {
2011
2012		/* When rto_cpu is -1 this acts like cpumask_first() */
2013		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
2014
2015		rd->rto_cpu = cpu;
2016
2017		if (cpu < nr_cpu_ids)
2018			return cpu;
2019
2020		rd->rto_cpu = -1;
2021
2022		/*
2023		 * ACQUIRE ensures we see the @rto_mask changes
2024		 * made prior to the @next value observed.
2025		 *
2026		 * Matches WMB in rt_set_overload().
2027		 */
2028		next = atomic_read_acquire(&rd->rto_loop_next);
2029
2030		if (rd->rto_loop == next)
2031			break;
2032
2033		rd->rto_loop = next;
2034	}
2035
2036	return -1;
2037}
2038
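/*
 * rto_loop_start acts as a simple test-and-set lock: only the CPU that
 * atomically flips it from 0 to 1 may start the push IPI chain.
 */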
2039static inline bool rto_start_trylock(atomic_t *v)
2040{
2041	return !atomic_cmpxchg_acquire(v, 0, 1);
2042}
2043
2044static inline void rto_start_unlock(atomic_t *v)
2045{
2046	atomic_set_release(v, 0);
2047}
2048
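/*
 * Ask the first RT-overloaded CPU to push its waiting tasks. If the IPI
 * chain is already running, bumping rto_loop_next is enough to make it do
 * another full pass over the overloaded CPUs.
 */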
2049static void tell_cpu_to_push(struct rq *rq)
2050{
2051	int cpu = -1;
2052
2053	/* Keep the loop going if the IPI is currently active */
2054	atomic_inc(&rq->rd->rto_loop_next);
2055
2056	/* Only one CPU can initiate a loop at a time */
2057	if (!rto_start_trylock(&rq->rd->rto_loop_start))
2058		return;
2059
2060	raw_spin_lock(&rq->rd->rto_lock);
2061
2062	/*
2063	 * The rto_cpu is updated under the lock; if it has a valid CPU
2064	 * then the IPI is still running and will continue due to the
2065	 * update to loop_next, and nothing needs to be done here.
2066	 * Otherwise it is finishing up and an IPI needs to be sent.
2067	 */
2068	if (rq->rd->rto_cpu < 0)
2069		cpu = rto_next_cpu(rq->rd);
2070
2071	raw_spin_unlock(&rq->rd->rto_lock);
2072
2073	rto_start_unlock(&rq->rd->rto_loop_start);
2074
2075	if (cpu >= 0) {
2076		/* Make sure the rd does not get freed while pushing */
2077		sched_get_rd(rq->rd);
2078		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2079	}
2080}
2081
2082/* Called from hardirq context */
2083void rto_push_irq_work_func(struct irq_work *work)
2084{
2085	struct root_domain *rd =
2086		container_of(work, struct root_domain, rto_push_work);
2087	struct rq *rq;
2088	int cpu;
2089
2090	rq = this_rq();
2091
2092	/*
2093	 * We do not need to grab the lock to check for has_pushable_tasks.
2094	 * When it gets updated, a check is made to see if a push is possible.
2095	 */
2096	if (has_pushable_tasks(rq)) {
2097		raw_spin_lock(&rq->lock);
2098		push_rt_tasks(rq);
2099		raw_spin_unlock(&rq->lock);
2100	}
2101
2102	raw_spin_lock(&rd->rto_lock);
2103
2104	/* Pass the IPI to the next rt overloaded queue */
2105	cpu = rto_next_cpu(rd);
2106
2107	raw_spin_unlock(&rd->rto_lock);
2108
2109	if (cpu < 0) {
2110		sched_put_rd(rd);
2111		return;
2112	}
2113
2114	/* Try the next RT overloaded CPU */
2115	irq_work_queue_on(&rd->rto_push_work, cpu);
2116}
2117#endif /* HAVE_RT_PUSH_IPI */
2118
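/*
 * Try to pull an RT task from an overloaded runqueue onto this one if that
 * task has a higher priority than whatever is about to run here. With
 * RT_PUSH_IPI enabled, the work is delegated to the overloaded CPUs via
 * tell_cpu_to_push() instead of taking their runqueue locks from here.
 */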
2119static void pull_rt_task(struct rq *this_rq)
2120{
2121	int this_cpu = this_rq->cpu, cpu;
2122	bool resched = false;
2123	struct task_struct *p;
2124	struct rq *src_rq;
2125	int rt_overload_count = rt_overloaded(this_rq);
2126
2127	if (likely(!rt_overload_count))
2128		return;
2129
2130	/*
2131	 * Match the barrier from rt_set_overload(); this guarantees that if we
2132	 * see overloaded we must also see the rto_mask bit.
2133	 */
2134	smp_rmb();
2135
2136	/* If we are the only overloaded CPU do nothing */
2137	if (rt_overload_count == 1 &&
2138	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2139		return;
2140
2141#ifdef HAVE_RT_PUSH_IPI
2142	if (sched_feat(RT_PUSH_IPI)) {
2143		tell_cpu_to_push(this_rq);
2144		return;
2145	}
2146#endif
2147
2148	for_each_cpu(cpu, this_rq->rd->rto_mask) {
2149		if (this_cpu == cpu)
2150			continue;
2151
2152		src_rq = cpu_rq(cpu);
2153
2154		/*
2155		 * Don't bother taking the src_rq->lock if the next highest
2156		 * task is known to be lower-priority than our current task.
2157		 * This may look racy, but if this value is about to go
2158		 * logically higher, the src_rq will push this task away.
2159		 * And if it's going logically lower, we do not care.
2160		 */
2161		if (src_rq->rt.highest_prio.next >=
2162		    this_rq->rt.highest_prio.curr)
2163			continue;
2164
2165		/*
2166		 * We can potentially drop this_rq's lock in
2167		 * double_lock_balance, and another CPU could
2168		 * alter this_rq
2169		 */
2170		double_lock_balance(this_rq, src_rq);
2171
2172		/*
2173		 * We can only pull a task that is pushable
2174		 * on its rq, and no others.
2175		 */
2176		p = pick_highest_pushable_task(src_rq, this_cpu);
2177
2178		/*
2179		 * Do we have an RT task that preempts
2180		 * the to-be-scheduled task?
2181		 */
2182		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2183			WARN_ON(p == src_rq->curr);
2184			WARN_ON(!task_on_rq_queued(p));
2185
2186			/*
2187			 * There's a chance that p is higher in priority
2188			 * than what's currently running on its CPU.
2189			 * This is just because p is waking up and hasn't
2190			 * had a chance to schedule. We only pull
2191			 * p if it is lower in priority than the
2192			 * current task on the run queue.
2193			 */
2194			if (p->prio < src_rq->curr->prio)
2195				goto skip;
2196
2197			resched = true;
2198
2199			deactivate_task(src_rq, p, 0);
2200			set_task_cpu(p, this_cpu);
2201			activate_task(this_rq, p, 0);
2202			/*
2203			 * We continue with the search, just in
2204			 * case there's an even higher prio task
2205			 * in another runqueue. (low likelihood
2206			 * but possible)
2207			 */
2208		}
2209skip:
2210		double_unlock_balance(this_rq, src_rq);
2211	}
2212
2213	if (resched)
2214		resched_curr(this_rq);
2215}
2216
2217/*
2218 * If we are not running and we are not going to reschedule soon, we should
2219 * try to push tasks away now
2220 */
2221static void task_woken_rt(struct rq *rq, struct task_struct *p)
2222{
2223	bool need_to_push = !task_running(rq, p) &&
2224			    !test_tsk_need_resched(rq->curr) &&
2225			    p->nr_cpus_allowed > 1 &&
2226			    (dl_task(rq->curr) || rt_task(rq->curr)) &&
2227			    (rq->curr->nr_cpus_allowed < 2 ||
2228			     rq->curr->prio <= p->prio);
2229
2230	if (need_to_push)
2231		push_rt_tasks(rq);
2232}
2233
2234/* Assumes rq->lock is held */
2235static void rq_online_rt(struct rq *rq)
2236{
2237	if (rq->rt.overloaded)
2238		rt_set_overload(rq);
2239
2240	__enable_runtime(rq);
2241
2242	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2243}
2244
2245/* Assumes rq->lock is held */
2246static void rq_offline_rt(struct rq *rq)
2247{
2248	if (rq->rt.overloaded)
2249		rt_clear_overload(rq);
2250
2251	__disable_runtime(rq);
2252
2253	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2254}
2255
2256/*
2257 * When switching from the RT queue, we bring ourselves to a position
2258 * where we might want to pull RT tasks from other runqueues.
2259 */
2260static void switched_from_rt(struct rq *rq, struct task_struct *p)
2261{
2262	/*
2263	 * If there are other RT tasks then we will reschedule
2264	 * and the scheduling of the other RT tasks will handle
2265	 * the balancing. But if we are the last RT task
2266	 * we may need to handle the pulling of RT tasks
2267	 * now.
2268	 */
2269	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2270		return;
2271
2272	rt_queue_pull_task(rq);
2273}
2274
2275void __init init_sched_rt_class(void)
2276{
2277	unsigned int i;
2278
2279	for_each_possible_cpu(i) {
2280		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2281					GFP_KERNEL, cpu_to_node(i));
2282	}
2283}
2284#endif /* CONFIG_SMP */
2285
2286/*
2287 * When switching a task to RT, we may overload the runqueue
2288 * with RT tasks. In this case we try to push them off to
2289 * other runqueues.
2290 */
2291static void switched_to_rt(struct rq *rq, struct task_struct *p)
2292{
2293	/*
2294	 * If we are already running, then there's nothing
2295	 * that needs to be done. But if we are not running
2296	 * we may need to preempt the current running task.
2297	 * If that current running task is also an RT task
2298	 * then see if we can move to another run queue.
2299	 */
2300	if (task_on_rq_queued(p) && rq->curr != p) {
2301#ifdef CONFIG_SMP
2302		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2303			rt_queue_push_tasks(rq);
2304#endif /* CONFIG_SMP */
2305		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2306			resched_curr(rq);
2307	}
2308}
2309
2310/*
2311 * Priority of the task has changed. This may cause
2312 * us to initiate a push or pull.
2313 */
2314static void
2315prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2316{
2317	if (!task_on_rq_queued(p))
2318		return;
2319
2320	if (rq->curr == p) {
2321#ifdef CONFIG_SMP
2322		/*
2323		 * If our priority decreases while running, we
2324		 * may need to pull tasks to this runqueue.
2325		 */
2326		if (oldprio < p->prio)
2327			rt_queue_pull_task(rq);
2328
2329		/*
2330		 * If there's a higher priority task waiting to run
2331		 * then reschedule.
2332		 */
2333		if (p->prio > rq->rt.highest_prio.curr)
2334			resched_curr(rq);
2335#else
2336		/* For UP simply resched on drop of prio */
2337		if (oldprio < p->prio)
2338			resched_curr(rq);
2339#endif /* CONFIG_SMP */
2340	} else {
2341		/*
2342		 * This task is not running, but if it is
2343		 * higher in priority than the currently running task
2344		 * then reschedule.
2345		 */
2346		if (p->prio < rq->curr->prio)
2347			resched_curr(rq);
2348	}
2349}
2350
2351#ifdef CONFIG_POSIX_TIMERS
2352static void watchdog(struct rq *rq, struct task_struct *p)
2353{
2354	unsigned long soft, hard;
2355
2356	/* max may change after cur was read, this will be fixed next tick */
2357	soft = task_rlimit(p, RLIMIT_RTTIME);
2358	hard = task_rlimit_max(p, RLIMIT_RTTIME);
2359
2360	if (soft != RLIM_INFINITY) {
2361		unsigned long next;
2362
2363		if (p->rt.watchdog_stamp != jiffies) {
2364			p->rt.timeout++;
2365			p->rt.watchdog_stamp = jiffies;
2366		}
2367
2368		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2369		if (p->rt.timeout > next) {
2370			posix_cputimers_rt_watchdog(&p->posix_cputimers,
2371						    p->se.sum_exec_runtime);
2372		}
2373	}
2374}
2375#else
2376static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2377#endif
2378
2379/*
2380 * scheduler tick hitting a task of our scheduling class.
2381 *
2382 * NOTE: This function can be called remotely by the tick offload that
2383 * goes along full dynticks. Therefore no local assumption can be made
2384 * and everything must be accessed through the @rq and @curr passed in
2385 * parameters.
2386 */
2387static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2388{
2389	struct sched_rt_entity *rt_se = &p->rt;
2390
2391	update_curr_rt(rq);
2392	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2393
2394	watchdog(rq, p);
2395
2396	/*
2397	 * RR tasks need a special form of timeslice management.
2398	 * FIFO tasks have no timeslices.
2399	 */
2400	if (p->policy != SCHED_RR)
2401		return;
2402
2403	if (--p->rt.time_slice)
2404		return;
2405
2406	p->rt.time_slice = sched_rr_timeslice;
2407
2408	/*
2409	 * Requeue to the end of the queue if we (and all of our ancestors) are not
2410	 * the only element on the queue
2411	 */
2412	for_each_sched_rt_entity(rt_se) {
2413		if (rt_se->run_list.prev != rt_se->run_list.next) {
2414			requeue_task_rt(rq, p, 0);
2415			resched_curr(rq);
2416			return;
2417		}
2418	}
2419}
2420
2421static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2422{
2423	/*
2424	 * Time slice is 0 for SCHED_FIFO tasks
2425	 */
2426	if (task->policy == SCHED_RR)
2427		return sched_rr_timeslice;
2428	else
2429		return 0;
2430}
2431
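/*
 * The RT scheduling class. It is placed in its own linker section so that
 * the core scheduler can order the scheduling classes by address and compare
 * their relative priority cheaply.
 */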
2432const struct sched_class rt_sched_class
2433	__attribute__((section("__rt_sched_class"))) = {
2434	.enqueue_task		= enqueue_task_rt,
2435	.dequeue_task		= dequeue_task_rt,
2436	.yield_task		= yield_task_rt,
2437
2438	.check_preempt_curr	= check_preempt_curr_rt,
2439
2440	.pick_next_task		= pick_next_task_rt,
2441	.put_prev_task		= put_prev_task_rt,
2442	.set_next_task          = set_next_task_rt,
2443
2444#ifdef CONFIG_SMP
2445	.balance		= balance_rt,
2446	.select_task_rq		= select_task_rq_rt,
2447	.set_cpus_allowed       = set_cpus_allowed_common,
2448	.rq_online              = rq_online_rt,
2449	.rq_offline             = rq_offline_rt,
2450	.task_woken		= task_woken_rt,
2451	.switched_from		= switched_from_rt,
2452#endif
2453
2454	.task_tick		= task_tick_rt,
2455
2456	.get_rr_interval	= get_rr_interval_rt,
2457
2458	.prio_changed		= prio_changed_rt,
2459	.switched_to		= switched_to_rt,
2460
2461	.update_curr		= update_curr_rt,
2462
2463#ifdef CONFIG_UCLAMP_TASK
2464	.uclamp_enabled		= 1,
2465#endif
2466};
2467
2468#ifdef CONFIG_RT_GROUP_SCHED
2469/*
2470 * Ensure that the real time constraints are schedulable.
2471 */
2472static DEFINE_MUTEX(rt_constraints_mutex);
2473
2474static inline int tg_has_rt_tasks(struct task_group *tg)
2475{
2476	struct task_struct *task;
2477	struct css_task_iter it;
2478	int ret = 0;
2479
2480	/*
2481	 * Autogroups do not have RT tasks; see autogroup_create().
2482	 */
2483	if (task_group_is_autogroup(tg))
2484		return 0;
2485
2486	css_task_iter_start(&tg->css, 0, &it);
2487	while (!ret && (task = css_task_iter_next(&it)))
2488		ret |= rt_task(task);
2489	css_task_iter_end(&it);
2490
2491	return ret;
2492}
2493
2494struct rt_schedulable_data {
2495	struct task_group *tg;
2496	u64 rt_period;
2497	u64 rt_runtime;
2498};
2499
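/*
 * Check one node of the task_group tree: with the proposed period/runtime
 * for d->tg in place, runtime must not exceed the period, the group's own
 * bandwidth ratio must not exceed the global limit, and the sum of its
 * children's ratios must not exceed its own.
 */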
2500static int tg_rt_schedulable(struct task_group *tg, void *data)
2501{
2502	struct rt_schedulable_data *d = data;
2503	struct task_group *child;
2504	unsigned long total, sum = 0;
2505	u64 period, runtime;
2506
2507	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2508	runtime = tg->rt_bandwidth.rt_runtime;
2509
2510	if (tg == d->tg) {
2511		period = d->rt_period;
2512		runtime = d->rt_runtime;
2513	}
2514
2515	/*
2516	 * Cannot have more runtime than the period.
2517	 */
2518	if (runtime > period && runtime != RUNTIME_INF)
2519		return -EINVAL;
2520
2521	/*
2522	 * Ensure we don't starve existing RT tasks if runtime turns zero.
2523	 */
2524	if (rt_bandwidth_enabled() && !runtime &&
2525	    tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
2526		return -EBUSY;
2527
2528	total = to_ratio(period, runtime);
2529
2530	/*
2531	 * Nobody can have more than the global setting allows.
2532	 */
2533	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2534		return -EINVAL;
2535
2536	/*
2537	 * The sum of our children's runtime should not exceed our own.
2538	 */
2539	list_for_each_entry_rcu(child, &tg->children, siblings) {
2540		period = ktime_to_ns(child->rt_bandwidth.rt_period);
2541		runtime = child->rt_bandwidth.rt_runtime;
2542
2543		if (child == d->tg) {
2544			period = d->rt_period;
2545			runtime = d->rt_runtime;
2546		}
2547
2548		sum += to_ratio(period, runtime);
2549	}
2550
2551	if (sum > total)
2552		return -EINVAL;
2553
2554	return 0;
2555}
2556
2557static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2558{
2559	int ret;
2560
2561	struct rt_schedulable_data data = {
2562		.tg = tg,
2563		.rt_period = period,
2564		.rt_runtime = runtime,
2565	};
2566
2567	rcu_read_lock();
2568	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2569	rcu_read_unlock();
2570
2571	return ret;
2572}
2573
2574static int tg_set_rt_bandwidth(struct task_group *tg,
2575		u64 rt_period, u64 rt_runtime)
2576{
2577	int i, err = 0;
2578
2579	/*
2580	 * Disallowing the root group RT runtime is BAD, it would prevent the
2581	 * kernel from creating (and/or operating) RT threads.
2582	 */
2583	if (tg == &root_task_group && rt_runtime == 0)
2584		return -EINVAL;
2585
2586	/* A period of zero doesn't make any sense. */
2587	if (rt_period == 0)
2588		return -EINVAL;
2589
2590	/*
2591	 * Bound the quota to defend against overflow during bandwidth shift.
2592	 */
2593	if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2594		return -EINVAL;
2595
2596	mutex_lock(&rt_constraints_mutex);
2597	err = __rt_schedulable(tg, rt_period, rt_runtime);
2598	if (err)
2599		goto unlock;
2600
2601	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2602	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2603	tg->rt_bandwidth.rt_runtime = rt_runtime;
2604
2605	for_each_possible_cpu(i) {
2606		struct rt_rq *rt_rq = tg->rt_rq[i];
2607
2608		raw_spin_lock(&rt_rq->rt_runtime_lock);
2609		rt_rq->rt_runtime = rt_runtime;
2610		raw_spin_unlock(&rt_rq->rt_runtime_lock);
2611	}
2612	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2613unlock:
2614	mutex_unlock(&rt_constraints_mutex);
2615
2616	return err;
2617}
2618
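/*
 * cgroup interface: rt_runtime_us is in microseconds, a negative value
 * means unlimited runtime (RUNTIME_INF).
 */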
2619int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2620{
2621	u64 rt_runtime, rt_period;
2622
2623	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2624	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2625	if (rt_runtime_us < 0)
2626		rt_runtime = RUNTIME_INF;
2627	else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2628		return -EINVAL;
2629
2630	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2631}
2632
2633long sched_group_rt_runtime(struct task_group *tg)
2634{
2635	u64 rt_runtime_us;
2636
2637	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2638		return -1;
2639
2640	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2641	do_div(rt_runtime_us, NSEC_PER_USEC);
2642	return rt_runtime_us;
2643}
2644
2645int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2646{
2647	u64 rt_runtime, rt_period;
2648
2649	if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2650		return -EINVAL;
2651
2652	rt_period = rt_period_us * NSEC_PER_USEC;
2653	rt_runtime = tg->rt_bandwidth.rt_runtime;
2654
2655	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2656}
2657
2658long sched_group_rt_period(struct task_group *tg)
2659{
2660	u64 rt_period_us;
2661
2662	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2663	do_div(rt_period_us, NSEC_PER_USEC);
2664	return rt_period_us;
2665}
2666
2667static int sched_rt_global_constraints(void)
2668{
2669	int ret = 0;
2670
2671	mutex_lock(&rt_constraints_mutex);
2672	ret = __rt_schedulable(NULL, 0, 0);
2673	mutex_unlock(&rt_constraints_mutex);
2674
2675	return ret;
2676}
2677
2678int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2679{
2680	/* Don't accept realtime tasks when there is no way for them to run */
2681	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2682		return 0;
2683
2684	return 1;
2685}
2686
2687#else /* !CONFIG_RT_GROUP_SCHED */
2688static int sched_rt_global_constraints(void)
2689{
2690	unsigned long flags;
2691	int i;
2692
2693	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2694	for_each_possible_cpu(i) {
2695		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
2696
2697		raw_spin_lock(&rt_rq->rt_runtime_lock);
2698		rt_rq->rt_runtime = global_rt_runtime();
2699		raw_spin_unlock(&rt_rq->rt_runtime_lock);
2700	}
2701	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2702
2703	return 0;
2704}
2705#endif /* CONFIG_RT_GROUP_SCHED */
2706
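/*
 * Validate the sched_rt_period_us/sched_rt_runtime_us sysctls: the period
 * must be positive and, unless the runtime is unlimited, it must neither
 * exceed the period nor overflow max_rt_runtime when converted to ns.
 */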
2707static int sched_rt_global_validate(void)
2708{
2709	if (sysctl_sched_rt_period <= 0)
2710		return -EINVAL;
2711
2712	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2713		((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
2714		 ((u64)sysctl_sched_rt_runtime *
2715			NSEC_PER_USEC > max_rt_runtime)))
2716		return -EINVAL;
2717
2718	return 0;
2719}
2720
2721static void sched_rt_do_global(void)
2722{
2723	def_rt_bandwidth.rt_runtime = global_rt_runtime();
2724	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
2725}
2726
2727int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
2728		size_t *lenp, loff_t *ppos)
2729{
2730	int old_period, old_runtime;
2731	static DEFINE_MUTEX(mutex);
2732	int ret;
2733
2734	mutex_lock(&mutex);
2735	old_period = sysctl_sched_rt_period;
2736	old_runtime = sysctl_sched_rt_runtime;
2737
2738	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2739
2740	if (!ret && write) {
2741		ret = sched_rt_global_validate();
2742		if (ret)
2743			goto undo;
2744
2745		ret = sched_dl_global_validate();
2746		if (ret)
2747			goto undo;
2748
2749		ret = sched_rt_global_constraints();
2750		if (ret)
2751			goto undo;
2752
2753		sched_rt_do_global();
2754		sched_dl_do_global();
2755	}
2756	if (0) {
2757undo:
2758		sysctl_sched_rt_period = old_period;
2759		sysctl_sched_rt_runtime = old_runtime;
2760	}
2761	mutex_unlock(&mutex);
2762
2763	return ret;
2764}
2765
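/*
 * sched_rr_timeslice_ms sysctl handler: the value is stored internally in
 * jiffies; writing zero (or a negative value) restores the default
 * RR_TIMESLICE.
 */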
2766int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
2767		size_t *lenp, loff_t *ppos)
2768{
2769	int ret;
2770	static DEFINE_MUTEX(mutex);
2771
2772	mutex_lock(&mutex);
2773	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2774	/*
2775	 * Make sure that internally we keep jiffies.
2776	 * Also, writing zero resets the timeslice to the default:
2777	 */
2778	if (!ret && write) {
2779		sched_rr_timeslice =
2780			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
2781			msecs_to_jiffies(sysctl_sched_rr_timeslice);
2782	}
2783	mutex_unlock(&mutex);
2784
2785	return ret;
2786}
2787
2788#ifdef CONFIG_SCHED_DEBUG
2789void print_rt_stats(struct seq_file *m, int cpu)
2790{
2791	rt_rq_iter_t iter;
2792	struct rt_rq *rt_rq;
2793
2794	rcu_read_lock();
2795	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2796		print_rt_rq(m, cpu, rt_rq);
2797	rcu_read_unlock();
2798}
2799#endif /* CONFIG_SCHED_DEBUG */