v3.15
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

int sched_rr_timeslice = RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}
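
/*
 * Added commentary (not in the original source): an rt_bandwidth pair
 * (rt_period, rt_runtime) caps RT CPU usage at rt_runtime ns of
 * execution per rt_period ns window.  The hrtimer armed here fires
 * once per period and replenishes per-runqueue budgets through
 * do_sched_rt_period_timer().  With the default 1s/0.95s settings, a
 * runaway SCHED_FIFO loop is throttled for the last 50ms of every
 * second, leaving time for non-RT housekeeping.
 */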

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}
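
/*
 * Added commentary: the MAX_RT_PRIO bit set above is a sentinel for
 * sched_find_first_bit(), so a search over an otherwise empty bitmap
 * terminates and returns MAX_RT_PRIO, which callers treat as "no RT
 * task queued".
 */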

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq, cpu_rq(i));
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static int pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}
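
/*
 * Added commentary: a runqueue counts as "RT overloaded" when it has
 * more than one runnable RT task and at least one of them may migrate
 * (nr_cpus_allowed > 1).  The root domain's rto_mask and rto_count
 * let other CPUs cheaply discover candidates to pull from.
 */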

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static inline void set_post_schedule(struct rq *rq)
{
	/*
	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push
	 */
	rq->post_schedule = has_pushable_tasks(rq);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else
		rq->rt.highest_prio.next = MAX_RT_PRIO;
}
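
/*
 * Added commentary: pushable_tasks is a priority-sorted list (plist),
 * so its first entry is always the highest-priority task that could
 * be pushed elsewhere; both helpers above keep highest_prio.next in
 * sync with that head element.
 */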

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline int pull_rt_task(struct rq *this_rq)
{
	return 0;
}

static inline void set_post_schedule(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, false);
		if (rt_rq->highest_prio.curr < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}
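
/*
 * Added worked example (illustrative numbers, not from the source):
 * in a 4-CPU root domain (weight == 4) with a 1000ms period, a
 * neighbour holding 400ms of spare runtime donates 400/4 == 100ms to
 * the depleted runqueue, clamped so the borrower's runtime never
 * exceeds the period.  Iterating over all neighbours spreads the
 * borrowed time roughly evenly.
 */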

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (!sched_feat(RT_RUNTIME_SHARE))
		return more;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled.  If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway.  Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * Force a clock update if the CPU was idle,
				 * lest wakeup -> unthrottle time accumulate.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq->skip_clock_update = -1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}
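
/*
 * Added commentary: each expiry of the period timer lands here; the
 * accumulated rt_time of every runqueue in the span is reduced by
 * overrun * runtime, and a throttled queue whose consumption drops
 * back under its budget is re-enqueued.  Returning 1 reports "idle",
 * which lets the caller stop the hrtimer until RT activity resumes.
 */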

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			static bool once = false;

			rt_rq->rt_throttled = 1;

			if (!once) {
				once = true;
				printk_sched("sched: RT throttling activated\n");
			}
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}
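
/*
 * Added commentary: the for_each_sched_rt_entity() walk above charges
 * delta_exec at every level of the group hierarchy (when
 * CONFIG_RT_GROUP_SCHED is enabled), so a child group's usage also
 * counts against each ancestor's budget.
 */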

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
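
/*
 * Added commentary: cpupri is a root-domain-wide map from each CPU to
 * the priority of its top RT task.  Keeping it current here is what
 * lets find_lowest_rq() locate a CPU running lower-priority work
 * without scanning every runqueue.
 */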

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running++;

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	if (head)
		list_add(&rt_se->run_list, queue);
	else
		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, false);
	}
}
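
/*
 * Added commentary: for_each_sched_rt_entity() walks child -> parent,
 * so dequeue_rt_stack() first records the chain via ->back pointers
 * and then dequeues from the topmost ancestor downward, since a
 * parent entity's priority is derived from its children.
 */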

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);

	inc_nr_running(rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);

	dec_nr_running(rq);
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (p->nr_cpus_allowed == 1)
		goto out;

	/* For anything but wake ups, just return the task_cpu */
	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is a higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher task
	 * around just because it gave up its CPU, perhaps for a
	 * lock?
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 *
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away.
	 *
	 * This test is optimistic; if we get it wrong the load-balancer
	 * will have to sort it out.
	 */
	if (curr && unlikely(rt_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     curr->prio <= p->prio)) {
		int target = find_lowest_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	if (rq->curr->nr_cpus_allowed == 1)
		return;

	if (p->nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * There appear to be other CPUs that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_task(rq->curr);
}
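
/*
 * Added commentary: the two cpupri_find() calls above ask (a) could
 * the woken task p itself run on some CPU with lower-priority work
 * (then leave it to the push logic), and (b) could current move
 * instead.  Only when p is stuck here and current is movable does it
 * pay to requeue p at the head and reschedule so current gets pushed
 * away.
 */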

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu.  If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
		check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq  = &rq->rt;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq_clock_task(rq);

	return p;
}
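
/*
 * Added commentary: picking the next RT task is O(1):
 * sched_find_first_bit() yields the highest populated priority and
 * the head of that queue runs.  With group scheduling the loop
 * descends through group entities (my_q != NULL) until it reaches an
 * actual task.
 */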

static struct task_struct *
pick_next_task_rt(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *p;
	struct rt_rq *rt_rq = &rq->rt;

	if (need_pull_rt_task(rq, prev)) {
		pull_rt_task(rq);
		/*
		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
		 * means a dl or stop task can slip in, in which case we need
		 * to re-start task selection.
		 */
		if (unlikely((rq->stop && rq->stop->on_rq) ||
			     rq->dl.dl_nr_running))
			return RETRY_TASK;
	}

	/*
	 * We may dequeue prev's rt_rq in put_prev_task().
	 * So, we update time before rt_nr_running check.
	 */
	if (prev->sched_class == &rt_sched_class)
		update_curr_rt(rq);

	if (!rt_rq->rt_nr_running)
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	put_prev_task(rq, prev);

	p = _pick_next_task_rt(rq);

	/* The running task is never eligible for pushing */
	if (p)
		dequeue_pushable_task(rq, p);

	set_post_schedule(rq);

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
		return 1;
	return 0;
}

/*
 * Return the highest pushable rq's task, which is suitable to be executed
 * on the cpu, NULL otherwise
 */
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
	struct plist_head *head = &rq->rt.pushable_tasks;
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	plist_for_each_entry(p, head, pushable_tasks) {
		if (pick_rt_task(rq, p, cpu))
			return p;
	}

	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu      = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!lowest_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			best_cpu = cpumask_first_and(lowest_mask,
						     sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;
	return -1;
}
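
/*
 * Added commentary: find_lowest_rq() prefers, in order, the task's
 * previous CPU (likely cache-hot), a CPU inside a wake-affine sched
 * domain (favouring this_cpu), and finally any CPU from the cpupri
 * mask; -1 means no CPU currently runs work of lower priority that
 * the task is allowed to use.
 */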

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       tsk_cpus_allowed(task)) ||
				     task_running(rq, task) ||
				     !task->on_rq)) {

				double_unlock_balance(rq, lowest_rq);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!p->on_rq);
	BUG_ON(!rt_task(p));

	return p;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in of
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task hasn't migrated, and is still the next
			 * eligible task, but we failed to find a run-queue
			 * to push it to.  Do not retry in this case, since
			 * other cpus will pull from us when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks, just exit */
			goto out;

		/*
		 * Something has shifted, try again.
		 */
		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);
	ret = 1;

	resched_task(lowest_rq->curr);

	double_unlock_balance(rq, lowest_rq);

out:
	put_task_struct(next_task);

	return ret;
}

static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	/*
	 * Match the barrier from rt_set_overload(); this guarantees that
	 * if we see overloaded we must also see the rto_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care.
		 */
		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)
			continue;

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * alter this_rq
		 */
		double_lock_balance(this_rq, src_rq);

		/*
		 * We can only pull a task that is pushable on its rq,
		 * and no others.
		 */
		p = pick_highest_pushable_task(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue
			 */
			if (p->prio < src_rq->curr->prio)
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}

static void post_schedule_rt(struct rq *rq)
{
	push_rt_tasks(rq);
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_tasks(rq) &&
	    p->nr_cpus_allowed > 1 &&
	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     rq->curr->prio <= p->prio))
		push_rt_tasks(rq);
}
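
/*
 * Added commentary: push and pull are the two halves of RT balancing.
 * A runqueue holding more RT tasks than it can run pushes the extras
 * toward CPUs running lower-priority work; a CPU whose own priority
 * just dropped pulls the best overloaded waiter toward itself.
 * Together they aim to keep the N highest-priority RT tasks running
 * on N distinct CPUs.
 */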

static void set_cpus_allowed_rt(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct rq *rq;
	int weight;

	BUG_ON(!rt_task(p));

	if (!p->on_rq)
		return;

	weight = cpumask_weight(new_mask);

	/*
	 * Only update if the process changes its state from whether it
	 * can migrate or not.
	 */
	if ((p->nr_cpus_allowed > 1) == (weight > 1))
		return;

	rq = task_rq(p);

	/*
	 * The process used to be able to migrate OR it can now migrate
	 */
	if (weight <= 1) {
		if (!task_current(rq, p))
			dequeue_pushable_task(rq, p);
		BUG_ON(!rq->rt.rt_nr_migratory);
		rq->rt.rt_nr_migratory--;
	} else {
		if (!task_current(rq, p))
			enqueue_pushable_task(rq, p);
		rq->rt.rt_nr_migratory++;
	}

	update_rt_migration(&rq->rt);
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!p->on_rq || rq->rt.rt_nr_running)
		return;

	if (pull_rt_task(rq))
		resched_task(rq->curr);
}

void __init init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
	}
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (p->on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!p->on_rq)
		return;

	if (rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if its priority is
		 * higher than that of the currently running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	/* max may change after cur was read, this will be fixed next tick */
	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		if (p->rt.watchdog_stamp != jiffies) {
			p->rt.timeout++;
			p->rt.watchdog_stamp = jiffies;
		}

		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
	}
}

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = sched_rr_timeslice;

	/*
	 * Requeue to the end of queue if we (and all of our ancestors) are not
	 * the only element on the queue
	 */
	for_each_sched_rt_entity(rt_se) {
		if (rt_se->run_list.prev != rt_se->run_list.next) {
			requeue_task_rt(rq, p, 0);
			set_tsk_need_resched(p);
			return;
		}
	}
}
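
/*
 * Added commentary: watchdog() implements RLIMIT_RTTIME by counting
 * ticks of RT execution in p->rt.timeout and arming the posix CPU
 * timer machinery once the limit is crossed.  In task_tick_rt(), the
 * run_list.prev != run_list.next test is a cheap "more than one
 * entity on this list?" check, so a SCHED_RR task alone at its
 * priority keeps running without a requeue.
 */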

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks
	 */
	if (task->policy == SCHED_RR)
		return sched_rr_timeslice;
	else
		return 0;
}
2022
2023const struct sched_class rt_sched_class = {
2024	.next			= &fair_sched_class,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2025	.enqueue_task		= enqueue_task_rt,
2026	.dequeue_task		= dequeue_task_rt,
2027	.yield_task		= yield_task_rt,
2028
2029	.check_preempt_curr	= check_preempt_curr_rt,
2030
2031	.pick_next_task		= pick_next_task_rt,
2032	.put_prev_task		= put_prev_task_rt,
 
2033
2034#ifdef CONFIG_SMP
 
 
2035	.select_task_rq		= select_task_rq_rt,
2036
2037	.set_cpus_allowed       = set_cpus_allowed_rt,
2038	.rq_online              = rq_online_rt,
2039	.rq_offline             = rq_offline_rt,
2040	.post_schedule		= post_schedule_rt,
2041	.task_woken		= task_woken_rt,
2042	.switched_from		= switched_from_rt,
2043#endif
2044
2045	.set_curr_task          = set_curr_task_rt,
2046	.task_tick		= task_tick_rt,
2047
2048	.get_rr_interval	= get_rr_interval_rt,
2049
2050	.prio_changed		= prio_changed_rt,
2051	.switched_to		= switched_to_rt,
2052};
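
/*
 * Illustrative sketch (not part of this file): a task enters this class
 * from userspace via sched_setscheduler(2) with SCHED_FIFO or SCHED_RR
 * and an rt priority in [1, 99], which the core maps onto the
 * rt_prio_array indices used throughout this file.
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	struct sched_param sp = { .sched_priority = 50 };
 *	if (sched_setscheduler(0, SCHED_RR, &sp) == -1)
 *		perror("sched_setscheduler");	// needs CAP_SYS_NICE or RLIMIT_RTPRIO
 */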
2053
2054#ifdef CONFIG_SCHED_DEBUG
2055extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2056
2057void print_rt_stats(struct seq_file *m, int cpu)
2058{
2059	rt_rq_iter_t iter;
2060	struct rt_rq *rt_rq;
2061
2062	rcu_read_lock();
2063	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2064		print_rt_rq(m, cpu, rt_rq);
2065	rcu_read_unlock();
2066}
2067#endif /* CONFIG_SCHED_DEBUG */
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
   4 * policies)
   5 */
   6
   7int sched_rr_timeslice = RR_TIMESLICE;
   8/* More than 4 hours if BW_SHIFT equals 20. */
   9static const u64 max_rt_runtime = MAX_BW;
  10
  11static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
  12
  13struct rt_bandwidth def_rt_bandwidth;
  14
  15/*
  16 * period over which we measure -rt task CPU usage in us.
  17 * default: 1s
  18 */
  19int sysctl_sched_rt_period = 1000000;
  20
  21/*
  22 * part of the period that we allow rt tasks to run in us.
  23 * default: 0.95s
  24 */
  25int sysctl_sched_rt_runtime = 950000;
  26
  27#ifdef CONFIG_SYSCTL
  28static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
  29static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
  30		size_t *lenp, loff_t *ppos);
  31static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
  32		size_t *lenp, loff_t *ppos);
  33static struct ctl_table sched_rt_sysctls[] = {
  34	{
  35		.procname       = "sched_rt_period_us",
  36		.data           = &sysctl_sched_rt_period,
  37		.maxlen         = sizeof(int),
  38		.mode           = 0644,
  39		.proc_handler   = sched_rt_handler,
  40		.extra1         = SYSCTL_ONE,
  41		.extra2         = SYSCTL_INT_MAX,
  42	},
  43	{
  44		.procname       = "sched_rt_runtime_us",
  45		.data           = &sysctl_sched_rt_runtime,
  46		.maxlen         = sizeof(int),
  47		.mode           = 0644,
  48		.proc_handler   = sched_rt_handler,
  49		.extra1         = SYSCTL_NEG_ONE,
  50		.extra2         = (void *)&sysctl_sched_rt_period,
  51	},
  52	{
  53		.procname       = "sched_rr_timeslice_ms",
  54		.data           = &sysctl_sched_rr_timeslice,
  55		.maxlen         = sizeof(int),
  56		.mode           = 0644,
  57		.proc_handler   = sched_rr_handler,
  58	},
  59	{}
  60};
  61
  62static int __init sched_rt_sysctl_init(void)
  63{
  64	register_sysctl_init("kernel", sched_rt_sysctls);
  65	return 0;
  66}
  67late_initcall(sched_rt_sysctl_init);
  68#endif
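
/*
 * The table above surfaces as /proc/sys/kernel/sched_rt_period_us,
 * /proc/sys/kernel/sched_rt_runtime_us and
 * /proc/sys/kernel/sched_rr_timeslice_ms. With the defaults above
 * (runtime 950000 us per 1000000 us period), RT tasks may consume at
 * most 95% of each CPU per period; writing -1 to sched_rt_runtime_us
 * disables the throttling altogether.
 */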
  69
  70static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
  71{
  72	struct rt_bandwidth *rt_b =
  73		container_of(timer, struct rt_bandwidth, rt_period_timer);
  74	int idle = 0;
  75	int overrun;
  76
  77	raw_spin_lock(&rt_b->rt_runtime_lock);
  78	for (;;) {
  79		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
  80		if (!overrun)
  81			break;
  82
  83		raw_spin_unlock(&rt_b->rt_runtime_lock);
  84		idle = do_sched_rt_period_timer(rt_b, overrun);
  85		raw_spin_lock(&rt_b->rt_runtime_lock);
  86	}
  87	if (idle)
  88		rt_b->rt_period_active = 0;
  89	raw_spin_unlock(&rt_b->rt_runtime_lock);
  90
  91	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
  92}
  93
  94void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
  95{
  96	rt_b->rt_period = ns_to_ktime(period);
  97	rt_b->rt_runtime = runtime;
  98
  99	raw_spin_lock_init(&rt_b->rt_runtime_lock);
 100
 101	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
 102		     HRTIMER_MODE_REL_HARD);
 103	rt_b->rt_period_timer.function = sched_rt_period_timer;
 104}
 105
 106static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
 107{
 108	raw_spin_lock(&rt_b->rt_runtime_lock);
 109	if (!rt_b->rt_period_active) {
 110		rt_b->rt_period_active = 1;
 111		/*
 112		 * SCHED_DEADLINE updates the bandwidth, as a runaway
 113		 * RT task alongside a DL task could hog a CPU. But DL does
 114		 * not reset the period. If a deadline task was running
 115		 * without an RT task running, it can cause RT tasks to
 116		 * throttle when they start up. Kick the timer right away
 117		 * to update the period.
 118		 */
 119		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
 120		hrtimer_start_expires(&rt_b->rt_period_timer,
 121				      HRTIMER_MODE_ABS_PINNED_HARD);
 122	}
 123	raw_spin_unlock(&rt_b->rt_runtime_lock);
 124}
 125
 126static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 127{
 128	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 129		return;
 130
 131	do_start_rt_bandwidth(rt_b);
 132}
 133
 134void init_rt_rq(struct rt_rq *rt_rq)
 135{
 136	struct rt_prio_array *array;
 137	int i;
 138
 139	array = &rt_rq->active;
 140	for (i = 0; i < MAX_RT_PRIO; i++) {
 141		INIT_LIST_HEAD(array->queue + i);
 142		__clear_bit(i, array->bitmap);
 143	}
 144	/* delimiter for bitsearch: */
 145	__set_bit(MAX_RT_PRIO, array->bitmap);
 146
 147#if defined CONFIG_SMP
 148	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
 149	rt_rq->highest_prio.next = MAX_RT_PRIO-1;
 150	rt_rq->overloaded = 0;
 151	plist_head_init(&rt_rq->pushable_tasks);
 152#endif /* CONFIG_SMP */
 153	/* We start in dequeued state, because no RT tasks are queued */
 154	rt_rq->rt_queued = 0;
 155
 156	rt_rq->rt_time = 0;
 157	rt_rq->rt_throttled = 0;
 158	rt_rq->rt_runtime = 0;
 159	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
 160}
 161
 162#ifdef CONFIG_RT_GROUP_SCHED
 163static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
 164{
 165	hrtimer_cancel(&rt_b->rt_period_timer);
 166}
 167
 168#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
 169
 170static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 171{
 172#ifdef CONFIG_SCHED_DEBUG
 173	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
 174#endif
 175	return container_of(rt_se, struct task_struct, rt);
 176}
 177
 178static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 179{
 180	return rt_rq->rq;
 181}
 182
 183static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 184{
 185	return rt_se->rt_rq;
 186}
 187
 188static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 189{
 190	struct rt_rq *rt_rq = rt_se->rt_rq;
 191
 192	return rt_rq->rq;
 193}
 194
 195void unregister_rt_sched_group(struct task_group *tg)
 196{
 197	if (tg->rt_se)
 198		destroy_rt_bandwidth(&tg->rt_bandwidth);
 199
 200}
 201
 202void free_rt_sched_group(struct task_group *tg)
 203{
 204	int i;
 205
 206	for_each_possible_cpu(i) {
 207		if (tg->rt_rq)
 208			kfree(tg->rt_rq[i]);
 209		if (tg->rt_se)
 210			kfree(tg->rt_se[i]);
 211	}
 212
 213	kfree(tg->rt_rq);
 214	kfree(tg->rt_se);
 215}
 216
 217void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 218		struct sched_rt_entity *rt_se, int cpu,
 219		struct sched_rt_entity *parent)
 220{
 221	struct rq *rq = cpu_rq(cpu);
 222
 223	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
 224	rt_rq->rt_nr_boosted = 0;
 225	rt_rq->rq = rq;
 226	rt_rq->tg = tg;
 227
 228	tg->rt_rq[cpu] = rt_rq;
 229	tg->rt_se[cpu] = rt_se;
 230
 231	if (!rt_se)
 232		return;
 233
 234	if (!parent)
 235		rt_se->rt_rq = &rq->rt;
 236	else
 237		rt_se->rt_rq = parent->my_q;
 238
 239	rt_se->my_q = rt_rq;
 240	rt_se->parent = parent;
 241	INIT_LIST_HEAD(&rt_se->run_list);
 242}
 243
 244int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 245{
 246	struct rt_rq *rt_rq;
 247	struct sched_rt_entity *rt_se;
 248	int i;
 249
 250	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
 251	if (!tg->rt_rq)
 252		goto err;
 253	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
 254	if (!tg->rt_se)
 255		goto err;
 256
 257	init_rt_bandwidth(&tg->rt_bandwidth,
 258			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
 259
 260	for_each_possible_cpu(i) {
 261		rt_rq = kzalloc_node(sizeof(struct rt_rq),
 262				     GFP_KERNEL, cpu_to_node(i));
 263		if (!rt_rq)
 264			goto err;
 265
 266		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
 267				     GFP_KERNEL, cpu_to_node(i));
 268		if (!rt_se)
 269			goto err_free_rq;
 270
 271		init_rt_rq(rt_rq);
 272		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 273		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
 274	}
 275
 276	return 1;
 277
 278err_free_rq:
 279	kfree(rt_rq);
 280err:
 281	return 0;
 282}
 283
 284#else /* CONFIG_RT_GROUP_SCHED */
 285
 286#define rt_entity_is_task(rt_se) (1)
 287
 288static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 289{
 290	return container_of(rt_se, struct task_struct, rt);
 291}
 292
 293static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 294{
 295	return container_of(rt_rq, struct rq, rt);
 296}
 297
 298static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 299{
 300	struct task_struct *p = rt_task_of(rt_se);
 301
 302	return task_rq(p);
 303}
 304
 305static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 306{
 307	struct rq *rq = rq_of_rt_se(rt_se);
 308
 309	return &rq->rt;
 310}
 311
 312void unregister_rt_sched_group(struct task_group *tg) { }
 313
 314void free_rt_sched_group(struct task_group *tg) { }
 315
 316int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 317{
 318	return 1;
 319}
 320#endif /* CONFIG_RT_GROUP_SCHED */
 321
 322#ifdef CONFIG_SMP
 323
 324static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 325{
 326	/* Try to pull RT tasks here if we lower this rq's prio */
 327	return rq->online && rq->rt.highest_prio.curr > prev->prio;
 328}
 329
 330static inline int rt_overloaded(struct rq *rq)
 331{
 332	return atomic_read(&rq->rd->rto_count);
 333}
 334
 335static inline void rt_set_overload(struct rq *rq)
 336{
 337	if (!rq->online)
 338		return;
 339
 340	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
 341	/*
 342	 * Make sure the mask is visible before we set
 343	 * the overload count. That is checked to determine
 344	 * if we should look at the mask. It would be a shame
 345	 * if we looked at the mask, but the mask was not
 346	 * updated yet.
 347	 *
 348	 * Matched by the barrier in pull_rt_task().
 349	 */
 350	smp_wmb();
 351	atomic_inc(&rq->rd->rto_count);
 352}
 353
 354static inline void rt_clear_overload(struct rq *rq)
 355{
 356	if (!rq->online)
 357		return;
 358
 359	/* the order here really doesn't matter */
 360	atomic_dec(&rq->rd->rto_count);
 361	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 362}
 363
 364static inline int has_pushable_tasks(struct rq *rq)
 365{
 366	return !plist_head_empty(&rq->rt.pushable_tasks);
 367}
 368
 369static DEFINE_PER_CPU(struct balance_callback, rt_push_head);
 370static DEFINE_PER_CPU(struct balance_callback, rt_pull_head);
 371
 372static void push_rt_tasks(struct rq *);
 373static void pull_rt_task(struct rq *);
 374
 375static inline void rt_queue_push_tasks(struct rq *rq)
 376{
 377	if (!has_pushable_tasks(rq))
 378		return;
 379
 380	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
 381}
 382
 383static inline void rt_queue_pull_task(struct rq *rq)
 384{
 385	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
 386}
 387
 388static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 389{
 390	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 391	plist_node_init(&p->pushable_tasks, p->prio);
 392	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
 393
 394	/* Update the highest prio pushable task */
 395	if (p->prio < rq->rt.highest_prio.next)
 396		rq->rt.highest_prio.next = p->prio;
 397
 398	if (!rq->rt.overloaded) {
 399		rt_set_overload(rq);
 400		rq->rt.overloaded = 1;
 401	}
 402}
 403
 404static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 405{
 406	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 407
 408	/* Update the new highest prio pushable task */
 409	if (has_pushable_tasks(rq)) {
 410		p = plist_first_entry(&rq->rt.pushable_tasks,
 411				      struct task_struct, pushable_tasks);
 412		rq->rt.highest_prio.next = p->prio;
 413	} else {
 414		rq->rt.highest_prio.next = MAX_RT_PRIO-1;
 415
 416		if (rq->rt.overloaded) {
 417			rt_clear_overload(rq);
 418			rq->rt.overloaded = 0;
 419		}
 420	}
 421}
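
/*
 * Note on the structure used above: pushable_tasks is a plist kept
 * sorted by ascending p->prio, i.e. numerically lowest (highest RT
 * priority) first. That ordering is what lets the dequeue path
 * recompute highest_prio.next with a single plist_first_entry()
 * instead of rescanning the whole queue.
 */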
 422
 423#else
 424
 425static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 426{
 427}
 428
 429static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 430{
 431}
 432
 433static inline void rt_queue_push_tasks(struct rq *rq)
 434{
 435}
 436#endif /* CONFIG_SMP */
 437
 438static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
 439static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
 440
 441static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 442{
 443	return rt_se->on_rq;
 444}
 445
 446#ifdef CONFIG_UCLAMP_TASK
 447/*
 448 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 449 * settings.
 450 *
 451 * This check is only important for heterogeneous systems where uclamp_min value
 452 * is higher than the capacity of a @cpu. For non-heterogeneous systems this
 453 * function will always return true.
 454 *
 455 * The function will return true if the capacity of the @cpu is >= the
 456 * uclamp_min and false otherwise.
 457 *
 458 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 459 * > uclamp_max.
 460 */
 461static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
 462{
 463	unsigned int min_cap;
 464	unsigned int max_cap;
 465	unsigned int cpu_cap;
 466
 467	/* Only heterogeneous systems can benefit from this check */
 468	if (!sched_asym_cpucap_active())
 469		return true;
 470
 471	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
 472	max_cap = uclamp_eff_value(p, UCLAMP_MAX);
 473
 474	cpu_cap = arch_scale_cpu_capacity(cpu);
 475
 476	return cpu_cap >= min(min_cap, max_cap);
 477}
 478#else
 479static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
 480{
 481	return true;
 482}
 483#endif
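
/*
 * Worked example for rt_task_fits_capacity() (hypothetical big.LITTLE
 * numbers): with little cores of capacity 512 and big cores of 1024, a
 * task with uclamp_min = 768 fits only the big cores, since
 * 512 < min(768, uclamp_max). If userspace also sets uclamp_max = 400,
 * the effective request becomes min(768, 400) = 400 <= 512 and the
 * little cores fit again.
 */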
 484
 485#ifdef CONFIG_RT_GROUP_SCHED
 486
 487static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 488{
 489	if (!rt_rq->tg)
 490		return RUNTIME_INF;
 491
 492	return rt_rq->rt_runtime;
 493}
 494
 495static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 496{
 497	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
 498}
 499
 500typedef struct task_group *rt_rq_iter_t;
 501
 502static inline struct task_group *next_task_group(struct task_group *tg)
 503{
 504	do {
 505		tg = list_entry_rcu(tg->list.next,
 506			typeof(struct task_group), list);
 507	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
 508
 509	if (&tg->list == &task_groups)
 510		tg = NULL;
 511
 512	return tg;
 513}
 514
 515#define for_each_rt_rq(rt_rq, iter, rq)					\
 516	for (iter = container_of(&task_groups, typeof(*iter), list);	\
 517		(iter = next_task_group(iter)) &&			\
 518		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
 519
 520#define for_each_sched_rt_entity(rt_se) \
 521	for (; rt_se; rt_se = rt_se->parent)
 522
 523static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 524{
 525	return rt_se->my_q;
 526}
 527
 528static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
 529static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
 530
 531static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 532{
 533	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
 534	struct rq *rq = rq_of_rt_rq(rt_rq);
 535	struct sched_rt_entity *rt_se;
 536
 537	int cpu = cpu_of(rq);
 538
 539	rt_se = rt_rq->tg->rt_se[cpu];
 540
 541	if (rt_rq->rt_nr_running) {
 542		if (!rt_se)
 543			enqueue_top_rt_rq(rt_rq);
 544		else if (!on_rt_rq(rt_se))
 545			enqueue_rt_entity(rt_se, 0);
 546
 547		if (rt_rq->highest_prio.curr < curr->prio)
 548			resched_curr(rq);
 549	}
 550}
 551
 552static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 553{
 554	struct sched_rt_entity *rt_se;
 555	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 556
 557	rt_se = rt_rq->tg->rt_se[cpu];
 558
 559	if (!rt_se) {
 560		dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
 561		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
 562		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
 563	}
 564	else if (on_rt_rq(rt_se))
 565		dequeue_rt_entity(rt_se, 0);
 566}
 567
 568static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 569{
 570	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
 571}
 572
 573static int rt_se_boosted(struct sched_rt_entity *rt_se)
 574{
 575	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 576	struct task_struct *p;
 577
 578	if (rt_rq)
 579		return !!rt_rq->rt_nr_boosted;
 580
 581	p = rt_task_of(rt_se);
 582	return p->prio != p->normal_prio;
 583}
 584
 585#ifdef CONFIG_SMP
 586static inline const struct cpumask *sched_rt_period_mask(void)
 587{
 588	return this_rq()->rd->span;
 589}
 590#else
 591static inline const struct cpumask *sched_rt_period_mask(void)
 592{
 593	return cpu_online_mask;
 594}
 595#endif
 596
 597static inline
 598struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 599{
 600	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
 601}
 602
 603static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 604{
 605	return &rt_rq->tg->rt_bandwidth;
 606}
 607
 608#else /* !CONFIG_RT_GROUP_SCHED */
 609
 610static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 611{
 612	return rt_rq->rt_runtime;
 613}
 614
 615static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 616{
 617	return ktime_to_ns(def_rt_bandwidth.rt_period);
 618}
 619
 620typedef struct rt_rq *rt_rq_iter_t;
 621
 622#define for_each_rt_rq(rt_rq, iter, rq) \
 623	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
 624
 625#define for_each_sched_rt_entity(rt_se) \
 626	for (; rt_se; rt_se = NULL)
 627
 628static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 629{
 630	return NULL;
 631}
 632
 633static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 634{
 635	struct rq *rq = rq_of_rt_rq(rt_rq);
 636
 637	if (!rt_rq->rt_nr_running)
 638		return;
 639
 640	enqueue_top_rt_rq(rt_rq);
 641	resched_curr(rq);
 642}
 643
 644static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 645{
 646	dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
 647}
 648
 649static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 650{
 651	return rt_rq->rt_throttled;
 652}
 653
 654static inline const struct cpumask *sched_rt_period_mask(void)
 655{
 656	return cpu_online_mask;
 657}
 658
 659static inline
 660struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 661{
 662	return &cpu_rq(cpu)->rt;
 663}
 664
 665static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 666{
 667	return &def_rt_bandwidth;
 668}
 669
 670#endif /* CONFIG_RT_GROUP_SCHED */
 671
 672bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
 673{
 674	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 675
 676	return (hrtimer_active(&rt_b->rt_period_timer) ||
 677		rt_rq->rt_time < rt_b->rt_runtime);
 678}
 679
 680#ifdef CONFIG_SMP
 681/*
 682 * We ran out of runtime, see if we can borrow some from our neighbours.
 683 */
 684static void do_balance_runtime(struct rt_rq *rt_rq)
 685{
 686	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 687	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
 688	int i, weight;
 689	u64 rt_period;
 690
 691	weight = cpumask_weight(rd->span);
 692
 693	raw_spin_lock(&rt_b->rt_runtime_lock);
 694	rt_period = ktime_to_ns(rt_b->rt_period);
 695	for_each_cpu(i, rd->span) {
 696		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 697		s64 diff;
 698
 699		if (iter == rt_rq)
 700			continue;
 701
 702		raw_spin_lock(&iter->rt_runtime_lock);
 703		/*
 704		 * Either all rqs have inf runtime and there's nothing to steal
 705		 * or __disable_runtime() below sets a specific rq to inf to
 706		 * indicate it's been disabled and disallow stealing.
 707		 */
 708		if (iter->rt_runtime == RUNTIME_INF)
 709			goto next;
 710
 711		/*
 712		 * From runqueues with spare time, take 1/n part of their
 713		 * spare time, but no more than our period.
 714		 */
 715		diff = iter->rt_runtime - iter->rt_time;
 716		if (diff > 0) {
 717			diff = div_u64((u64)diff, weight);
 718			if (rt_rq->rt_runtime + diff > rt_period)
 719				diff = rt_period - rt_rq->rt_runtime;
 720			iter->rt_runtime -= diff;
 721			rt_rq->rt_runtime += diff;
 722			if (rt_rq->rt_runtime == rt_period) {
 723				raw_spin_unlock(&iter->rt_runtime_lock);
 724				break;
 725			}
 726		}
 727next:
 728		raw_spin_unlock(&iter->rt_runtime_lock);
 729	}
 730	raw_spin_unlock(&rt_b->rt_runtime_lock);
 731}
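
/*
 * Worked example (hypothetical numbers): on a 4-CPU root domain
 * (weight = 4), a neighbour with 950000 us of runtime but only
 * 150000 us of rt_time has 800000 us spare, so diff = 800000 / 4 =
 * 200000 us migrates to the starved rt_rq, capped so the total never
 * exceeds rt_period.
 */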
 732
 733/*
 734 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 735 */
 736static void __disable_runtime(struct rq *rq)
 737{
 738	struct root_domain *rd = rq->rd;
 739	rt_rq_iter_t iter;
 740	struct rt_rq *rt_rq;
 741
 742	if (unlikely(!scheduler_running))
 743		return;
 744
 745	for_each_rt_rq(rt_rq, iter, rq) {
 746		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 747		s64 want;
 748		int i;
 749
 750		raw_spin_lock(&rt_b->rt_runtime_lock);
 751		raw_spin_lock(&rt_rq->rt_runtime_lock);
 752		/*
 753		 * Either we're all inf and nobody needs to borrow, or we're
 754		 * already disabled and thus have nothing to do, or we have
 755		 * exactly the right amount of runtime to take out.
 756		 */
 757		if (rt_rq->rt_runtime == RUNTIME_INF ||
 758				rt_rq->rt_runtime == rt_b->rt_runtime)
 759			goto balanced;
 760		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 761
 762		/*
 763		 * Calculate the difference between what we started out with
 764		 * and what we currently have; that's the amount of runtime
 765		 * we lent out and now have to reclaim.
 766		 */
 767		want = rt_b->rt_runtime - rt_rq->rt_runtime;
 768
 769		/*
 770		 * Greedy reclaim, take back as much as we can.
 771		 */
 772		for_each_cpu(i, rd->span) {
 773			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 774			s64 diff;
 775
 776			/*
 777			 * Can't reclaim from ourselves or disabled runqueues.
 778			 */
 779			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 780				continue;
 781
 782			raw_spin_lock(&iter->rt_runtime_lock);
 783			if (want > 0) {
 784				diff = min_t(s64, iter->rt_runtime, want);
 785				iter->rt_runtime -= diff;
 786				want -= diff;
 787			} else {
 788				iter->rt_runtime -= want;
 789				want -= want;
 790			}
 791			raw_spin_unlock(&iter->rt_runtime_lock);
 792
 793			if (!want)
 794				break;
 795		}
 796
 797		raw_spin_lock(&rt_rq->rt_runtime_lock);
 798		/*
 799		 * We cannot be left wanting - that would mean some runtime
 800		 * leaked out of the system.
 801		 */
 802		WARN_ON_ONCE(want);
 803balanced:
 804		/*
 805		 * Disable all the borrow logic by pretending we have inf
 806		 * runtime - in which case borrowing doesn't make sense.
 807		 */
 808		rt_rq->rt_runtime = RUNTIME_INF;
 809		rt_rq->rt_throttled = 0;
 810		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 811		raw_spin_unlock(&rt_b->rt_runtime_lock);
 812
 813		/* Make rt_rq available for pick_next_task() */
 814		sched_rt_rq_enqueue(rt_rq);
 815	}
 816}
 817
 818static void __enable_runtime(struct rq *rq)
 819{
 820	rt_rq_iter_t iter;
 821	struct rt_rq *rt_rq;
 822
 823	if (unlikely(!scheduler_running))
 824		return;
 825
 826	/*
 827	 * Reset each runqueue's bandwidth settings
 828	 */
 829	for_each_rt_rq(rt_rq, iter, rq) {
 830		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 831
 832		raw_spin_lock(&rt_b->rt_runtime_lock);
 833		raw_spin_lock(&rt_rq->rt_runtime_lock);
 834		rt_rq->rt_runtime = rt_b->rt_runtime;
 835		rt_rq->rt_time = 0;
 836		rt_rq->rt_throttled = 0;
 837		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 838		raw_spin_unlock(&rt_b->rt_runtime_lock);
 839	}
 840}
 841
 842static void balance_runtime(struct rt_rq *rt_rq)
 843{
 844	if (!sched_feat(RT_RUNTIME_SHARE))
 845		return;
 846
 847	if (rt_rq->rt_time > rt_rq->rt_runtime) {
 848		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 849		do_balance_runtime(rt_rq);
 850		raw_spin_lock(&rt_rq->rt_runtime_lock);
 851	}
 852}
 853#else /* !CONFIG_SMP */
 854static inline void balance_runtime(struct rt_rq *rt_rq) {}
 855#endif /* CONFIG_SMP */
 856
 857static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 858{
 859	int i, idle = 1, throttled = 0;
 860	const struct cpumask *span;
 861
 862	span = sched_rt_period_mask();
 863#ifdef CONFIG_RT_GROUP_SCHED
 864	/*
 865	 * FIXME: isolated CPUs should really leave the root task group,
 866	 * whether they are isolcpus or were isolated via cpusets, lest
 867	 * the timer run on a CPU which does not service all runqueues,
 868	 * potentially leaving other CPUs indefinitely throttled.  If
 869	 * isolation is really required, the user will turn the throttle
 870	 * off to kill the perturbations it causes anyway.  Meanwhile,
 871	 * this maintains functionality for boot and/or troubleshooting.
 872	 */
 873	if (rt_b == &root_task_group.rt_bandwidth)
 874		span = cpu_online_mask;
 875#endif
 876	for_each_cpu(i, span) {
 877		int enqueue = 0;
 878		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 879		struct rq *rq = rq_of_rt_rq(rt_rq);
 880		struct rq_flags rf;
 881		int skip;
 882
 883		/*
 884		 * When span == cpu_online_mask, taking each rq->lock
 885		 * can be time-consuming. Try to avoid it when possible.
 886		 */
 887		raw_spin_lock(&rt_rq->rt_runtime_lock);
 888		if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
 889			rt_rq->rt_runtime = rt_b->rt_runtime;
 890		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
 891		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 892		if (skip)
 893			continue;
 894
 895		rq_lock(rq, &rf);
 896		update_rq_clock(rq);
 897
 898		if (rt_rq->rt_time) {
 899			u64 runtime;
 900
 901			raw_spin_lock(&rt_rq->rt_runtime_lock);
 902			if (rt_rq->rt_throttled)
 903				balance_runtime(rt_rq);
 904			runtime = rt_rq->rt_runtime;
 905			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
 906			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
 907				rt_rq->rt_throttled = 0;
 908				enqueue = 1;
 909
 910				/*
 911				 * When we're idle and a woken (rt) task is
 912				 * throttled wakeup_preempt() will set
 913				 * skip_update and the time between the wakeup
 914				 * and this unthrottle will get accounted as
 915				 * 'runtime'.
 916				 */
 917				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
 918					rq_clock_cancel_skipupdate(rq);
 919			}
 920			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 921				idle = 0;
 922			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 923		} else if (rt_rq->rt_nr_running) {
 924			idle = 0;
 925			if (!rt_rq_throttled(rt_rq))
 926				enqueue = 1;
 927		}
 928		if (rt_rq->rt_throttled)
 929			throttled = 1;
 930
 931		if (enqueue)
 932			sched_rt_rq_enqueue(rt_rq);
 933		rq_unlock(rq, &rf);
 934	}
 935
 936	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
 937		return 1;
 938
 939	return idle;
 940}
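
/*
 * Replenishment arithmetic, in short: each period the handler above
 * subtracts up to overrun * runtime from the accumulated rt_time, so a
 * throttled queue is re-enqueued once rt_time drops back below its
 * runtime budget. overrun > 1 only when the hrtimer missed whole
 * periods, e.g. after a long IRQs-off window.
 */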
 941
 942static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 943{
 944#ifdef CONFIG_RT_GROUP_SCHED
 945	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 946
 947	if (rt_rq)
 948		return rt_rq->highest_prio.curr;
 949#endif
 950
 951	return rt_task_of(rt_se)->prio;
 952}
 953
 954static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 955{
 956	u64 runtime = sched_rt_runtime(rt_rq);
 957
 958	if (rt_rq->rt_throttled)
 959		return rt_rq_throttled(rt_rq);
 960
 961	if (runtime >= sched_rt_period(rt_rq))
 962		return 0;
 963
 964	balance_runtime(rt_rq);
 965	runtime = sched_rt_runtime(rt_rq);
 966	if (runtime == RUNTIME_INF)
 967		return 0;
 968
 969	if (rt_rq->rt_time > runtime) {
 970		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 971
 972		/*
 973		 * Don't actually throttle groups that have no runtime assigned
 974		 * but accrue some time due to boosting.
 975		 */
 976		if (likely(rt_b->rt_runtime)) {
 977			rt_rq->rt_throttled = 1;
 978			printk_deferred_once("sched: RT throttling activated\n");
 979		} else {
 980			/*
 981			 * In case we did anyway, make it go away,
 982			 * replenishment is a joke, since it will replenish us
 983			 * with exactly 0 ns.
 984			 */
 985			rt_rq->rt_time = 0;
 986		}
 987
 988		if (rt_rq_throttled(rt_rq)) {
 989			sched_rt_rq_dequeue(rt_rq);
 990			return 1;
 991		}
 992	}
 993
 994	return 0;
 995}
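
/*
 * With the default 950000/1000000 budget this is the function behind
 * the "sched: RT throttling activated" message above: a spinning
 * SCHED_FIFO task gets dequeued for the final 50 ms of every second,
 * which is what keeps a runaway RT loop from monopolizing a CPU.
 */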
 996
 997/*
 998 * Update the current task's runtime statistics. Skip current tasks that
 999 * are not in our scheduling class.
1000 */
1001static void update_curr_rt(struct rq *rq)
1002{
1003	struct task_struct *curr = rq->curr;
1004	struct sched_rt_entity *rt_se = &curr->rt;
1005	s64 delta_exec;
1006
1007	if (curr->sched_class != &rt_sched_class)
1008		return;
1009
1010	delta_exec = update_curr_common(rq);
1011	if (unlikely(delta_exec <= 0))
1012		return;
1013
1014	if (!rt_bandwidth_enabled())
1015		return;
1016
1017	for_each_sched_rt_entity(rt_se) {
1018		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1019		int exceeded;
1020
1021		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
1022			raw_spin_lock(&rt_rq->rt_runtime_lock);
1023			rt_rq->rt_time += delta_exec;
1024			exceeded = sched_rt_runtime_exceeded(rt_rq);
1025			if (exceeded)
1026				resched_curr(rq);
1027			raw_spin_unlock(&rt_rq->rt_runtime_lock);
1028			if (exceeded)
1029				do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
1030		}
1031	}
1032}
1033
1034static void
1035dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
1036{
1037	struct rq *rq = rq_of_rt_rq(rt_rq);
1038
1039	BUG_ON(&rq->rt != rt_rq);
1040
1041	if (!rt_rq->rt_queued)
1042		return;
1043
1044	BUG_ON(!rq->nr_running);
1045
1046	sub_nr_running(rq, count);
1047	rt_rq->rt_queued = 0;
1048
1049}
1050
1051static void
1052enqueue_top_rt_rq(struct rt_rq *rt_rq)
1053{
1054	struct rq *rq = rq_of_rt_rq(rt_rq);
1055
1056	BUG_ON(&rq->rt != rt_rq);
1057
1058	if (rt_rq->rt_queued)
1059		return;
1060
1061	if (rt_rq_throttled(rt_rq))
1062		return;
1063
1064	if (rt_rq->rt_nr_running) {
1065		add_nr_running(rq, rt_rq->rt_nr_running);
1066		rt_rq->rt_queued = 1;
1067	}
1068
1069	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1070	cpufreq_update_util(rq, 0);
1071}
1072
1073#if defined CONFIG_SMP
1074
1075static void
1076inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1077{
1078	struct rq *rq = rq_of_rt_rq(rt_rq);
1079
1080#ifdef CONFIG_RT_GROUP_SCHED
1081	/*
1082	 * Change rq's cpupri only if rt_rq is the top queue.
1083	 */
1084	if (&rq->rt != rt_rq)
1085		return;
1086#endif
1087	if (rq->online && prio < prev_prio)
1088		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1089}
1090
1091static void
1092dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1093{
1094	struct rq *rq = rq_of_rt_rq(rt_rq);
1095
1096#ifdef CONFIG_RT_GROUP_SCHED
1097	/*
1098	 * Change rq's cpupri only if rt_rq is the top queue.
1099	 */
1100	if (&rq->rt != rt_rq)
1101		return;
1102#endif
1103	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1104		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1105}
1106
1107#else /* CONFIG_SMP */
1108
1109static inline
1110void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1111static inline
1112void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1113
1114#endif /* CONFIG_SMP */
1115
1116#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1117static void
1118inc_rt_prio(struct rt_rq *rt_rq, int prio)
1119{
1120	int prev_prio = rt_rq->highest_prio.curr;
1121
1122	if (prio < prev_prio)
1123		rt_rq->highest_prio.curr = prio;
1124
1125	inc_rt_prio_smp(rt_rq, prio, prev_prio);
1126}
1127
1128static void
1129dec_rt_prio(struct rt_rq *rt_rq, int prio)
1130{
1131	int prev_prio = rt_rq->highest_prio.curr;
1132
1133	if (rt_rq->rt_nr_running) {
1134
1135		WARN_ON(prio < prev_prio);
1136
1137		/*
1138		 * This may have been our highest task, and therefore
1139		 * we may have some recomputation to do
1140		 */
1141		if (prio == prev_prio) {
1142			struct rt_prio_array *array = &rt_rq->active;
1143
1144			rt_rq->highest_prio.curr =
1145				sched_find_first_bit(array->bitmap);
1146		}
1147
1148	} else {
1149		rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
1150	}
1151
1152	dec_rt_prio_smp(rt_rq, prio, prev_prio);
1153}
1154
1155#else
1156
1157static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1158static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1159
1160#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1161
1162#ifdef CONFIG_RT_GROUP_SCHED
1163
1164static void
1165inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1166{
1167	if (rt_se_boosted(rt_se))
1168		rt_rq->rt_nr_boosted++;
1169
1170	if (rt_rq->tg)
1171		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1172}
1173
1174static void
1175dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1176{
1177	if (rt_se_boosted(rt_se))
1178		rt_rq->rt_nr_boosted--;
1179
1180	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1181}
1182
1183#else /* CONFIG_RT_GROUP_SCHED */
1184
1185static void
1186inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1187{
1188	start_rt_bandwidth(&def_rt_bandwidth);
1189}
1190
1191static inline
1192void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1193
1194#endif /* CONFIG_RT_GROUP_SCHED */
1195
1196static inline
1197unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1198{
1199	struct rt_rq *group_rq = group_rt_rq(rt_se);
1200
1201	if (group_rq)
1202		return group_rq->rt_nr_running;
1203	else
1204		return 1;
1205}
1206
1207static inline
1208unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1209{
1210	struct rt_rq *group_rq = group_rt_rq(rt_se);
1211	struct task_struct *tsk;
1212
1213	if (group_rq)
1214		return group_rq->rr_nr_running;
1215
1216	tsk = rt_task_of(rt_se);
1217
1218	return (tsk->policy == SCHED_RR) ? 1 : 0;
1219}
1220
1221static inline
1222void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1223{
1224	int prio = rt_se_prio(rt_se);
1225
1226	WARN_ON(!rt_prio(prio));
1227	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1228	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1229
1230	inc_rt_prio(rt_rq, prio);
1231	inc_rt_group(rt_se, rt_rq);
1232}
1233
1234static inline
1235void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1236{
1237	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1238	WARN_ON(!rt_rq->rt_nr_running);
1239	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1240	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1241
1242	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1243	dec_rt_group(rt_se, rt_rq);
1244}
1245
1246/*
1247 * Change rt_se->run_list location unless SAVE && !MOVE
1248 *
1249 * assumes ENQUEUE/DEQUEUE flags match
1250 */
1251static inline bool move_entity(unsigned int flags)
1252{
1253	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1254		return false;
1255
1256	return true;
1257}
1258
1259static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1260{
1261	list_del_init(&rt_se->run_list);
1262
1263	if (list_empty(array->queue + rt_se_prio(rt_se)))
1264		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1265
1266	rt_se->on_list = 0;
1267}
1268
1269static inline struct sched_statistics *
1270__schedstats_from_rt_se(struct sched_rt_entity *rt_se)
1271{
1272#ifdef CONFIG_RT_GROUP_SCHED
1273	/* schedstats is not supported for rt group. */
1274	if (!rt_entity_is_task(rt_se))
1275		return NULL;
1276#endif
1277
1278	return &rt_task_of(rt_se)->stats;
1279}
1280
1281static inline void
1282update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1283{
1284	struct sched_statistics *stats;
1285	struct task_struct *p = NULL;
1286
1287	if (!schedstat_enabled())
1288		return;
1289
1290	if (rt_entity_is_task(rt_se))
1291		p = rt_task_of(rt_se);
1292
1293	stats = __schedstats_from_rt_se(rt_se);
1294	if (!stats)
1295		return;
1296
1297	__update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);
1298}
1299
1300static inline void
1301update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1302{
1303	struct sched_statistics *stats;
1304	struct task_struct *p = NULL;
1305
1306	if (!schedstat_enabled())
1307		return;
1308
1309	if (rt_entity_is_task(rt_se))
1310		p = rt_task_of(rt_se);
1311
1312	stats = __schedstats_from_rt_se(rt_se);
1313	if (!stats)
1314		return;
1315
1316	__update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats);
1317}
1318
1319static inline void
1320update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
1321			int flags)
1322{
1323	if (!schedstat_enabled())
1324		return;
1325
1326	if (flags & ENQUEUE_WAKEUP)
1327		update_stats_enqueue_sleeper_rt(rt_rq, rt_se);
1328}
1329
1330static inline void
1331update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1332{
1333	struct sched_statistics *stats;
1334	struct task_struct *p = NULL;
1335
1336	if (!schedstat_enabled())
1337		return;
1338
1339	if (rt_entity_is_task(rt_se))
1340		p = rt_task_of(rt_se);
1341
1342	stats = __schedstats_from_rt_se(rt_se);
1343	if (!stats)
1344		return;
1345
1346	__update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats);
1347}
1348
1349static inline void
1350update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
1351			int flags)
1352{
1353	struct task_struct *p = NULL;
1354
1355	if (!schedstat_enabled())
1356		return;
1357
1358	if (rt_entity_is_task(rt_se))
1359		p = rt_task_of(rt_se);
1360
1361	if ((flags & DEQUEUE_SLEEP) && p) {
1362		unsigned int state;
1363
1364		state = READ_ONCE(p->__state);
1365		if (state & TASK_INTERRUPTIBLE)
1366			__schedstat_set(p->stats.sleep_start,
1367					rq_clock(rq_of_rt_rq(rt_rq)));
1368
1369		if (state & TASK_UNINTERRUPTIBLE)
1370			__schedstat_set(p->stats.block_start,
1371					rq_clock(rq_of_rt_rq(rt_rq)));
1372	}
1373}
1374
1375static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1376{
1377	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1378	struct rt_prio_array *array = &rt_rq->active;
1379	struct rt_rq *group_rq = group_rt_rq(rt_se);
1380	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1381
1382	/*
1383	 * Don't enqueue the group if it's throttled, or when empty.
1384	 * The latter is a consequence of the former when a child group
1385	 * gets throttled and the current group doesn't have any other
1386	 * active members.
1387	 */
1388	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1389		if (rt_se->on_list)
1390			__delist_rt_entity(rt_se, array);
1391		return;
1392	}
1393
1394	if (move_entity(flags)) {
1395		WARN_ON_ONCE(rt_se->on_list);
1396		if (flags & ENQUEUE_HEAD)
1397			list_add(&rt_se->run_list, queue);
1398		else
1399			list_add_tail(&rt_se->run_list, queue);
1400
1401		__set_bit(rt_se_prio(rt_se), array->bitmap);
1402		rt_se->on_list = 1;
1403	}
1404	rt_se->on_rq = 1;
1405
1406	inc_rt_tasks(rt_se, rt_rq);
1407}
1408
1409static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1410{
1411	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1412	struct rt_prio_array *array = &rt_rq->active;
1413
1414	if (move_entity(flags)) {
1415		WARN_ON_ONCE(!rt_se->on_list);
1416		__delist_rt_entity(rt_se, array);
1417	}
1418	rt_se->on_rq = 0;
1419
1420	dec_rt_tasks(rt_se, rt_rq);
1421}
1422
1423/*
1424 * Because the prio of an upper entry depends on the lower
1425 * entries, we must remove entries top-down.
1426 */
1427static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1428{
1429	struct sched_rt_entity *back = NULL;
1430	unsigned int rt_nr_running;
1431
1432	for_each_sched_rt_entity(rt_se) {
1433		rt_se->back = back;
1434		back = rt_se;
1435	}
1436
1437	rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
1438
1439	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1440		if (on_rt_rq(rt_se))
1441			__dequeue_rt_entity(rt_se, flags);
1442	}
1443
1444	dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
1445}
1446
1447static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1448{
1449	struct rq *rq = rq_of_rt_se(rt_se);
1450
1451	update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags);
1452
1453	dequeue_rt_stack(rt_se, flags);
1454	for_each_sched_rt_entity(rt_se)
1455		__enqueue_rt_entity(rt_se, flags);
1456	enqueue_top_rt_rq(&rq->rt);
1457}
1458
1459static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1460{
1461	struct rq *rq = rq_of_rt_se(rt_se);
1462
1463	update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags);
1464
1465	dequeue_rt_stack(rt_se, flags);
1466
1467	for_each_sched_rt_entity(rt_se) {
1468		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1469
1470		if (rt_rq && rt_rq->rt_nr_running)
1471			__enqueue_rt_entity(rt_se, flags);
1472	}
1473	enqueue_top_rt_rq(&rq->rt);
1474}
1475
1476/*
1477 * Adding/removing a task to/from a priority array:
1478 */
1479static void
1480enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1481{
1482	struct sched_rt_entity *rt_se = &p->rt;
1483
1484	if (flags & ENQUEUE_WAKEUP)
1485		rt_se->timeout = 0;
1486
1487	check_schedstat_required();
1488	update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);
1489
1490	enqueue_rt_entity(rt_se, flags);
1491
1492	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1493		enqueue_pushable_task(rq, p);
1494}
1495
1496static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1497{
1498	struct sched_rt_entity *rt_se = &p->rt;
1499
1500	update_curr_rt(rq);
1501	dequeue_rt_entity(rt_se, flags);
1502
1503	dequeue_pushable_task(rq, p);
1504}
1505
1506/*
1507 * Put task to the head or the end of the run list without the overhead of
1508 * dequeue followed by enqueue.
1509 */
1510static void
1511requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1512{
1513	if (on_rt_rq(rt_se)) {
1514		struct rt_prio_array *array = &rt_rq->active;
1515		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1516
1517		if (head)
1518			list_move(&rt_se->run_list, queue);
1519		else
1520			list_move_tail(&rt_se->run_list, queue);
1521	}
1522}
1523
1524static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1525{
1526	struct sched_rt_entity *rt_se = &p->rt;
1527	struct rt_rq *rt_rq;
1528
1529	for_each_sched_rt_entity(rt_se) {
1530		rt_rq = rt_rq_of_se(rt_se);
1531		requeue_rt_entity(rt_rq, rt_se, head);
1532	}
1533}
1534
1535static void yield_task_rt(struct rq *rq)
1536{
1537	requeue_task_rt(rq, rq->curr, 0);
1538}
1539
1540#ifdef CONFIG_SMP
1541static int find_lowest_rq(struct task_struct *task);
1542
1543static int
1544select_task_rq_rt(struct task_struct *p, int cpu, int flags)
1545{
1546	struct task_struct *curr;
1547	struct rq *rq;
1548	bool test;
1549
1550	/* For anything but wake ups, just return the task_cpu */
1551	if (!(flags & (WF_TTWU | WF_FORK)))
1552		goto out;
1553
1554	rq = cpu_rq(cpu);
1555
1556	rcu_read_lock();
1557	curr = READ_ONCE(rq->curr); /* unlocked access */
1558
1559	/*
1560	 * If the current task on @p's runqueue is an RT task, then
1561	 * try to see if we can wake this RT task up on another
1562	 * runqueue. Otherwise simply start this RT task
1563	 * on its current runqueue.
1564	 *
1565	 * We want to avoid overloading runqueues. If the woken
1566	 * task is a higher priority, then it will stay on this CPU
1567	 * and the lower prio task should be moved to another CPU.
1568	 * Even though this will probably make the lower prio task
1569	 * lose its cache, we do not want to bounce a higher priority task
1570	 * around just because it gave up its CPU, perhaps for a
1571	 * lock?
1572	 *
1573	 * For equal prio tasks, we just let the scheduler sort it out.
1574	 *
1575	 * Otherwise, just let it ride on the affined RQ and the
1576	 * post-schedule router will push the preempted task away
1577	 *
1578	 * This test is optimistic; if we get it wrong the load-balancer
1579	 * will have to sort it out.
1580	 *
1581	 * We take into account the capacity of the CPU to ensure it fits the
1582	 * requirement of the task - which is only important on heterogeneous
1583	 * systems like big.LITTLE.
1584	 */
1585	test = curr &&
1586	       unlikely(rt_task(curr)) &&
1587	       (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
1588
1589	if (test || !rt_task_fits_capacity(p, cpu)) {
1590		int target = find_lowest_rq(p);
1591
1592		/*
1593		 * Bail out if we were forcing a migration to find a better
1594		 * fitting CPU but our search failed.
1595		 */
1596		if (!test && target != -1 && !rt_task_fits_capacity(p, target))
1597			goto out_unlock;
1598
1599		/*
1600		 * Don't bother moving it if the destination CPU is
1601		 * not running a lower priority task.
1602		 */
1603		if (target != -1 &&
1604		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
1605			cpu = target;
1606	}
1607
1608out_unlock:
1609	rcu_read_unlock();
1610
1611out:
1612	return cpu;
1613}
1614
1615static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1616{
1617	/*
1618	 * Current can't be migrated, useless to reschedule,
1619	 * let's hope p can move out.
1620	 */
1621	if (rq->curr->nr_cpus_allowed == 1 ||
1622	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1623		return;
1624
1625	/*
1626	 * p is migratable, so let's not schedule it and
1627	 * see if it is pushed or pulled somewhere else.
1628	 */
1629	if (p->nr_cpus_allowed != 1 &&
1630	    cpupri_find(&rq->rd->cpupri, p, NULL))
1631		return;
1632
1633	/*
1634	 * There appear to be other CPUs that can accept
1635	 * the current task but none can run 'p', so let's reschedule
1636	 * to try and push the current task away:
1637	 */
1638	requeue_task_rt(rq, p, 1);
1639	resched_curr(rq);
1640}
1641
1642static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1643{
1644	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1645		/*
1646		 * This is OK, because current is on_cpu, which avoids it being
1647		 * picked for load-balance and preemption/IRQs are still
1648		 * disabled avoiding further scheduler activity on it and we've
1649		 * not yet started the picking loop.
1650		 */
1651		rq_unpin_lock(rq, rf);
1652		pull_rt_task(rq);
1653		rq_repin_lock(rq, rf);
1654	}
1655
1656	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1657}
1658#endif /* CONFIG_SMP */
1659
1660/*
1661 * Preempt the current task with a newly woken task if needed:
1662 */
1663static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
1664{
1665	if (p->prio < rq->curr->prio) {
1666		resched_curr(rq);
1667		return;
1668	}
1669
1670#ifdef CONFIG_SMP
1671	/*
1672	 * If:
1673	 *
1674	 * - the newly woken task is of equal priority to the current task
1675	 * - the newly woken task is non-migratable while current is migratable
1676	 * - current will be preempted on the next reschedule
1677	 *
1678	 * we should check to see if current can readily move to a different
1679	 * cpu.  If so, we will reschedule to allow the push logic to try
1680	 * to move current somewhere else, making room for our non-migratable
1681	 * task.
1682	 */
1683	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1684		check_preempt_equal_prio(rq, p);
1685#endif
1686}
1687
1688static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1689{
1690	struct sched_rt_entity *rt_se = &p->rt;
1691	struct rt_rq *rt_rq = &rq->rt;
1692
1693	p->se.exec_start = rq_clock_task(rq);
1694	if (on_rt_rq(&p->rt))
1695		update_stats_wait_end_rt(rt_rq, rt_se);
1696
1697	/* The running task is never eligible for pushing */
1698	dequeue_pushable_task(rq, p);
1699
1700	if (!first)
1701		return;
1702
1703	/*
1704	 * If prev task was rt, put_prev_task() has already updated the
1705	 * utilization. We only care of the case where we start to schedule a
1706	 * rt task
1707	 */
1708	if (rq->curr->sched_class != &rt_sched_class)
1709		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1710
1711	rt_queue_push_tasks(rq);
1712}
1713
1714static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
1715{
1716	struct rt_prio_array *array = &rt_rq->active;
1717	struct sched_rt_entity *next = NULL;
1718	struct list_head *queue;
1719	int idx;
1720
1721	idx = sched_find_first_bit(array->bitmap);
1722	BUG_ON(idx >= MAX_RT_PRIO);
1723
1724	queue = array->queue + idx;
1725	if (SCHED_WARN_ON(list_empty(queue)))
1726		return NULL;
1727	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1728
1729	return next;
1730}
1731
1732static struct task_struct *_pick_next_task_rt(struct rq *rq)
1733{
1734	struct sched_rt_entity *rt_se;
1735	struct rt_rq *rt_rq  = &rq->rt;
1736
1737	do {
1738		rt_se = pick_next_rt_entity(rt_rq);
1739		if (unlikely(!rt_se))
1740			return NULL;
1741		rt_rq = group_rt_rq(rt_se);
1742	} while (rt_rq);
1743
1744	return rt_task_of(rt_se);
1745}
1746
1747static struct task_struct *pick_task_rt(struct rq *rq)
1748{
1749	struct task_struct *p;
1750
1751	if (!sched_rt_runnable(rq))
1752		return NULL;
1753
1754	p = _pick_next_task_rt(rq);
1755
1756	return p;
1757}
1758
1759static struct task_struct *pick_next_task_rt(struct rq *rq)
1760{
1761	struct task_struct *p = pick_task_rt(rq);
1762
1763	if (p)
1764		set_next_task_rt(rq, p, true);
1765
1766	return p;
1767}
1768
1769static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1770{
1771	struct sched_rt_entity *rt_se = &p->rt;
1772	struct rt_rq *rt_rq = &rq->rt;
1773
1774	if (on_rt_rq(&p->rt))
1775		update_stats_wait_start_rt(rt_rq, rt_se);
1776
1777	update_curr_rt(rq);
1778
1779	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1780
1781	/*
1782	 * The previous task needs to be made eligible for pushing
1783	 * if it is still active
1784	 */
1785	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1786		enqueue_pushable_task(rq, p);
1787}
1788
1789#ifdef CONFIG_SMP
1790
1791/* Only try algorithms three times */
1792#define RT_MAX_TRIES 3
1793
1794static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1795{
1796	if (!task_on_cpu(rq, p) &&
1797	    cpumask_test_cpu(cpu, &p->cpus_mask))
1798		return 1;
1799
1800	return 0;
1801}
1802
1803/*
1804 * Return the highest pushable rq's task, which is suitable to be executed
1805 * on the CPU, NULL otherwise
1806 */
1807static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1808{
1809	struct plist_head *head = &rq->rt.pushable_tasks;
1810	struct task_struct *p;
1811
1812	if (!has_pushable_tasks(rq))
1813		return NULL;
1814
1815	plist_for_each_entry(p, head, pushable_tasks) {
1816		if (pick_rt_task(rq, p, cpu))
1817			return p;
1818	}
1819
1820	return NULL;
1821}
1822
1823static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1824
1825static int find_lowest_rq(struct task_struct *task)
1826{
1827	struct sched_domain *sd;
1828	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1829	int this_cpu = smp_processor_id();
1830	int cpu      = task_cpu(task);
1831	int ret;
1832
1833	/* Make sure the mask is initialized first */
1834	if (unlikely(!lowest_mask))
1835		return -1;
1836
1837	if (task->nr_cpus_allowed == 1)
1838		return -1; /* No other targets possible */
1839
1840	/*
1841	 * If we're on asym system ensure we consider the different capacities
1842	 * of the CPUs when searching for the lowest_mask.
1843	 */
1844	if (sched_asym_cpucap_active()) {
1845
1846		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
1847					  task, lowest_mask,
1848					  rt_task_fits_capacity);
1849	} else {
1850
1851		ret = cpupri_find(&task_rq(task)->rd->cpupri,
1852				  task, lowest_mask);
1853	}
1854
1855	if (!ret)
1856		return -1; /* No targets found */
1857
1858	/*
1859	 * At this point we have built a mask of CPUs representing the
1860	 * lowest priority tasks in the system.  Now we want to elect
1861	 * the best one based on our affinity and topology.
1862	 *
1863	 * We prioritize the last CPU that the task executed on since
1864	 * it is most likely cache-hot in that location.
1865	 */
1866	if (cpumask_test_cpu(cpu, lowest_mask))
1867		return cpu;
1868
1869	/*
1870	 * Otherwise, we consult the sched_domains span maps to figure
1871	 * out which CPU is logically closest to our hot cache data.
1872	 */
1873	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1874		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1875
1876	rcu_read_lock();
1877	for_each_domain(cpu, sd) {
1878		if (sd->flags & SD_WAKE_AFFINE) {
1879			int best_cpu;
1880
1881			/*
1882			 * "this_cpu" is cheaper to preempt than a
1883			 * remote processor.
1884			 */
1885			if (this_cpu != -1 &&
1886			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1887				rcu_read_unlock();
1888				return this_cpu;
1889			}
1890
1891			best_cpu = cpumask_any_and_distribute(lowest_mask,
1892							      sched_domain_span(sd));
1893			if (best_cpu < nr_cpu_ids) {
1894				rcu_read_unlock();
1895				return best_cpu;
1896			}
1897		}
1898	}
1899	rcu_read_unlock();
1900
1901	/*
1902	 * And finally, if there were no matches within the domains
1903	 * just give the caller *something* to work with from the compatible
1904	 * locations.
1905	 */
1906	if (this_cpu != -1)
1907		return this_cpu;
1908
1909	cpu = cpumask_any_distribute(lowest_mask);
1910	if (cpu < nr_cpu_ids)
1911		return cpu;
1912
1913	return -1;
1914}
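
/*
 * Selection order recap for the function above: (1) the task's last CPU
 * when it is in lowest_mask (most likely cache-hot), (2) a CPU from the
 * smallest SD_WAKE_AFFINE domain containing it, preferring this_cpu,
 * (3) any remaining CPU in the mask, and -1 only when cpupri found no
 * candidate at all.
 */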
1915
1916/* Will lock the rq it finds */
1917static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1918{
1919	struct rq *lowest_rq = NULL;
1920	int tries;
1921	int cpu;
1922
1923	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1924		cpu = find_lowest_rq(task);
1925
1926		if ((cpu == -1) || (cpu == rq->cpu))
1927			break;
1928
1929		lowest_rq = cpu_rq(cpu);
1930
1931		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1932			/*
1933			 * Target rq has tasks of equal or higher priority,
1934			 * retrying does not release any lock and is unlikely
1935			 * to yield a different result.
1936			 */
1937			lowest_rq = NULL;
1938			break;
1939		}
1940
1941		/* if the prio of this runqueue changed, try again */
1942		if (double_lock_balance(rq, lowest_rq)) {
1943			/*
1944			 * We had to unlock the run queue. In
1945			 * the meantime, the task could have
1946			 * migrated already or had its affinity changed.
1947			 * Also make sure that it wasn't scheduled on its rq.
1948			 * It is possible the task was scheduled, set
1949			 * "migrate_disabled" and then got preempted, so we must
1950			 * check the task migration disable flag here too.
1951			 */
1952			if (unlikely(task_rq(task) != rq ||
1953				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
1954				     task_on_cpu(rq, task) ||
1955				     !rt_task(task) ||
1956				     is_migration_disabled(task) ||
1957				     !task_on_rq_queued(task))) {
1958
1959				double_unlock_balance(rq, lowest_rq);
1960				lowest_rq = NULL;
1961				break;
1962			}
1963		}
1964
1965		/* If this rq is still suitable use it. */
1966		if (lowest_rq->rt.highest_prio.curr > task->prio)
1967			break;
1968
1969		/* try again */
1970		double_unlock_balance(rq, lowest_rq);
1971		lowest_rq = NULL;
1972	}
1973
1974	return lowest_rq;
1975}
1976
1977static struct task_struct *pick_next_pushable_task(struct rq *rq)
1978{
1979	struct task_struct *p;
1980
1981	if (!has_pushable_tasks(rq))
1982		return NULL;
1983
1984	p = plist_first_entry(&rq->rt.pushable_tasks,
1985			      struct task_struct, pushable_tasks);
1986
1987	BUG_ON(rq->cpu != task_cpu(p));
1988	BUG_ON(task_current(rq, p));
1989	BUG_ON(p->nr_cpus_allowed <= 1);
1990
1991	BUG_ON(!task_on_rq_queued(p));
1992	BUG_ON(!rt_task(p));
1993
1994	return p;
1995}
1996
1997/*
1998 * If the current CPU has more than one RT task, see if the
1999 * non-running task can migrate over to a CPU that is running a
2000 * task of lesser priority.
2001 */
2002static int push_rt_task(struct rq *rq, bool pull)
2003{
2004	struct task_struct *next_task;
2005	struct rq *lowest_rq;
2006	int ret = 0;
2007
2008	if (!rq->rt.overloaded)
2009		return 0;
2010
2011	next_task = pick_next_pushable_task(rq);
2012	if (!next_task)
2013		return 0;
2014
2015retry:
2016	/*
2017	 * It's possible that the next_task slipped in with a
2018	 * higher priority than current. If that's the case
2019	 * just reschedule current.
2020	 */
2021	if (unlikely(next_task->prio < rq->curr->prio)) {
2022		resched_curr(rq);
2023		return 0;
2024	}
2025
2026	if (is_migration_disabled(next_task)) {
2027		struct task_struct *push_task = NULL;
2028		int cpu;
2029
2030		if (!pull || rq->push_busy)
2031			return 0;
2032
2033		/*
2034		 * Invoking find_lowest_rq() on anything but an RT task doesn't
2035		 * make sense. Per the above priority check, curr has to
2036		 * be of higher priority than next_task, so no need to
2037		 * reschedule when bailing out.
2038		 *
2039		 * Note that the stoppers are masqueraded as SCHED_FIFO
2040		 * (cf. sched_set_stop_task()), so we can't rely on rt_task().
2041		 */
2042		if (rq->curr->sched_class != &rt_sched_class)
2043			return 0;
2044
2045		cpu = find_lowest_rq(rq->curr);
2046		if (cpu == -1 || cpu == rq->cpu)
2047			return 0;
2048
2049		/*
2050		 * Given that we found a CPU with lower priority than
2051		 * @next_task, it should be running there. However we cannot
2052		 * migrate it there, so instead attempt to push away the
2053		 * task currently running on this CPU.
2054		 */
2055		push_task = get_push_task(rq);
2056		if (push_task) {
2057			preempt_disable();
2058			raw_spin_rq_unlock(rq);
2059			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2060					    push_task, &rq->push_work);
2061			preempt_enable();
2062			raw_spin_rq_lock(rq);
2063		}
2064
2065		return 0;
2066	}
2067
2068	if (WARN_ON(next_task == rq->curr))
2069		return 0;
2070
2071	/* We might release rq lock */
2072	get_task_struct(next_task);
2073
2074	/* find_lock_lowest_rq locks the rq if found */
2075	lowest_rq = find_lock_lowest_rq(next_task, rq);
2076	if (!lowest_rq) {
2077		struct task_struct *task;
2078		/*
2079		 * find_lock_lowest_rq releases rq->lock
2080		 * so it is possible that next_task has migrated.
2081		 *
2082		 * We need to make sure that the task is still on the same
2083		 * run-queue and is also still the next task eligible for
2084		 * pushing.
2085		 */
2086		task = pick_next_pushable_task(rq);
2087		if (task == next_task) {
2088			/*
2089			 * The task hasn't migrated, and is still the next
2090			 * eligible task, but we failed to find a run-queue
2091			 * to push it to.  Do not retry in this case, since
2092			 * other CPUs will pull from us when ready.
2093			 */
2094			goto out;
2095		}
2096
2097		if (!task)
2098			/* No more tasks, just exit */
2099			goto out;
2100
2101		/*
2102		 * Something has shifted, try again.
2103		 */
2104		put_task_struct(next_task);
2105		next_task = task;
2106		goto retry;
2107	}
2108
2109	deactivate_task(rq, next_task, 0);
2110	set_task_cpu(next_task, lowest_rq->cpu);
2111	activate_task(lowest_rq, next_task, 0);
2112	resched_curr(lowest_rq);
2113	ret = 1;
2114
2115	double_unlock_balance(rq, lowest_rq);
2116out:
2117	put_task_struct(next_task);
2118
2119	return ret;
2120}
2121
2122static void push_rt_tasks(struct rq *rq)
2123{
2124	/* push_rt_task() will return true if it moved an RT task */
2125	while (push_rt_task(rq, false))
2126		;
2127}
2128
2129#ifdef HAVE_RT_PUSH_IPI
2130
2131/*
2132 * When a high priority task schedules out from a CPU and a lower priority
2133 * task is scheduled in, a check is made to see if there are any RT tasks
2134 * on other CPUs that are waiting to run because a higher priority RT task
2135 * is currently running on its CPU. In this case, the CPU with multiple RT
2136 * tasks queued on it (overloaded) needs to be notified that a CPU has opened
2137 * up that may be able to run one of its non-running queued RT tasks.
2138 *
2139 * All CPUs with overloaded RT tasks need to be notified, as there is
2140 * currently no way to know which of these CPUs has the highest priority
2141 * task waiting to run. Instead of trying to take a spinlock on each of
2142 * these CPUs, which has been shown to cause large latencies on machines
2143 * with many CPUs, an IPI is sent to the CPUs to have them push off the
2144 * overloaded RT tasks waiting to run.
2145 *
2146 * Just sending an IPI to each of the CPUs is also an issue, as on
2147 * machines with a large CPU count this can cause an IPI storm on a CPU,
2148 * especially if it's the only CPU with multiple RT tasks queued, and a
2149 * large number of CPUs schedule a lower priority task at the same time.
2150 *
2151 * Each root domain has its own irq work function that can iterate over
2152 * all CPUs with RT overloaded tasks. Since every CPU with overloaded RT
2153 * tasks must be checked whether one or many CPUs are lowering their
2154 * priority, there's a single irq work iterator that will try to push
2155 * off the RT tasks that are waiting to run.
2156 *
2157 * When a CPU schedules a lower priority task, it will kick off the
2158 * irq work iterator that will jump to each CPU with overloaded RT tasks.
2159 * As it only takes the first CPU that schedules a lower priority task
2160 * to start the process, the rto_start variable is incremented and if
2161 * the atomic result is one, then that CPU will try to take the rto_lock.
2162 * This prevents high contention on the lock as the process handles all
2163 * CPUs scheduling lower priority tasks.
2164 *
2165 * All CPUs that are scheduling a lower priority task will increment the
2166 * rto_loop_next variable. This will make sure that the irq work iterator
2167 * checks all RT overloaded CPUs whenever a CPU schedules a new lower
2168 * priority task, even if the iterator is in the middle of a scan.
2169 * Incrementing rto_loop_next will cause the iterator to perform another scan.
2170 *
2171 */
2172static int rto_next_cpu(struct root_domain *rd)
2173{
2174	int next;
2175	int cpu;
2176
2177	/*
2178	 * When starting the IPI RT pushing, the rto_cpu is set to -1,
2179	 * and rto_next_cpu() will simply return the first CPU found in
2180	 * the rto_mask.
2181	 *
2182	 * If rto_next_cpu() is called with rto_cpu being a valid CPU, it
2183	 * will return the next CPU found in the rto_mask.
2184	 *
2185	 * If there are no more CPUs left in the rto_mask, then a check is made
2186	 * against rto_loop and rto_loop_next. rto_loop is only updated with
2187	 * the rto_lock held, but any CPU may increment the rto_loop_next
2188	 * without any locking.
2189	 */
2190	for (;;) {
2191
2192		/* When rto_cpu is -1 this acts like cpumask_first() */
2193		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
2194
2195		rd->rto_cpu = cpu;
2196
2197		if (cpu < nr_cpu_ids)
2198			return cpu;
2199
2200		rd->rto_cpu = -1;
2201
2202		/*
2203		 * ACQUIRE ensures we see the @rto_mask changes
2204		 * made prior to the @next value observed.
2205		 *
2206		 * Matches WMB in rt_set_overload().
2207		 */
2208		next = atomic_read_acquire(&rd->rto_loop_next);
2209
2210		if (rd->rto_loop == next)
2211			break;
2212
2213		rd->rto_loop = next;
2214	}
2215
2216	return -1;
2217}
2218
2219static inline bool rto_start_trylock(atomic_t *v)
2220{
2221	return !atomic_cmpxchg_acquire(v, 0, 1);
2222}
2223
2224static inline void rto_start_unlock(atomic_t *v)
2225{
2226	atomic_set_release(v, 0);
2227}
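
/*
 * Illustrative sketch: rto_start_trylock()/rto_start_unlock() above form a
 * minimal test-and-set try-lock. atomic_cmpxchg_acquire(v, 0, 1) returns the
 * old value, so only the first caller sees 0 and "owns" the loop; later
 * callers bail out, but they have already bumped rto_loop_next, which keeps
 * the owner scanning. A hypothetical usage pattern:
 */
#if 0
static void rto_start_usage(atomic_t *start)
{
	if (!rto_start_trylock(start))
		return;		/* another CPU is already running the loop */

	/* ... work done by the single initiator ... */

	rto_start_unlock(start);
}
#endif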
2228
2229static void tell_cpu_to_push(struct rq *rq)
2230{
2231	int cpu = -1;
2232
2233	/* Keep the loop going if the IPI is currently active */
2234	atomic_inc(&rq->rd->rto_loop_next);
2235
2236	/* Only one CPU can initiate a loop at a time */
2237	if (!rto_start_trylock(&rq->rd->rto_loop_start))
2238		return;
2239
2240	raw_spin_lock(&rq->rd->rto_lock);
2241
2242	/*
2243	 * The rto_cpu is updated under the lock; if it has a valid CPU
2244	 * then the IPI is still running and will continue due to the
2245	 * update to loop_next, and nothing needs to be done here.
2246	 * Otherwise it is finishing up and an IPI needs to be sent.
2247	 */
2248	if (rq->rd->rto_cpu < 0)
2249		cpu = rto_next_cpu(rq->rd);
2250
2251	raw_spin_unlock(&rq->rd->rto_lock);
2252
2253	rto_start_unlock(&rq->rd->rto_loop_start);
2254
2255	if (cpu >= 0) {
2256		/* Make sure the rd does not get freed while pushing */
2257		sched_get_rd(rq->rd);
2258		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2259	}
2260}
2261
2262/* Called from hardirq context */
2263void rto_push_irq_work_func(struct irq_work *work)
2264{
2265	struct root_domain *rd =
2266		container_of(work, struct root_domain, rto_push_work);
2267	struct rq *rq;
2268	int cpu;
2269
2270	rq = this_rq();
2271
2272	/*
2273	 * We do not need to grab the lock to check for has_pushable_tasks.
2274	 * When it gets updated, a check is made whether a push is possible.
2275	 */
2276	if (has_pushable_tasks(rq)) {
2277		raw_spin_rq_lock(rq);
2278		while (push_rt_task(rq, true))
2279			;
2280		raw_spin_rq_unlock(rq);
2281	}
2282
2283	raw_spin_lock(&rd->rto_lock);
2284
2285	/* Pass the IPI to the next rt overloaded queue */
2286	cpu = rto_next_cpu(rd);
2287
2288	raw_spin_unlock(&rd->rto_lock);
2289
2290	if (cpu < 0) {
2291		sched_put_rd(rd);
2292		return;
2293	}
2294
2295	/* Try the next RT overloaded CPU */
2296	irq_work_queue_on(&rd->rto_push_work, cpu);
2297}
2298#endif /* HAVE_RT_PUSH_IPI */
2299
2300static void pull_rt_task(struct rq *this_rq)
2301{
2302	int this_cpu = this_rq->cpu, cpu;
2303	bool resched = false;
2304	struct task_struct *p, *push_task;
2305	struct rq *src_rq;
2306	int rt_overload_count = rt_overloaded(this_rq);
2307
2308	if (likely(!rt_overload_count))
2309		return;
2310
2311	/*
2312	 * Match the barrier from rt_set_overload(); this guarantees that if we
2313	 * see overloaded we must also see the rto_mask bit.
2314	 */
2315	smp_rmb();
2316
2317	/* If we are the only overloaded CPU, do nothing */
2318	if (rt_overload_count == 1 &&
2319	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2320		return;
2321
2322#ifdef HAVE_RT_PUSH_IPI
2323	if (sched_feat(RT_PUSH_IPI)) {
2324		tell_cpu_to_push(this_rq);
2325		return;
2326	}
2327#endif
2328
2329	for_each_cpu(cpu, this_rq->rd->rto_mask) {
2330		if (this_cpu == cpu)
2331			continue;
2332
2333		src_rq = cpu_rq(cpu);
2334
2335		/*
2336		 * Don't bother taking the src_rq->lock if the next highest
2337		 * task is known to be lower-priority than our current task.
2338		 * This may look racy, but if this value is about to go
2339		 * logically higher, the src_rq will push this task away.
2340	 * And if it's going logically lower, we do not care.
2341		 */
2342		if (src_rq->rt.highest_prio.next >=
2343		    this_rq->rt.highest_prio.curr)
2344			continue;
2345
2346		/*
2347		 * We can potentially drop this_rq's lock in
2348		 * double_lock_balance, and another CPU could
2349		 * alter this_rq
2350		 */
2351		push_task = NULL;
2352		double_lock_balance(this_rq, src_rq);
2353
2354		/*
2355	 * We can only pull a task that is pushable
2356	 * on its rq, and no others.
2357		 */
2358		p = pick_highest_pushable_task(src_rq, this_cpu);
2359
2360		/*
2361		 * Do we have an RT task that preempts
2362		 * the to-be-scheduled task?
2363		 */
2364		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2365			WARN_ON(p == src_rq->curr);
2366			WARN_ON(!task_on_rq_queued(p));
2367
2368			/*
2369			 * There's a chance that p is higher in priority
2370			 * than what's currently running on its CPU.
2371			 * This happens when p is waking up and hasn't
2372			 * had a chance to schedule yet. We only pull
2373			 * p if it is lower in priority than the
2374			 * current task on the run queue.
2375			 */
2376			if (p->prio < src_rq->curr->prio)
2377				goto skip;
2378
2379			if (is_migration_disabled(p)) {
2380				push_task = get_push_task(src_rq);
2381			} else {
2382				deactivate_task(src_rq, p, 0);
2383				set_task_cpu(p, this_cpu);
2384				activate_task(this_rq, p, 0);
2385				resched = true;
2386			}
2387			/*
2388			 * We continue with the search, just in
2389			 * case there's an even higher prio task
2390			 * in another runqueue. (low likelihood
2391			 * but possible)
2392			 */
2393		}
2394skip:
2395		double_unlock_balance(this_rq, src_rq);
2396
2397		if (push_task) {
2398			preempt_disable();
2399			raw_spin_rq_unlock(this_rq);
2400			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2401					    push_task, &src_rq->push_work);
2402			preempt_enable();
2403			raw_spin_rq_lock(this_rq);
2404		}
2405	}
2406
2407	if (resched)
2408		resched_curr(this_rq);
2409}
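
/*
 * Note on the prio comparisons above (illustrative): kernel priority values
 * are inverted, so a *lower* p->prio means a *higher* scheduling priority.
 * An RT task with userspace rtprio 99 has p->prio == 0, rtprio 1 maps to
 * p->prio == 98, and normal tasks sit at 100..139. A hypothetical helper
 * showing the RT mapping:
 */
#if 0
static int rtprio_to_prio(int rt_priority)
{
	return MAX_RT_PRIO - 1 - rt_priority;	/* rtprio 99 -> 0, 1 -> 98 */
}
#endif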
2410
2411/*
2412 * If we are not running and we are not going to reschedule soon, we should
2413 * try to push tasks away now.
2414 */
2415static void task_woken_rt(struct rq *rq, struct task_struct *p)
2416{
2417	bool need_to_push = !task_on_cpu(rq, p) &&
2418			    !test_tsk_need_resched(rq->curr) &&
2419			    p->nr_cpus_allowed > 1 &&
2420			    (dl_task(rq->curr) || rt_task(rq->curr)) &&
2421			    (rq->curr->nr_cpus_allowed < 2 ||
2422			     rq->curr->prio <= p->prio);
2423
2424	if (need_to_push)
2425		push_rt_tasks(rq);
2426}
2427
2428/* Assumes rq->lock is held */
2429static void rq_online_rt(struct rq *rq)
2430{
2431	if (rq->rt.overloaded)
2432		rt_set_overload(rq);
2433
2434	__enable_runtime(rq);
2435
2436	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2437}
2438
2439/* Assumes rq->lock is held */
2440static void rq_offline_rt(struct rq *rq)
2441{
2442	if (rq->rt.overloaded)
2443		rt_clear_overload(rq);
2444
2445	__disable_runtime(rq);
2446
2447	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2448}
2449
2450/*
2451 * When switching from the rt queue, we bring ourselves to a position
2452 * where we might want to pull RT tasks from other runqueues.
2453 */
2454static void switched_from_rt(struct rq *rq, struct task_struct *p)
2455{
2456	/*
2457	 * If there are other RT tasks, then we will reschedule
2458	 * and the scheduling of the other RT tasks will handle
2459	 * the balancing. But if we are the last RT task
2460	 * we may need to handle the pulling of RT tasks
2461	 * now.
2462	 */
2463	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2464		return;
2465
2466	rt_queue_pull_task(rq);
2467}
2468
2469void __init init_sched_rt_class(void)
2470{
2471	unsigned int i;
2472
2473	for_each_possible_cpu(i) {
2474		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2475					GFP_KERNEL, cpu_to_node(i));
2476	}
2477}
2478#endif /* CONFIG_SMP */
2479
2480/*
2481 * When switching a task to RT, we may overload the runqueue
2482 * with RT tasks. In this case we try to push them off to
2483 * other runqueues.
2484 */
2485static void switched_to_rt(struct rq *rq, struct task_struct *p)
2486{
2487	/*
2488	 * If we are running, update the avg_rt tracking, as the running time
2489	 * will from now on be accounted into the latter.
2490	 */
2491	if (task_current(rq, p)) {
2492		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2493		return;
2494	}
2495
2496	/*
2497	 * If we are not running, we may need to preempt the currently
2498	 * running task. If that currently running task is also an RT task
2499	 * then see if we can move to another run queue.
2500	 */
2501	if (task_on_rq_queued(p)) {
2502#ifdef CONFIG_SMP
2503		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2504			rt_queue_push_tasks(rq);
2505#endif /* CONFIG_SMP */
2506		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2507			resched_curr(rq);
2508	}
2509}
2510
2511/*
2512 * Priority of the task has changed. This may cause
2513 * us to initiate a push or pull.
2514 */
2515static void
2516prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2517{
2518	if (!task_on_rq_queued(p))
2519		return;
2520
2521	if (task_current(rq, p)) {
2522#ifdef CONFIG_SMP
2523		/*
2524		 * If our priority decreases while running, we
2525		 * may need to pull tasks to this runqueue.
2526		 */
2527		if (oldprio < p->prio)
2528			rt_queue_pull_task(rq);
2529
2530		/*
2531		 * If there's a higher priority task waiting to run,
2532		 * then reschedule.
2533		 */
2534		if (p->prio > rq->rt.highest_prio.curr)
2535			resched_curr(rq);
2536#else
2537		/* For UP simply resched on drop of prio */
2538		if (oldprio < p->prio)
2539			resched_curr(rq);
2540#endif /* CONFIG_SMP */
2541	} else {
2542		/*
2543		 * This task is not running, but if its
2544		 * priority is higher than that of the currently
2545		 * running task, then reschedule.
2546		 */
2547		if (p->prio < rq->curr->prio)
2548			resched_curr(rq);
2549	}
2550}
2551
2552#ifdef CONFIG_POSIX_TIMERS
2553static void watchdog(struct rq *rq, struct task_struct *p)
2554{
2555	unsigned long soft, hard;
2556
2557	/* max may change after cur was read; this will be fixed next tick */
2558	soft = task_rlimit(p, RLIMIT_RTTIME);
2559	hard = task_rlimit_max(p, RLIMIT_RTTIME);
2560
2561	if (soft != RLIM_INFINITY) {
2562		unsigned long next;
2563
2564		if (p->rt.watchdog_stamp != jiffies) {
2565			p->rt.timeout++;
2566			p->rt.watchdog_stamp = jiffies;
2567		}
2568
2569		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2570		if (p->rt.timeout > next) {
2571			posix_cputimers_rt_watchdog(&p->posix_cputimers,
2572						    p->se.sum_exec_runtime);
2573		}
2574	}
2575}
2576#else
2577static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2578#endif
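
/*
 * Userspace view (illustrative sketch, not kernel code): the watchdog above
 * enforces RLIMIT_RTTIME, the limit on CPU time an RT task may consume
 * without blocking. A process can opt in roughly like this; the values are
 * microseconds of accumulated RT CPU time.
 */
#if 0
#include <sys/resource.h>

static int limit_rt_cpu_time(void)
{
	struct rlimit rl = {
		.rlim_cur = 500000,	/* soft limit: SIGXCPU after 500ms */
		.rlim_max = 1000000,	/* hard limit: SIGKILL after 1s */
	};

	return setrlimit(RLIMIT_RTTIME, &rl);
}
#endif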
2579
2580/*
2581 * scheduler tick hitting a task of our scheduling class.
2582 *
2583 * NOTE: This function can be called remotely by the tick offload that
2584 * goes along full dynticks. Therefore no local assumption can be made
2585 * and everything must be accessed through the @rq and @curr passed in
2586 * parameters.
2587 */
2588static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2589{
2590	struct sched_rt_entity *rt_se = &p->rt;
2591
2592	update_curr_rt(rq);
2593	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2594
2595	watchdog(rq, p);
2596
2597	/*
2598	 * RR tasks need a special form of timeslice management.
2599	 * FIFO tasks have no timeslices.
2600	 */
2601	if (p->policy != SCHED_RR)
2602		return;
2603
2604	if (--p->rt.time_slice)
2605		return;
2606
2607	p->rt.time_slice = sched_rr_timeslice;
2608
2609	/*
2610	 * Requeue to the end of the queue if we (and all of our ancestors) are
2611	 * not the only element on the queue.
2612	 */
2613	for_each_sched_rt_entity(rt_se) {
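		/* prev != next <=> rt_se is not alone on its queue */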
2614		if (rt_se->run_list.prev != rt_se->run_list.next) {
2615			requeue_task_rt(rq, p, 0);
2616			resched_curr(rq);
2617			return;
2618		}
2619	}
2620}
2621
2622static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2623{
2624	/*
2625	 * Time slice is 0 for SCHED_FIFO tasks
2626	 */
2627	if (task->policy == SCHED_RR)
2628		return sched_rr_timeslice;
2629	else
2630		return 0;
2631}
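
/*
 * Userspace view (illustrative sketch): the value computed above backs
 * sched_rr_get_interval(2), which reports the RR timeslice for SCHED_RR
 * tasks and a zero interval for SCHED_FIFO tasks.
 */
#if 0
#include <sched.h>
#include <stdio.h>
#include <time.h>

static void show_rr_interval(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0 /* calling task */, &ts) == 0)
		printf("RR timeslice: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
}
#endif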
2632
2633#ifdef CONFIG_SCHED_CORE
2634static int task_is_throttled_rt(struct task_struct *p, int cpu)
2635{
2636	struct rt_rq *rt_rq;
2637
2638#ifdef CONFIG_RT_GROUP_SCHED
2639	rt_rq = task_group(p)->rt_rq[cpu];
2640#else
2641	rt_rq = &cpu_rq(cpu)->rt;
2642#endif
2643
2644	return rt_rq_throttled(rt_rq);
2645}
2646#endif
2647
2648DEFINE_SCHED_CLASS(rt) = {
2649
2650	.enqueue_task		= enqueue_task_rt,
2651	.dequeue_task		= dequeue_task_rt,
2652	.yield_task		= yield_task_rt,
2653
2654	.wakeup_preempt		= wakeup_preempt_rt,
2655
2656	.pick_next_task		= pick_next_task_rt,
2657	.put_prev_task		= put_prev_task_rt,
2658	.set_next_task          = set_next_task_rt,
2659
2660#ifdef CONFIG_SMP
2661	.balance		= balance_rt,
2662	.pick_task		= pick_task_rt,
2663	.select_task_rq		= select_task_rq_rt,
2664	.set_cpus_allowed       = set_cpus_allowed_common,
2665	.rq_online              = rq_online_rt,
2666	.rq_offline             = rq_offline_rt,
2667	.task_woken		= task_woken_rt,
2668	.switched_from		= switched_from_rt,
2669	.find_lock_rq		= find_lock_lowest_rq,
2670#endif
2671
2672	.task_tick		= task_tick_rt,
2673
2674	.get_rr_interval	= get_rr_interval_rt,
2675
2676	.prio_changed		= prio_changed_rt,
2677	.switched_to		= switched_to_rt,
2678
2679	.update_curr		= update_curr_rt,
2680
2681#ifdef CONFIG_SCHED_CORE
2682	.task_is_throttled	= task_is_throttled_rt,
2683#endif
2684
2685#ifdef CONFIG_UCLAMP_TASK
2686	.uclamp_enabled		= 1,
2687#endif
2688};
2689
2690#ifdef CONFIG_RT_GROUP_SCHED
2691/*
2692 * Ensure that the real-time constraints are schedulable.
2693 */
2694static DEFINE_MUTEX(rt_constraints_mutex);
2695
2696static inline int tg_has_rt_tasks(struct task_group *tg)
2697{
2698	struct task_struct *task;
2699	struct css_task_iter it;
2700	int ret = 0;
2701
2702	/*
2703	 * Autogroups do not have RT tasks; see autogroup_create().
2704	 */
2705	if (task_group_is_autogroup(tg))
2706		return 0;
2707
2708	css_task_iter_start(&tg->css, 0, &it);
2709	while (!ret && (task = css_task_iter_next(&it)))
2710		ret |= rt_task(task);
2711	css_task_iter_end(&it);
2712
2713	return ret;
2714}
2715
2716struct rt_schedulable_data {
2717	struct task_group *tg;
2718	u64 rt_period;
2719	u64 rt_runtime;
2720};
2721
2722static int tg_rt_schedulable(struct task_group *tg, void *data)
2723{
2724	struct rt_schedulable_data *d = data;
2725	struct task_group *child;
2726	unsigned long total, sum = 0;
2727	u64 period, runtime;
2728
2729	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2730	runtime = tg->rt_bandwidth.rt_runtime;
2731
2732	if (tg == d->tg) {
2733		period = d->rt_period;
2734		runtime = d->rt_runtime;
2735	}
2736
2737	/*
2738	 * Cannot have more runtime than the period.
2739	 */
2740	if (runtime > period && runtime != RUNTIME_INF)
2741		return -EINVAL;
2742
2743	/*
2744	 * Ensure we don't starve existing RT tasks if runtime turns zero.
2745	 */
2746	if (rt_bandwidth_enabled() && !runtime &&
2747	    tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
2748		return -EBUSY;
2749
2750	total = to_ratio(period, runtime);
2751
2752	/*
2753	 * Nobody can have more than the global setting allows.
2754	 */
2755	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2756		return -EINVAL;
2757
2758	/*
2759	 * The sum of our children's runtime should not exceed our own.
2760	 */
2761	list_for_each_entry_rcu(child, &tg->children, siblings) {
2762		period = ktime_to_ns(child->rt_bandwidth.rt_period);
2763		runtime = child->rt_bandwidth.rt_runtime;
2764
2765		if (child == d->tg) {
2766			period = d->rt_period;
2767			runtime = d->rt_runtime;
2768		}
2769
2770		sum += to_ratio(period, runtime);
2771	}
2772
2773	if (sum > total)
2774		return -EINVAL;
2775
2776	return 0;
2777}
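
/*
 * Worked example (illustrative): to_ratio() scales runtime/period to a
 * fixed-point fraction, so the default global settings of a 1s period and
 * 950ms runtime cap the ratio at 0.95. A group asking for 400ms out of a
 * 500ms period (0.80) passes; 980ms out of 1s (0.98) exceeds the cap and
 * fails with -EINVAL, as does any set of children whose ratios sum past
 * their parent's. A hypothetical sketch of the scaling, assuming the usual
 * 20-bit shift:
 */
#if 0
static u64 ratio_sketch(u64 period_ns, u64 runtime_ns)
{
	return div64_u64(runtime_ns << 20, period_ns);	/* runtime/period */
}
#endif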
2778
2779static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2780{
2781	int ret;
2782
2783	struct rt_schedulable_data data = {
2784		.tg = tg,
2785		.rt_period = period,
2786		.rt_runtime = runtime,
2787	};
2788
2789	rcu_read_lock();
2790	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2791	rcu_read_unlock();
2792
2793	return ret;
2794}
2795
2796static int tg_set_rt_bandwidth(struct task_group *tg,
2797		u64 rt_period, u64 rt_runtime)
2798{
2799	int i, err = 0;
2800
2801	/*
2802	 * Disallowing the root group RT runtime is BAD; it would prevent the
2803	 * kernel from creating (and/or operating) RT threads.
2804	 */
2805	if (tg == &root_task_group && rt_runtime == 0)
2806		return -EINVAL;
2807
2808	/* A period of zero doesn't make any sense. */
2809	if (rt_period == 0)
2810		return -EINVAL;
2811
2812	/*
2813	 * Bound the quota to defend against overflow during the bandwidth shift.
2814	 */
2815	if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2816		return -EINVAL;
2817
2818	mutex_lock(&rt_constraints_mutex);
2819	err = __rt_schedulable(tg, rt_period, rt_runtime);
2820	if (err)
2821		goto unlock;
2822
2823	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2824	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2825	tg->rt_bandwidth.rt_runtime = rt_runtime;
2826
2827	for_each_possible_cpu(i) {
2828		struct rt_rq *rt_rq = tg->rt_rq[i];
2829
2830		raw_spin_lock(&rt_rq->rt_runtime_lock);
2831		rt_rq->rt_runtime = rt_runtime;
2832		raw_spin_unlock(&rt_rq->rt_runtime_lock);
2833	}
2834	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2835unlock:
2836	mutex_unlock(&rt_constraints_mutex);
2837
2838	return err;
2839}
2840
2841int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2842{
2843	u64 rt_runtime, rt_period;
2844
2845	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2846	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2847	if (rt_runtime_us < 0)
2848		rt_runtime = RUNTIME_INF;
2849	else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2850		return -EINVAL;
2851
2852	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2853}
2854
2855long sched_group_rt_runtime(struct task_group *tg)
2856{
2857	u64 rt_runtime_us;
2858
2859	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2860		return -1;
2861
2862	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2863	do_div(rt_runtime_us, NSEC_PER_USEC);
2864	return rt_runtime_us;
2865}
2866
2867int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2868{
2869	u64 rt_runtime, rt_period;
2870
2871	if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2872		return -EINVAL;
2873
2874	rt_period = rt_period_us * NSEC_PER_USEC;
2875	rt_runtime = tg->rt_bandwidth.rt_runtime;
2876
2877	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2878}
2879
2880long sched_group_rt_period(struct task_group *tg)
2881{
2882	u64 rt_period_us;
2883
2884	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2885	do_div(rt_period_us, NSEC_PER_USEC);
2886	return rt_period_us;
2887}
2888
2889#ifdef CONFIG_SYSCTL
2890static int sched_rt_global_constraints(void)
2891{
2892	int ret = 0;
2893
2894	mutex_lock(&rt_constraints_mutex);
2895	ret = __rt_schedulable(NULL, 0, 0);
2896	mutex_unlock(&rt_constraints_mutex);
2897
2898	return ret;
2899}
2900#endif /* CONFIG_SYSCTL */
2901
2902int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2903{
2904	/* Don't accept realtime tasks when there is no way for them to run */
2905	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2906		return 0;
2907
2908	return 1;
2909}
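
/*
 * Usage sketch (illustrative, assumes a cgroup-v1 cpu controller mounted at
 * /sys/fs/cgroup/cpu): sched_group_set_rt_runtime() above backs the
 * cpu.rt_runtime_us control file, and sched_rt_can_attach() is why RT tasks
 * cannot join a group whose runtime is zero.
 */
#if 0
#include <stdio.h>

static int set_group_rt_runtime(const char *group, long runtime_us)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/fs/cgroup/cpu/%s/cpu.rt_runtime_us", group);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%ld\n", runtime_us);	/* -1 maps to RUNTIME_INF */
	return fclose(f);
}
#endif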
2910
2911#else /* !CONFIG_RT_GROUP_SCHED */
2912
2913#ifdef CONFIG_SYSCTL
2914static int sched_rt_global_constraints(void)
2915{
2916	unsigned long flags;
2917	int i;
2918
2919	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2920	for_each_possible_cpu(i) {
2921		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
2922
2923		raw_spin_lock(&rt_rq->rt_runtime_lock);
2924		rt_rq->rt_runtime = global_rt_runtime();
2925		raw_spin_unlock(&rt_rq->rt_runtime_lock);
2926	}
2927	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2928
2929	return 0;
2930}
2931#endif /* CONFIG_SYSCTL */
2932#endif /* CONFIG_RT_GROUP_SCHED */
2933
2934#ifdef CONFIG_SYSCTL
2935static int sched_rt_global_validate(void)
2936{
2937	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2938		((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
2939		 ((u64)sysctl_sched_rt_runtime *
2940			NSEC_PER_USEC > max_rt_runtime)))
2941		return -EINVAL;
2942
2943	return 0;
2944}
2945
2946static void sched_rt_do_global(void)
2947{
2948	unsigned long flags;
2949
2950	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2951	def_rt_bandwidth.rt_runtime = global_rt_runtime();
2952	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
2953	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2954}
2955
2956static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
2957		size_t *lenp, loff_t *ppos)
2958{
2959	int old_period, old_runtime;
2960	static DEFINE_MUTEX(mutex);
2961	int ret;
2962
2963	mutex_lock(&mutex);
2964	old_period = sysctl_sched_rt_period;
2965	old_runtime = sysctl_sched_rt_runtime;
2966
2967	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2968
2969	if (!ret && write) {
2970		ret = sched_rt_global_validate();
2971		if (ret)
2972			goto undo;
2973
2974		ret = sched_dl_global_validate();
2975		if (ret)
2976			goto undo;
2977
2978		ret = sched_rt_global_constraints();
2979		if (ret)
2980			goto undo;
2981
2982		sched_rt_do_global();
2983		sched_dl_do_global();
2984	}
2985	if (0) {
2986undo:
2987		sysctl_sched_rt_period = old_period;
2988		sysctl_sched_rt_runtime = old_runtime;
2989	}
2990	mutex_unlock(&mutex);
2991
2992	return ret;
2993}
2994
2995static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
2996		size_t *lenp, loff_t *ppos)
2997{
2998	int ret;
2999	static DEFINE_MUTEX(mutex);
3000
3001	mutex_lock(&mutex);
3002	ret = proc_dointvec(table, write, buffer, lenp, ppos);
3003	/*
3004	 * Make sure that internally we keep jiffies.
3005	 * Also, writing zero resets the timeslice to the default:
3006	 */
3007	if (!ret && write) {
3008		sched_rr_timeslice =
3009			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
3010			msecs_to_jiffies(sysctl_sched_rr_timeslice);
3011
3012		if (sysctl_sched_rr_timeslice <= 0)
3013			sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
3014	}
3015	mutex_unlock(&mutex);
3016
3017	return ret;
3018}
3019#endif /* CONFIG_SYSCTL */
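
/*
 * Usage sketch (illustrative): sched_rt_handler() and sched_rr_handler()
 * above sit behind /proc/sys/kernel/sched_rt_period_us, sched_rt_runtime_us
 * and sched_rr_timeslice_ms. The defaults reserve 5% of CPU time for non-RT
 * tasks (period 1000000, runtime 950000).
 */
#if 0
#include <stdio.h>

static long read_sched_sysctl(const char *name)
{
	char path[128];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/%s", name);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;	/* e.g. read_sched_sysctl("sched_rt_runtime_us") */
}
#endif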
3020
3021#ifdef CONFIG_SCHED_DEBUG
3022void print_rt_stats(struct seq_file *m, int cpu)
3023{
3024	rt_rq_iter_t iter;
3025	struct rt_rq *rt_rq;
3026
3027	rcu_read_lock();
3028	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
3029		print_rt_rq(m, cpu, rt_rq);
3030	rcu_read_unlock();
3031}
3032#endif /* CONFIG_SCHED_DEBUG */