kernel/sched/rt.c (Linux v3.15)
   1/*
   2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
   3 * policies)
   4 */
   5
   6#include "sched.h"
   7
   8#include <linux/slab.h>
   9
  10int sched_rr_timeslice = RR_TIMESLICE;
  11
  12static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
  13
  14struct rt_bandwidth def_rt_bandwidth;
  15
  16static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
  17{
  18	struct rt_bandwidth *rt_b =
  19		container_of(timer, struct rt_bandwidth, rt_period_timer);
  20	ktime_t now;
  21	int overrun;
  22	int idle = 0;
  23
  24	for (;;) {
  25		now = hrtimer_cb_get_time(timer);
  26		overrun = hrtimer_forward(timer, now, rt_b->rt_period);
  27
  28		if (!overrun)
  29			break;
  30
  31		idle = do_sched_rt_period_timer(rt_b, overrun);
  32	}
  33
  34	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
  35}
  36
  37void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
  38{
  39	rt_b->rt_period = ns_to_ktime(period);
  40	rt_b->rt_runtime = runtime;
  41
  42	raw_spin_lock_init(&rt_b->rt_runtime_lock);
  43
  44	hrtimer_init(&rt_b->rt_period_timer,
  45			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  46	rt_b->rt_period_timer.function = sched_rt_period_timer;
  47}
  48
  49static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
  50{
  51	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
  52		return;
  53
  54	if (hrtimer_active(&rt_b->rt_period_timer))
  55		return;
  56
  57	raw_spin_lock(&rt_b->rt_runtime_lock);
  58	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
  59	raw_spin_unlock(&rt_b->rt_runtime_lock);
  60}
  61
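/*
 * The bandwidth machinery above backs the global RT throttling budget
 * that userspace tunes through /proc/sys/kernel/sched_rt_period_us and
 * /proc/sys/kernel/sched_rt_runtime_us (defaults: 1000000 us and
 * 950000 us). A minimal userspace sketch, assuming only these standard
 * procfs paths, that reads both tunables (error handling trimmed):
 */
#include <stdio.h>

static long read_us(const char *path)
{
	long val = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("rt period : %ld us\n",
	       read_us("/proc/sys/kernel/sched_rt_period_us"));
	printf("rt runtime: %ld us\n",
	       read_us("/proc/sys/kernel/sched_rt_runtime_us"));
	return 0;
}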
  62void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
  63{
  64	struct rt_prio_array *array;
  65	int i;
  66
  67	array = &rt_rq->active;
  68	for (i = 0; i < MAX_RT_PRIO; i++) {
  69		INIT_LIST_HEAD(array->queue + i);
  70		__clear_bit(i, array->bitmap);
  71	}
  72	/* delimiter for bitsearch: */
  73	__set_bit(MAX_RT_PRIO, array->bitmap);
  74
  75#if defined CONFIG_SMP
  76	rt_rq->highest_prio.curr = MAX_RT_PRIO;
  77	rt_rq->highest_prio.next = MAX_RT_PRIO;
  78	rt_rq->rt_nr_migratory = 0;
  79	rt_rq->overloaded = 0;
  80	plist_head_init(&rt_rq->pushable_tasks);
  81#endif
  82
  83	rt_rq->rt_time = 0;
  84	rt_rq->rt_throttled = 0;
  85	rt_rq->rt_runtime = 0;
  86	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
  87}
  88
  89#ifdef CONFIG_RT_GROUP_SCHED
  90static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
  91{
  92	hrtimer_cancel(&rt_b->rt_period_timer);
  93}
  94
  95#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
  96
  97static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
  98{
  99#ifdef CONFIG_SCHED_DEBUG
 100	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
 101#endif
 102	return container_of(rt_se, struct task_struct, rt);
 103}
 104
 105static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 106{
 107	return rt_rq->rq;
 108}
 109
 110static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 111{
 112	return rt_se->rt_rq;
 113}
 114
 115void free_rt_sched_group(struct task_group *tg)
 116{
 117	int i;
 118
 119	if (tg->rt_se)
 120		destroy_rt_bandwidth(&tg->rt_bandwidth);
 121
 122	for_each_possible_cpu(i) {
 123		if (tg->rt_rq)
 124			kfree(tg->rt_rq[i]);
 125		if (tg->rt_se)
 126			kfree(tg->rt_se[i]);
 127	}
 128
 129	kfree(tg->rt_rq);
 130	kfree(tg->rt_se);
 131}
 132
 133void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 134		struct sched_rt_entity *rt_se, int cpu,
 135		struct sched_rt_entity *parent)
 136{
 137	struct rq *rq = cpu_rq(cpu);
 138
 139	rt_rq->highest_prio.curr = MAX_RT_PRIO;
 140	rt_rq->rt_nr_boosted = 0;
 141	rt_rq->rq = rq;
 142	rt_rq->tg = tg;
 143
 144	tg->rt_rq[cpu] = rt_rq;
 145	tg->rt_se[cpu] = rt_se;
 146
 147	if (!rt_se)
 148		return;
 149
 150	if (!parent)
 151		rt_se->rt_rq = &rq->rt;
 152	else
 153		rt_se->rt_rq = parent->my_q;
 154
 155	rt_se->my_q = rt_rq;
 156	rt_se->parent = parent;
 157	INIT_LIST_HEAD(&rt_se->run_list);
 158}
 159
 160int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 161{
 162	struct rt_rq *rt_rq;
 163	struct sched_rt_entity *rt_se;
 164	int i;
 165
 166	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
 167	if (!tg->rt_rq)
 168		goto err;
 169	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
 170	if (!tg->rt_se)
 171		goto err;
 172
 173	init_rt_bandwidth(&tg->rt_bandwidth,
 174			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
 175
 176	for_each_possible_cpu(i) {
 177		rt_rq = kzalloc_node(sizeof(struct rt_rq),
 178				     GFP_KERNEL, cpu_to_node(i));
 179		if (!rt_rq)
 180			goto err;
 181
 182		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
 183				     GFP_KERNEL, cpu_to_node(i));
 184		if (!rt_se)
 185			goto err_free_rq;
 186
 187		init_rt_rq(rt_rq, cpu_rq(i));
 188		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 189		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
 190	}
 191
 192	return 1;
 193
 194err_free_rq:
 195	kfree(rt_rq);
 196err:
 197	return 0;
 198}
 199
 200#else /* CONFIG_RT_GROUP_SCHED */
 201
 202#define rt_entity_is_task(rt_se) (1)
 203
 204static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 205{
 206	return container_of(rt_se, struct task_struct, rt);
 207}
 208
 209static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 210{
 211	return container_of(rt_rq, struct rq, rt);
 212}
 213
 214static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 215{
 216	struct task_struct *p = rt_task_of(rt_se);
 217	struct rq *rq = task_rq(p);
 218
 219	return &rq->rt;
 220}
 221
 222void free_rt_sched_group(struct task_group *tg) { }
 223
 224int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 225{
 226	return 1;
 227}
 228#endif /* CONFIG_RT_GROUP_SCHED */
 229
 230#ifdef CONFIG_SMP
 231
 232static int pull_rt_task(struct rq *this_rq);
 233
 234static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 235{
 236	/* Try to pull RT tasks here if we lower this rq's prio */
 237	return rq->rt.highest_prio.curr > prev->prio;
 238}
 239
 240static inline int rt_overloaded(struct rq *rq)
 241{
 242	return atomic_read(&rq->rd->rto_count);
 243}
 244
 245static inline void rt_set_overload(struct rq *rq)
 246{
 247	if (!rq->online)
 248		return;
 249
 250	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
 251	/*
 252	 * Make sure the mask is visible before we set
 253	 * the overload count. That is checked to determine
 254	 * if we should look at the mask. It would be a shame
 255	 * if we looked at the mask, but the mask was not
 256	 * updated yet.
 257	 *
 258	 * Matched by the barrier in pull_rt_task().
 259	 */
 260	smp_wmb();
 261	atomic_inc(&rq->rd->rto_count);
 262}
 263
 264static inline void rt_clear_overload(struct rq *rq)
 265{
 266	if (!rq->online)
 267		return;
 268
 269	/* the order here really doesn't matter */
 270	atomic_dec(&rq->rd->rto_count);
 271	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 272}
 273
 274static void update_rt_migration(struct rt_rq *rt_rq)
 275{
 276	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
 277		if (!rt_rq->overloaded) {
 278			rt_set_overload(rq_of_rt_rq(rt_rq));
 279			rt_rq->overloaded = 1;
 280		}
 281	} else if (rt_rq->overloaded) {
 282		rt_clear_overload(rq_of_rt_rq(rt_rq));
 283		rt_rq->overloaded = 0;
 284	}
 285}
 286
 287static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 288{
 289	struct task_struct *p;
 290
 291	if (!rt_entity_is_task(rt_se))
 292		return;
 293
 294	p = rt_task_of(rt_se);
 295	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 296
 297	rt_rq->rt_nr_total++;
 298	if (p->nr_cpus_allowed > 1)
 299		rt_rq->rt_nr_migratory++;
 300
 301	update_rt_migration(rt_rq);
 302}
 303
 304static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 305{
 306	struct task_struct *p;
 307
 308	if (!rt_entity_is_task(rt_se))
 309		return;
 310
 311	p = rt_task_of(rt_se);
 312	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 313
 314	rt_rq->rt_nr_total--;
 315	if (p->nr_cpus_allowed > 1)
 316		rt_rq->rt_nr_migratory--;
 317
 318	update_rt_migration(rt_rq);
 319}
 320
 321static inline int has_pushable_tasks(struct rq *rq)
 322{
 323	return !plist_head_empty(&rq->rt.pushable_tasks);
 324}
 325
 326static inline void set_post_schedule(struct rq *rq)
 327{
 328	/*
 329	 * We detect this state here so that we can avoid taking the RQ
 330	 * lock again later if there is no need to push
 331	 */
 332	rq->post_schedule = has_pushable_tasks(rq);
 333}
 334
 335static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 336{
 337	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 338	plist_node_init(&p->pushable_tasks, p->prio);
 339	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
 340
 341	/* Update the highest prio pushable task */
 342	if (p->prio < rq->rt.highest_prio.next)
 343		rq->rt.highest_prio.next = p->prio;
 344}
 345
 346static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 347{
 348	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 349
 350	/* Update the new highest prio pushable task */
 351	if (has_pushable_tasks(rq)) {
 352		p = plist_first_entry(&rq->rt.pushable_tasks,
 353				      struct task_struct, pushable_tasks);
 354		rq->rt.highest_prio.next = p->prio;
 355	} else
 356		rq->rt.highest_prio.next = MAX_RT_PRIO;
 357}
 358
 359#else
 360
 361static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 362{
 363}
 364
 365static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 366{
 367}
 368
 369static inline
 370void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 371{
 372}
 373
 374static inline
 375void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 376{
 377}
 378
 379static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 380{
 381	return false;
 382}
 383
 384static inline int pull_rt_task(struct rq *this_rq)
 385{
 386	return 0;
 387}
 388
 389static inline void set_post_schedule(struct rq *rq)
 390{
 391}
 392#endif /* CONFIG_SMP */
 393
 394static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 395{
 396	return !list_empty(&rt_se->run_list);
 397}
 398
 399#ifdef CONFIG_RT_GROUP_SCHED
 400
 401static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 402{
 403	if (!rt_rq->tg)
 404		return RUNTIME_INF;
 405
 406	return rt_rq->rt_runtime;
 407}
 408
 409static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 410{
 411	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
 412}
 413
 414typedef struct task_group *rt_rq_iter_t;
 415
 416static inline struct task_group *next_task_group(struct task_group *tg)
 417{
 418	do {
 419		tg = list_entry_rcu(tg->list.next,
 420			typeof(struct task_group), list);
 421	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
 422
 423	if (&tg->list == &task_groups)
 424		tg = NULL;
 425
 426	return tg;
 427}
 428
 429#define for_each_rt_rq(rt_rq, iter, rq)					\
 430	for (iter = container_of(&task_groups, typeof(*iter), list);	\
 431		(iter = next_task_group(iter)) &&			\
 432		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
 433
 434#define for_each_sched_rt_entity(rt_se) \
 435	for (; rt_se; rt_se = rt_se->parent)
 436
 437static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 438{
 439	return rt_se->my_q;
 440}
 441
 442static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
 443static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 444
 445static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 446{
 447	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
 448	struct sched_rt_entity *rt_se;
 449
 450	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 451
 452	rt_se = rt_rq->tg->rt_se[cpu];
 453
 454	if (rt_rq->rt_nr_running) {
 455		if (rt_se && !on_rt_rq(rt_se))
 456			enqueue_rt_entity(rt_se, false);
 457		if (rt_rq->highest_prio.curr < curr->prio)
 458			resched_task(curr);
 459	}
 460}
 461
 462static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 463{
 464	struct sched_rt_entity *rt_se;
 465	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 466
 467	rt_se = rt_rq->tg->rt_se[cpu];
 468
 469	if (rt_se && on_rt_rq(rt_se))
 470		dequeue_rt_entity(rt_se);
 471}
 472
 473static int rt_se_boosted(struct sched_rt_entity *rt_se)
 474{
 475	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 476	struct task_struct *p;
 477
 478	if (rt_rq)
 479		return !!rt_rq->rt_nr_boosted;
 480
 481	p = rt_task_of(rt_se);
 482	return p->prio != p->normal_prio;
 483}
 484
 485#ifdef CONFIG_SMP
 486static inline const struct cpumask *sched_rt_period_mask(void)
 487{
 488	return this_rq()->rd->span;
 489}
 490#else
 491static inline const struct cpumask *sched_rt_period_mask(void)
 492{
 493	return cpu_online_mask;
 494}
 495#endif
 496
 497static inline
 498struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 499{
 500	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
 501}
 502
 503static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 504{
 505	return &rt_rq->tg->rt_bandwidth;
 506}
 507
 508#else /* !CONFIG_RT_GROUP_SCHED */
 509
 510static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 511{
 512	return rt_rq->rt_runtime;
 513}
 514
 515static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 516{
 517	return ktime_to_ns(def_rt_bandwidth.rt_period);
 518}
 519
 520typedef struct rt_rq *rt_rq_iter_t;
 521
 522#define for_each_rt_rq(rt_rq, iter, rq) \
 523	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
 524
 525#define for_each_sched_rt_entity(rt_se) \
 526	for (; rt_se; rt_se = NULL)
 527
 528static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 529{
 530	return NULL;
 531}
 532
 533static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 534{
 535	if (rt_rq->rt_nr_running)
 536		resched_task(rq_of_rt_rq(rt_rq)->curr);
 537}
 538
 539static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 540{
 541}
 542
 543static inline const struct cpumask *sched_rt_period_mask(void)
 544{
 545	return cpu_online_mask;
 546}
 547
 548static inline
 549struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 550{
 551	return &cpu_rq(cpu)->rt;
 552}
 553
 554static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 555{
 556	return &def_rt_bandwidth;
 557}
 558
 559#endif /* CONFIG_RT_GROUP_SCHED */
 560
 561bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
 562{
 563	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 564
 565	return (hrtimer_active(&rt_b->rt_period_timer) ||
 566		rt_rq->rt_time < rt_b->rt_runtime);
 567}
 568
 569#ifdef CONFIG_SMP
 570/*
 571 * We ran out of runtime, see if we can borrow some from our neighbours.
 572 */
 573static int do_balance_runtime(struct rt_rq *rt_rq)
 574{
 575	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 576	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
 577	int i, weight, more = 0;
 578	u64 rt_period;
 579
 580	weight = cpumask_weight(rd->span);
 581
 582	raw_spin_lock(&rt_b->rt_runtime_lock);
 583	rt_period = ktime_to_ns(rt_b->rt_period);
 584	for_each_cpu(i, rd->span) {
 585		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 586		s64 diff;
 587
 588		if (iter == rt_rq)
 589			continue;
 590
 591		raw_spin_lock(&iter->rt_runtime_lock);
 592		/*
 593		 * Either all rqs have inf runtime and there's nothing to steal
 594		 * or __disable_runtime() below sets a specific rq to inf to
  595		 * indicate it has been disabled and disallow stealing.
 596		 */
 597		if (iter->rt_runtime == RUNTIME_INF)
 598			goto next;
 599
 600		/*
 601		 * From runqueues with spare time, take 1/n part of their
 602		 * spare time, but no more than our period.
 603		 */
 604		diff = iter->rt_runtime - iter->rt_time;
 605		if (diff > 0) {
 606			diff = div_u64((u64)diff, weight);
 607			if (rt_rq->rt_runtime + diff > rt_period)
 608				diff = rt_period - rt_rq->rt_runtime;
 609			iter->rt_runtime -= diff;
 610			rt_rq->rt_runtime += diff;
 611			more = 1;
 612			if (rt_rq->rt_runtime == rt_period) {
 613				raw_spin_unlock(&iter->rt_runtime_lock);
 614				break;
 615			}
 616		}
 617next:
 618		raw_spin_unlock(&iter->rt_runtime_lock);
 619	}
 620	raw_spin_unlock(&rt_b->rt_runtime_lock);
 621
 622	return more;
 623}
 624
 625/*
  626 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 627 */
 628static void __disable_runtime(struct rq *rq)
 629{
 630	struct root_domain *rd = rq->rd;
 631	rt_rq_iter_t iter;
 632	struct rt_rq *rt_rq;
 633
 634	if (unlikely(!scheduler_running))
 635		return;
 636
 637	for_each_rt_rq(rt_rq, iter, rq) {
 638		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 639		s64 want;
 640		int i;
 641
 642		raw_spin_lock(&rt_b->rt_runtime_lock);
 643		raw_spin_lock(&rt_rq->rt_runtime_lock);
 644		/*
 645		 * Either we're all inf and nobody needs to borrow, or we're
 646		 * already disabled and thus have nothing to do, or we have
 647		 * exactly the right amount of runtime to take out.
 648		 */
 649		if (rt_rq->rt_runtime == RUNTIME_INF ||
 650				rt_rq->rt_runtime == rt_b->rt_runtime)
 651			goto balanced;
 652		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 653
 654		/*
 655		 * Calculate the difference between what we started out with
  656		 * and what we currently have; that's the amount of runtime
  657		 * we lent out and now have to reclaim.
 658		 */
 659		want = rt_b->rt_runtime - rt_rq->rt_runtime;
 660
 661		/*
 662		 * Greedy reclaim, take back as much as we can.
 663		 */
 664		for_each_cpu(i, rd->span) {
 665			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 666			s64 diff;
 667
 668			/*
 669			 * Can't reclaim from ourselves or disabled runqueues.
 670			 */
 671			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 672				continue;
 673
 674			raw_spin_lock(&iter->rt_runtime_lock);
 675			if (want > 0) {
 676				diff = min_t(s64, iter->rt_runtime, want);
 677				iter->rt_runtime -= diff;
 678				want -= diff;
 679			} else {
 680				iter->rt_runtime -= want;
 681				want -= want;
 682			}
 683			raw_spin_unlock(&iter->rt_runtime_lock);
 684
 685			if (!want)
 686				break;
 687		}
 688
 689		raw_spin_lock(&rt_rq->rt_runtime_lock);
 690		/*
 691		 * We cannot be left wanting - that would mean some runtime
 692		 * leaked out of the system.
 693		 */
 694		BUG_ON(want);
 695balanced:
 696		/*
 697		 * Disable all the borrow logic by pretending we have inf
 698		 * runtime - in which case borrowing doesn't make sense.
 699		 */
 700		rt_rq->rt_runtime = RUNTIME_INF;
 701		rt_rq->rt_throttled = 0;
 702		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 703		raw_spin_unlock(&rt_b->rt_runtime_lock);
 704	}
 705}
 706
 707static void __enable_runtime(struct rq *rq)
 708{
 709	rt_rq_iter_t iter;
 710	struct rt_rq *rt_rq;
 711
 712	if (unlikely(!scheduler_running))
 713		return;
 714
 715	/*
 716	 * Reset each runqueue's bandwidth settings
 717	 */
 718	for_each_rt_rq(rt_rq, iter, rq) {
 719		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 720
 721		raw_spin_lock(&rt_b->rt_runtime_lock);
 722		raw_spin_lock(&rt_rq->rt_runtime_lock);
 723		rt_rq->rt_runtime = rt_b->rt_runtime;
 724		rt_rq->rt_time = 0;
 725		rt_rq->rt_throttled = 0;
 726		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 727		raw_spin_unlock(&rt_b->rt_runtime_lock);
 728	}
 729}
 730
 731static int balance_runtime(struct rt_rq *rt_rq)
 732{
 733	int more = 0;
 734
 735	if (!sched_feat(RT_RUNTIME_SHARE))
 736		return more;
 737
 738	if (rt_rq->rt_time > rt_rq->rt_runtime) {
 739		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 740		more = do_balance_runtime(rt_rq);
 741		raw_spin_lock(&rt_rq->rt_runtime_lock);
 742	}
 743
 744	return more;
 745}
 746#else /* !CONFIG_SMP */
 747static inline int balance_runtime(struct rt_rq *rt_rq)
 748{
 749	return 0;
 750}
 751#endif /* CONFIG_SMP */
 752
 753static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 754{
 755	int i, idle = 1, throttled = 0;
 756	const struct cpumask *span;
 757
 758	span = sched_rt_period_mask();
 759#ifdef CONFIG_RT_GROUP_SCHED
 760	/*
 761	 * FIXME: isolated CPUs should really leave the root task group,
 762	 * whether they are isolcpus or were isolated via cpusets, lest
 763	 * the timer run on a CPU which does not service all runqueues,
 764	 * potentially leaving other CPUs indefinitely throttled.  If
 765	 * isolation is really required, the user will turn the throttle
 766	 * off to kill the perturbations it causes anyway.  Meanwhile,
 767	 * this maintains functionality for boot and/or troubleshooting.
 768	 */
 769	if (rt_b == &root_task_group.rt_bandwidth)
 770		span = cpu_online_mask;
 771#endif
 772	for_each_cpu(i, span) {
 773		int enqueue = 0;
 774		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 775		struct rq *rq = rq_of_rt_rq(rt_rq);
 776
 777		raw_spin_lock(&rq->lock);
 778		if (rt_rq->rt_time) {
 779			u64 runtime;
 780
 781			raw_spin_lock(&rt_rq->rt_runtime_lock);
 782			if (rt_rq->rt_throttled)
 783				balance_runtime(rt_rq);
 784			runtime = rt_rq->rt_runtime;
 785			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
 786			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
 787				rt_rq->rt_throttled = 0;
 788				enqueue = 1;
 789
 790				/*
 791				 * Force a clock update if the CPU was idle,
 792				 * lest wakeup -> unthrottle time accumulate.
 793				 */
 794				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
 795					rq->skip_clock_update = -1;
 796			}
 797			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 798				idle = 0;
 799			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 800		} else if (rt_rq->rt_nr_running) {
 801			idle = 0;
 802			if (!rt_rq_throttled(rt_rq))
 803				enqueue = 1;
 804		}
 805		if (rt_rq->rt_throttled)
 806			throttled = 1;
 807
 808		if (enqueue)
 809			sched_rt_rq_enqueue(rt_rq);
 810		raw_spin_unlock(&rq->lock);
 811	}
 812
 813	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
 814		return 1;
 815
 816	return idle;
 817}
 818
 819static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 820{
 821#ifdef CONFIG_RT_GROUP_SCHED
 822	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 823
 824	if (rt_rq)
 825		return rt_rq->highest_prio.curr;
 826#endif
 827
 828	return rt_task_of(rt_se)->prio;
 829}
 830
 831static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 832{
 833	u64 runtime = sched_rt_runtime(rt_rq);
 834
 835	if (rt_rq->rt_throttled)
 836		return rt_rq_throttled(rt_rq);
 837
 838	if (runtime >= sched_rt_period(rt_rq))
 839		return 0;
 840
 841	balance_runtime(rt_rq);
 842	runtime = sched_rt_runtime(rt_rq);
 843	if (runtime == RUNTIME_INF)
 844		return 0;
 845
 846	if (rt_rq->rt_time > runtime) {
 847		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 848
 849		/*
 850		 * Don't actually throttle groups that have no runtime assigned
 851		 * but accrue some time due to boosting.
 852		 */
 853		if (likely(rt_b->rt_runtime)) {
 854			static bool once = false;
 855
 856			rt_rq->rt_throttled = 1;
 857
 858			if (!once) {
 859				once = true;
 860				printk_sched("sched: RT throttling activated\n");
 861			}
 862		} else {
 863			/*
  864			 * In case we did anyway, make it go away:
  865			 * replenishment is pointless, since it would replenish us
  866			 * with exactly 0 ns.
 867			 */
 868			rt_rq->rt_time = 0;
 869		}
 870
 871		if (rt_rq_throttled(rt_rq)) {
 872			sched_rt_rq_dequeue(rt_rq);
 873			return 1;
 874		}
 875	}
 876
 877	return 0;
 878}
 879
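/*
 * When rt_time exceeds the runtime above, the runqueue is throttled and
 * the one-time "sched: RT throttling activated" message is printed. A
 * minimal sketch (needs root/CAP_SYS_NICE; illustration only) of a busy
 * loop under SCHED_FIFO that consumes the default 950 ms / 1000 ms
 * budget. Note that on SMP with the RT_RUNTIME_SHARE feature enabled the
 * rq may first borrow runtime from idle CPUs via do_balance_runtime().
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
		perror("sched_setscheduler");
		return 1;
	}
	for (;;)
		;	/* never blocks; the throttle should kick in */
}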
 880/*
 881 * Update the current task's runtime statistics. Skip current tasks that
 882 * are not in our scheduling class.
 883 */
 884static void update_curr_rt(struct rq *rq)
 885{
 886	struct task_struct *curr = rq->curr;
 887	struct sched_rt_entity *rt_se = &curr->rt;
 888	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 889	u64 delta_exec;
 890
 891	if (curr->sched_class != &rt_sched_class)
 892		return;
 893
 894	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
 895	if (unlikely((s64)delta_exec <= 0))
 896		return;
 897
 898	schedstat_set(curr->se.statistics.exec_max,
 899		      max(curr->se.statistics.exec_max, delta_exec));
 900
 901	curr->se.sum_exec_runtime += delta_exec;
 902	account_group_exec_runtime(curr, delta_exec);
 903
 904	curr->se.exec_start = rq_clock_task(rq);
 905	cpuacct_charge(curr, delta_exec);
 906
 907	sched_rt_avg_update(rq, delta_exec);
 908
 909	if (!rt_bandwidth_enabled())
 910		return;
 911
 912	for_each_sched_rt_entity(rt_se) {
 913		rt_rq = rt_rq_of_se(rt_se);
 914
 915		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
 916			raw_spin_lock(&rt_rq->rt_runtime_lock);
 917			rt_rq->rt_time += delta_exec;
 918			if (sched_rt_runtime_exceeded(rt_rq))
 919				resched_task(curr);
 920			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 921		}
 922	}
 923}
 924
 925#if defined CONFIG_SMP
 926
 927static void
 928inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 929{
 930	struct rq *rq = rq_of_rt_rq(rt_rq);
 931
 932#ifdef CONFIG_RT_GROUP_SCHED
 933	/*
 934	 * Change rq's cpupri only if rt_rq is the top queue.
 935	 */
 936	if (&rq->rt != rt_rq)
 937		return;
 938#endif
 939	if (rq->online && prio < prev_prio)
 940		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 941}
 942
 943static void
 944dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 945{
 946	struct rq *rq = rq_of_rt_rq(rt_rq);
 947
 948#ifdef CONFIG_RT_GROUP_SCHED
 949	/*
 950	 * Change rq's cpupri only if rt_rq is the top queue.
 951	 */
 952	if (&rq->rt != rt_rq)
 953		return;
 954#endif
 955	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
 956		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 957}
 958
 959#else /* CONFIG_SMP */
 960
 961static inline
 962void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
 963static inline
 964void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
 965
 966#endif /* CONFIG_SMP */
 967
 968#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 969static void
 970inc_rt_prio(struct rt_rq *rt_rq, int prio)
 971{
 972	int prev_prio = rt_rq->highest_prio.curr;
 973
 974	if (prio < prev_prio)
 975		rt_rq->highest_prio.curr = prio;
 976
 977	inc_rt_prio_smp(rt_rq, prio, prev_prio);
 978}
 979
 980static void
 981dec_rt_prio(struct rt_rq *rt_rq, int prio)
 982{
 983	int prev_prio = rt_rq->highest_prio.curr;
 984
 985	if (rt_rq->rt_nr_running) {
 986
 987		WARN_ON(prio < prev_prio);
 988
 989		/*
 990		 * This may have been our highest task, and therefore
 991		 * we may have some recomputation to do
 992		 */
 993		if (prio == prev_prio) {
 994			struct rt_prio_array *array = &rt_rq->active;
 995
 996			rt_rq->highest_prio.curr =
 997				sched_find_first_bit(array->bitmap);
 998		}
 999
1000	} else
1001		rt_rq->highest_prio.curr = MAX_RT_PRIO;
1002
1003	dec_rt_prio_smp(rt_rq, prio, prev_prio);
1004}
1005
1006#else
1007
1008static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1009static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1010
1011#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1012
1013#ifdef CONFIG_RT_GROUP_SCHED
1014
1015static void
1016inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1017{
1018	if (rt_se_boosted(rt_se))
1019		rt_rq->rt_nr_boosted++;
1020
1021	if (rt_rq->tg)
1022		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1023}
1024
1025static void
1026dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1027{
1028	if (rt_se_boosted(rt_se))
1029		rt_rq->rt_nr_boosted--;
1030
1031	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1032}
1033
1034#else /* CONFIG_RT_GROUP_SCHED */
1035
1036static void
1037inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1038{
1039	start_rt_bandwidth(&def_rt_bandwidth);
1040}
1041
1042static inline
1043void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1044
1045#endif /* CONFIG_RT_GROUP_SCHED */
1046
1047static inline
1048void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1049{
1050	int prio = rt_se_prio(rt_se);
1051
1052	WARN_ON(!rt_prio(prio));
1053	rt_rq->rt_nr_running++;
1054
1055	inc_rt_prio(rt_rq, prio);
1056	inc_rt_migration(rt_se, rt_rq);
1057	inc_rt_group(rt_se, rt_rq);
1058}
1059
1060static inline
1061void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1062{
1063	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1064	WARN_ON(!rt_rq->rt_nr_running);
1065	rt_rq->rt_nr_running--;
1066
1067	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1068	dec_rt_migration(rt_se, rt_rq);
1069	dec_rt_group(rt_se, rt_rq);
1070}
1071
1072static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1073{
1074	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1075	struct rt_prio_array *array = &rt_rq->active;
1076	struct rt_rq *group_rq = group_rt_rq(rt_se);
1077	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1078
1079	/*
 1080	 * Don't enqueue the group if it is throttled, or when empty.
 1081	 * The latter is a consequence of the former when a child group
 1082	 * gets throttled and the current group doesn't have any other
1083	 * active members.
1084	 */
1085	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1086		return;
1087
1088	if (head)
1089		list_add(&rt_se->run_list, queue);
1090	else
1091		list_add_tail(&rt_se->run_list, queue);
1092	__set_bit(rt_se_prio(rt_se), array->bitmap);
1093
1094	inc_rt_tasks(rt_se, rt_rq);
1095}
1096
1097static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1098{
1099	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1100	struct rt_prio_array *array = &rt_rq->active;
1101
1102	list_del_init(&rt_se->run_list);
1103	if (list_empty(array->queue + rt_se_prio(rt_se)))
1104		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1105
1106	dec_rt_tasks(rt_se, rt_rq);
1107}
1108
1109/*
1110 * Because the prio of an upper entry depends on the lower
1111 * entries, we must remove entries top - down.
1112 */
1113static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1114{
1115	struct sched_rt_entity *back = NULL;
1116
1117	for_each_sched_rt_entity(rt_se) {
1118		rt_se->back = back;
1119		back = rt_se;
1120	}
1121
1122	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1123		if (on_rt_rq(rt_se))
1124			__dequeue_rt_entity(rt_se);
1125	}
1126}
1127
1128static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1129{
1130	dequeue_rt_stack(rt_se);
1131	for_each_sched_rt_entity(rt_se)
1132		__enqueue_rt_entity(rt_se, head);
1133}
1134
1135static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1136{
1137	dequeue_rt_stack(rt_se);
1138
1139	for_each_sched_rt_entity(rt_se) {
1140		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1141
1142		if (rt_rq && rt_rq->rt_nr_running)
1143			__enqueue_rt_entity(rt_se, false);
1144	}
1145}
1146
1147/*
1148 * Adding/removing a task to/from a priority array:
1149 */
1150static void
1151enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1152{
1153	struct sched_rt_entity *rt_se = &p->rt;
1154
1155	if (flags & ENQUEUE_WAKEUP)
1156		rt_se->timeout = 0;
1157
1158	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1159
1160	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1161		enqueue_pushable_task(rq, p);
1162
1163	inc_nr_running(rq);
1164}
1165
1166static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1167{
1168	struct sched_rt_entity *rt_se = &p->rt;
1169
1170	update_curr_rt(rq);
1171	dequeue_rt_entity(rt_se);
1172
1173	dequeue_pushable_task(rq, p);
1174
1175	dec_nr_running(rq);
1176}
1177
1178/*
1179 * Put task to the head or the end of the run list without the overhead of
1180 * dequeue followed by enqueue.
1181 */
1182static void
1183requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1184{
1185	if (on_rt_rq(rt_se)) {
1186		struct rt_prio_array *array = &rt_rq->active;
1187		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1188
1189		if (head)
1190			list_move(&rt_se->run_list, queue);
1191		else
1192			list_move_tail(&rt_se->run_list, queue);
1193	}
1194}
1195
1196static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1197{
1198	struct sched_rt_entity *rt_se = &p->rt;
1199	struct rt_rq *rt_rq;
1200
1201	for_each_sched_rt_entity(rt_se) {
1202		rt_rq = rt_rq_of_se(rt_se);
1203		requeue_rt_entity(rt_rq, rt_se, head);
1204	}
1205}
1206
1207static void yield_task_rt(struct rq *rq)
1208{
1209	requeue_task_rt(rq, rq->curr, 0);
1210}
1211
1212#ifdef CONFIG_SMP
1213static int find_lowest_rq(struct task_struct *task);
1214
1215static int
1216select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1217{
1218	struct task_struct *curr;
1219	struct rq *rq;
1220
1221	if (p->nr_cpus_allowed == 1)
1222		goto out;
1223
1224	/* For anything but wake ups, just return the task_cpu */
1225	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1226		goto out;
1227
1228	rq = cpu_rq(cpu);
1229
1230	rcu_read_lock();
1231	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1232
1233	/*
1234	 * If the current task on @p's runqueue is an RT task, then
1235	 * try to see if we can wake this RT task up on another
1236	 * runqueue. Otherwise simply start this RT task
1237	 * on its current runqueue.
1238	 *
1239	 * We want to avoid overloading runqueues. If the woken
1240	 * task is a higher priority, then it will stay on this CPU
1241	 * and the lower prio task should be moved to another CPU.
1242	 * Even though this will probably make the lower prio task
1243	 * lose its cache, we do not want to bounce a higher task
1244	 * around just because it gave up its CPU, perhaps for a
1245	 * lock?
1246	 *
1247	 * For equal prio tasks, we just let the scheduler sort it out.
1248	 *
1249	 * Otherwise, just let it ride on the affined RQ and the
1250	 * post-schedule router will push the preempted task away
1251	 *
1252	 * This test is optimistic, if we get it wrong the load-balancer
1253	 * will have to sort it out.
1254	 */
1255	if (curr && unlikely(rt_task(curr)) &&
1256	    (curr->nr_cpus_allowed < 2 ||
1257	     curr->prio <= p->prio)) {
1258		int target = find_lowest_rq(p);
1259
1260		if (target != -1)
1261			cpu = target;
1262	}
1263	rcu_read_unlock();
1264
1265out:
1266	return cpu;
1267}
1268
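/*
 * The p->nr_cpus_allowed == 1 fast path above is what a pinned task
 * takes: no lowest-rq search is done on wakeup. A minimal sketch,
 * assuming glibc's pthread_setaffinity_np() and that the chosen CPU
 * exists, of pinning the calling thread so its wakeups stay put:
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

static int pin_self_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	/* afterwards this thread's nr_cpus_allowed is 1 */
	return pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
}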
1269static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1270{
1271	if (rq->curr->nr_cpus_allowed == 1)
1272		return;
1273
1274	if (p->nr_cpus_allowed != 1
1275	    && cpupri_find(&rq->rd->cpupri, p, NULL))
1276		return;
1277
1278	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1279		return;
1280
1281	/*
 1282	 * There appear to be other CPUs that can accept
 1283	 * current and none to run 'p', so let's reschedule
1284	 * to try and push current away:
1285	 */
1286	requeue_task_rt(rq, p, 1);
1287	resched_task(rq->curr);
1288}
1289
1290#endif /* CONFIG_SMP */
1291
1292/*
1293 * Preempt the current task with a newly woken task if needed:
1294 */
1295static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1296{
1297	if (p->prio < rq->curr->prio) {
1298		resched_task(rq->curr);
1299		return;
1300	}
1301
1302#ifdef CONFIG_SMP
1303	/*
1304	 * If:
1305	 *
1306	 * - the newly woken task is of equal priority to the current task
1307	 * - the newly woken task is non-migratable while current is migratable
1308	 * - current will be preempted on the next reschedule
1309	 *
1310	 * we should check to see if current can readily move to a different
1311	 * cpu.  If so, we will reschedule to allow the push logic to try
1312	 * to move current somewhere else, making room for our non-migratable
1313	 * task.
1314	 */
1315	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1316		check_preempt_equal_prio(rq, p);
1317#endif
1318}
1319
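/*
 * Note on the comparison direction used above: a numerically lower
 * ->prio is a higher priority. For RT tasks the kernel maps the
 * userspace sched_priority (1..99, higher is stronger) onto
 * prio = MAX_RT_PRIO - 1 - rt_priority, so sched_priority 99 becomes
 * prio 0. A small sketch of that mapping (MAX_RT_PRIO is 100 in the
 * kernel headers):
 */
static inline int rt_priority_to_prio(int rt_priority)
{
	return MAX_RT_PRIO - 1 - rt_priority;	/* 99 -> 0, 1 -> 98 */
}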
1320static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1321						   struct rt_rq *rt_rq)
1322{
1323	struct rt_prio_array *array = &rt_rq->active;
1324	struct sched_rt_entity *next = NULL;
1325	struct list_head *queue;
1326	int idx;
1327
1328	idx = sched_find_first_bit(array->bitmap);
1329	BUG_ON(idx >= MAX_RT_PRIO);
1330
1331	queue = array->queue + idx;
1332	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1333
1334	return next;
1335}
1336
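/*
 * Task selection here is O(1): array->bitmap has one bit per priority
 * level, and init_rt_rq() sets the MAX_RT_PRIO delimiter bit so the
 * search always terminates. A simplified illustration of the same idea
 * using a single 64-bit word and a compiler builtin (the kernel's bitmap
 * spans MAX_RT_PRIO+1 bits and uses sched_find_first_bit() instead):
 */
#include <stdint.h>

struct toy_prio_array {
	uint64_t bitmap;	/* bit i set => queue for prio i is non-empty */
};

static int toy_find_highest_prio(const struct toy_prio_array *a)
{
	/* delimiter bit 63 plays the role of the MAX_RT_PRIO bit */
	uint64_t word = a->bitmap | (1ULL << 63);

	return __builtin_ctzll(word);	/* lowest set bit = highest priority */
}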
1337static struct task_struct *_pick_next_task_rt(struct rq *rq)
1338{
1339	struct sched_rt_entity *rt_se;
1340	struct task_struct *p;
1341	struct rt_rq *rt_rq  = &rq->rt;
1342
1343	do {
1344		rt_se = pick_next_rt_entity(rq, rt_rq);
1345		BUG_ON(!rt_se);
1346		rt_rq = group_rt_rq(rt_se);
1347	} while (rt_rq);
1348
1349	p = rt_task_of(rt_se);
1350	p->se.exec_start = rq_clock_task(rq);
1351
1352	return p;
1353}
1354
1355static struct task_struct *
1356pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1357{
1358	struct task_struct *p;
1359	struct rt_rq *rt_rq = &rq->rt;
1360
1361	if (need_pull_rt_task(rq, prev)) {
1362		pull_rt_task(rq);
1363		/*
1364		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1365		 * means a dl or stop task can slip in, in which case we need
1366		 * to re-start task selection.
1367		 */
1368		if (unlikely((rq->stop && rq->stop->on_rq) ||
1369			     rq->dl.dl_nr_running))
1370			return RETRY_TASK;
1371	}
1372
1373	/*
1374	 * We may dequeue prev's rt_rq in put_prev_task().
1375	 * So, we update time before rt_nr_running check.
1376	 */
1377	if (prev->sched_class == &rt_sched_class)
1378		update_curr_rt(rq);
1379
1380	if (!rt_rq->rt_nr_running)
1381		return NULL;
1382
1383	if (rt_rq_throttled(rt_rq))
1384		return NULL;
1385
1386	put_prev_task(rq, prev);
1387
1388	p = _pick_next_task_rt(rq);
1389
1390	/* The running task is never eligible for pushing */
1391	if (p)
1392		dequeue_pushable_task(rq, p);
1393
1394	set_post_schedule(rq);
1395
1396	return p;
1397}
1398
1399static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1400{
1401	update_curr_rt(rq);
1402
1403	/*
1404	 * The previous task needs to be made eligible for pushing
1405	 * if it is still active
1406	 */
1407	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1408		enqueue_pushable_task(rq, p);
1409}
1410
1411#ifdef CONFIG_SMP
1412
1413/* Only try algorithms three times */
1414#define RT_MAX_TRIES 3
1415
1416static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1417{
1418	if (!task_running(rq, p) &&
1419	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1420		return 1;
1421	return 0;
1422}
1423
1424/*
1425 * Return the highest pushable rq's task, which is suitable to be executed
1426 * on the cpu, NULL otherwise
1427 */
1428static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1429{
1430	struct plist_head *head = &rq->rt.pushable_tasks;
1431	struct task_struct *p;
1432
1433	if (!has_pushable_tasks(rq))
1434		return NULL;
1435
1436	plist_for_each_entry(p, head, pushable_tasks) {
1437		if (pick_rt_task(rq, p, cpu))
1438			return p;
1439	}
1440
1441	return NULL;
1442}
1443
1444static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1445
1446static int find_lowest_rq(struct task_struct *task)
1447{
1448	struct sched_domain *sd;
1449	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1450	int this_cpu = smp_processor_id();
1451	int cpu      = task_cpu(task);
1452
1453	/* Make sure the mask is initialized first */
1454	if (unlikely(!lowest_mask))
1455		return -1;
1456
1457	if (task->nr_cpus_allowed == 1)
1458		return -1; /* No other targets possible */
1459
1460	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1461		return -1; /* No targets found */
1462
1463	/*
1464	 * At this point we have built a mask of cpus representing the
1465	 * lowest priority tasks in the system.  Now we want to elect
1466	 * the best one based on our affinity and topology.
1467	 *
1468	 * We prioritize the last cpu that the task executed on since
1469	 * it is most likely cache-hot in that location.
1470	 */
1471	if (cpumask_test_cpu(cpu, lowest_mask))
1472		return cpu;
1473
1474	/*
1475	 * Otherwise, we consult the sched_domains span maps to figure
1476	 * out which cpu is logically closest to our hot cache data.
1477	 */
1478	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1479		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1480
1481	rcu_read_lock();
1482	for_each_domain(cpu, sd) {
1483		if (sd->flags & SD_WAKE_AFFINE) {
1484			int best_cpu;
1485
1486			/*
1487			 * "this_cpu" is cheaper to preempt than a
1488			 * remote processor.
1489			 */
1490			if (this_cpu != -1 &&
1491			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1492				rcu_read_unlock();
1493				return this_cpu;
1494			}
1495
1496			best_cpu = cpumask_first_and(lowest_mask,
1497						     sched_domain_span(sd));
1498			if (best_cpu < nr_cpu_ids) {
1499				rcu_read_unlock();
1500				return best_cpu;
1501			}
1502		}
1503	}
1504	rcu_read_unlock();
1505
1506	/*
1507	 * And finally, if there were no matches within the domains
1508	 * just give the caller *something* to work with from the compatible
1509	 * locations.
1510	 */
1511	if (this_cpu != -1)
1512		return this_cpu;
1513
1514	cpu = cpumask_any(lowest_mask);
1515	if (cpu < nr_cpu_ids)
1516		return cpu;
1517	return -1;
1518}
1519
1520/* Will lock the rq it finds */
1521static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1522{
1523	struct rq *lowest_rq = NULL;
1524	int tries;
1525	int cpu;
1526
1527	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1528		cpu = find_lowest_rq(task);
1529
1530		if ((cpu == -1) || (cpu == rq->cpu))
1531			break;
1532
1533		lowest_rq = cpu_rq(cpu);
1534
1535		/* if the prio of this runqueue changed, try again */
1536		if (double_lock_balance(rq, lowest_rq)) {
1537			/*
 1538			 * We had to unlock the run queue. In
 1539			 * the meantime, the task could have
 1540			 * migrated already or had its affinity changed.
1541			 * Also make sure that it wasn't scheduled on its rq.
1542			 */
1543			if (unlikely(task_rq(task) != rq ||
1544				     !cpumask_test_cpu(lowest_rq->cpu,
1545						       tsk_cpus_allowed(task)) ||
1546				     task_running(rq, task) ||
1547				     !task->on_rq)) {
1548
1549				double_unlock_balance(rq, lowest_rq);
1550				lowest_rq = NULL;
1551				break;
1552			}
1553		}
1554
1555		/* If this rq is still suitable use it. */
1556		if (lowest_rq->rt.highest_prio.curr > task->prio)
1557			break;
1558
1559		/* try again */
1560		double_unlock_balance(rq, lowest_rq);
1561		lowest_rq = NULL;
1562	}
1563
1564	return lowest_rq;
1565}
1566
1567static struct task_struct *pick_next_pushable_task(struct rq *rq)
1568{
1569	struct task_struct *p;
1570
1571	if (!has_pushable_tasks(rq))
1572		return NULL;
1573
1574	p = plist_first_entry(&rq->rt.pushable_tasks,
1575			      struct task_struct, pushable_tasks);
1576
1577	BUG_ON(rq->cpu != task_cpu(p));
1578	BUG_ON(task_current(rq, p));
1579	BUG_ON(p->nr_cpus_allowed <= 1);
1580
1581	BUG_ON(!p->on_rq);
1582	BUG_ON(!rt_task(p));
1583
1584	return p;
1585}
1586
1587/*
 1588 * If the current CPU has more than one RT task, see if the
 1589 * non-running task can migrate over to a CPU that is running a task
1590 * of lesser priority.
1591 */
1592static int push_rt_task(struct rq *rq)
1593{
1594	struct task_struct *next_task;
1595	struct rq *lowest_rq;
1596	int ret = 0;
1597
1598	if (!rq->rt.overloaded)
1599		return 0;
1600
1601	next_task = pick_next_pushable_task(rq);
1602	if (!next_task)
1603		return 0;
1604
1605retry:
1606	if (unlikely(next_task == rq->curr)) {
1607		WARN_ON(1);
1608		return 0;
1609	}
1610
1611	/*
 1612	 * It's possible that the next_task slipped in with a
 1613	 * higher priority than current. If that's the case,
1614	 * just reschedule current.
1615	 */
1616	if (unlikely(next_task->prio < rq->curr->prio)) {
1617		resched_task(rq->curr);
1618		return 0;
1619	}
1620
1621	/* We might release rq lock */
1622	get_task_struct(next_task);
1623
1624	/* find_lock_lowest_rq locks the rq if found */
1625	lowest_rq = find_lock_lowest_rq(next_task, rq);
1626	if (!lowest_rq) {
1627		struct task_struct *task;
1628		/*
1629		 * find_lock_lowest_rq releases rq->lock
1630		 * so it is possible that next_task has migrated.
1631		 *
1632		 * We need to make sure that the task is still on the same
1633		 * run-queue and is also still the next task eligible for
1634		 * pushing.
1635		 */
1636		task = pick_next_pushable_task(rq);
1637		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1638			/*
1639			 * The task hasn't migrated, and is still the next
1640			 * eligible task, but we failed to find a run-queue
1641			 * to push it to.  Do not retry in this case, since
1642			 * other cpus will pull from us when ready.
1643			 */
1644			goto out;
1645		}
1646
1647		if (!task)
1648			/* No more tasks, just exit */
1649			goto out;
1650
1651		/*
1652		 * Something has shifted, try again.
1653		 */
1654		put_task_struct(next_task);
1655		next_task = task;
1656		goto retry;
1657	}
1658
1659	deactivate_task(rq, next_task, 0);
1660	set_task_cpu(next_task, lowest_rq->cpu);
1661	activate_task(lowest_rq, next_task, 0);
1662	ret = 1;
1663
1664	resched_task(lowest_rq->curr);
1665
1666	double_unlock_balance(rq, lowest_rq);
1667
1668out:
1669	put_task_struct(next_task);
1670
1671	return ret;
1672}
1673
1674static void push_rt_tasks(struct rq *rq)
1675{
 1676	/* push_rt_task() will return true if it moved an RT task */
1677	while (push_rt_task(rq))
1678		;
1679}
1680
1681static int pull_rt_task(struct rq *this_rq)
1682{
1683	int this_cpu = this_rq->cpu, ret = 0, cpu;
1684	struct task_struct *p;
1685	struct rq *src_rq;
1686
1687	if (likely(!rt_overloaded(this_rq)))
1688		return 0;
1689
1690	/*
 1691	 * Match the barrier from rt_set_overload(); this guarantees that if we
1692	 * see overloaded we must also see the rto_mask bit.
1693	 */
1694	smp_rmb();
1695
1696	for_each_cpu(cpu, this_rq->rd->rto_mask) {
1697		if (this_cpu == cpu)
1698			continue;
1699
1700		src_rq = cpu_rq(cpu);
1701
1702		/*
1703		 * Don't bother taking the src_rq->lock if the next highest
1704		 * task is known to be lower-priority than our current task.
1705		 * This may look racy, but if this value is about to go
1706		 * logically higher, the src_rq will push this task away.
 1707		 * And if it's going logically lower, we do not care.
1708		 */
1709		if (src_rq->rt.highest_prio.next >=
1710		    this_rq->rt.highest_prio.curr)
1711			continue;
1712
1713		/*
1714		 * We can potentially drop this_rq's lock in
1715		 * double_lock_balance, and another CPU could
1716		 * alter this_rq
1717		 */
1718		double_lock_balance(this_rq, src_rq);
1719
1720		/*
 1721		 * We can only pull a task that is pushable
 1722		 * on its rq, and no others.
1723		 */
1724		p = pick_highest_pushable_task(src_rq, this_cpu);
1725
1726		/*
1727		 * Do we have an RT task that preempts
1728		 * the to-be-scheduled task?
1729		 */
1730		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1731			WARN_ON(p == src_rq->curr);
1732			WARN_ON(!p->on_rq);
1733
1734			/*
1735			 * There's a chance that p is higher in priority
1736			 * than what's currently running on its cpu.
 1737			 * This just means that p is waking up and hasn't
1738			 * had a chance to schedule. We only pull
1739			 * p if it is lower in priority than the
1740			 * current task on the run queue
1741			 */
1742			if (p->prio < src_rq->curr->prio)
1743				goto skip;
1744
1745			ret = 1;
1746
1747			deactivate_task(src_rq, p, 0);
1748			set_task_cpu(p, this_cpu);
1749			activate_task(this_rq, p, 0);
1750			/*
1751			 * We continue with the search, just in
1752			 * case there's an even higher prio task
1753			 * in another runqueue. (low likelihood
1754			 * but possible)
1755			 */
1756		}
1757skip:
1758		double_unlock_balance(this_rq, src_rq);
1759	}
1760
1761	return ret;
1762}
1763
1764static void post_schedule_rt(struct rq *rq)
1765{
1766	push_rt_tasks(rq);
1767}
1768
1769/*
1770 * If we are not running and we are not going to reschedule soon, we should
1771 * try to push tasks away now
1772 */
1773static void task_woken_rt(struct rq *rq, struct task_struct *p)
1774{
1775	if (!task_running(rq, p) &&
1776	    !test_tsk_need_resched(rq->curr) &&
1777	    has_pushable_tasks(rq) &&
1778	    p->nr_cpus_allowed > 1 &&
1779	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
1780	    (rq->curr->nr_cpus_allowed < 2 ||
1781	     rq->curr->prio <= p->prio))
1782		push_rt_tasks(rq);
1783}
1784
1785static void set_cpus_allowed_rt(struct task_struct *p,
1786				const struct cpumask *new_mask)
1787{
1788	struct rq *rq;
1789	int weight;
1790
1791	BUG_ON(!rt_task(p));
1792
1793	if (!p->on_rq)
1794		return;
1795
1796	weight = cpumask_weight(new_mask);
1797
1798	/*
1799	 * Only update if the process changes its state from whether it
1800	 * can migrate or not.
1801	 */
1802	if ((p->nr_cpus_allowed > 1) == (weight > 1))
1803		return;
1804
1805	rq = task_rq(p);
1806
1807	/*
1808	 * The process used to be able to migrate OR it can now migrate
1809	 */
1810	if (weight <= 1) {
1811		if (!task_current(rq, p))
1812			dequeue_pushable_task(rq, p);
1813		BUG_ON(!rq->rt.rt_nr_migratory);
1814		rq->rt.rt_nr_migratory--;
1815	} else {
1816		if (!task_current(rq, p))
1817			enqueue_pushable_task(rq, p);
1818		rq->rt.rt_nr_migratory++;
1819	}
1820
1821	update_rt_migration(&rq->rt);
1822}
1823
1824/* Assumes rq->lock is held */
1825static void rq_online_rt(struct rq *rq)
1826{
1827	if (rq->rt.overloaded)
1828		rt_set_overload(rq);
1829
1830	__enable_runtime(rq);
1831
1832	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1833}
1834
1835/* Assumes rq->lock is held */
1836static void rq_offline_rt(struct rq *rq)
1837{
1838	if (rq->rt.overloaded)
1839		rt_clear_overload(rq);
1840
1841	__disable_runtime(rq);
1842
1843	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1844}
1845
1846/*
 1847 * When switching from the RT queue, we bring ourselves to a position
 1848 * where we might want to pull RT tasks from other runqueues.
1849 */
1850static void switched_from_rt(struct rq *rq, struct task_struct *p)
1851{
1852	/*
1853	 * If there are other RT tasks then we will reschedule
1854	 * and the scheduling of the other RT tasks will handle
1855	 * the balancing. But if we are the last RT task
1856	 * we may need to handle the pulling of RT tasks
1857	 * now.
1858	 */
1859	if (!p->on_rq || rq->rt.rt_nr_running)
1860		return;
1861
1862	if (pull_rt_task(rq))
1863		resched_task(rq->curr);
1864}
1865
1866void __init init_sched_rt_class(void)
1867{
1868	unsigned int i;
1869
1870	for_each_possible_cpu(i) {
1871		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1872					GFP_KERNEL, cpu_to_node(i));
1873	}
1874}
1875#endif /* CONFIG_SMP */
1876
1877/*
1878 * When switching a task to RT, we may overload the runqueue
1879 * with RT tasks. In this case we try to push them off to
1880 * other runqueues.
1881 */
1882static void switched_to_rt(struct rq *rq, struct task_struct *p)
1883{
1884	int check_resched = 1;
1885
1886	/*
1887	 * If we are already running, then there's nothing
1888	 * that needs to be done. But if we are not running
1889	 * we may need to preempt the current running task.
1890	 * If that current running task is also an RT task
1891	 * then see if we can move to another run queue.
1892	 */
1893	if (p->on_rq && rq->curr != p) {
1894#ifdef CONFIG_SMP
1895		if (rq->rt.overloaded && push_rt_task(rq) &&
1896		    /* Don't resched if we changed runqueues */
1897		    rq != task_rq(p))
1898			check_resched = 0;
1899#endif /* CONFIG_SMP */
1900		if (check_resched && p->prio < rq->curr->prio)
1901			resched_task(rq->curr);
1902	}
1903}
1904
1905/*
1906 * Priority of the task has changed. This may cause
1907 * us to initiate a push or pull.
1908 */
1909static void
1910prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1911{
1912	if (!p->on_rq)
1913		return;
1914
1915	if (rq->curr == p) {
1916#ifdef CONFIG_SMP
1917		/*
1918		 * If our priority decreases while running, we
1919		 * may need to pull tasks to this runqueue.
1920		 */
1921		if (oldprio < p->prio)
1922			pull_rt_task(rq);
1923		/*
1924		 * If there's a higher priority task waiting to run
1925		 * then reschedule. Note, the above pull_rt_task
1926		 * can release the rq lock and p could migrate.
1927		 * Only reschedule if p is still on the same runqueue.
1928		 */
1929		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1930			resched_task(p);
1931#else
1932		/* For UP simply resched on drop of prio */
1933		if (oldprio < p->prio)
1934			resched_task(p);
1935#endif /* CONFIG_SMP */
1936	} else {
1937		/*
1938		 * This task is not running, but if it is
1939		 * greater than the current running task
1940		 * then reschedule.
1941		 */
1942		if (p->prio < rq->curr->prio)
1943			resched_task(rq->curr);
1944	}
1945}
1946
1947static void watchdog(struct rq *rq, struct task_struct *p)
1948{
1949	unsigned long soft, hard;
1950
1951	/* max may change after cur was read, this will be fixed next tick */
1952	soft = task_rlimit(p, RLIMIT_RTTIME);
1953	hard = task_rlimit_max(p, RLIMIT_RTTIME);
1954
1955	if (soft != RLIM_INFINITY) {
1956		unsigned long next;
1957
1958		if (p->rt.watchdog_stamp != jiffies) {
1959			p->rt.timeout++;
1960			p->rt.watchdog_stamp = jiffies;
1961		}
1962
1963		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1964		if (p->rt.timeout > next)
1965			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1966	}
1967}
1968
1969static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1970{
1971	struct sched_rt_entity *rt_se = &p->rt;
1972
1973	update_curr_rt(rq);
1974
1975	watchdog(rq, p);
1976
1977	/*
1978	 * RR tasks need a special form of timeslice management.
1979	 * FIFO tasks have no timeslices.
1980	 */
1981	if (p->policy != SCHED_RR)
1982		return;
1983
1984	if (--p->rt.time_slice)
1985		return;
1986
1987	p->rt.time_slice = sched_rr_timeslice;
1988
1989	/*
 1990	 * Requeue to the end of the queue if we (and all of our ancestors) are
 1991	 * not the only element on the queue.
1992	 */
1993	for_each_sched_rt_entity(rt_se) {
1994		if (rt_se->run_list.prev != rt_se->run_list.next) {
1995			requeue_task_rt(rq, p, 0);
1996			set_tsk_need_resched(p);
1997			return;
1998		}
1999	}
2000}
2001
2002static void set_curr_task_rt(struct rq *rq)
2003{
2004	struct task_struct *p = rq->curr;
2005
2006	p->se.exec_start = rq_clock_task(rq);
2007
2008	/* The running task is never eligible for pushing */
2009	dequeue_pushable_task(rq, p);
2010}
2011
2012static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2013{
2014	/*
2015	 * Time slice is 0 for SCHED_FIFO tasks
2016	 */
2017	if (task->policy == SCHED_RR)
2018		return sched_rr_timeslice;
2019	else
2020		return 0;
2021}
2022
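/*
 * get_rr_interval_rt() backs the sched_rr_get_interval() system call:
 * SCHED_RR tasks report the round-robin timeslice (RR_TIMESLICE, 100 ms
 * by default, tunable via /proc/sys/kernel/sched_rr_timeslice_ms) and
 * SCHED_FIFO tasks report 0. A minimal sketch querying the caller:
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)
		printf("timeslice: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}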
2023const struct sched_class rt_sched_class = {
2024	.next			= &fair_sched_class,
2025	.enqueue_task		= enqueue_task_rt,
2026	.dequeue_task		= dequeue_task_rt,
2027	.yield_task		= yield_task_rt,
2028
2029	.check_preempt_curr	= check_preempt_curr_rt,
2030
2031	.pick_next_task		= pick_next_task_rt,
2032	.put_prev_task		= put_prev_task_rt,
2033
2034#ifdef CONFIG_SMP
2035	.select_task_rq		= select_task_rq_rt,
2036
2037	.set_cpus_allowed       = set_cpus_allowed_rt,
2038	.rq_online              = rq_online_rt,
2039	.rq_offline             = rq_offline_rt,
2040	.post_schedule		= post_schedule_rt,
2041	.task_woken		= task_woken_rt,
2042	.switched_from		= switched_from_rt,
2043#endif
2044
2045	.set_curr_task          = set_curr_task_rt,
2046	.task_tick		= task_tick_rt,
2047
2048	.get_rr_interval	= get_rr_interval_rt,
2049
2050	.prio_changed		= prio_changed_rt,
2051	.switched_to		= switched_to_rt,
2052};
2053
2054#ifdef CONFIG_SCHED_DEBUG
2055extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2056
2057void print_rt_stats(struct seq_file *m, int cpu)
2058{
2059	rt_rq_iter_t iter;
2060	struct rt_rq *rt_rq;
2061
2062	rcu_read_lock();
2063	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2064		print_rt_rq(m, cpu, rt_rq);
2065	rcu_read_unlock();
2066}
2067#endif /* CONFIG_SCHED_DEBUG */
 165	if (!tg->rt_rq)
 166		goto err;
 167	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
 168	if (!tg->rt_se)
 169		goto err;
 170
 171	init_rt_bandwidth(&tg->rt_bandwidth,
 172			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
 173
 174	for_each_possible_cpu(i) {
 175		rt_rq = kzalloc_node(sizeof(struct rt_rq),
 176				     GFP_KERNEL, cpu_to_node(i));
 177		if (!rt_rq)
 178			goto err;
 179
 180		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
 181				     GFP_KERNEL, cpu_to_node(i));
 182		if (!rt_se)
 183			goto err_free_rq;
 184
 185		init_rt_rq(rt_rq, cpu_rq(i));
 186		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 187		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
 188	}
 189
 190	return 1;
 191
 192err_free_rq:
 193	kfree(rt_rq);
 194err:
 195	return 0;
 196}
 197
 198#else /* CONFIG_RT_GROUP_SCHED */
 199
 200#define rt_entity_is_task(rt_se) (1)
 201
 202static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 203{
 204	return container_of(rt_se, struct task_struct, rt);
 205}
 206
 207static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 208{
 209	return container_of(rt_rq, struct rq, rt);
 210}
 211
 212static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 213{
 214	struct task_struct *p = rt_task_of(rt_se);
 215	struct rq *rq = task_rq(p);
 216
 217	return &rq->rt;
 218}
 219
 220void free_rt_sched_group(struct task_group *tg) { }
 221
 222int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 223{
 224	return 1;
 225}
 226#endif /* CONFIG_RT_GROUP_SCHED */
 227
 228#ifdef CONFIG_SMP
 229
 230static inline int rt_overloaded(struct rq *rq)
 231{
 232	return atomic_read(&rq->rd->rto_count);
 233}
 234
 235static inline void rt_set_overload(struct rq *rq)
 236{
 237	if (!rq->online)
 238		return;
 239
 240	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
 241	/*
 242	 * Make sure the mask is visible before we set
 243	 * the overload count. That is checked to determine
 244	 * if we should look at the mask. It would be a shame
 245	 * if we looked at the mask, but the mask was not
 246	 * updated yet.
 247	 */
 248	wmb();
 249	atomic_inc(&rq->rd->rto_count);
 250}
 251
 252static inline void rt_clear_overload(struct rq *rq)
 253{
 254	if (!rq->online)
 255		return;
 256
 257	/* the order here really doesn't matter */
 258	atomic_dec(&rq->rd->rto_count);
 259	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 260}
 261
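/*
 * Keep the root domain's overload state in sync: a runqueue counts as
 * RT-overloaded once it holds more than one RT task and at least one of
 * them is allowed to migrate, i.e. other CPUs could usefully pull from it.
 */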
 262static void update_rt_migration(struct rt_rq *rt_rq)
 263{
 264	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
 265		if (!rt_rq->overloaded) {
 266			rt_set_overload(rq_of_rt_rq(rt_rq));
 267			rt_rq->overloaded = 1;
 268		}
 269	} else if (rt_rq->overloaded) {
 270		rt_clear_overload(rq_of_rt_rq(rt_rq));
 271		rt_rq->overloaded = 0;
 272	}
 273}
 274
 275static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 276{
 277	struct task_struct *p;
 278
 279	if (!rt_entity_is_task(rt_se))
 280		return;
 281
 282	p = rt_task_of(rt_se);
 283	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 284
 285	rt_rq->rt_nr_total++;
 286	if (p->nr_cpus_allowed > 1)
 287		rt_rq->rt_nr_migratory++;
 288
 289	update_rt_migration(rt_rq);
 290}
 291
 292static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 293{
 294	struct task_struct *p;
 295
 296	if (!rt_entity_is_task(rt_se))
 297		return;
 298
 299	p = rt_task_of(rt_se);
 300	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 301
 302	rt_rq->rt_nr_total--;
 303	if (p->nr_cpus_allowed > 1)
 304		rt_rq->rt_nr_migratory--;
 305
 306	update_rt_migration(rt_rq);
 307}
 308
 309static inline int has_pushable_tasks(struct rq *rq)
 310{
 311	return !plist_head_empty(&rq->rt.pushable_tasks);
 312}
 313
 314static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 315{
 316	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 317	plist_node_init(&p->pushable_tasks, p->prio);
 318	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
 319
 320	/* Update the highest prio pushable task */
 321	if (p->prio < rq->rt.highest_prio.next)
 322		rq->rt.highest_prio.next = p->prio;
 323}
 324
 325static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 326{
 327	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 328
 329	/* Update the new highest prio pushable task */
 330	if (has_pushable_tasks(rq)) {
 331		p = plist_first_entry(&rq->rt.pushable_tasks,
 332				      struct task_struct, pushable_tasks);
 333		rq->rt.highest_prio.next = p->prio;
 334	} else
 335		rq->rt.highest_prio.next = MAX_RT_PRIO;
 336}
 337
 338#else
 339
 340static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 341{
 342}
 343
 344static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 345{
 346}
 347
 348static inline
 349void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 350{
 351}
 352
 353static inline
 354void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 355{
 356}
 357
 358#endif /* CONFIG_SMP */
 359
 360static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 361{
 362	return !list_empty(&rt_se->run_list);
 363}
 364
 365#ifdef CONFIG_RT_GROUP_SCHED
 366
 367static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 368{
 369	if (!rt_rq->tg)
 370		return RUNTIME_INF;
 371
 372	return rt_rq->rt_runtime;
 373}
 374
 375static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 376{
 377	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
 378}
 379
 380typedef struct task_group *rt_rq_iter_t;
 381
 382static inline struct task_group *next_task_group(struct task_group *tg)
 383{
 384	do {
 385		tg = list_entry_rcu(tg->list.next,
 386			typeof(struct task_group), list);
 387	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
 388
 389	if (&tg->list == &task_groups)
 390		tg = NULL;
 391
 392	return tg;
 393}
 394
 395#define for_each_rt_rq(rt_rq, iter, rq)					\
 396	for (iter = container_of(&task_groups, typeof(*iter), list);	\
 397		(iter = next_task_group(iter)) &&			\
 398		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
 399
 400static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
 401{
 402	list_add_rcu(&rt_rq->leaf_rt_rq_list,
 403			&rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
 404}
 405
 406static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
 407{
 408	list_del_rcu(&rt_rq->leaf_rt_rq_list);
 409}
 410
 411#define for_each_leaf_rt_rq(rt_rq, rq) \
 412	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
 413
 414#define for_each_sched_rt_entity(rt_se) \
 415	for (; rt_se; rt_se = rt_se->parent)
 416
 417static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 418{
 419	return rt_se->my_q;
 420}
 421
 422static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
 423static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 424
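/*
 * (De)activate a group's sched_rt_entity in its parent queue, e.g. when the
 * group gains or loses runnable tasks or is (un)throttled.  On enqueue we
 * also reschedule the current task if the group now holds higher-priority
 * work.
 */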
 425static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 426{
 427	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
 428	struct sched_rt_entity *rt_se;
 429
 430	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 431
 432	rt_se = rt_rq->tg->rt_se[cpu];
 433
 434	if (rt_rq->rt_nr_running) {
 435		if (rt_se && !on_rt_rq(rt_se))
 436			enqueue_rt_entity(rt_se, false);
 437		if (rt_rq->highest_prio.curr < curr->prio)
 438			resched_task(curr);
 439	}
 440}
 441
 442static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 443{
 444	struct sched_rt_entity *rt_se;
 445	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 446
 447	rt_se = rt_rq->tg->rt_se[cpu];
 448
 449	if (rt_se && on_rt_rq(rt_se))
 450		dequeue_rt_entity(rt_se);
 451}
 452
 453static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 454{
 455	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
 456}
 457
 458static int rt_se_boosted(struct sched_rt_entity *rt_se)
 459{
 460	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 461	struct task_struct *p;
 462
 463	if (rt_rq)
 464		return !!rt_rq->rt_nr_boosted;
 465
 466	p = rt_task_of(rt_se);
 467	return p->prio != p->normal_prio;
 468}
 469
 470#ifdef CONFIG_SMP
 471static inline const struct cpumask *sched_rt_period_mask(void)
 472{
 473	return cpu_rq(smp_processor_id())->rd->span;
 474}
 475#else
 476static inline const struct cpumask *sched_rt_period_mask(void)
 477{
 478	return cpu_online_mask;
 479}
 480#endif
 481
 482static inline
 483struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 484{
 485	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
 486}
 487
 488static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 489{
 490	return &rt_rq->tg->rt_bandwidth;
 491}
 492
 493#else /* !CONFIG_RT_GROUP_SCHED */
 494
 495static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 496{
 497	return rt_rq->rt_runtime;
 498}
 499
 500static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 501{
 502	return ktime_to_ns(def_rt_bandwidth.rt_period);
 503}
 504
 505typedef struct rt_rq *rt_rq_iter_t;
 506
 507#define for_each_rt_rq(rt_rq, iter, rq) \
 508	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
 509
 510static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
 511{
 512}
 513
 514static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
 515{
 516}
 517
 518#define for_each_leaf_rt_rq(rt_rq, rq) \
 519	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
 520
 521#define for_each_sched_rt_entity(rt_se) \
 522	for (; rt_se; rt_se = NULL)
 523
 524static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 525{
 526	return NULL;
 527}
 528
 529static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 530{
 531	if (rt_rq->rt_nr_running)
 532		resched_task(rq_of_rt_rq(rt_rq)->curr);
 533}
 534
 535static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 536{
 537}
 538
 539static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 540{
 541	return rt_rq->rt_throttled;
 542}
 543
 544static inline const struct cpumask *sched_rt_period_mask(void)
 545{
 546	return cpu_online_mask;
 547}
 548
 549static inline
 550struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 551{
 552	return &cpu_rq(cpu)->rt;
 553}
 554
 555static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 556{
 557	return &def_rt_bandwidth;
 558}
 559
 560#endif /* CONFIG_RT_GROUP_SCHED */
 561
 562#ifdef CONFIG_SMP
 563/*
 564 * We ran out of runtime, see if we can borrow some from our neighbours.
 565 */
 566static int do_balance_runtime(struct rt_rq *rt_rq)
 567{
 568	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 569	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
 570	int i, weight, more = 0;
 571	u64 rt_period;
 572
 573	weight = cpumask_weight(rd->span);
 574
 575	raw_spin_lock(&rt_b->rt_runtime_lock);
 576	rt_period = ktime_to_ns(rt_b->rt_period);
 577	for_each_cpu(i, rd->span) {
 578		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 579		s64 diff;
 580
 581		if (iter == rt_rq)
 582			continue;
 583
 584		raw_spin_lock(&iter->rt_runtime_lock);
 585		/*
 586		 * Either all rqs have inf runtime and there's nothing to steal
 587		 * or __disable_runtime() below sets a specific rq to inf to
  588		 * indicate it's been disabled and disallow stealing.
 589		 */
 590		if (iter->rt_runtime == RUNTIME_INF)
 591			goto next;
 592
 593		/*
 594		 * From runqueues with spare time, take 1/n part of their
 595		 * spare time, but no more than our period.
 596		 */
 597		diff = iter->rt_runtime - iter->rt_time;
 598		if (diff > 0) {
 599			diff = div_u64((u64)diff, weight);
 600			if (rt_rq->rt_runtime + diff > rt_period)
 601				diff = rt_period - rt_rq->rt_runtime;
 602			iter->rt_runtime -= diff;
 603			rt_rq->rt_runtime += diff;
 604			more = 1;
 605			if (rt_rq->rt_runtime == rt_period) {
 606				raw_spin_unlock(&iter->rt_runtime_lock);
 607				break;
 608			}
 609		}
 610next:
 611		raw_spin_unlock(&iter->rt_runtime_lock);
 612	}
 613	raw_spin_unlock(&rt_b->rt_runtime_lock);
 614
 615	return more;
 616}
 617
 618/*
  619 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 620 */
 621static void __disable_runtime(struct rq *rq)
 622{
 623	struct root_domain *rd = rq->rd;
 624	rt_rq_iter_t iter;
 625	struct rt_rq *rt_rq;
 626
 627	if (unlikely(!scheduler_running))
 628		return;
 629
 630	for_each_rt_rq(rt_rq, iter, rq) {
 631		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 632		s64 want;
 633		int i;
 634
 635		raw_spin_lock(&rt_b->rt_runtime_lock);
 636		raw_spin_lock(&rt_rq->rt_runtime_lock);
 637		/*
 638		 * Either we're all inf and nobody needs to borrow, or we're
 639		 * already disabled and thus have nothing to do, or we have
 640		 * exactly the right amount of runtime to take out.
 641		 */
 642		if (rt_rq->rt_runtime == RUNTIME_INF ||
 643				rt_rq->rt_runtime == rt_b->rt_runtime)
 644			goto balanced;
 645		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 646
 647		/*
 648		 * Calculate the difference between what we started out with
  649		 * and what we currently have; that's the amount of runtime
  650		 * we lent out and now have to reclaim.
 651		 */
 652		want = rt_b->rt_runtime - rt_rq->rt_runtime;
 653
 654		/*
 655		 * Greedy reclaim, take back as much as we can.
 656		 */
 657		for_each_cpu(i, rd->span) {
 658			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 659			s64 diff;
 660
 661			/*
 662			 * Can't reclaim from ourselves or disabled runqueues.
 663			 */
 664			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 665				continue;
 666
 667			raw_spin_lock(&iter->rt_runtime_lock);
 668			if (want > 0) {
 669				diff = min_t(s64, iter->rt_runtime, want);
 670				iter->rt_runtime -= diff;
 671				want -= diff;
 672			} else {
 673				iter->rt_runtime -= want;
 674				want -= want;
 675			}
 676			raw_spin_unlock(&iter->rt_runtime_lock);
 677
 678			if (!want)
 679				break;
 680		}
 681
 682		raw_spin_lock(&rt_rq->rt_runtime_lock);
 683		/*
 684		 * We cannot be left wanting - that would mean some runtime
 685		 * leaked out of the system.
 686		 */
 687		BUG_ON(want);
 688balanced:
 689		/*
 690		 * Disable all the borrow logic by pretending we have inf
 691		 * runtime - in which case borrowing doesn't make sense.
 692		 */
 693		rt_rq->rt_runtime = RUNTIME_INF;
 694		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 695		raw_spin_unlock(&rt_b->rt_runtime_lock);
 696	}
 697}
 698
 699static void disable_runtime(struct rq *rq)
 700{
 701	unsigned long flags;
 702
 703	raw_spin_lock_irqsave(&rq->lock, flags);
 704	__disable_runtime(rq);
 705	raw_spin_unlock_irqrestore(&rq->lock, flags);
 706}
 707
 708static void __enable_runtime(struct rq *rq)
 709{
 710	rt_rq_iter_t iter;
 711	struct rt_rq *rt_rq;
 712
 713	if (unlikely(!scheduler_running))
 714		return;
 715
 716	/*
 717	 * Reset each runqueue's bandwidth settings
 718	 */
 719	for_each_rt_rq(rt_rq, iter, rq) {
 720		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 721
 722		raw_spin_lock(&rt_b->rt_runtime_lock);
 723		raw_spin_lock(&rt_rq->rt_runtime_lock);
 724		rt_rq->rt_runtime = rt_b->rt_runtime;
 725		rt_rq->rt_time = 0;
 726		rt_rq->rt_throttled = 0;
 727		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 728		raw_spin_unlock(&rt_b->rt_runtime_lock);
 729	}
 730}
 731
 732static void enable_runtime(struct rq *rq)
 733{
 734	unsigned long flags;
 735
 736	raw_spin_lock_irqsave(&rq->lock, flags);
 737	__enable_runtime(rq);
 738	raw_spin_unlock_irqrestore(&rq->lock, flags);
 739}
 740
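/*
 * CPU hotplug notifier: stop lending/borrowing runtime on a CPU that is
 * about to go down, and restore each rt_rq's default bandwidth when the
 * CPU comes back online (or the offline attempt fails).
 */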
 741int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
 742{
 743	int cpu = (int)(long)hcpu;
 744
 745	switch (action) {
 746	case CPU_DOWN_PREPARE:
 747	case CPU_DOWN_PREPARE_FROZEN:
 748		disable_runtime(cpu_rq(cpu));
 749		return NOTIFY_OK;
 750
 751	case CPU_DOWN_FAILED:
 752	case CPU_DOWN_FAILED_FROZEN:
 753	case CPU_ONLINE:
 754	case CPU_ONLINE_FROZEN:
 755		enable_runtime(cpu_rq(cpu));
 756		return NOTIFY_OK;
 757
 758	default:
 759		return NOTIFY_DONE;
 760	}
 761}
 762
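/*
 * Borrow runtime from other CPUs only when we have actually overrun our own
 * budget and the RT_RUNTIME_SHARE feature is enabled.  Called with
 * rt_rq->rt_runtime_lock held; it is dropped around the actual transfer.
 */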
 763static int balance_runtime(struct rt_rq *rt_rq)
 764{
 765	int more = 0;
 766
 767	if (!sched_feat(RT_RUNTIME_SHARE))
 768		return more;
 769
 770	if (rt_rq->rt_time > rt_rq->rt_runtime) {
 771		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 772		more = do_balance_runtime(rt_rq);
 773		raw_spin_lock(&rt_rq->rt_runtime_lock);
 774	}
 775
 776	return more;
 777}
 778#else /* !CONFIG_SMP */
 779static inline int balance_runtime(struct rt_rq *rt_rq)
 780{
 781	return 0;
 782}
 783#endif /* CONFIG_SMP */
 784
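/*
 * Called from the rt_bandwidth period timer.  For each CPU in the period
 * mask: pay back the accumulated rt_time, unthrottle the rt_rq once it is
 * under its runtime again and re-enqueue it.  Returns 1 when there is
 * nothing left to do and the timer may stop.
 */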
 785static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 786{
 787	int i, idle = 1, throttled = 0;
 788	const struct cpumask *span;
 789
 790	span = sched_rt_period_mask();
 791	for_each_cpu(i, span) {
 792		int enqueue = 0;
 793		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 794		struct rq *rq = rq_of_rt_rq(rt_rq);
 795
 796		raw_spin_lock(&rq->lock);
 797		if (rt_rq->rt_time) {
 798			u64 runtime;
 799
 800			raw_spin_lock(&rt_rq->rt_runtime_lock);
 801			if (rt_rq->rt_throttled)
 802				balance_runtime(rt_rq);
 803			runtime = rt_rq->rt_runtime;
 804			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
 805			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
 806				rt_rq->rt_throttled = 0;
 807				enqueue = 1;
 808
 809				/*
 810				 * Force a clock update if the CPU was idle,
 811				 * lest wakeup -> unthrottle time accumulate.
 812				 */
 813				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
 814					rq->skip_clock_update = -1;
 815			}
 816			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 817				idle = 0;
 818			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 819		} else if (rt_rq->rt_nr_running) {
 820			idle = 0;
 821			if (!rt_rq_throttled(rt_rq))
 822				enqueue = 1;
 823		}
 824		if (rt_rq->rt_throttled)
 825			throttled = 1;
 826
 827		if (enqueue)
 828			sched_rt_rq_enqueue(rt_rq);
 829		raw_spin_unlock(&rq->lock);
 830	}
 831
 832	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
 833		return 1;
 834
 835	return idle;
 836}
 837
 838static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 839{
 840#ifdef CONFIG_RT_GROUP_SCHED
 841	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 842
 843	if (rt_rq)
 844		return rt_rq->highest_prio.curr;
 845#endif
 846
 847	return rt_task_of(rt_se)->prio;
 848}
 849
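/*
 * Called after update_curr_rt() has charged the just-consumed runtime.
 * Try to borrow from other CPUs first; if we are still over budget,
 * throttle (and dequeue) this rt_rq and return 1 so the caller
 * reschedules the current task.
 */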
 850static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 851{
 852	u64 runtime = sched_rt_runtime(rt_rq);
 853
 854	if (rt_rq->rt_throttled)
 855		return rt_rq_throttled(rt_rq);
 856
 857	if (runtime >= sched_rt_period(rt_rq))
 858		return 0;
 859
 860	balance_runtime(rt_rq);
 861	runtime = sched_rt_runtime(rt_rq);
 862	if (runtime == RUNTIME_INF)
 863		return 0;
 864
 865	if (rt_rq->rt_time > runtime) {
 866		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 867
 868		/*
 869		 * Don't actually throttle groups that have no runtime assigned
 870		 * but accrue some time due to boosting.
 871		 */
 872		if (likely(rt_b->rt_runtime)) {
 873			static bool once = false;
 874
 875			rt_rq->rt_throttled = 1;
 876
 877			if (!once) {
 878				once = true;
 879				printk_sched("sched: RT throttling activated\n");
 880			}
 881		} else {
 882			/*
 883			 * In case we did anyway, make it go away,
 884			 * replenishment is a joke, since it will replenish us
 885			 * with exactly 0 ns.
 886			 */
 887			rt_rq->rt_time = 0;
 888		}
 889
 890		if (rt_rq_throttled(rt_rq)) {
 891			sched_rt_rq_dequeue(rt_rq);
 892			return 1;
 893		}
 894	}
 895
 896	return 0;
 897}
 898
 899/*
 900 * Update the current task's runtime statistics. Skip current tasks that
 901 * are not in our scheduling class.
 902 */
 903static void update_curr_rt(struct rq *rq)
 904{
 905	struct task_struct *curr = rq->curr;
 906	struct sched_rt_entity *rt_se = &curr->rt;
 907	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 908	u64 delta_exec;
 909
 910	if (curr->sched_class != &rt_sched_class)
 911		return;
 912
 913	delta_exec = rq->clock_task - curr->se.exec_start;
 914	if (unlikely((s64)delta_exec < 0))
 915		delta_exec = 0;
 916
 917	schedstat_set(curr->se.statistics.exec_max,
 918		      max(curr->se.statistics.exec_max, delta_exec));
 919
 920	curr->se.sum_exec_runtime += delta_exec;
 921	account_group_exec_runtime(curr, delta_exec);
 922
 923	curr->se.exec_start = rq->clock_task;
 924	cpuacct_charge(curr, delta_exec);
 925
 926	sched_rt_avg_update(rq, delta_exec);
 927
 928	if (!rt_bandwidth_enabled())
 929		return;
 930
 931	for_each_sched_rt_entity(rt_se) {
 932		rt_rq = rt_rq_of_se(rt_se);
 933
 934		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
 935			raw_spin_lock(&rt_rq->rt_runtime_lock);
 936			rt_rq->rt_time += delta_exec;
 937			if (sched_rt_runtime_exceeded(rt_rq))
 938				resched_task(curr);
 939			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 940		}
 941	}
 942}
 943
 944#if defined CONFIG_SMP
 945
 946static void
 947inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 948{
 949	struct rq *rq = rq_of_rt_rq(rt_rq);
 950
 951	if (rq->online && prio < prev_prio)
 952		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 953}
 954
 955static void
 956dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 957{
 958	struct rq *rq = rq_of_rt_rq(rt_rq);
 959
 960	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
 961		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 962}
 963
 964#else /* CONFIG_SMP */
 965
 966static inline
 967void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
 968static inline
 969void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
 970
 971#endif /* CONFIG_SMP */
 972
 973#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 974static void
 975inc_rt_prio(struct rt_rq *rt_rq, int prio)
 976{
 977	int prev_prio = rt_rq->highest_prio.curr;
 978
 979	if (prio < prev_prio)
 980		rt_rq->highest_prio.curr = prio;
 981
 982	inc_rt_prio_smp(rt_rq, prio, prev_prio);
 983}
 984
 985static void
 986dec_rt_prio(struct rt_rq *rt_rq, int prio)
 987{
 988	int prev_prio = rt_rq->highest_prio.curr;
 989
 990	if (rt_rq->rt_nr_running) {
 991
 992		WARN_ON(prio < prev_prio);
 993
 994		/*
 995		 * This may have been our highest task, and therefore
 996		 * we may have some recomputation to do
 997		 */
 998		if (prio == prev_prio) {
 999			struct rt_prio_array *array = &rt_rq->active;
1000
1001			rt_rq->highest_prio.curr =
1002				sched_find_first_bit(array->bitmap);
1003		}
1004
1005	} else
1006		rt_rq->highest_prio.curr = MAX_RT_PRIO;
1007
1008	dec_rt_prio_smp(rt_rq, prio, prev_prio);
1009}
1010
1011#else
1012
1013static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1014static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1015
1016#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1017
1018#ifdef CONFIG_RT_GROUP_SCHED
1019
1020static void
1021inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1022{
1023	if (rt_se_boosted(rt_se))
1024		rt_rq->rt_nr_boosted++;
1025
1026	if (rt_rq->tg)
1027		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1028}
1029
1030static void
1031dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1032{
1033	if (rt_se_boosted(rt_se))
1034		rt_rq->rt_nr_boosted--;
1035
1036	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1037}
1038
1039#else /* CONFIG_RT_GROUP_SCHED */
1040
1041static void
1042inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1043{
1044	start_rt_bandwidth(&def_rt_bandwidth);
1045}
1046
1047static inline
1048void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1049
1050#endif /* CONFIG_RT_GROUP_SCHED */
1051
1052static inline
1053void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1054{
1055	int prio = rt_se_prio(rt_se);
1056
1057	WARN_ON(!rt_prio(prio));
1058	rt_rq->rt_nr_running++;
1059
1060	inc_rt_prio(rt_rq, prio);
1061	inc_rt_migration(rt_se, rt_rq);
1062	inc_rt_group(rt_se, rt_rq);
1063}
1064
1065static inline
1066void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1067{
1068	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1069	WARN_ON(!rt_rq->rt_nr_running);
1070	rt_rq->rt_nr_running--;
1071
1072	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1073	dec_rt_migration(rt_se, rt_rq);
1074	dec_rt_group(rt_se, rt_rq);
1075}
1076
1077static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1078{
1079	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1080	struct rt_prio_array *array = &rt_rq->active;
1081	struct rt_rq *group_rq = group_rt_rq(rt_se);
1082	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1083
1084	/*
 1085	 * Don't enqueue the group if it's throttled, or when empty.
 1086	 * The latter is a consequence of the former when a child group
 1087	 * gets throttled and the current group doesn't have any other
1088	 * active members.
1089	 */
1090	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1091		return;
1092
1093	if (!rt_rq->rt_nr_running)
1094		list_add_leaf_rt_rq(rt_rq);
1095
1096	if (head)
1097		list_add(&rt_se->run_list, queue);
1098	else
1099		list_add_tail(&rt_se->run_list, queue);
1100	__set_bit(rt_se_prio(rt_se), array->bitmap);
1101
1102	inc_rt_tasks(rt_se, rt_rq);
1103}
1104
1105static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1106{
1107	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1108	struct rt_prio_array *array = &rt_rq->active;
1109
1110	list_del_init(&rt_se->run_list);
1111	if (list_empty(array->queue + rt_se_prio(rt_se)))
1112		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1113
1114	dec_rt_tasks(rt_se, rt_rq);
1115	if (!rt_rq->rt_nr_running)
1116		list_del_leaf_rt_rq(rt_rq);
1117}
1118
1119/*
1120 * Because the prio of an upper entry depends on the lower
 1121 * entries, we must remove entries top-down.
1122 */
1123static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1124{
1125	struct sched_rt_entity *back = NULL;
1126
1127	for_each_sched_rt_entity(rt_se) {
1128		rt_se->back = back;
1129		back = rt_se;
1130	}
1131
1132	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1133		if (on_rt_rq(rt_se))
1134			__dequeue_rt_entity(rt_se);
1135	}
1136}
1137
1138static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1139{
1140	dequeue_rt_stack(rt_se);
1141	for_each_sched_rt_entity(rt_se)
1142		__enqueue_rt_entity(rt_se, head);
1143}
1144
1145static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1146{
1147	dequeue_rt_stack(rt_se);
1148
1149	for_each_sched_rt_entity(rt_se) {
1150		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1151
1152		if (rt_rq && rt_rq->rt_nr_running)
1153			__enqueue_rt_entity(rt_se, false);
1154	}
1155}
1156
1157/*
1158 * Adding/removing a task to/from a priority array:
1159 */
1160static void
1161enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1162{
1163	struct sched_rt_entity *rt_se = &p->rt;
1164
1165	if (flags & ENQUEUE_WAKEUP)
1166		rt_se->timeout = 0;
1167
1168	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1169
1170	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1171		enqueue_pushable_task(rq, p);
1172
1173	inc_nr_running(rq);
1174}
1175
1176static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1177{
1178	struct sched_rt_entity *rt_se = &p->rt;
1179
1180	update_curr_rt(rq);
1181	dequeue_rt_entity(rt_se);
1182
1183	dequeue_pushable_task(rq, p);
1184
1185	dec_nr_running(rq);
1186}
1187
1188/*
1189 * Put task to the head or the end of the run list without the overhead of
1190 * dequeue followed by enqueue.
1191 */
1192static void
1193requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1194{
1195	if (on_rt_rq(rt_se)) {
1196		struct rt_prio_array *array = &rt_rq->active;
1197		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1198
1199		if (head)
1200			list_move(&rt_se->run_list, queue);
1201		else
1202			list_move_tail(&rt_se->run_list, queue);
1203	}
1204}
1205
1206static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1207{
1208	struct sched_rt_entity *rt_se = &p->rt;
1209	struct rt_rq *rt_rq;
1210
1211	for_each_sched_rt_entity(rt_se) {
1212		rt_rq = rt_rq_of_se(rt_se);
1213		requeue_rt_entity(rt_rq, rt_se, head);
1214	}
1215}
1216
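/*
 * sched_yield() for an RT task: drop to the tail of our priority queue so
 * any other task queued at the same priority gets to run first.
 */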
1217static void yield_task_rt(struct rq *rq)
1218{
1219	requeue_task_rt(rq, rq->curr, 0);
1220}
1221
1222#ifdef CONFIG_SMP
1223static int find_lowest_rq(struct task_struct *task);
1224
1225static int
1226select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
1227{
1228	struct task_struct *curr;
1229	struct rq *rq;
1230	int cpu;
1231
1232	cpu = task_cpu(p);
1233
1234	if (p->nr_cpus_allowed == 1)
1235		goto out;
1236
1237	/* For anything but wake ups, just return the task_cpu */
1238	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1239		goto out;
1240
1241	rq = cpu_rq(cpu);
1242
1243	rcu_read_lock();
1244	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1245
1246	/*
1247	 * If the current task on @p's runqueue is an RT task, then
1248	 * try to see if we can wake this RT task up on another
1249	 * runqueue. Otherwise simply start this RT task
1250	 * on its current runqueue.
1251	 *
1252	 * We want to avoid overloading runqueues. If the woken
1253	 * task is a higher priority, then it will stay on this CPU
1254	 * and the lower prio task should be moved to another CPU.
1255	 * Even though this will probably make the lower prio task
 1256	 * lose its cache, we do not want to bounce a higher-priority task
1257	 * around just because it gave up its CPU, perhaps for a
1258	 * lock?
1259	 *
1260	 * For equal prio tasks, we just let the scheduler sort it out.
1261	 *
1262	 * Otherwise, just let it ride on the affined RQ and the
1263	 * post-schedule router will push the preempted task away
1264	 *
1265	 * This test is optimistic, if we get it wrong the load-balancer
1266	 * will have to sort it out.
1267	 */
1268	if (curr && unlikely(rt_task(curr)) &&
1269	    (curr->nr_cpus_allowed < 2 ||
1270	     curr->prio <= p->prio) &&
1271	    (p->nr_cpus_allowed > 1)) {
1272		int target = find_lowest_rq(p);
1273
1274		if (target != -1)
1275			cpu = target;
1276	}
1277	rcu_read_unlock();
1278
1279out:
1280	return cpu;
1281}
1282
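/*
 * An equal-priority task just woke up on this runqueue.  If the wakee
 * cannot run anywhere else but current can, requeue the wakee at the head
 * and reschedule so the push logic can move current out of the way.
 */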
1283static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1284{
1285	if (rq->curr->nr_cpus_allowed == 1)
1286		return;
1287
1288	if (p->nr_cpus_allowed != 1
1289	    && cpupri_find(&rq->rd->cpupri, p, NULL))
1290		return;
1291
1292	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1293		return;
1294
1295	/*
 1296	 * There appear to be other CPUs that can accept
 1297	 * current and none to run 'p', so let's reschedule
1298	 * to try and push current away:
1299	 */
1300	requeue_task_rt(rq, p, 1);
1301	resched_task(rq->curr);
1302}
1303
1304#endif /* CONFIG_SMP */
1305
1306/*
1307 * Preempt the current task with a newly woken task if needed:
1308 */
1309static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1310{
1311	if (p->prio < rq->curr->prio) {
1312		resched_task(rq->curr);
1313		return;
1314	}
1315
1316#ifdef CONFIG_SMP
1317	/*
1318	 * If:
1319	 *
1320	 * - the newly woken task is of equal priority to the current task
1321	 * - the newly woken task is non-migratable while current is migratable
1322	 * - current will be preempted on the next reschedule
1323	 *
1324	 * we should check to see if current can readily move to a different
1325	 * cpu.  If so, we will reschedule to allow the push logic to try
1326	 * to move current somewhere else, making room for our non-migratable
1327	 * task.
1328	 */
1329	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1330		check_preempt_equal_prio(rq, p);
1331#endif
1332}
1333
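/*
 * Find the highest-priority non-empty queue via the priority bitmap and
 * return the first entity queued on it.
 */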
1334static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1335						   struct rt_rq *rt_rq)
1336{
1337	struct rt_prio_array *array = &rt_rq->active;
1338	struct sched_rt_entity *next = NULL;
1339	struct list_head *queue;
1340	int idx;
1341
1342	idx = sched_find_first_bit(array->bitmap);
1343	BUG_ON(idx >= MAX_RT_PRIO);
1344
1345	queue = array->queue + idx;
1346	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1347
1348	return next;
1349}
1350
1351static struct task_struct *_pick_next_task_rt(struct rq *rq)
1352{
1353	struct sched_rt_entity *rt_se;
1354	struct task_struct *p;
1355	struct rt_rq *rt_rq;
1356
1357	rt_rq = &rq->rt;
1358
1359	if (!rt_rq->rt_nr_running)
1360		return NULL;
1361
1362	if (rt_rq_throttled(rt_rq))
1363		return NULL;
1364
1365	do {
1366		rt_se = pick_next_rt_entity(rq, rt_rq);
1367		BUG_ON(!rt_se);
1368		rt_rq = group_rt_rq(rt_se);
1369	} while (rt_rq);
1370
1371	p = rt_task_of(rt_se);
1372	p->se.exec_start = rq->clock_task;
1373
1374	return p;
1375}
1376
1377static struct task_struct *pick_next_task_rt(struct rq *rq)
1378{
1379	struct task_struct *p = _pick_next_task_rt(rq);
1380
1381	/* The running task is never eligible for pushing */
1382	if (p)
1383		dequeue_pushable_task(rq, p);
1384
1385#ifdef CONFIG_SMP
1386	/*
1387	 * We detect this state here so that we can avoid taking the RQ
1388	 * lock again later if there is no need to push
1389	 */
1390	rq->post_schedule = has_pushable_tasks(rq);
1391#endif
1392
1393	return p;
1394}
1395
1396static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1397{
1398	update_curr_rt(rq);
1399
1400	/*
1401	 * The previous task needs to be made eligible for pushing
1402	 * if it is still active
1403	 */
1404	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1405		enqueue_pushable_task(rq, p);
1406}
1407
1408#ifdef CONFIG_SMP
1409
1410/* Only try algorithms three times */
1411#define RT_MAX_TRIES 3
1412
1413static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1414{
1415	if (!task_running(rq, p) &&
1416	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
1417	    (p->nr_cpus_allowed > 1))
1418		return 1;
1419	return 0;
1420}
1421
1422/* Return the second highest RT task, NULL otherwise */
1423static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
1424{
1425	struct task_struct *next = NULL;
1426	struct sched_rt_entity *rt_se;
1427	struct rt_prio_array *array;
1428	struct rt_rq *rt_rq;
1429	int idx;
1430
1431	for_each_leaf_rt_rq(rt_rq, rq) {
1432		array = &rt_rq->active;
1433		idx = sched_find_first_bit(array->bitmap);
1434next_idx:
1435		if (idx >= MAX_RT_PRIO)
1436			continue;
1437		if (next && next->prio <= idx)
1438			continue;
1439		list_for_each_entry(rt_se, array->queue + idx, run_list) {
1440			struct task_struct *p;
1441
1442			if (!rt_entity_is_task(rt_se))
1443				continue;
1444
1445			p = rt_task_of(rt_se);
1446			if (pick_rt_task(rq, p, cpu)) {
1447				next = p;
1448				break;
1449			}
1450		}
1451		if (!next) {
1452			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
1453			goto next_idx;
1454		}
1455	}
1456
1457	return next;
1458}
1459
1460static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1461
1462static int find_lowest_rq(struct task_struct *task)
1463{
1464	struct sched_domain *sd;
1465	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1466	int this_cpu = smp_processor_id();
1467	int cpu      = task_cpu(task);
1468
1469	/* Make sure the mask is initialized first */
1470	if (unlikely(!lowest_mask))
1471		return -1;
1472
1473	if (task->nr_cpus_allowed == 1)
1474		return -1; /* No other targets possible */
1475
1476	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1477		return -1; /* No targets found */
1478
1479	/*
1480	 * At this point we have built a mask of cpus representing the
1481	 * lowest priority tasks in the system.  Now we want to elect
1482	 * the best one based on our affinity and topology.
1483	 *
1484	 * We prioritize the last cpu that the task executed on since
1485	 * it is most likely cache-hot in that location.
1486	 */
1487	if (cpumask_test_cpu(cpu, lowest_mask))
1488		return cpu;
1489
1490	/*
1491	 * Otherwise, we consult the sched_domains span maps to figure
1492	 * out which cpu is logically closest to our hot cache data.
1493	 */
1494	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1495		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1496
1497	rcu_read_lock();
1498	for_each_domain(cpu, sd) {
1499		if (sd->flags & SD_WAKE_AFFINE) {
1500			int best_cpu;
1501
1502			/*
1503			 * "this_cpu" is cheaper to preempt than a
1504			 * remote processor.
1505			 */
1506			if (this_cpu != -1 &&
1507			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1508				rcu_read_unlock();
1509				return this_cpu;
1510			}
1511
1512			best_cpu = cpumask_first_and(lowest_mask,
1513						     sched_domain_span(sd));
1514			if (best_cpu < nr_cpu_ids) {
1515				rcu_read_unlock();
1516				return best_cpu;
1517			}
1518		}
1519	}
1520	rcu_read_unlock();
1521
1522	/*
1523	 * And finally, if there were no matches within the domains
1524	 * just give the caller *something* to work with from the compatible
1525	 * locations.
1526	 */
1527	if (this_cpu != -1)
1528		return this_cpu;
1529
1530	cpu = cpumask_any(lowest_mask);
1531	if (cpu < nr_cpu_ids)
1532		return cpu;
1533	return -1;
1534}
1535
1536/* Will lock the rq it finds */
1537static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1538{
1539	struct rq *lowest_rq = NULL;
1540	int tries;
1541	int cpu;
1542
1543	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1544		cpu = find_lowest_rq(task);
1545
1546		if ((cpu == -1) || (cpu == rq->cpu))
1547			break;
1548
1549		lowest_rq = cpu_rq(cpu);
1550
1551		/* if the prio of this runqueue changed, try again */
1552		if (double_lock_balance(rq, lowest_rq)) {
1553			/*
1554			 * We had to unlock the run queue. In
 1555			 * the meantime, the task could have
1556			 * migrated already or had its affinity changed.
1557			 * Also make sure that it wasn't scheduled on its rq.
1558			 */
1559			if (unlikely(task_rq(task) != rq ||
1560				     !cpumask_test_cpu(lowest_rq->cpu,
1561						       tsk_cpus_allowed(task)) ||
1562				     task_running(rq, task) ||
1563				     !task->on_rq)) {
1564
1565				double_unlock_balance(rq, lowest_rq);
1566				lowest_rq = NULL;
1567				break;
1568			}
1569		}
1570
1571		/* If this rq is still suitable use it. */
1572		if (lowest_rq->rt.highest_prio.curr > task->prio)
1573			break;
1574
1575		/* try again */
1576		double_unlock_balance(rq, lowest_rq);
1577		lowest_rq = NULL;
1578	}
1579
1580	return lowest_rq;
1581}
1582
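/*
 * Peek at the best candidate for pushing: the highest-priority queued but
 * not running task that may run on more than one CPU, i.e. the first entry
 * of the prio-ordered pushable_tasks plist.
 */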
1583static struct task_struct *pick_next_pushable_task(struct rq *rq)
1584{
1585	struct task_struct *p;
1586
1587	if (!has_pushable_tasks(rq))
1588		return NULL;
1589
1590	p = plist_first_entry(&rq->rt.pushable_tasks,
1591			      struct task_struct, pushable_tasks);
1592
1593	BUG_ON(rq->cpu != task_cpu(p));
1594	BUG_ON(task_current(rq, p));
1595	BUG_ON(p->nr_cpus_allowed <= 1);
1596
1597	BUG_ON(!p->on_rq);
1598	BUG_ON(!rt_task(p));
1599
1600	return p;
1601}
1602
1603/*
1604 * If the current CPU has more than one RT task, see if the non
1605 * running task can migrate over to a CPU that is running a task
1606 * of lesser priority.
1607 */
1608static int push_rt_task(struct rq *rq)
1609{
1610	struct task_struct *next_task;
1611	struct rq *lowest_rq;
1612	int ret = 0;
1613
1614	if (!rq->rt.overloaded)
1615		return 0;
1616
1617	next_task = pick_next_pushable_task(rq);
1618	if (!next_task)
1619		return 0;
1620
1621#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 1622	if (unlikely(task_running(rq, next_task)))
 1623		return 0;
1624#endif
1625
1626retry:
1627	if (unlikely(next_task == rq->curr)) {
1628		WARN_ON(1);
1629		return 0;
1630	}
1631
1632	/*
 1633	 * It's possible that the next_task slipped in with a
 1634	 * higher priority than current. If that's the case
1635	 * just reschedule current.
1636	 */
1637	if (unlikely(next_task->prio < rq->curr->prio)) {
1638		resched_task(rq->curr);
1639		return 0;
1640	}
1641
1642	/* We might release rq lock */
1643	get_task_struct(next_task);
1644
1645	/* find_lock_lowest_rq locks the rq if found */
1646	lowest_rq = find_lock_lowest_rq(next_task, rq);
1647	if (!lowest_rq) {
1648		struct task_struct *task;
1649		/*
1650		 * find_lock_lowest_rq releases rq->lock
1651		 * so it is possible that next_task has migrated.
1652		 *
1653		 * We need to make sure that the task is still on the same
1654		 * run-queue and is also still the next task eligible for
1655		 * pushing.
1656		 */
1657		task = pick_next_pushable_task(rq);
1658		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1659			/*
1660			 * The task hasn't migrated, and is still the next
1661			 * eligible task, but we failed to find a run-queue
1662			 * to push it to.  Do not retry in this case, since
1663			 * other cpus will pull from us when ready.
1664			 */
1665			goto out;
1666		}
1667
1668		if (!task)
1669			/* No more tasks, just exit */
1670			goto out;
1671
1672		/*
1673		 * Something has shifted, try again.
1674		 */
1675		put_task_struct(next_task);
1676		next_task = task;
1677		goto retry;
1678	}
1679
1680	deactivate_task(rq, next_task, 0);
1681	set_task_cpu(next_task, lowest_rq->cpu);
1682	activate_task(lowest_rq, next_task, 0);
1683	ret = 1;
1684
1685	resched_task(lowest_rq->curr);
1686
1687	double_unlock_balance(rq, lowest_rq);
1688
1689out:
1690	put_task_struct(next_task);
1691
1692	return ret;
1693}
1694
1695static void push_rt_tasks(struct rq *rq)
1696{
1697	/* push_rt_task will return true if it moved an RT */
1698	while (push_rt_task(rq))
1699		;
1700}
1701
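/*
 * Walk the runqueues in the root domain's overload mask and pull over the
 * highest-priority queued-but-not-running RT task that would preempt
 * whatever this CPU is about to run.  Returns 1 if something was pulled.
 */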
1702static int pull_rt_task(struct rq *this_rq)
1703{
1704	int this_cpu = this_rq->cpu, ret = 0, cpu;
1705	struct task_struct *p;
1706	struct rq *src_rq;
1707
1708	if (likely(!rt_overloaded(this_rq)))
1709		return 0;
1710
1711	for_each_cpu(cpu, this_rq->rd->rto_mask) {
1712		if (this_cpu == cpu)
1713			continue;
1714
1715		src_rq = cpu_rq(cpu);
1716
1717		/*
1718		 * Don't bother taking the src_rq->lock if the next highest
1719		 * task is known to be lower-priority than our current task.
1720		 * This may look racy, but if this value is about to go
1721		 * logically higher, the src_rq will push this task away.
 1722		 * And if it's going logically lower, we do not care
1723		 */
1724		if (src_rq->rt.highest_prio.next >=
1725		    this_rq->rt.highest_prio.curr)
1726			continue;
1727
1728		/*
1729		 * We can potentially drop this_rq's lock in
1730		 * double_lock_balance, and another CPU could
1731		 * alter this_rq
1732		 */
1733		double_lock_balance(this_rq, src_rq);
1734
1735		/*
1736		 * Are there still pullable RT tasks?
1737		 */
1738		if (src_rq->rt.rt_nr_running <= 1)
1739			goto skip;
1740
1741		p = pick_next_highest_task_rt(src_rq, this_cpu);
1742
1743		/*
1744		 * Do we have an RT task that preempts
1745		 * the to-be-scheduled task?
1746		 */
1747		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1748			WARN_ON(p == src_rq->curr);
1749			WARN_ON(!p->on_rq);
1750
1751			/*
1752			 * There's a chance that p is higher in priority
1753			 * than what's currently running on its cpu.
 1754			 * This is just that p is waking up and hasn't
1755			 * had a chance to schedule. We only pull
1756			 * p if it is lower in priority than the
1757			 * current task on the run queue
1758			 */
1759			if (p->prio < src_rq->curr->prio)
1760				goto skip;
1761
1762			ret = 1;
1763
1764			deactivate_task(src_rq, p, 0);
1765			set_task_cpu(p, this_cpu);
1766			activate_task(this_rq, p, 0);
1767			/*
1768			 * We continue with the search, just in
1769			 * case there's an even higher prio task
1770			 * in another runqueue. (low likelihood
1771			 * but possible)
1772			 */
1773		}
1774skip:
1775		double_unlock_balance(this_rq, src_rq);
1776	}
1777
1778	return ret;
1779}
1780
1781static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1782{
1783	/* Try to pull RT tasks here if we lower this rq's prio */
1784	if (rq->rt.highest_prio.curr > prev->prio)
1785		pull_rt_task(rq);
1786}
1787
1788static void post_schedule_rt(struct rq *rq)
1789{
1790	push_rt_tasks(rq);
1791}
1792
1793/*
1794 * If we are not running and we are not going to reschedule soon, we should
1795 * try to push tasks away now
1796 */
1797static void task_woken_rt(struct rq *rq, struct task_struct *p)
1798{
1799	if (!task_running(rq, p) &&
1800	    !test_tsk_need_resched(rq->curr) &&
1801	    has_pushable_tasks(rq) &&
1802	    p->nr_cpus_allowed > 1 &&
1803	    rt_task(rq->curr) &&
1804	    (rq->curr->nr_cpus_allowed < 2 ||
1805	     rq->curr->prio <= p->prio))
1806		push_rt_tasks(rq);
1807}
1808
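/*
 * p's affinity mask is changing.  If that flips p between "pinned" and
 * "migratable", fix up rt_nr_migratory, the pushable list and the overload
 * state of p's runqueue.
 */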
1809static void set_cpus_allowed_rt(struct task_struct *p,
1810				const struct cpumask *new_mask)
1811{
1812	struct rq *rq;
1813	int weight;
1814
1815	BUG_ON(!rt_task(p));
1816
1817	if (!p->on_rq)
1818		return;
1819
1820	weight = cpumask_weight(new_mask);
1821
1822	/*
 1823	 * Only update if the process changes whether or not it
 1824	 * can migrate.
1825	 */
1826	if ((p->nr_cpus_allowed > 1) == (weight > 1))
1827		return;
1828
1829	rq = task_rq(p);
1830
1831	/*
1832	 * The process used to be able to migrate OR it can now migrate
1833	 */
1834	if (weight <= 1) {
1835		if (!task_current(rq, p))
1836			dequeue_pushable_task(rq, p);
1837		BUG_ON(!rq->rt.rt_nr_migratory);
1838		rq->rt.rt_nr_migratory--;
1839	} else {
1840		if (!task_current(rq, p))
1841			enqueue_pushable_task(rq, p);
1842		rq->rt.rt_nr_migratory++;
1843	}
1844
1845	update_rt_migration(&rq->rt);
1846}
1847
1848/* Assumes rq->lock is held */
1849static void rq_online_rt(struct rq *rq)
1850{
1851	if (rq->rt.overloaded)
1852		rt_set_overload(rq);
1853
1854	__enable_runtime(rq);
1855
1856	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1857}
1858
1859/* Assumes rq->lock is held */
1860static void rq_offline_rt(struct rq *rq)
1861{
1862	if (rq->rt.overloaded)
1863		rt_clear_overload(rq);
1864
1865	__disable_runtime(rq);
1866
1867	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1868}
1869
1870/*
 1871 * When we switch away from the rt queue, we may be in a position
 1872 * where we want to pull RT tasks from other runqueues.
1873 */
1874static void switched_from_rt(struct rq *rq, struct task_struct *p)
1875{
1876	/*
1877	 * If there are other RT tasks then we will reschedule
1878	 * and the scheduling of the other RT tasks will handle
1879	 * the balancing. But if we are the last RT task
1880	 * we may need to handle the pulling of RT tasks
1881	 * now.
1882	 */
1883	if (p->on_rq && !rq->rt.rt_nr_running)
1884		pull_rt_task(rq);
1885}
1886
1887void init_sched_rt_class(void)
1888{
1889	unsigned int i;
1890
1891	for_each_possible_cpu(i) {
1892		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1893					GFP_KERNEL, cpu_to_node(i));
1894	}
1895}
1896#endif /* CONFIG_SMP */
1897
1898/*
1899 * When switching a task to RT, we may overload the runqueue
1900 * with RT tasks. In this case we try to push them off to
1901 * other runqueues.
1902 */
1903static void switched_to_rt(struct rq *rq, struct task_struct *p)
1904{
1905	int check_resched = 1;
1906
1907	/*
1908	 * If we are already running, then there's nothing
1909	 * that needs to be done. But if we are not running
1910	 * we may need to preempt the current running task.
1911	 * If that current running task is also an RT task
1912	 * then see if we can move to another run queue.
1913	 */
1914	if (p->on_rq && rq->curr != p) {
1915#ifdef CONFIG_SMP
1916		if (rq->rt.overloaded && push_rt_task(rq) &&
1917		    /* Don't resched if we changed runqueues */
1918		    rq != task_rq(p))
1919			check_resched = 0;
1920#endif /* CONFIG_SMP */
1921		if (check_resched && p->prio < rq->curr->prio)
1922			resched_task(rq->curr);
1923	}
1924}
1925
1926/*
1927 * Priority of the task has changed. This may cause
1928 * us to initiate a push or pull.
1929 */
1930static void
1931prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1932{
1933	if (!p->on_rq)
1934		return;
1935
1936	if (rq->curr == p) {
1937#ifdef CONFIG_SMP
1938		/*
1939		 * If our priority decreases while running, we
1940		 * may need to pull tasks to this runqueue.
1941		 */
1942		if (oldprio < p->prio)
1943			pull_rt_task(rq);
1944		/*
1945		 * If there's a higher priority task waiting to run
1946		 * then reschedule. Note, the above pull_rt_task
1947		 * can release the rq lock and p could migrate.
1948		 * Only reschedule if p is still on the same runqueue.
1949		 */
1950		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1951			resched_task(p);
1952#else
1953		/* For UP simply resched on drop of prio */
1954		if (oldprio < p->prio)
1955			resched_task(p);
1956#endif /* CONFIG_SMP */
1957	} else {
1958		/*
 1959		 * This task is not running, but if its priority is
 1960		 * higher than the current running task's
1961		 * then reschedule.
1962		 */
1963		if (p->prio < rq->curr->prio)
1964			resched_task(rq->curr);
1965	}
1966}
1967
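/*
 * Enforce RLIMIT_RTTIME: count the ticks this task has spent running as an
 * RT task and, once the soft limit is exceeded, arm the posix-cpu-timer
 * expiry so the limit signal gets delivered.
 */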
1968static void watchdog(struct rq *rq, struct task_struct *p)
1969{
1970	unsigned long soft, hard;
1971
 1972	/* max may change after cur was read; this will be fixed next tick */
1973	soft = task_rlimit(p, RLIMIT_RTTIME);
1974	hard = task_rlimit_max(p, RLIMIT_RTTIME);
1975
1976	if (soft != RLIM_INFINITY) {
1977		unsigned long next;
1978
1979		p->rt.timeout++;
1980		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1981		if (p->rt.timeout > next)
1982			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1983	}
1984}
1985
1986static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1987{
1988	struct sched_rt_entity *rt_se = &p->rt;
1989
1990	update_curr_rt(rq);
1991
1992	watchdog(rq, p);
1993
1994	/*
1995	 * RR tasks need a special form of timeslice management.
1996	 * FIFO tasks have no timeslices.
1997	 */
1998	if (p->policy != SCHED_RR)
1999		return;
2000
2001	if (--p->rt.time_slice)
2002		return;
2003
2004	p->rt.time_slice = RR_TIMESLICE;
2005
2006	/*
 2007	 * Requeue to the end of queue if we (and all of our ancestors) are not
 2008	 * the only element on the queue
2009	 */
2010	for_each_sched_rt_entity(rt_se) {
2011		if (rt_se->run_list.prev != rt_se->run_list.next) {
2012			requeue_task_rt(rq, p, 0);
2013			set_tsk_need_resched(p);
2014			return;
2015		}
2016	}
2017}
2018
2019static void set_curr_task_rt(struct rq *rq)
2020{
2021	struct task_struct *p = rq->curr;
2022
2023	p->se.exec_start = rq->clock_task;
2024
2025	/* The running task is never eligible for pushing */
2026	dequeue_pushable_task(rq, p);
2027}
2028
2029static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2030{
2031	/*
2032	 * Time slice is 0 for SCHED_FIFO tasks
2033	 */
2034	if (task->policy == SCHED_RR)
2035		return RR_TIMESLICE;
2036	else
2037		return 0;
2038}
2039
2040const struct sched_class rt_sched_class = {
2041	.next			= &fair_sched_class,
2042	.enqueue_task		= enqueue_task_rt,
2043	.dequeue_task		= dequeue_task_rt,
2044	.yield_task		= yield_task_rt,
2045
2046	.check_preempt_curr	= check_preempt_curr_rt,
2047
2048	.pick_next_task		= pick_next_task_rt,
2049	.put_prev_task		= put_prev_task_rt,
2050
2051#ifdef CONFIG_SMP
2052	.select_task_rq		= select_task_rq_rt,
2053
2054	.set_cpus_allowed       = set_cpus_allowed_rt,
2055	.rq_online              = rq_online_rt,
2056	.rq_offline             = rq_offline_rt,
2057	.pre_schedule		= pre_schedule_rt,
2058	.post_schedule		= post_schedule_rt,
2059	.task_woken		= task_woken_rt,
2060	.switched_from		= switched_from_rt,
2061#endif
2062
2063	.set_curr_task          = set_curr_task_rt,
2064	.task_tick		= task_tick_rt,
2065
2066	.get_rr_interval	= get_rr_interval_rt,
2067
2068	.prio_changed		= prio_changed_rt,
2069	.switched_to		= switched_to_rt,
2070};
2071
2072#ifdef CONFIG_SCHED_DEBUG
2073extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2074
2075void print_rt_stats(struct seq_file *m, int cpu)
2076{
2077	rt_rq_iter_t iter;
2078	struct rt_rq *rt_rq;
2079
2080	rcu_read_lock();
2081	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2082		print_rt_rq(m, cpu, rt_rq);
2083	rcu_read_unlock();
2084}
2085#endif /* CONFIG_SCHED_DEBUG */