Loading...
1/*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
5
6#include "sched.h"
7
8#include <linux/slab.h>
9
10int sched_rr_timeslice = RR_TIMESLICE;
11
12static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
13
14struct rt_bandwidth def_rt_bandwidth;
15
16static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
17{
18 struct rt_bandwidth *rt_b =
19 container_of(timer, struct rt_bandwidth, rt_period_timer);
20 ktime_t now;
21 int overrun;
22 int idle = 0;
23
24 for (;;) {
25 now = hrtimer_cb_get_time(timer);
26 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
27
28 if (!overrun)
29 break;
30
31 idle = do_sched_rt_period_timer(rt_b, overrun);
32 }
33
34 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
35}
36
37void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
38{
39 rt_b->rt_period = ns_to_ktime(period);
40 rt_b->rt_runtime = runtime;
41
42 raw_spin_lock_init(&rt_b->rt_runtime_lock);
43
44 hrtimer_init(&rt_b->rt_period_timer,
45 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
46 rt_b->rt_period_timer.function = sched_rt_period_timer;
47}
48
49static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
50{
51 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
52 return;
53
54 if (hrtimer_active(&rt_b->rt_period_timer))
55 return;
56
57 raw_spin_lock(&rt_b->rt_runtime_lock);
58 start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
59 raw_spin_unlock(&rt_b->rt_runtime_lock);
60}
61
62void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
63{
64 struct rt_prio_array *array;
65 int i;
66
67 array = &rt_rq->active;
68 for (i = 0; i < MAX_RT_PRIO; i++) {
69 INIT_LIST_HEAD(array->queue + i);
70 __clear_bit(i, array->bitmap);
71 }
72 /* delimiter for bitsearch: */
73 __set_bit(MAX_RT_PRIO, array->bitmap);
74
75#if defined CONFIG_SMP
76 rt_rq->highest_prio.curr = MAX_RT_PRIO;
77 rt_rq->highest_prio.next = MAX_RT_PRIO;
78 rt_rq->rt_nr_migratory = 0;
79 rt_rq->overloaded = 0;
80 plist_head_init(&rt_rq->pushable_tasks);
81#endif
82
83 rt_rq->rt_time = 0;
84 rt_rq->rt_throttled = 0;
85 rt_rq->rt_runtime = 0;
86 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
87}
88
89#ifdef CONFIG_RT_GROUP_SCHED
90static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
91{
92 hrtimer_cancel(&rt_b->rt_period_timer);
93}
94
95#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
96
97static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
98{
99#ifdef CONFIG_SCHED_DEBUG
100 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
101#endif
102 return container_of(rt_se, struct task_struct, rt);
103}
104
105static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
106{
107 return rt_rq->rq;
108}
109
110static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
111{
112 return rt_se->rt_rq;
113}
114
115void free_rt_sched_group(struct task_group *tg)
116{
117 int i;
118
119 if (tg->rt_se)
120 destroy_rt_bandwidth(&tg->rt_bandwidth);
121
122 for_each_possible_cpu(i) {
123 if (tg->rt_rq)
124 kfree(tg->rt_rq[i]);
125 if (tg->rt_se)
126 kfree(tg->rt_se[i]);
127 }
128
129 kfree(tg->rt_rq);
130 kfree(tg->rt_se);
131}
132
133void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
134 struct sched_rt_entity *rt_se, int cpu,
135 struct sched_rt_entity *parent)
136{
137 struct rq *rq = cpu_rq(cpu);
138
139 rt_rq->highest_prio.curr = MAX_RT_PRIO;
140 rt_rq->rt_nr_boosted = 0;
141 rt_rq->rq = rq;
142 rt_rq->tg = tg;
143
144 tg->rt_rq[cpu] = rt_rq;
145 tg->rt_se[cpu] = rt_se;
146
147 if (!rt_se)
148 return;
149
150 if (!parent)
151 rt_se->rt_rq = &rq->rt;
152 else
153 rt_se->rt_rq = parent->my_q;
154
155 rt_se->my_q = rt_rq;
156 rt_se->parent = parent;
157 INIT_LIST_HEAD(&rt_se->run_list);
158}
159
160int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
161{
162 struct rt_rq *rt_rq;
163 struct sched_rt_entity *rt_se;
164 int i;
165
166 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
167 if (!tg->rt_rq)
168 goto err;
169 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
170 if (!tg->rt_se)
171 goto err;
172
173 init_rt_bandwidth(&tg->rt_bandwidth,
174 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
175
176 for_each_possible_cpu(i) {
177 rt_rq = kzalloc_node(sizeof(struct rt_rq),
178 GFP_KERNEL, cpu_to_node(i));
179 if (!rt_rq)
180 goto err;
181
182 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
183 GFP_KERNEL, cpu_to_node(i));
184 if (!rt_se)
185 goto err_free_rq;
186
187 init_rt_rq(rt_rq, cpu_rq(i));
188 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
189 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
190 }
191
192 return 1;
193
194err_free_rq:
195 kfree(rt_rq);
196err:
197 return 0;
198}
199
200#else /* CONFIG_RT_GROUP_SCHED */
201
202#define rt_entity_is_task(rt_se) (1)
203
204static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
205{
206 return container_of(rt_se, struct task_struct, rt);
207}
208
209static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
210{
211 return container_of(rt_rq, struct rq, rt);
212}
213
214static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
215{
216 struct task_struct *p = rt_task_of(rt_se);
217 struct rq *rq = task_rq(p);
218
219 return &rq->rt;
220}
221
222void free_rt_sched_group(struct task_group *tg) { }
223
224int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
225{
226 return 1;
227}
228#endif /* CONFIG_RT_GROUP_SCHED */
229
230#ifdef CONFIG_SMP
231
232static int pull_rt_task(struct rq *this_rq);
233
234static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
235{
236 /* Try to pull RT tasks here if we lower this rq's prio */
237 return rq->rt.highest_prio.curr > prev->prio;
238}
239
240static inline int rt_overloaded(struct rq *rq)
241{
242 return atomic_read(&rq->rd->rto_count);
243}
244
245static inline void rt_set_overload(struct rq *rq)
246{
247 if (!rq->online)
248 return;
249
250 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
251 /*
252 * Make sure the mask is visible before we set
253 * the overload count. That is checked to determine
254 * if we should look at the mask. It would be a shame
255 * if we looked at the mask, but the mask was not
256 * updated yet.
257 *
258 * Matched by the barrier in pull_rt_task().
259 */
260 smp_wmb();
261 atomic_inc(&rq->rd->rto_count);
262}
263
264static inline void rt_clear_overload(struct rq *rq)
265{
266 if (!rq->online)
267 return;
268
269 /* the order here really doesn't matter */
270 atomic_dec(&rq->rd->rto_count);
271 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
272}
273
274static void update_rt_migration(struct rt_rq *rt_rq)
275{
276 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
277 if (!rt_rq->overloaded) {
278 rt_set_overload(rq_of_rt_rq(rt_rq));
279 rt_rq->overloaded = 1;
280 }
281 } else if (rt_rq->overloaded) {
282 rt_clear_overload(rq_of_rt_rq(rt_rq));
283 rt_rq->overloaded = 0;
284 }
285}
286
287static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
288{
289 struct task_struct *p;
290
291 if (!rt_entity_is_task(rt_se))
292 return;
293
294 p = rt_task_of(rt_se);
295 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
296
297 rt_rq->rt_nr_total++;
298 if (p->nr_cpus_allowed > 1)
299 rt_rq->rt_nr_migratory++;
300
301 update_rt_migration(rt_rq);
302}
303
304static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
305{
306 struct task_struct *p;
307
308 if (!rt_entity_is_task(rt_se))
309 return;
310
311 p = rt_task_of(rt_se);
312 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
313
314 rt_rq->rt_nr_total--;
315 if (p->nr_cpus_allowed > 1)
316 rt_rq->rt_nr_migratory--;
317
318 update_rt_migration(rt_rq);
319}
320
321static inline int has_pushable_tasks(struct rq *rq)
322{
323 return !plist_head_empty(&rq->rt.pushable_tasks);
324}
325
326static inline void set_post_schedule(struct rq *rq)
327{
328 /*
329 * We detect this state here so that we can avoid taking the RQ
330 * lock again later if there is no need to push
331 */
332 rq->post_schedule = has_pushable_tasks(rq);
333}
334
335static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
336{
337 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
338 plist_node_init(&p->pushable_tasks, p->prio);
339 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
340
341 /* Update the highest prio pushable task */
342 if (p->prio < rq->rt.highest_prio.next)
343 rq->rt.highest_prio.next = p->prio;
344}
345
346static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
347{
348 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
349
350 /* Update the new highest prio pushable task */
351 if (has_pushable_tasks(rq)) {
352 p = plist_first_entry(&rq->rt.pushable_tasks,
353 struct task_struct, pushable_tasks);
354 rq->rt.highest_prio.next = p->prio;
355 } else
356 rq->rt.highest_prio.next = MAX_RT_PRIO;
357}
358
359#else
360
361static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
362{
363}
364
365static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
366{
367}
368
369static inline
370void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
371{
372}
373
374static inline
375void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
376{
377}
378
379static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
380{
381 return false;
382}
383
384static inline int pull_rt_task(struct rq *this_rq)
385{
386 return 0;
387}
388
389static inline void set_post_schedule(struct rq *rq)
390{
391}
392#endif /* CONFIG_SMP */
393
394static inline int on_rt_rq(struct sched_rt_entity *rt_se)
395{
396 return !list_empty(&rt_se->run_list);
397}
398
399#ifdef CONFIG_RT_GROUP_SCHED
400
401static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
402{
403 if (!rt_rq->tg)
404 return RUNTIME_INF;
405
406 return rt_rq->rt_runtime;
407}
408
409static inline u64 sched_rt_period(struct rt_rq *rt_rq)
410{
411 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
412}
413
414typedef struct task_group *rt_rq_iter_t;
415
416static inline struct task_group *next_task_group(struct task_group *tg)
417{
418 do {
419 tg = list_entry_rcu(tg->list.next,
420 typeof(struct task_group), list);
421 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
422
423 if (&tg->list == &task_groups)
424 tg = NULL;
425
426 return tg;
427}
428
429#define for_each_rt_rq(rt_rq, iter, rq) \
430 for (iter = container_of(&task_groups, typeof(*iter), list); \
431 (iter = next_task_group(iter)) && \
432 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
433
434#define for_each_sched_rt_entity(rt_se) \
435 for (; rt_se; rt_se = rt_se->parent)
436
437static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
438{
439 return rt_se->my_q;
440}
441
442static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
443static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
444
445static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
446{
447 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
448 struct sched_rt_entity *rt_se;
449
450 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
451
452 rt_se = rt_rq->tg->rt_se[cpu];
453
454 if (rt_rq->rt_nr_running) {
455 if (rt_se && !on_rt_rq(rt_se))
456 enqueue_rt_entity(rt_se, false);
457 if (rt_rq->highest_prio.curr < curr->prio)
458 resched_task(curr);
459 }
460}
461
462static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
463{
464 struct sched_rt_entity *rt_se;
465 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
466
467 rt_se = rt_rq->tg->rt_se[cpu];
468
469 if (rt_se && on_rt_rq(rt_se))
470 dequeue_rt_entity(rt_se);
471}
472
473static int rt_se_boosted(struct sched_rt_entity *rt_se)
474{
475 struct rt_rq *rt_rq = group_rt_rq(rt_se);
476 struct task_struct *p;
477
478 if (rt_rq)
479 return !!rt_rq->rt_nr_boosted;
480
481 p = rt_task_of(rt_se);
482 return p->prio != p->normal_prio;
483}
484
485#ifdef CONFIG_SMP
486static inline const struct cpumask *sched_rt_period_mask(void)
487{
488 return this_rq()->rd->span;
489}
490#else
491static inline const struct cpumask *sched_rt_period_mask(void)
492{
493 return cpu_online_mask;
494}
495#endif
496
497static inline
498struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
499{
500 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
501}
502
503static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
504{
505 return &rt_rq->tg->rt_bandwidth;
506}
507
508#else /* !CONFIG_RT_GROUP_SCHED */
509
510static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
511{
512 return rt_rq->rt_runtime;
513}
514
515static inline u64 sched_rt_period(struct rt_rq *rt_rq)
516{
517 return ktime_to_ns(def_rt_bandwidth.rt_period);
518}
519
520typedef struct rt_rq *rt_rq_iter_t;
521
522#define for_each_rt_rq(rt_rq, iter, rq) \
523 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
524
525#define for_each_sched_rt_entity(rt_se) \
526 for (; rt_se; rt_se = NULL)
527
528static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
529{
530 return NULL;
531}
532
533static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
534{
535 if (rt_rq->rt_nr_running)
536 resched_task(rq_of_rt_rq(rt_rq)->curr);
537}
538
539static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
540{
541}
542
543static inline const struct cpumask *sched_rt_period_mask(void)
544{
545 return cpu_online_mask;
546}
547
548static inline
549struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
550{
551 return &cpu_rq(cpu)->rt;
552}
553
554static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
555{
556 return &def_rt_bandwidth;
557}
558
559#endif /* CONFIG_RT_GROUP_SCHED */
560
561bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
562{
563 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
564
565 return (hrtimer_active(&rt_b->rt_period_timer) ||
566 rt_rq->rt_time < rt_b->rt_runtime);
567}
568
569#ifdef CONFIG_SMP
570/*
571 * We ran out of runtime, see if we can borrow some from our neighbours.
572 */
573static int do_balance_runtime(struct rt_rq *rt_rq)
574{
575 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
576 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
577 int i, weight, more = 0;
578 u64 rt_period;
579
580 weight = cpumask_weight(rd->span);
581
582 raw_spin_lock(&rt_b->rt_runtime_lock);
583 rt_period = ktime_to_ns(rt_b->rt_period);
584 for_each_cpu(i, rd->span) {
585 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
586 s64 diff;
587
588 if (iter == rt_rq)
589 continue;
590
591 raw_spin_lock(&iter->rt_runtime_lock);
592 /*
593 * Either all rqs have inf runtime and there's nothing to steal
594 * or __disable_runtime() below sets a specific rq to inf to
595 * indicate its been disabled and disalow stealing.
596 */
597 if (iter->rt_runtime == RUNTIME_INF)
598 goto next;
599
600 /*
601 * From runqueues with spare time, take 1/n part of their
602 * spare time, but no more than our period.
603 */
604 diff = iter->rt_runtime - iter->rt_time;
605 if (diff > 0) {
606 diff = div_u64((u64)diff, weight);
607 if (rt_rq->rt_runtime + diff > rt_period)
608 diff = rt_period - rt_rq->rt_runtime;
609 iter->rt_runtime -= diff;
610 rt_rq->rt_runtime += diff;
611 more = 1;
612 if (rt_rq->rt_runtime == rt_period) {
613 raw_spin_unlock(&iter->rt_runtime_lock);
614 break;
615 }
616 }
617next:
618 raw_spin_unlock(&iter->rt_runtime_lock);
619 }
620 raw_spin_unlock(&rt_b->rt_runtime_lock);
621
622 return more;
623}
624
625/*
626 * Ensure this RQ takes back all the runtime it lend to its neighbours.
627 */
628static void __disable_runtime(struct rq *rq)
629{
630 struct root_domain *rd = rq->rd;
631 rt_rq_iter_t iter;
632 struct rt_rq *rt_rq;
633
634 if (unlikely(!scheduler_running))
635 return;
636
637 for_each_rt_rq(rt_rq, iter, rq) {
638 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
639 s64 want;
640 int i;
641
642 raw_spin_lock(&rt_b->rt_runtime_lock);
643 raw_spin_lock(&rt_rq->rt_runtime_lock);
644 /*
645 * Either we're all inf and nobody needs to borrow, or we're
646 * already disabled and thus have nothing to do, or we have
647 * exactly the right amount of runtime to take out.
648 */
649 if (rt_rq->rt_runtime == RUNTIME_INF ||
650 rt_rq->rt_runtime == rt_b->rt_runtime)
651 goto balanced;
652 raw_spin_unlock(&rt_rq->rt_runtime_lock);
653
654 /*
655 * Calculate the difference between what we started out with
656 * and what we current have, that's the amount of runtime
657 * we lend and now have to reclaim.
658 */
659 want = rt_b->rt_runtime - rt_rq->rt_runtime;
660
661 /*
662 * Greedy reclaim, take back as much as we can.
663 */
664 for_each_cpu(i, rd->span) {
665 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
666 s64 diff;
667
668 /*
669 * Can't reclaim from ourselves or disabled runqueues.
670 */
671 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
672 continue;
673
674 raw_spin_lock(&iter->rt_runtime_lock);
675 if (want > 0) {
676 diff = min_t(s64, iter->rt_runtime, want);
677 iter->rt_runtime -= diff;
678 want -= diff;
679 } else {
680 iter->rt_runtime -= want;
681 want -= want;
682 }
683 raw_spin_unlock(&iter->rt_runtime_lock);
684
685 if (!want)
686 break;
687 }
688
689 raw_spin_lock(&rt_rq->rt_runtime_lock);
690 /*
691 * We cannot be left wanting - that would mean some runtime
692 * leaked out of the system.
693 */
694 BUG_ON(want);
695balanced:
696 /*
697 * Disable all the borrow logic by pretending we have inf
698 * runtime - in which case borrowing doesn't make sense.
699 */
700 rt_rq->rt_runtime = RUNTIME_INF;
701 rt_rq->rt_throttled = 0;
702 raw_spin_unlock(&rt_rq->rt_runtime_lock);
703 raw_spin_unlock(&rt_b->rt_runtime_lock);
704 }
705}
706
707static void __enable_runtime(struct rq *rq)
708{
709 rt_rq_iter_t iter;
710 struct rt_rq *rt_rq;
711
712 if (unlikely(!scheduler_running))
713 return;
714
715 /*
716 * Reset each runqueue's bandwidth settings
717 */
718 for_each_rt_rq(rt_rq, iter, rq) {
719 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
720
721 raw_spin_lock(&rt_b->rt_runtime_lock);
722 raw_spin_lock(&rt_rq->rt_runtime_lock);
723 rt_rq->rt_runtime = rt_b->rt_runtime;
724 rt_rq->rt_time = 0;
725 rt_rq->rt_throttled = 0;
726 raw_spin_unlock(&rt_rq->rt_runtime_lock);
727 raw_spin_unlock(&rt_b->rt_runtime_lock);
728 }
729}
730
731static int balance_runtime(struct rt_rq *rt_rq)
732{
733 int more = 0;
734
735 if (!sched_feat(RT_RUNTIME_SHARE))
736 return more;
737
738 if (rt_rq->rt_time > rt_rq->rt_runtime) {
739 raw_spin_unlock(&rt_rq->rt_runtime_lock);
740 more = do_balance_runtime(rt_rq);
741 raw_spin_lock(&rt_rq->rt_runtime_lock);
742 }
743
744 return more;
745}
746#else /* !CONFIG_SMP */
747static inline int balance_runtime(struct rt_rq *rt_rq)
748{
749 return 0;
750}
751#endif /* CONFIG_SMP */
752
753static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
754{
755 int i, idle = 1, throttled = 0;
756 const struct cpumask *span;
757
758 span = sched_rt_period_mask();
759#ifdef CONFIG_RT_GROUP_SCHED
760 /*
761 * FIXME: isolated CPUs should really leave the root task group,
762 * whether they are isolcpus or were isolated via cpusets, lest
763 * the timer run on a CPU which does not service all runqueues,
764 * potentially leaving other CPUs indefinitely throttled. If
765 * isolation is really required, the user will turn the throttle
766 * off to kill the perturbations it causes anyway. Meanwhile,
767 * this maintains functionality for boot and/or troubleshooting.
768 */
769 if (rt_b == &root_task_group.rt_bandwidth)
770 span = cpu_online_mask;
771#endif
772 for_each_cpu(i, span) {
773 int enqueue = 0;
774 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
775 struct rq *rq = rq_of_rt_rq(rt_rq);
776
777 raw_spin_lock(&rq->lock);
778 if (rt_rq->rt_time) {
779 u64 runtime;
780
781 raw_spin_lock(&rt_rq->rt_runtime_lock);
782 if (rt_rq->rt_throttled)
783 balance_runtime(rt_rq);
784 runtime = rt_rq->rt_runtime;
785 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
786 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
787 rt_rq->rt_throttled = 0;
788 enqueue = 1;
789
790 /*
791 * Force a clock update if the CPU was idle,
792 * lest wakeup -> unthrottle time accumulate.
793 */
794 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
795 rq->skip_clock_update = -1;
796 }
797 if (rt_rq->rt_time || rt_rq->rt_nr_running)
798 idle = 0;
799 raw_spin_unlock(&rt_rq->rt_runtime_lock);
800 } else if (rt_rq->rt_nr_running) {
801 idle = 0;
802 if (!rt_rq_throttled(rt_rq))
803 enqueue = 1;
804 }
805 if (rt_rq->rt_throttled)
806 throttled = 1;
807
808 if (enqueue)
809 sched_rt_rq_enqueue(rt_rq);
810 raw_spin_unlock(&rq->lock);
811 }
812
813 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
814 return 1;
815
816 return idle;
817}
818
819static inline int rt_se_prio(struct sched_rt_entity *rt_se)
820{
821#ifdef CONFIG_RT_GROUP_SCHED
822 struct rt_rq *rt_rq = group_rt_rq(rt_se);
823
824 if (rt_rq)
825 return rt_rq->highest_prio.curr;
826#endif
827
828 return rt_task_of(rt_se)->prio;
829}
830
831static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
832{
833 u64 runtime = sched_rt_runtime(rt_rq);
834
835 if (rt_rq->rt_throttled)
836 return rt_rq_throttled(rt_rq);
837
838 if (runtime >= sched_rt_period(rt_rq))
839 return 0;
840
841 balance_runtime(rt_rq);
842 runtime = sched_rt_runtime(rt_rq);
843 if (runtime == RUNTIME_INF)
844 return 0;
845
846 if (rt_rq->rt_time > runtime) {
847 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
848
849 /*
850 * Don't actually throttle groups that have no runtime assigned
851 * but accrue some time due to boosting.
852 */
853 if (likely(rt_b->rt_runtime)) {
854 static bool once = false;
855
856 rt_rq->rt_throttled = 1;
857
858 if (!once) {
859 once = true;
860 printk_sched("sched: RT throttling activated\n");
861 }
862 } else {
863 /*
864 * In case we did anyway, make it go away,
865 * replenishment is a joke, since it will replenish us
866 * with exactly 0 ns.
867 */
868 rt_rq->rt_time = 0;
869 }
870
871 if (rt_rq_throttled(rt_rq)) {
872 sched_rt_rq_dequeue(rt_rq);
873 return 1;
874 }
875 }
876
877 return 0;
878}
879
880/*
881 * Update the current task's runtime statistics. Skip current tasks that
882 * are not in our scheduling class.
883 */
884static void update_curr_rt(struct rq *rq)
885{
886 struct task_struct *curr = rq->curr;
887 struct sched_rt_entity *rt_se = &curr->rt;
888 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
889 u64 delta_exec;
890
891 if (curr->sched_class != &rt_sched_class)
892 return;
893
894 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
895 if (unlikely((s64)delta_exec <= 0))
896 return;
897
898 schedstat_set(curr->se.statistics.exec_max,
899 max(curr->se.statistics.exec_max, delta_exec));
900
901 curr->se.sum_exec_runtime += delta_exec;
902 account_group_exec_runtime(curr, delta_exec);
903
904 curr->se.exec_start = rq_clock_task(rq);
905 cpuacct_charge(curr, delta_exec);
906
907 sched_rt_avg_update(rq, delta_exec);
908
909 if (!rt_bandwidth_enabled())
910 return;
911
912 for_each_sched_rt_entity(rt_se) {
913 rt_rq = rt_rq_of_se(rt_se);
914
915 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
916 raw_spin_lock(&rt_rq->rt_runtime_lock);
917 rt_rq->rt_time += delta_exec;
918 if (sched_rt_runtime_exceeded(rt_rq))
919 resched_task(curr);
920 raw_spin_unlock(&rt_rq->rt_runtime_lock);
921 }
922 }
923}
924
925#if defined CONFIG_SMP
926
927static void
928inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
929{
930 struct rq *rq = rq_of_rt_rq(rt_rq);
931
932#ifdef CONFIG_RT_GROUP_SCHED
933 /*
934 * Change rq's cpupri only if rt_rq is the top queue.
935 */
936 if (&rq->rt != rt_rq)
937 return;
938#endif
939 if (rq->online && prio < prev_prio)
940 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
941}
942
943static void
944dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
945{
946 struct rq *rq = rq_of_rt_rq(rt_rq);
947
948#ifdef CONFIG_RT_GROUP_SCHED
949 /*
950 * Change rq's cpupri only if rt_rq is the top queue.
951 */
952 if (&rq->rt != rt_rq)
953 return;
954#endif
955 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
956 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
957}
958
959#else /* CONFIG_SMP */
960
961static inline
962void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
963static inline
964void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
965
966#endif /* CONFIG_SMP */
967
968#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
969static void
970inc_rt_prio(struct rt_rq *rt_rq, int prio)
971{
972 int prev_prio = rt_rq->highest_prio.curr;
973
974 if (prio < prev_prio)
975 rt_rq->highest_prio.curr = prio;
976
977 inc_rt_prio_smp(rt_rq, prio, prev_prio);
978}
979
980static void
981dec_rt_prio(struct rt_rq *rt_rq, int prio)
982{
983 int prev_prio = rt_rq->highest_prio.curr;
984
985 if (rt_rq->rt_nr_running) {
986
987 WARN_ON(prio < prev_prio);
988
989 /*
990 * This may have been our highest task, and therefore
991 * we may have some recomputation to do
992 */
993 if (prio == prev_prio) {
994 struct rt_prio_array *array = &rt_rq->active;
995
996 rt_rq->highest_prio.curr =
997 sched_find_first_bit(array->bitmap);
998 }
999
1000 } else
1001 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1002
1003 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1004}
1005
1006#else
1007
1008static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1009static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1010
1011#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1012
1013#ifdef CONFIG_RT_GROUP_SCHED
1014
1015static void
1016inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1017{
1018 if (rt_se_boosted(rt_se))
1019 rt_rq->rt_nr_boosted++;
1020
1021 if (rt_rq->tg)
1022 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1023}
1024
1025static void
1026dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1027{
1028 if (rt_se_boosted(rt_se))
1029 rt_rq->rt_nr_boosted--;
1030
1031 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1032}
1033
1034#else /* CONFIG_RT_GROUP_SCHED */
1035
1036static void
1037inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1038{
1039 start_rt_bandwidth(&def_rt_bandwidth);
1040}
1041
1042static inline
1043void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1044
1045#endif /* CONFIG_RT_GROUP_SCHED */
1046
1047static inline
1048void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1049{
1050 int prio = rt_se_prio(rt_se);
1051
1052 WARN_ON(!rt_prio(prio));
1053 rt_rq->rt_nr_running++;
1054
1055 inc_rt_prio(rt_rq, prio);
1056 inc_rt_migration(rt_se, rt_rq);
1057 inc_rt_group(rt_se, rt_rq);
1058}
1059
1060static inline
1061void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1062{
1063 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1064 WARN_ON(!rt_rq->rt_nr_running);
1065 rt_rq->rt_nr_running--;
1066
1067 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1068 dec_rt_migration(rt_se, rt_rq);
1069 dec_rt_group(rt_se, rt_rq);
1070}
1071
1072static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1073{
1074 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1075 struct rt_prio_array *array = &rt_rq->active;
1076 struct rt_rq *group_rq = group_rt_rq(rt_se);
1077 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1078
1079 /*
1080 * Don't enqueue the group if its throttled, or when empty.
1081 * The latter is a consequence of the former when a child group
1082 * get throttled and the current group doesn't have any other
1083 * active members.
1084 */
1085 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1086 return;
1087
1088 if (head)
1089 list_add(&rt_se->run_list, queue);
1090 else
1091 list_add_tail(&rt_se->run_list, queue);
1092 __set_bit(rt_se_prio(rt_se), array->bitmap);
1093
1094 inc_rt_tasks(rt_se, rt_rq);
1095}
1096
1097static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1098{
1099 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1100 struct rt_prio_array *array = &rt_rq->active;
1101
1102 list_del_init(&rt_se->run_list);
1103 if (list_empty(array->queue + rt_se_prio(rt_se)))
1104 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1105
1106 dec_rt_tasks(rt_se, rt_rq);
1107}
1108
1109/*
1110 * Because the prio of an upper entry depends on the lower
1111 * entries, we must remove entries top - down.
1112 */
1113static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1114{
1115 struct sched_rt_entity *back = NULL;
1116
1117 for_each_sched_rt_entity(rt_se) {
1118 rt_se->back = back;
1119 back = rt_se;
1120 }
1121
1122 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1123 if (on_rt_rq(rt_se))
1124 __dequeue_rt_entity(rt_se);
1125 }
1126}
1127
1128static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1129{
1130 dequeue_rt_stack(rt_se);
1131 for_each_sched_rt_entity(rt_se)
1132 __enqueue_rt_entity(rt_se, head);
1133}
1134
1135static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1136{
1137 dequeue_rt_stack(rt_se);
1138
1139 for_each_sched_rt_entity(rt_se) {
1140 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1141
1142 if (rt_rq && rt_rq->rt_nr_running)
1143 __enqueue_rt_entity(rt_se, false);
1144 }
1145}
1146
1147/*
1148 * Adding/removing a task to/from a priority array:
1149 */
1150static void
1151enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1152{
1153 struct sched_rt_entity *rt_se = &p->rt;
1154
1155 if (flags & ENQUEUE_WAKEUP)
1156 rt_se->timeout = 0;
1157
1158 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1159
1160 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1161 enqueue_pushable_task(rq, p);
1162
1163 inc_nr_running(rq);
1164}
1165
1166static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1167{
1168 struct sched_rt_entity *rt_se = &p->rt;
1169
1170 update_curr_rt(rq);
1171 dequeue_rt_entity(rt_se);
1172
1173 dequeue_pushable_task(rq, p);
1174
1175 dec_nr_running(rq);
1176}
1177
1178/*
1179 * Put task to the head or the end of the run list without the overhead of
1180 * dequeue followed by enqueue.
1181 */
1182static void
1183requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1184{
1185 if (on_rt_rq(rt_se)) {
1186 struct rt_prio_array *array = &rt_rq->active;
1187 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1188
1189 if (head)
1190 list_move(&rt_se->run_list, queue);
1191 else
1192 list_move_tail(&rt_se->run_list, queue);
1193 }
1194}
1195
1196static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1197{
1198 struct sched_rt_entity *rt_se = &p->rt;
1199 struct rt_rq *rt_rq;
1200
1201 for_each_sched_rt_entity(rt_se) {
1202 rt_rq = rt_rq_of_se(rt_se);
1203 requeue_rt_entity(rt_rq, rt_se, head);
1204 }
1205}
1206
1207static void yield_task_rt(struct rq *rq)
1208{
1209 requeue_task_rt(rq, rq->curr, 0);
1210}
1211
1212#ifdef CONFIG_SMP
1213static int find_lowest_rq(struct task_struct *task);
1214
1215static int
1216select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1217{
1218 struct task_struct *curr;
1219 struct rq *rq;
1220
1221 if (p->nr_cpus_allowed == 1)
1222 goto out;
1223
1224 /* For anything but wake ups, just return the task_cpu */
1225 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1226 goto out;
1227
1228 rq = cpu_rq(cpu);
1229
1230 rcu_read_lock();
1231 curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1232
1233 /*
1234 * If the current task on @p's runqueue is an RT task, then
1235 * try to see if we can wake this RT task up on another
1236 * runqueue. Otherwise simply start this RT task
1237 * on its current runqueue.
1238 *
1239 * We want to avoid overloading runqueues. If the woken
1240 * task is a higher priority, then it will stay on this CPU
1241 * and the lower prio task should be moved to another CPU.
1242 * Even though this will probably make the lower prio task
1243 * lose its cache, we do not want to bounce a higher task
1244 * around just because it gave up its CPU, perhaps for a
1245 * lock?
1246 *
1247 * For equal prio tasks, we just let the scheduler sort it out.
1248 *
1249 * Otherwise, just let it ride on the affined RQ and the
1250 * post-schedule router will push the preempted task away
1251 *
1252 * This test is optimistic, if we get it wrong the load-balancer
1253 * will have to sort it out.
1254 */
1255 if (curr && unlikely(rt_task(curr)) &&
1256 (curr->nr_cpus_allowed < 2 ||
1257 curr->prio <= p->prio)) {
1258 int target = find_lowest_rq(p);
1259
1260 if (target != -1)
1261 cpu = target;
1262 }
1263 rcu_read_unlock();
1264
1265out:
1266 return cpu;
1267}
1268
1269static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1270{
1271 if (rq->curr->nr_cpus_allowed == 1)
1272 return;
1273
1274 if (p->nr_cpus_allowed != 1
1275 && cpupri_find(&rq->rd->cpupri, p, NULL))
1276 return;
1277
1278 if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1279 return;
1280
1281 /*
1282 * There appears to be other cpus that can accept
1283 * current and none to run 'p', so lets reschedule
1284 * to try and push current away:
1285 */
1286 requeue_task_rt(rq, p, 1);
1287 resched_task(rq->curr);
1288}
1289
1290#endif /* CONFIG_SMP */
1291
1292/*
1293 * Preempt the current task with a newly woken task if needed:
1294 */
1295static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1296{
1297 if (p->prio < rq->curr->prio) {
1298 resched_task(rq->curr);
1299 return;
1300 }
1301
1302#ifdef CONFIG_SMP
1303 /*
1304 * If:
1305 *
1306 * - the newly woken task is of equal priority to the current task
1307 * - the newly woken task is non-migratable while current is migratable
1308 * - current will be preempted on the next reschedule
1309 *
1310 * we should check to see if current can readily move to a different
1311 * cpu. If so, we will reschedule to allow the push logic to try
1312 * to move current somewhere else, making room for our non-migratable
1313 * task.
1314 */
1315 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1316 check_preempt_equal_prio(rq, p);
1317#endif
1318}
1319
1320static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1321 struct rt_rq *rt_rq)
1322{
1323 struct rt_prio_array *array = &rt_rq->active;
1324 struct sched_rt_entity *next = NULL;
1325 struct list_head *queue;
1326 int idx;
1327
1328 idx = sched_find_first_bit(array->bitmap);
1329 BUG_ON(idx >= MAX_RT_PRIO);
1330
1331 queue = array->queue + idx;
1332 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1333
1334 return next;
1335}
1336
1337static struct task_struct *_pick_next_task_rt(struct rq *rq)
1338{
1339 struct sched_rt_entity *rt_se;
1340 struct task_struct *p;
1341 struct rt_rq *rt_rq = &rq->rt;
1342
1343 do {
1344 rt_se = pick_next_rt_entity(rq, rt_rq);
1345 BUG_ON(!rt_se);
1346 rt_rq = group_rt_rq(rt_se);
1347 } while (rt_rq);
1348
1349 p = rt_task_of(rt_se);
1350 p->se.exec_start = rq_clock_task(rq);
1351
1352 return p;
1353}
1354
1355static struct task_struct *
1356pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1357{
1358 struct task_struct *p;
1359 struct rt_rq *rt_rq = &rq->rt;
1360
1361 if (need_pull_rt_task(rq, prev)) {
1362 pull_rt_task(rq);
1363 /*
1364 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1365 * means a dl or stop task can slip in, in which case we need
1366 * to re-start task selection.
1367 */
1368 if (unlikely((rq->stop && rq->stop->on_rq) ||
1369 rq->dl.dl_nr_running))
1370 return RETRY_TASK;
1371 }
1372
1373 /*
1374 * We may dequeue prev's rt_rq in put_prev_task().
1375 * So, we update time before rt_nr_running check.
1376 */
1377 if (prev->sched_class == &rt_sched_class)
1378 update_curr_rt(rq);
1379
1380 if (!rt_rq->rt_nr_running)
1381 return NULL;
1382
1383 if (rt_rq_throttled(rt_rq))
1384 return NULL;
1385
1386 put_prev_task(rq, prev);
1387
1388 p = _pick_next_task_rt(rq);
1389
1390 /* The running task is never eligible for pushing */
1391 if (p)
1392 dequeue_pushable_task(rq, p);
1393
1394 set_post_schedule(rq);
1395
1396 return p;
1397}
1398
1399static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1400{
1401 update_curr_rt(rq);
1402
1403 /*
1404 * The previous task needs to be made eligible for pushing
1405 * if it is still active
1406 */
1407 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1408 enqueue_pushable_task(rq, p);
1409}
1410
1411#ifdef CONFIG_SMP
1412
1413/* Only try algorithms three times */
1414#define RT_MAX_TRIES 3
1415
1416static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1417{
1418 if (!task_running(rq, p) &&
1419 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1420 return 1;
1421 return 0;
1422}
1423
1424/*
1425 * Return the highest pushable rq's task, which is suitable to be executed
1426 * on the cpu, NULL otherwise
1427 */
1428static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1429{
1430 struct plist_head *head = &rq->rt.pushable_tasks;
1431 struct task_struct *p;
1432
1433 if (!has_pushable_tasks(rq))
1434 return NULL;
1435
1436 plist_for_each_entry(p, head, pushable_tasks) {
1437 if (pick_rt_task(rq, p, cpu))
1438 return p;
1439 }
1440
1441 return NULL;
1442}
1443
1444static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1445
1446static int find_lowest_rq(struct task_struct *task)
1447{
1448 struct sched_domain *sd;
1449 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1450 int this_cpu = smp_processor_id();
1451 int cpu = task_cpu(task);
1452
1453 /* Make sure the mask is initialized first */
1454 if (unlikely(!lowest_mask))
1455 return -1;
1456
1457 if (task->nr_cpus_allowed == 1)
1458 return -1; /* No other targets possible */
1459
1460 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1461 return -1; /* No targets found */
1462
1463 /*
1464 * At this point we have built a mask of cpus representing the
1465 * lowest priority tasks in the system. Now we want to elect
1466 * the best one based on our affinity and topology.
1467 *
1468 * We prioritize the last cpu that the task executed on since
1469 * it is most likely cache-hot in that location.
1470 */
1471 if (cpumask_test_cpu(cpu, lowest_mask))
1472 return cpu;
1473
1474 /*
1475 * Otherwise, we consult the sched_domains span maps to figure
1476 * out which cpu is logically closest to our hot cache data.
1477 */
1478 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1479 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1480
1481 rcu_read_lock();
1482 for_each_domain(cpu, sd) {
1483 if (sd->flags & SD_WAKE_AFFINE) {
1484 int best_cpu;
1485
1486 /*
1487 * "this_cpu" is cheaper to preempt than a
1488 * remote processor.
1489 */
1490 if (this_cpu != -1 &&
1491 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1492 rcu_read_unlock();
1493 return this_cpu;
1494 }
1495
1496 best_cpu = cpumask_first_and(lowest_mask,
1497 sched_domain_span(sd));
1498 if (best_cpu < nr_cpu_ids) {
1499 rcu_read_unlock();
1500 return best_cpu;
1501 }
1502 }
1503 }
1504 rcu_read_unlock();
1505
1506 /*
1507 * And finally, if there were no matches within the domains
1508 * just give the caller *something* to work with from the compatible
1509 * locations.
1510 */
1511 if (this_cpu != -1)
1512 return this_cpu;
1513
1514 cpu = cpumask_any(lowest_mask);
1515 if (cpu < nr_cpu_ids)
1516 return cpu;
1517 return -1;
1518}
1519
1520/* Will lock the rq it finds */
1521static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1522{
1523 struct rq *lowest_rq = NULL;
1524 int tries;
1525 int cpu;
1526
1527 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1528 cpu = find_lowest_rq(task);
1529
1530 if ((cpu == -1) || (cpu == rq->cpu))
1531 break;
1532
1533 lowest_rq = cpu_rq(cpu);
1534
1535 /* if the prio of this runqueue changed, try again */
1536 if (double_lock_balance(rq, lowest_rq)) {
1537 /*
1538 * We had to unlock the run queue. In
1539 * the mean time, task could have
1540 * migrated already or had its affinity changed.
1541 * Also make sure that it wasn't scheduled on its rq.
1542 */
1543 if (unlikely(task_rq(task) != rq ||
1544 !cpumask_test_cpu(lowest_rq->cpu,
1545 tsk_cpus_allowed(task)) ||
1546 task_running(rq, task) ||
1547 !task->on_rq)) {
1548
1549 double_unlock_balance(rq, lowest_rq);
1550 lowest_rq = NULL;
1551 break;
1552 }
1553 }
1554
1555 /* If this rq is still suitable use it. */
1556 if (lowest_rq->rt.highest_prio.curr > task->prio)
1557 break;
1558
1559 /* try again */
1560 double_unlock_balance(rq, lowest_rq);
1561 lowest_rq = NULL;
1562 }
1563
1564 return lowest_rq;
1565}
1566
1567static struct task_struct *pick_next_pushable_task(struct rq *rq)
1568{
1569 struct task_struct *p;
1570
1571 if (!has_pushable_tasks(rq))
1572 return NULL;
1573
1574 p = plist_first_entry(&rq->rt.pushable_tasks,
1575 struct task_struct, pushable_tasks);
1576
1577 BUG_ON(rq->cpu != task_cpu(p));
1578 BUG_ON(task_current(rq, p));
1579 BUG_ON(p->nr_cpus_allowed <= 1);
1580
1581 BUG_ON(!p->on_rq);
1582 BUG_ON(!rt_task(p));
1583
1584 return p;
1585}
1586
1587/*
1588 * If the current CPU has more than one RT task, see if the non
1589 * running task can migrate over to a CPU that is running a task
1590 * of lesser priority.
1591 */
1592static int push_rt_task(struct rq *rq)
1593{
1594 struct task_struct *next_task;
1595 struct rq *lowest_rq;
1596 int ret = 0;
1597
1598 if (!rq->rt.overloaded)
1599 return 0;
1600
1601 next_task = pick_next_pushable_task(rq);
1602 if (!next_task)
1603 return 0;
1604
1605retry:
1606 if (unlikely(next_task == rq->curr)) {
1607 WARN_ON(1);
1608 return 0;
1609 }
1610
1611 /*
1612 * It's possible that the next_task slipped in of
1613 * higher priority than current. If that's the case
1614 * just reschedule current.
1615 */
1616 if (unlikely(next_task->prio < rq->curr->prio)) {
1617 resched_task(rq->curr);
1618 return 0;
1619 }
1620
1621 /* We might release rq lock */
1622 get_task_struct(next_task);
1623
1624 /* find_lock_lowest_rq locks the rq if found */
1625 lowest_rq = find_lock_lowest_rq(next_task, rq);
1626 if (!lowest_rq) {
1627 struct task_struct *task;
1628 /*
1629 * find_lock_lowest_rq releases rq->lock
1630 * so it is possible that next_task has migrated.
1631 *
1632 * We need to make sure that the task is still on the same
1633 * run-queue and is also still the next task eligible for
1634 * pushing.
1635 */
1636 task = pick_next_pushable_task(rq);
1637 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1638 /*
1639 * The task hasn't migrated, and is still the next
1640 * eligible task, but we failed to find a run-queue
1641 * to push it to. Do not retry in this case, since
1642 * other cpus will pull from us when ready.
1643 */
1644 goto out;
1645 }
1646
1647 if (!task)
1648 /* No more tasks, just exit */
1649 goto out;
1650
1651 /*
1652 * Something has shifted, try again.
1653 */
1654 put_task_struct(next_task);
1655 next_task = task;
1656 goto retry;
1657 }
1658
1659 deactivate_task(rq, next_task, 0);
1660 set_task_cpu(next_task, lowest_rq->cpu);
1661 activate_task(lowest_rq, next_task, 0);
1662 ret = 1;
1663
1664 resched_task(lowest_rq->curr);
1665
1666 double_unlock_balance(rq, lowest_rq);
1667
1668out:
1669 put_task_struct(next_task);
1670
1671 return ret;
1672}
1673
1674static void push_rt_tasks(struct rq *rq)
1675{
1676 /* push_rt_task will return true if it moved an RT */
1677 while (push_rt_task(rq))
1678 ;
1679}
1680
1681static int pull_rt_task(struct rq *this_rq)
1682{
1683 int this_cpu = this_rq->cpu, ret = 0, cpu;
1684 struct task_struct *p;
1685 struct rq *src_rq;
1686
1687 if (likely(!rt_overloaded(this_rq)))
1688 return 0;
1689
1690 /*
1691 * Match the barrier from rt_set_overloaded; this guarantees that if we
1692 * see overloaded we must also see the rto_mask bit.
1693 */
1694 smp_rmb();
1695
1696 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1697 if (this_cpu == cpu)
1698 continue;
1699
1700 src_rq = cpu_rq(cpu);
1701
1702 /*
1703 * Don't bother taking the src_rq->lock if the next highest
1704 * task is known to be lower-priority than our current task.
1705 * This may look racy, but if this value is about to go
1706 * logically higher, the src_rq will push this task away.
1707 * And if its going logically lower, we do not care
1708 */
1709 if (src_rq->rt.highest_prio.next >=
1710 this_rq->rt.highest_prio.curr)
1711 continue;
1712
1713 /*
1714 * We can potentially drop this_rq's lock in
1715 * double_lock_balance, and another CPU could
1716 * alter this_rq
1717 */
1718 double_lock_balance(this_rq, src_rq);
1719
1720 /*
1721 * We can pull only a task, which is pushable
1722 * on its rq, and no others.
1723 */
1724 p = pick_highest_pushable_task(src_rq, this_cpu);
1725
1726 /*
1727 * Do we have an RT task that preempts
1728 * the to-be-scheduled task?
1729 */
1730 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1731 WARN_ON(p == src_rq->curr);
1732 WARN_ON(!p->on_rq);
1733
1734 /*
1735 * There's a chance that p is higher in priority
1736 * than what's currently running on its cpu.
1737 * This is just that p is wakeing up and hasn't
1738 * had a chance to schedule. We only pull
1739 * p if it is lower in priority than the
1740 * current task on the run queue
1741 */
1742 if (p->prio < src_rq->curr->prio)
1743 goto skip;
1744
1745 ret = 1;
1746
1747 deactivate_task(src_rq, p, 0);
1748 set_task_cpu(p, this_cpu);
1749 activate_task(this_rq, p, 0);
1750 /*
1751 * We continue with the search, just in
1752 * case there's an even higher prio task
1753 * in another runqueue. (low likelihood
1754 * but possible)
1755 */
1756 }
1757skip:
1758 double_unlock_balance(this_rq, src_rq);
1759 }
1760
1761 return ret;
1762}
1763
1764static void post_schedule_rt(struct rq *rq)
1765{
1766 push_rt_tasks(rq);
1767}
1768
1769/*
1770 * If we are not running and we are not going to reschedule soon, we should
1771 * try to push tasks away now
1772 */
1773static void task_woken_rt(struct rq *rq, struct task_struct *p)
1774{
1775 if (!task_running(rq, p) &&
1776 !test_tsk_need_resched(rq->curr) &&
1777 has_pushable_tasks(rq) &&
1778 p->nr_cpus_allowed > 1 &&
1779 (dl_task(rq->curr) || rt_task(rq->curr)) &&
1780 (rq->curr->nr_cpus_allowed < 2 ||
1781 rq->curr->prio <= p->prio))
1782 push_rt_tasks(rq);
1783}
1784
1785static void set_cpus_allowed_rt(struct task_struct *p,
1786 const struct cpumask *new_mask)
1787{
1788 struct rq *rq;
1789 int weight;
1790
1791 BUG_ON(!rt_task(p));
1792
1793 if (!p->on_rq)
1794 return;
1795
1796 weight = cpumask_weight(new_mask);
1797
1798 /*
1799 * Only update if the process changes its state from whether it
1800 * can migrate or not.
1801 */
1802 if ((p->nr_cpus_allowed > 1) == (weight > 1))
1803 return;
1804
1805 rq = task_rq(p);
1806
1807 /*
1808 * The process used to be able to migrate OR it can now migrate
1809 */
1810 if (weight <= 1) {
1811 if (!task_current(rq, p))
1812 dequeue_pushable_task(rq, p);
1813 BUG_ON(!rq->rt.rt_nr_migratory);
1814 rq->rt.rt_nr_migratory--;
1815 } else {
1816 if (!task_current(rq, p))
1817 enqueue_pushable_task(rq, p);
1818 rq->rt.rt_nr_migratory++;
1819 }
1820
1821 update_rt_migration(&rq->rt);
1822}
1823
1824/* Assumes rq->lock is held */
1825static void rq_online_rt(struct rq *rq)
1826{
1827 if (rq->rt.overloaded)
1828 rt_set_overload(rq);
1829
1830 __enable_runtime(rq);
1831
1832 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1833}
1834
1835/* Assumes rq->lock is held */
1836static void rq_offline_rt(struct rq *rq)
1837{
1838 if (rq->rt.overloaded)
1839 rt_clear_overload(rq);
1840
1841 __disable_runtime(rq);
1842
1843 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1844}
1845
1846/*
1847 * When switch from the rt queue, we bring ourselves to a position
1848 * that we might want to pull RT tasks from other runqueues.
1849 */
1850static void switched_from_rt(struct rq *rq, struct task_struct *p)
1851{
1852 /*
1853 * If there are other RT tasks then we will reschedule
1854 * and the scheduling of the other RT tasks will handle
1855 * the balancing. But if we are the last RT task
1856 * we may need to handle the pulling of RT tasks
1857 * now.
1858 */
1859 if (!p->on_rq || rq->rt.rt_nr_running)
1860 return;
1861
1862 if (pull_rt_task(rq))
1863 resched_task(rq->curr);
1864}
1865
1866void __init init_sched_rt_class(void)
1867{
1868 unsigned int i;
1869
1870 for_each_possible_cpu(i) {
1871 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1872 GFP_KERNEL, cpu_to_node(i));
1873 }
1874}
1875#endif /* CONFIG_SMP */
1876
1877/*
1878 * When switching a task to RT, we may overload the runqueue
1879 * with RT tasks. In this case we try to push them off to
1880 * other runqueues.
1881 */
1882static void switched_to_rt(struct rq *rq, struct task_struct *p)
1883{
1884 int check_resched = 1;
1885
1886 /*
1887 * If we are already running, then there's nothing
1888 * that needs to be done. But if we are not running
1889 * we may need to preempt the current running task.
1890 * If that current running task is also an RT task
1891 * then see if we can move to another run queue.
1892 */
1893 if (p->on_rq && rq->curr != p) {
1894#ifdef CONFIG_SMP
1895 if (rq->rt.overloaded && push_rt_task(rq) &&
1896 /* Don't resched if we changed runqueues */
1897 rq != task_rq(p))
1898 check_resched = 0;
1899#endif /* CONFIG_SMP */
1900 if (check_resched && p->prio < rq->curr->prio)
1901 resched_task(rq->curr);
1902 }
1903}
1904
1905/*
1906 * Priority of the task has changed. This may cause
1907 * us to initiate a push or pull.
1908 */
1909static void
1910prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1911{
1912 if (!p->on_rq)
1913 return;
1914
1915 if (rq->curr == p) {
1916#ifdef CONFIG_SMP
1917 /*
1918 * If our priority decreases while running, we
1919 * may need to pull tasks to this runqueue.
1920 */
1921 if (oldprio < p->prio)
1922 pull_rt_task(rq);
1923 /*
1924 * If there's a higher priority task waiting to run
1925 * then reschedule. Note, the above pull_rt_task
1926 * can release the rq lock and p could migrate.
1927 * Only reschedule if p is still on the same runqueue.
1928 */
1929 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1930 resched_task(p);
1931#else
1932 /* For UP simply resched on drop of prio */
1933 if (oldprio < p->prio)
1934 resched_task(p);
1935#endif /* CONFIG_SMP */
1936 } else {
1937 /*
1938 * This task is not running, but if it is
1939 * greater than the current running task
1940 * then reschedule.
1941 */
1942 if (p->prio < rq->curr->prio)
1943 resched_task(rq->curr);
1944 }
1945}
1946
1947static void watchdog(struct rq *rq, struct task_struct *p)
1948{
1949 unsigned long soft, hard;
1950
1951 /* max may change after cur was read, this will be fixed next tick */
1952 soft = task_rlimit(p, RLIMIT_RTTIME);
1953 hard = task_rlimit_max(p, RLIMIT_RTTIME);
1954
1955 if (soft != RLIM_INFINITY) {
1956 unsigned long next;
1957
1958 if (p->rt.watchdog_stamp != jiffies) {
1959 p->rt.timeout++;
1960 p->rt.watchdog_stamp = jiffies;
1961 }
1962
1963 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1964 if (p->rt.timeout > next)
1965 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1966 }
1967}
1968
1969static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1970{
1971 struct sched_rt_entity *rt_se = &p->rt;
1972
1973 update_curr_rt(rq);
1974
1975 watchdog(rq, p);
1976
1977 /*
1978 * RR tasks need a special form of timeslice management.
1979 * FIFO tasks have no timeslices.
1980 */
1981 if (p->policy != SCHED_RR)
1982 return;
1983
1984 if (--p->rt.time_slice)
1985 return;
1986
1987 p->rt.time_slice = sched_rr_timeslice;
1988
1989 /*
1990 * Requeue to the end of queue if we (and all of our ancestors) are not
1991 * the only element on the queue
1992 */
1993 for_each_sched_rt_entity(rt_se) {
1994 if (rt_se->run_list.prev != rt_se->run_list.next) {
1995 requeue_task_rt(rq, p, 0);
1996 set_tsk_need_resched(p);
1997 return;
1998 }
1999 }
2000}
2001
2002static void set_curr_task_rt(struct rq *rq)
2003{
2004 struct task_struct *p = rq->curr;
2005
2006 p->se.exec_start = rq_clock_task(rq);
2007
2008 /* The running task is never eligible for pushing */
2009 dequeue_pushable_task(rq, p);
2010}
2011
2012static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2013{
2014 /*
2015 * Time slice is 0 for SCHED_FIFO tasks
2016 */
2017 if (task->policy == SCHED_RR)
2018 return sched_rr_timeslice;
2019 else
2020 return 0;
2021}
2022
2023const struct sched_class rt_sched_class = {
2024 .next = &fair_sched_class,
2025 .enqueue_task = enqueue_task_rt,
2026 .dequeue_task = dequeue_task_rt,
2027 .yield_task = yield_task_rt,
2028
2029 .check_preempt_curr = check_preempt_curr_rt,
2030
2031 .pick_next_task = pick_next_task_rt,
2032 .put_prev_task = put_prev_task_rt,
2033
2034#ifdef CONFIG_SMP
2035 .select_task_rq = select_task_rq_rt,
2036
2037 .set_cpus_allowed = set_cpus_allowed_rt,
2038 .rq_online = rq_online_rt,
2039 .rq_offline = rq_offline_rt,
2040 .post_schedule = post_schedule_rt,
2041 .task_woken = task_woken_rt,
2042 .switched_from = switched_from_rt,
2043#endif
2044
2045 .set_curr_task = set_curr_task_rt,
2046 .task_tick = task_tick_rt,
2047
2048 .get_rr_interval = get_rr_interval_rt,
2049
2050 .prio_changed = prio_changed_rt,
2051 .switched_to = switched_to_rt,
2052};
2053
2054#ifdef CONFIG_SCHED_DEBUG
2055extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2056
2057void print_rt_stats(struct seq_file *m, int cpu)
2058{
2059 rt_rq_iter_t iter;
2060 struct rt_rq *rt_rq;
2061
2062 rcu_read_lock();
2063 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2064 print_rt_rq(m, cpu, rt_rq);
2065 rcu_read_unlock();
2066}
2067#endif /* CONFIG_SCHED_DEBUG */
1/*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
5
6#include "sched.h"
7
8#include <linux/slab.h>
9#include <linux/irq_work.h>
10
11int sched_rr_timeslice = RR_TIMESLICE;
12
13static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
14
15struct rt_bandwidth def_rt_bandwidth;
16
17static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
18{
19 struct rt_bandwidth *rt_b =
20 container_of(timer, struct rt_bandwidth, rt_period_timer);
21 int idle = 0;
22 int overrun;
23
24 raw_spin_lock(&rt_b->rt_runtime_lock);
25 for (;;) {
26 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
27 if (!overrun)
28 break;
29
30 raw_spin_unlock(&rt_b->rt_runtime_lock);
31 idle = do_sched_rt_period_timer(rt_b, overrun);
32 raw_spin_lock(&rt_b->rt_runtime_lock);
33 }
34 if (idle)
35 rt_b->rt_period_active = 0;
36 raw_spin_unlock(&rt_b->rt_runtime_lock);
37
38 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
39}
40
41void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
42{
43 rt_b->rt_period = ns_to_ktime(period);
44 rt_b->rt_runtime = runtime;
45
46 raw_spin_lock_init(&rt_b->rt_runtime_lock);
47
48 hrtimer_init(&rt_b->rt_period_timer,
49 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
50 rt_b->rt_period_timer.function = sched_rt_period_timer;
51}
52
53static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
54{
55 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
56 return;
57
58 raw_spin_lock(&rt_b->rt_runtime_lock);
59 if (!rt_b->rt_period_active) {
60 rt_b->rt_period_active = 1;
61 /*
62 * SCHED_DEADLINE updates the bandwidth, as a run away
63 * RT task with a DL task could hog a CPU. But DL does
64 * not reset the period. If a deadline task was running
65 * without an RT task running, it can cause RT tasks to
66 * throttle when they start up. Kick the timer right away
67 * to update the period.
68 */
69 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
70 hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
71 }
72 raw_spin_unlock(&rt_b->rt_runtime_lock);
73}
74
75#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
76static void push_irq_work_func(struct irq_work *work);
77#endif
78
79void init_rt_rq(struct rt_rq *rt_rq)
80{
81 struct rt_prio_array *array;
82 int i;
83
84 array = &rt_rq->active;
85 for (i = 0; i < MAX_RT_PRIO; i++) {
86 INIT_LIST_HEAD(array->queue + i);
87 __clear_bit(i, array->bitmap);
88 }
89 /* delimiter for bitsearch: */
90 __set_bit(MAX_RT_PRIO, array->bitmap);
91
92#if defined CONFIG_SMP
93 rt_rq->highest_prio.curr = MAX_RT_PRIO;
94 rt_rq->highest_prio.next = MAX_RT_PRIO;
95 rt_rq->rt_nr_migratory = 0;
96 rt_rq->overloaded = 0;
97 plist_head_init(&rt_rq->pushable_tasks);
98
99#ifdef HAVE_RT_PUSH_IPI
100 rt_rq->push_flags = 0;
101 rt_rq->push_cpu = nr_cpu_ids;
102 raw_spin_lock_init(&rt_rq->push_lock);
103 init_irq_work(&rt_rq->push_work, push_irq_work_func);
104#endif
105#endif /* CONFIG_SMP */
106 /* We start is dequeued state, because no RT tasks are queued */
107 rt_rq->rt_queued = 0;
108
109 rt_rq->rt_time = 0;
110 rt_rq->rt_throttled = 0;
111 rt_rq->rt_runtime = 0;
112 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
113}
114
115#ifdef CONFIG_RT_GROUP_SCHED
116static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
117{
118 hrtimer_cancel(&rt_b->rt_period_timer);
119}
120
121#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
122
123static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
124{
125#ifdef CONFIG_SCHED_DEBUG
126 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
127#endif
128 return container_of(rt_se, struct task_struct, rt);
129}
130
131static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
132{
133 return rt_rq->rq;
134}
135
136static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
137{
138 return rt_se->rt_rq;
139}
140
141static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
142{
143 struct rt_rq *rt_rq = rt_se->rt_rq;
144
145 return rt_rq->rq;
146}
147
148void free_rt_sched_group(struct task_group *tg)
149{
150 int i;
151
152 if (tg->rt_se)
153 destroy_rt_bandwidth(&tg->rt_bandwidth);
154
155 for_each_possible_cpu(i) {
156 if (tg->rt_rq)
157 kfree(tg->rt_rq[i]);
158 if (tg->rt_se)
159 kfree(tg->rt_se[i]);
160 }
161
162 kfree(tg->rt_rq);
163 kfree(tg->rt_se);
164}
165
166void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
167 struct sched_rt_entity *rt_se, int cpu,
168 struct sched_rt_entity *parent)
169{
170 struct rq *rq = cpu_rq(cpu);
171
172 rt_rq->highest_prio.curr = MAX_RT_PRIO;
173 rt_rq->rt_nr_boosted = 0;
174 rt_rq->rq = rq;
175 rt_rq->tg = tg;
176
177 tg->rt_rq[cpu] = rt_rq;
178 tg->rt_se[cpu] = rt_se;
179
180 if (!rt_se)
181 return;
182
183 if (!parent)
184 rt_se->rt_rq = &rq->rt;
185 else
186 rt_se->rt_rq = parent->my_q;
187
188 rt_se->my_q = rt_rq;
189 rt_se->parent = parent;
190 INIT_LIST_HEAD(&rt_se->run_list);
191}
192
193int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
194{
195 struct rt_rq *rt_rq;
196 struct sched_rt_entity *rt_se;
197 int i;
198
199 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
200 if (!tg->rt_rq)
201 goto err;
202 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
203 if (!tg->rt_se)
204 goto err;
205
206 init_rt_bandwidth(&tg->rt_bandwidth,
207 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
208
209 for_each_possible_cpu(i) {
210 rt_rq = kzalloc_node(sizeof(struct rt_rq),
211 GFP_KERNEL, cpu_to_node(i));
212 if (!rt_rq)
213 goto err;
214
215 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
216 GFP_KERNEL, cpu_to_node(i));
217 if (!rt_se)
218 goto err_free_rq;
219
220 init_rt_rq(rt_rq);
221 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
222 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
223 }
224
225 return 1;
226
227err_free_rq:
228 kfree(rt_rq);
229err:
230 return 0;
231}
232
233#else /* CONFIG_RT_GROUP_SCHED */
234
235#define rt_entity_is_task(rt_se) (1)
236
237static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
238{
239 return container_of(rt_se, struct task_struct, rt);
240}
241
242static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
243{
244 return container_of(rt_rq, struct rq, rt);
245}
246
247static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
248{
249 struct task_struct *p = rt_task_of(rt_se);
250
251 return task_rq(p);
252}
253
254static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
255{
256 struct rq *rq = rq_of_rt_se(rt_se);
257
258 return &rq->rt;
259}
260
261void free_rt_sched_group(struct task_group *tg) { }
262
263int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
264{
265 return 1;
266}
267#endif /* CONFIG_RT_GROUP_SCHED */
268
269#ifdef CONFIG_SMP
270
271static void pull_rt_task(struct rq *this_rq);
272
273static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
274{
275 /* Try to pull RT tasks here if we lower this rq's prio */
276 return rq->rt.highest_prio.curr > prev->prio;
277}
278
279static inline int rt_overloaded(struct rq *rq)
280{
281 return atomic_read(&rq->rd->rto_count);
282}
283
284static inline void rt_set_overload(struct rq *rq)
285{
286 if (!rq->online)
287 return;
288
289 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
290 /*
291 * Make sure the mask is visible before we set
292 * the overload count. That is checked to determine
293 * if we should look at the mask. It would be a shame
294 * if we looked at the mask, but the mask was not
295 * updated yet.
296 *
297 * Matched by the barrier in pull_rt_task().
298 */
299 smp_wmb();
300 atomic_inc(&rq->rd->rto_count);
301}
302
303static inline void rt_clear_overload(struct rq *rq)
304{
305 if (!rq->online)
306 return;
307
308 /* the order here really doesn't matter */
309 atomic_dec(&rq->rd->rto_count);
310 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
311}
312
313static void update_rt_migration(struct rt_rq *rt_rq)
314{
315 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
316 if (!rt_rq->overloaded) {
317 rt_set_overload(rq_of_rt_rq(rt_rq));
318 rt_rq->overloaded = 1;
319 }
320 } else if (rt_rq->overloaded) {
321 rt_clear_overload(rq_of_rt_rq(rt_rq));
322 rt_rq->overloaded = 0;
323 }
324}
325
326static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
327{
328 struct task_struct *p;
329
330 if (!rt_entity_is_task(rt_se))
331 return;
332
333 p = rt_task_of(rt_se);
334 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
335
336 rt_rq->rt_nr_total++;
337 if (p->nr_cpus_allowed > 1)
338 rt_rq->rt_nr_migratory++;
339
340 update_rt_migration(rt_rq);
341}
342
343static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
344{
345 struct task_struct *p;
346
347 if (!rt_entity_is_task(rt_se))
348 return;
349
350 p = rt_task_of(rt_se);
351 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
352
353 rt_rq->rt_nr_total--;
354 if (p->nr_cpus_allowed > 1)
355 rt_rq->rt_nr_migratory--;
356
357 update_rt_migration(rt_rq);
358}
359
360static inline int has_pushable_tasks(struct rq *rq)
361{
362 return !plist_head_empty(&rq->rt.pushable_tasks);
363}
364
365static DEFINE_PER_CPU(struct callback_head, rt_push_head);
366static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
367
368static void push_rt_tasks(struct rq *);
369static void pull_rt_task(struct rq *);
370
371static inline void queue_push_tasks(struct rq *rq)
372{
373 if (!has_pushable_tasks(rq))
374 return;
375
376 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
377}
378
379static inline void queue_pull_task(struct rq *rq)
380{
381 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
382}
383
384static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
385{
386 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
387 plist_node_init(&p->pushable_tasks, p->prio);
388 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
389
390 /* Update the highest prio pushable task */
391 if (p->prio < rq->rt.highest_prio.next)
392 rq->rt.highest_prio.next = p->prio;
393}
394
395static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
396{
397 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
398
399 /* Update the new highest prio pushable task */
400 if (has_pushable_tasks(rq)) {
401 p = plist_first_entry(&rq->rt.pushable_tasks,
402 struct task_struct, pushable_tasks);
403 rq->rt.highest_prio.next = p->prio;
404 } else
405 rq->rt.highest_prio.next = MAX_RT_PRIO;
406}
407
408#else
409
410static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
411{
412}
413
414static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
415{
416}
417
418static inline
419void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
420{
421}
422
423static inline
424void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
425{
426}
427
428static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
429{
430 return false;
431}
432
433static inline void pull_rt_task(struct rq *this_rq)
434{
435}
436
437static inline void queue_push_tasks(struct rq *rq)
438{
439}
440#endif /* CONFIG_SMP */
441
442static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
443static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
444
445static inline int on_rt_rq(struct sched_rt_entity *rt_se)
446{
447 return rt_se->on_rq;
448}
449
450#ifdef CONFIG_RT_GROUP_SCHED
451
452static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
453{
454 if (!rt_rq->tg)
455 return RUNTIME_INF;
456
457 return rt_rq->rt_runtime;
458}
459
460static inline u64 sched_rt_period(struct rt_rq *rt_rq)
461{
462 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
463}
464
465typedef struct task_group *rt_rq_iter_t;
466
467static inline struct task_group *next_task_group(struct task_group *tg)
468{
469 do {
470 tg = list_entry_rcu(tg->list.next,
471 typeof(struct task_group), list);
472 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
473
474 if (&tg->list == &task_groups)
475 tg = NULL;
476
477 return tg;
478}
479
480#define for_each_rt_rq(rt_rq, iter, rq) \
481 for (iter = container_of(&task_groups, typeof(*iter), list); \
482 (iter = next_task_group(iter)) && \
483 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
484
485#define for_each_sched_rt_entity(rt_se) \
486 for (; rt_se; rt_se = rt_se->parent)
487
488static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
489{
490 return rt_se->my_q;
491}
492
493static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
494static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
495
496static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
497{
498 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
499 struct rq *rq = rq_of_rt_rq(rt_rq);
500 struct sched_rt_entity *rt_se;
501
502 int cpu = cpu_of(rq);
503
504 rt_se = rt_rq->tg->rt_se[cpu];
505
506 if (rt_rq->rt_nr_running) {
507 if (!rt_se)
508 enqueue_top_rt_rq(rt_rq);
509 else if (!on_rt_rq(rt_se))
510 enqueue_rt_entity(rt_se, 0);
511
512 if (rt_rq->highest_prio.curr < curr->prio)
513 resched_curr(rq);
514 }
515}
516
517static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
518{
519 struct sched_rt_entity *rt_se;
520 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
521
522 rt_se = rt_rq->tg->rt_se[cpu];
523
524 if (!rt_se)
525 dequeue_top_rt_rq(rt_rq);
526 else if (on_rt_rq(rt_se))
527 dequeue_rt_entity(rt_se, 0);
528}
529
530static inline int rt_rq_throttled(struct rt_rq *rt_rq)
531{
532 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
533}
534
535static int rt_se_boosted(struct sched_rt_entity *rt_se)
536{
537 struct rt_rq *rt_rq = group_rt_rq(rt_se);
538 struct task_struct *p;
539
540 if (rt_rq)
541 return !!rt_rq->rt_nr_boosted;
542
543 p = rt_task_of(rt_se);
544 return p->prio != p->normal_prio;
545}
546
547#ifdef CONFIG_SMP
548static inline const struct cpumask *sched_rt_period_mask(void)
549{
550 return this_rq()->rd->span;
551}
552#else
553static inline const struct cpumask *sched_rt_period_mask(void)
554{
555 return cpu_online_mask;
556}
557#endif
558
559static inline
560struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
561{
562 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
563}
564
565static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
566{
567 return &rt_rq->tg->rt_bandwidth;
568}
569
570#else /* !CONFIG_RT_GROUP_SCHED */
571
572static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
573{
574 return rt_rq->rt_runtime;
575}
576
577static inline u64 sched_rt_period(struct rt_rq *rt_rq)
578{
579 return ktime_to_ns(def_rt_bandwidth.rt_period);
580}
581
582typedef struct rt_rq *rt_rq_iter_t;
583
584#define for_each_rt_rq(rt_rq, iter, rq) \
585 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
586
587#define for_each_sched_rt_entity(rt_se) \
588 for (; rt_se; rt_se = NULL)
589
590static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
591{
592 return NULL;
593}
594
595static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
596{
597 struct rq *rq = rq_of_rt_rq(rt_rq);
598
599 if (!rt_rq->rt_nr_running)
600 return;
601
602 enqueue_top_rt_rq(rt_rq);
603 resched_curr(rq);
604}
605
606static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
607{
608 dequeue_top_rt_rq(rt_rq);
609}
610
611static inline int rt_rq_throttled(struct rt_rq *rt_rq)
612{
613 return rt_rq->rt_throttled;
614}
615
616static inline const struct cpumask *sched_rt_period_mask(void)
617{
618 return cpu_online_mask;
619}
620
621static inline
622struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
623{
624 return &cpu_rq(cpu)->rt;
625}
626
627static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
628{
629 return &def_rt_bandwidth;
630}
631
632#endif /* CONFIG_RT_GROUP_SCHED */
633
634bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
635{
636 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
637
638 return (hrtimer_active(&rt_b->rt_period_timer) ||
639 rt_rq->rt_time < rt_b->rt_runtime);
640}
641
642#ifdef CONFIG_SMP
643/*
644 * We ran out of runtime, see if we can borrow some from our neighbours.
645 */
646static void do_balance_runtime(struct rt_rq *rt_rq)
647{
648 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
649 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
650 int i, weight;
651 u64 rt_period;
652
653 weight = cpumask_weight(rd->span);
654
655 raw_spin_lock(&rt_b->rt_runtime_lock);
656 rt_period = ktime_to_ns(rt_b->rt_period);
657 for_each_cpu(i, rd->span) {
658 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
659 s64 diff;
660
661 if (iter == rt_rq)
662 continue;
663
664 raw_spin_lock(&iter->rt_runtime_lock);
665 /*
666 * Either all rqs have inf runtime and there's nothing to steal
667 * or __disable_runtime() below sets a specific rq to inf to
668 * indicate its been disabled and disalow stealing.
669 */
670 if (iter->rt_runtime == RUNTIME_INF)
671 goto next;
672
673 /*
674 * From runqueues with spare time, take 1/n part of their
675 * spare time, but no more than our period.
676 */
677 diff = iter->rt_runtime - iter->rt_time;
678 if (diff > 0) {
679 diff = div_u64((u64)diff, weight);
680 if (rt_rq->rt_runtime + diff > rt_period)
681 diff = rt_period - rt_rq->rt_runtime;
682 iter->rt_runtime -= diff;
683 rt_rq->rt_runtime += diff;
684 if (rt_rq->rt_runtime == rt_period) {
685 raw_spin_unlock(&iter->rt_runtime_lock);
686 break;
687 }
688 }
689next:
690 raw_spin_unlock(&iter->rt_runtime_lock);
691 }
692 raw_spin_unlock(&rt_b->rt_runtime_lock);
693}
694
695/*
696 * Ensure this RQ takes back all the runtime it lend to its neighbours.
697 */
698static void __disable_runtime(struct rq *rq)
699{
700 struct root_domain *rd = rq->rd;
701 rt_rq_iter_t iter;
702 struct rt_rq *rt_rq;
703
704 if (unlikely(!scheduler_running))
705 return;
706
707 for_each_rt_rq(rt_rq, iter, rq) {
708 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
709 s64 want;
710 int i;
711
712 raw_spin_lock(&rt_b->rt_runtime_lock);
713 raw_spin_lock(&rt_rq->rt_runtime_lock);
714 /*
715 * Either we're all inf and nobody needs to borrow, or we're
716 * already disabled and thus have nothing to do, or we have
717 * exactly the right amount of runtime to take out.
718 */
719 if (rt_rq->rt_runtime == RUNTIME_INF ||
720 rt_rq->rt_runtime == rt_b->rt_runtime)
721 goto balanced;
722 raw_spin_unlock(&rt_rq->rt_runtime_lock);
723
724 /*
725 * Calculate the difference between what we started out with
726 * and what we current have, that's the amount of runtime
727 * we lend and now have to reclaim.
728 */
729 want = rt_b->rt_runtime - rt_rq->rt_runtime;
730
731 /*
732 * Greedy reclaim, take back as much as we can.
733 */
734 for_each_cpu(i, rd->span) {
735 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
736 s64 diff;
737
738 /*
739 * Can't reclaim from ourselves or disabled runqueues.
740 */
741 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
742 continue;
743
744 raw_spin_lock(&iter->rt_runtime_lock);
745 if (want > 0) {
746 diff = min_t(s64, iter->rt_runtime, want);
747 iter->rt_runtime -= diff;
748 want -= diff;
749 } else {
750 iter->rt_runtime -= want;
751 want -= want;
752 }
753 raw_spin_unlock(&iter->rt_runtime_lock);
754
755 if (!want)
756 break;
757 }
758
759 raw_spin_lock(&rt_rq->rt_runtime_lock);
760 /*
761 * We cannot be left wanting - that would mean some runtime
762 * leaked out of the system.
763 */
764 BUG_ON(want);
765balanced:
766 /*
767 * Disable all the borrow logic by pretending we have inf
768 * runtime - in which case borrowing doesn't make sense.
769 */
770 rt_rq->rt_runtime = RUNTIME_INF;
771 rt_rq->rt_throttled = 0;
772 raw_spin_unlock(&rt_rq->rt_runtime_lock);
773 raw_spin_unlock(&rt_b->rt_runtime_lock);
774
775 /* Make rt_rq available for pick_next_task() */
776 sched_rt_rq_enqueue(rt_rq);
777 }
778}
779
780static void __enable_runtime(struct rq *rq)
781{
782 rt_rq_iter_t iter;
783 struct rt_rq *rt_rq;
784
785 if (unlikely(!scheduler_running))
786 return;
787
788 /*
789 * Reset each runqueue's bandwidth settings
790 */
791 for_each_rt_rq(rt_rq, iter, rq) {
792 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
793
794 raw_spin_lock(&rt_b->rt_runtime_lock);
795 raw_spin_lock(&rt_rq->rt_runtime_lock);
796 rt_rq->rt_runtime = rt_b->rt_runtime;
797 rt_rq->rt_time = 0;
798 rt_rq->rt_throttled = 0;
799 raw_spin_unlock(&rt_rq->rt_runtime_lock);
800 raw_spin_unlock(&rt_b->rt_runtime_lock);
801 }
802}
803
804static void balance_runtime(struct rt_rq *rt_rq)
805{
806 if (!sched_feat(RT_RUNTIME_SHARE))
807 return;
808
809 if (rt_rq->rt_time > rt_rq->rt_runtime) {
810 raw_spin_unlock(&rt_rq->rt_runtime_lock);
811 do_balance_runtime(rt_rq);
812 raw_spin_lock(&rt_rq->rt_runtime_lock);
813 }
814}
815#else /* !CONFIG_SMP */
816static inline void balance_runtime(struct rt_rq *rt_rq) {}
817#endif /* CONFIG_SMP */
818
819static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
820{
821 int i, idle = 1, throttled = 0;
822 const struct cpumask *span;
823
824 span = sched_rt_period_mask();
825#ifdef CONFIG_RT_GROUP_SCHED
826 /*
827 * FIXME: isolated CPUs should really leave the root task group,
828 * whether they are isolcpus or were isolated via cpusets, lest
829 * the timer run on a CPU which does not service all runqueues,
830 * potentially leaving other CPUs indefinitely throttled. If
831 * isolation is really required, the user will turn the throttle
832 * off to kill the perturbations it causes anyway. Meanwhile,
833 * this maintains functionality for boot and/or troubleshooting.
834 */
835 if (rt_b == &root_task_group.rt_bandwidth)
836 span = cpu_online_mask;
837#endif
838 for_each_cpu(i, span) {
839 int enqueue = 0;
840 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
841 struct rq *rq = rq_of_rt_rq(rt_rq);
842
843 raw_spin_lock(&rq->lock);
844 if (rt_rq->rt_time) {
845 u64 runtime;
846
847 raw_spin_lock(&rt_rq->rt_runtime_lock);
848 if (rt_rq->rt_throttled)
849 balance_runtime(rt_rq);
850 runtime = rt_rq->rt_runtime;
851 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
852 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
853 rt_rq->rt_throttled = 0;
854 enqueue = 1;
855
856 /*
857 * When we're idle and a woken (rt) task is
858 * throttled check_preempt_curr() will set
859 * skip_update and the time between the wakeup
860 * and this unthrottle will get accounted as
861 * 'runtime'.
862 */
863 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
864 rq_clock_skip_update(rq, false);
865 }
866 if (rt_rq->rt_time || rt_rq->rt_nr_running)
867 idle = 0;
868 raw_spin_unlock(&rt_rq->rt_runtime_lock);
869 } else if (rt_rq->rt_nr_running) {
870 idle = 0;
871 if (!rt_rq_throttled(rt_rq))
872 enqueue = 1;
873 }
874 if (rt_rq->rt_throttled)
875 throttled = 1;
876
877 if (enqueue)
878 sched_rt_rq_enqueue(rt_rq);
879 raw_spin_unlock(&rq->lock);
880 }
881
882 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
883 return 1;
884
885 return idle;
886}
887
888static inline int rt_se_prio(struct sched_rt_entity *rt_se)
889{
890#ifdef CONFIG_RT_GROUP_SCHED
891 struct rt_rq *rt_rq = group_rt_rq(rt_se);
892
893 if (rt_rq)
894 return rt_rq->highest_prio.curr;
895#endif
896
897 return rt_task_of(rt_se)->prio;
898}
899
900static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
901{
902 u64 runtime = sched_rt_runtime(rt_rq);
903
904 if (rt_rq->rt_throttled)
905 return rt_rq_throttled(rt_rq);
906
907 if (runtime >= sched_rt_period(rt_rq))
908 return 0;
909
910 balance_runtime(rt_rq);
911 runtime = sched_rt_runtime(rt_rq);
912 if (runtime == RUNTIME_INF)
913 return 0;
914
915 if (rt_rq->rt_time > runtime) {
916 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
917
918 /*
919 * Don't actually throttle groups that have no runtime assigned
920 * but accrue some time due to boosting.
921 */
922 if (likely(rt_b->rt_runtime)) {
923 rt_rq->rt_throttled = 1;
924 printk_deferred_once("sched: RT throttling activated\n");
925 } else {
926 /*
927 * In case we did anyway, make it go away,
928 * replenishment is a joke, since it will replenish us
929 * with exactly 0 ns.
930 */
931 rt_rq->rt_time = 0;
932 }
933
934 if (rt_rq_throttled(rt_rq)) {
935 sched_rt_rq_dequeue(rt_rq);
936 return 1;
937 }
938 }
939
940 return 0;
941}
942
943/*
944 * Update the current task's runtime statistics. Skip current tasks that
945 * are not in our scheduling class.
946 */
947static void update_curr_rt(struct rq *rq)
948{
949 struct task_struct *curr = rq->curr;
950 struct sched_rt_entity *rt_se = &curr->rt;
951 u64 delta_exec;
952
953 if (curr->sched_class != &rt_sched_class)
954 return;
955
956 /* Kick cpufreq (see the comment in linux/cpufreq.h). */
957 if (cpu_of(rq) == smp_processor_id())
958 cpufreq_trigger_update(rq_clock(rq));
959
960 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
961 if (unlikely((s64)delta_exec <= 0))
962 return;
963
964 schedstat_set(curr->se.statistics.exec_max,
965 max(curr->se.statistics.exec_max, delta_exec));
966
967 curr->se.sum_exec_runtime += delta_exec;
968 account_group_exec_runtime(curr, delta_exec);
969
970 curr->se.exec_start = rq_clock_task(rq);
971 cpuacct_charge(curr, delta_exec);
972
973 sched_rt_avg_update(rq, delta_exec);
974
975 if (!rt_bandwidth_enabled())
976 return;
977
978 for_each_sched_rt_entity(rt_se) {
979 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
980
981 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
982 raw_spin_lock(&rt_rq->rt_runtime_lock);
983 rt_rq->rt_time += delta_exec;
984 if (sched_rt_runtime_exceeded(rt_rq))
985 resched_curr(rq);
986 raw_spin_unlock(&rt_rq->rt_runtime_lock);
987 }
988 }
989}
990
991static void
992dequeue_top_rt_rq(struct rt_rq *rt_rq)
993{
994 struct rq *rq = rq_of_rt_rq(rt_rq);
995
996 BUG_ON(&rq->rt != rt_rq);
997
998 if (!rt_rq->rt_queued)
999 return;
1000
1001 BUG_ON(!rq->nr_running);
1002
1003 sub_nr_running(rq, rt_rq->rt_nr_running);
1004 rt_rq->rt_queued = 0;
1005}
1006
1007static void
1008enqueue_top_rt_rq(struct rt_rq *rt_rq)
1009{
1010 struct rq *rq = rq_of_rt_rq(rt_rq);
1011
1012 BUG_ON(&rq->rt != rt_rq);
1013
1014 if (rt_rq->rt_queued)
1015 return;
1016 if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1017 return;
1018
1019 add_nr_running(rq, rt_rq->rt_nr_running);
1020 rt_rq->rt_queued = 1;
1021}
1022
1023#if defined CONFIG_SMP
1024
1025static void
1026inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1027{
1028 struct rq *rq = rq_of_rt_rq(rt_rq);
1029
1030#ifdef CONFIG_RT_GROUP_SCHED
1031 /*
1032 * Change rq's cpupri only if rt_rq is the top queue.
1033 */
1034 if (&rq->rt != rt_rq)
1035 return;
1036#endif
1037 if (rq->online && prio < prev_prio)
1038 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1039}
1040
1041static void
1042dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1043{
1044 struct rq *rq = rq_of_rt_rq(rt_rq);
1045
1046#ifdef CONFIG_RT_GROUP_SCHED
1047 /*
1048 * Change rq's cpupri only if rt_rq is the top queue.
1049 */
1050 if (&rq->rt != rt_rq)
1051 return;
1052#endif
1053 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1054 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1055}
1056
1057#else /* CONFIG_SMP */
1058
1059static inline
1060void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1061static inline
1062void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1063
1064#endif /* CONFIG_SMP */
1065
1066#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1067static void
1068inc_rt_prio(struct rt_rq *rt_rq, int prio)
1069{
1070 int prev_prio = rt_rq->highest_prio.curr;
1071
1072 if (prio < prev_prio)
1073 rt_rq->highest_prio.curr = prio;
1074
1075 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1076}
1077
1078static void
1079dec_rt_prio(struct rt_rq *rt_rq, int prio)
1080{
1081 int prev_prio = rt_rq->highest_prio.curr;
1082
1083 if (rt_rq->rt_nr_running) {
1084
1085 WARN_ON(prio < prev_prio);
1086
1087 /*
1088 * This may have been our highest task, and therefore
1089 * we may have some recomputation to do
1090 */
1091 if (prio == prev_prio) {
1092 struct rt_prio_array *array = &rt_rq->active;
1093
1094 rt_rq->highest_prio.curr =
1095 sched_find_first_bit(array->bitmap);
1096 }
1097
1098 } else
1099 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1100
1101 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1102}
1103
1104#else
1105
1106static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1107static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1108
1109#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1110
1111#ifdef CONFIG_RT_GROUP_SCHED
1112
1113static void
1114inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1115{
1116 if (rt_se_boosted(rt_se))
1117 rt_rq->rt_nr_boosted++;
1118
1119 if (rt_rq->tg)
1120 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1121}
1122
1123static void
1124dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1125{
1126 if (rt_se_boosted(rt_se))
1127 rt_rq->rt_nr_boosted--;
1128
1129 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1130}
1131
1132#else /* CONFIG_RT_GROUP_SCHED */
1133
1134static void
1135inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1136{
1137 start_rt_bandwidth(&def_rt_bandwidth);
1138}
1139
1140static inline
1141void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1142
1143#endif /* CONFIG_RT_GROUP_SCHED */
1144
1145static inline
1146unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1147{
1148 struct rt_rq *group_rq = group_rt_rq(rt_se);
1149
1150 if (group_rq)
1151 return group_rq->rt_nr_running;
1152 else
1153 return 1;
1154}
1155
1156static inline
1157unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1158{
1159 struct rt_rq *group_rq = group_rt_rq(rt_se);
1160 struct task_struct *tsk;
1161
1162 if (group_rq)
1163 return group_rq->rr_nr_running;
1164
1165 tsk = rt_task_of(rt_se);
1166
1167 return (tsk->policy == SCHED_RR) ? 1 : 0;
1168}
1169
1170static inline
1171void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1172{
1173 int prio = rt_se_prio(rt_se);
1174
1175 WARN_ON(!rt_prio(prio));
1176 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1177 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1178
1179 inc_rt_prio(rt_rq, prio);
1180 inc_rt_migration(rt_se, rt_rq);
1181 inc_rt_group(rt_se, rt_rq);
1182}
1183
1184static inline
1185void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1186{
1187 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1188 WARN_ON(!rt_rq->rt_nr_running);
1189 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1190 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1191
1192 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1193 dec_rt_migration(rt_se, rt_rq);
1194 dec_rt_group(rt_se, rt_rq);
1195}
1196
1197/*
1198 * Change rt_se->run_list location unless SAVE && !MOVE
1199 *
1200 * assumes ENQUEUE/DEQUEUE flags match
1201 */
1202static inline bool move_entity(unsigned int flags)
1203{
1204 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1205 return false;
1206
1207 return true;
1208}
1209
1210static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1211{
1212 list_del_init(&rt_se->run_list);
1213
1214 if (list_empty(array->queue + rt_se_prio(rt_se)))
1215 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1216
1217 rt_se->on_list = 0;
1218}
1219
1220static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1221{
1222 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1223 struct rt_prio_array *array = &rt_rq->active;
1224 struct rt_rq *group_rq = group_rt_rq(rt_se);
1225 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1226
1227 /*
1228 * Don't enqueue the group if its throttled, or when empty.
1229 * The latter is a consequence of the former when a child group
1230 * get throttled and the current group doesn't have any other
1231 * active members.
1232 */
1233 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1234 if (rt_se->on_list)
1235 __delist_rt_entity(rt_se, array);
1236 return;
1237 }
1238
1239 if (move_entity(flags)) {
1240 WARN_ON_ONCE(rt_se->on_list);
1241 if (flags & ENQUEUE_HEAD)
1242 list_add(&rt_se->run_list, queue);
1243 else
1244 list_add_tail(&rt_se->run_list, queue);
1245
1246 __set_bit(rt_se_prio(rt_se), array->bitmap);
1247 rt_se->on_list = 1;
1248 }
1249 rt_se->on_rq = 1;
1250
1251 inc_rt_tasks(rt_se, rt_rq);
1252}
1253
1254static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1255{
1256 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1257 struct rt_prio_array *array = &rt_rq->active;
1258
1259 if (move_entity(flags)) {
1260 WARN_ON_ONCE(!rt_se->on_list);
1261 __delist_rt_entity(rt_se, array);
1262 }
1263 rt_se->on_rq = 0;
1264
1265 dec_rt_tasks(rt_se, rt_rq);
1266}
1267
1268/*
1269 * Because the prio of an upper entry depends on the lower
1270 * entries, we must remove entries top - down.
1271 */
1272static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1273{
1274 struct sched_rt_entity *back = NULL;
1275
1276 for_each_sched_rt_entity(rt_se) {
1277 rt_se->back = back;
1278 back = rt_se;
1279 }
1280
1281 dequeue_top_rt_rq(rt_rq_of_se(back));
1282
1283 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1284 if (on_rt_rq(rt_se))
1285 __dequeue_rt_entity(rt_se, flags);
1286 }
1287}
1288
1289static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1290{
1291 struct rq *rq = rq_of_rt_se(rt_se);
1292
1293 dequeue_rt_stack(rt_se, flags);
1294 for_each_sched_rt_entity(rt_se)
1295 __enqueue_rt_entity(rt_se, flags);
1296 enqueue_top_rt_rq(&rq->rt);
1297}
1298
1299static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1300{
1301 struct rq *rq = rq_of_rt_se(rt_se);
1302
1303 dequeue_rt_stack(rt_se, flags);
1304
1305 for_each_sched_rt_entity(rt_se) {
1306 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1307
1308 if (rt_rq && rt_rq->rt_nr_running)
1309 __enqueue_rt_entity(rt_se, flags);
1310 }
1311 enqueue_top_rt_rq(&rq->rt);
1312}
1313
1314/*
1315 * Adding/removing a task to/from a priority array:
1316 */
1317static void
1318enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1319{
1320 struct sched_rt_entity *rt_se = &p->rt;
1321
1322 if (flags & ENQUEUE_WAKEUP)
1323 rt_se->timeout = 0;
1324
1325 enqueue_rt_entity(rt_se, flags);
1326
1327 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1328 enqueue_pushable_task(rq, p);
1329}
1330
1331static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1332{
1333 struct sched_rt_entity *rt_se = &p->rt;
1334
1335 update_curr_rt(rq);
1336 dequeue_rt_entity(rt_se, flags);
1337
1338 dequeue_pushable_task(rq, p);
1339}
1340
1341/*
1342 * Put task to the head or the end of the run list without the overhead of
1343 * dequeue followed by enqueue.
1344 */
1345static void
1346requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1347{
1348 if (on_rt_rq(rt_se)) {
1349 struct rt_prio_array *array = &rt_rq->active;
1350 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1351
1352 if (head)
1353 list_move(&rt_se->run_list, queue);
1354 else
1355 list_move_tail(&rt_se->run_list, queue);
1356 }
1357}
1358
1359static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1360{
1361 struct sched_rt_entity *rt_se = &p->rt;
1362 struct rt_rq *rt_rq;
1363
1364 for_each_sched_rt_entity(rt_se) {
1365 rt_rq = rt_rq_of_se(rt_se);
1366 requeue_rt_entity(rt_rq, rt_se, head);
1367 }
1368}
1369
1370static void yield_task_rt(struct rq *rq)
1371{
1372 requeue_task_rt(rq, rq->curr, 0);
1373}
1374
1375#ifdef CONFIG_SMP
1376static int find_lowest_rq(struct task_struct *task);
1377
1378static int
1379select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1380{
1381 struct task_struct *curr;
1382 struct rq *rq;
1383
1384 /* For anything but wake ups, just return the task_cpu */
1385 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1386 goto out;
1387
1388 rq = cpu_rq(cpu);
1389
1390 rcu_read_lock();
1391 curr = READ_ONCE(rq->curr); /* unlocked access */
1392
1393 /*
1394 * If the current task on @p's runqueue is an RT task, then
1395 * try to see if we can wake this RT task up on another
1396 * runqueue. Otherwise simply start this RT task
1397 * on its current runqueue.
1398 *
1399 * We want to avoid overloading runqueues. If the woken
1400 * task is a higher priority, then it will stay on this CPU
1401 * and the lower prio task should be moved to another CPU.
1402 * Even though this will probably make the lower prio task
1403 * lose its cache, we do not want to bounce a higher task
1404 * around just because it gave up its CPU, perhaps for a
1405 * lock?
1406 *
1407 * For equal prio tasks, we just let the scheduler sort it out.
1408 *
1409 * Otherwise, just let it ride on the affined RQ and the
1410 * post-schedule router will push the preempted task away
1411 *
1412 * This test is optimistic, if we get it wrong the load-balancer
1413 * will have to sort it out.
1414 */
1415 if (curr && unlikely(rt_task(curr)) &&
1416 (curr->nr_cpus_allowed < 2 ||
1417 curr->prio <= p->prio)) {
1418 int target = find_lowest_rq(p);
1419
1420 /*
1421 * Don't bother moving it if the destination CPU is
1422 * not running a lower priority task.
1423 */
1424 if (target != -1 &&
1425 p->prio < cpu_rq(target)->rt.highest_prio.curr)
1426 cpu = target;
1427 }
1428 rcu_read_unlock();
1429
1430out:
1431 return cpu;
1432}
1433
1434static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1435{
1436 /*
1437 * Current can't be migrated, useless to reschedule,
1438 * let's hope p can move out.
1439 */
1440 if (rq->curr->nr_cpus_allowed == 1 ||
1441 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1442 return;
1443
1444 /*
1445 * p is migratable, so let's not schedule it and
1446 * see if it is pushed or pulled somewhere else.
1447 */
1448 if (p->nr_cpus_allowed != 1
1449 && cpupri_find(&rq->rd->cpupri, p, NULL))
1450 return;
1451
1452 /*
1453 * There appears to be other cpus that can accept
1454 * current and none to run 'p', so lets reschedule
1455 * to try and push current away:
1456 */
1457 requeue_task_rt(rq, p, 1);
1458 resched_curr(rq);
1459}
1460
1461#endif /* CONFIG_SMP */
1462
1463/*
1464 * Preempt the current task with a newly woken task if needed:
1465 */
1466static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1467{
1468 if (p->prio < rq->curr->prio) {
1469 resched_curr(rq);
1470 return;
1471 }
1472
1473#ifdef CONFIG_SMP
1474 /*
1475 * If:
1476 *
1477 * - the newly woken task is of equal priority to the current task
1478 * - the newly woken task is non-migratable while current is migratable
1479 * - current will be preempted on the next reschedule
1480 *
1481 * we should check to see if current can readily move to a different
1482 * cpu. If so, we will reschedule to allow the push logic to try
1483 * to move current somewhere else, making room for our non-migratable
1484 * task.
1485 */
1486 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1487 check_preempt_equal_prio(rq, p);
1488#endif
1489}
1490
1491static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1492 struct rt_rq *rt_rq)
1493{
1494 struct rt_prio_array *array = &rt_rq->active;
1495 struct sched_rt_entity *next = NULL;
1496 struct list_head *queue;
1497 int idx;
1498
1499 idx = sched_find_first_bit(array->bitmap);
1500 BUG_ON(idx >= MAX_RT_PRIO);
1501
1502 queue = array->queue + idx;
1503 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1504
1505 return next;
1506}
1507
1508static struct task_struct *_pick_next_task_rt(struct rq *rq)
1509{
1510 struct sched_rt_entity *rt_se;
1511 struct task_struct *p;
1512 struct rt_rq *rt_rq = &rq->rt;
1513
1514 do {
1515 rt_se = pick_next_rt_entity(rq, rt_rq);
1516 BUG_ON(!rt_se);
1517 rt_rq = group_rt_rq(rt_se);
1518 } while (rt_rq);
1519
1520 p = rt_task_of(rt_se);
1521 p->se.exec_start = rq_clock_task(rq);
1522
1523 return p;
1524}
1525
1526static struct task_struct *
1527pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1528{
1529 struct task_struct *p;
1530 struct rt_rq *rt_rq = &rq->rt;
1531
1532 if (need_pull_rt_task(rq, prev)) {
1533 /*
1534 * This is OK, because current is on_cpu, which avoids it being
1535 * picked for load-balance and preemption/IRQs are still
1536 * disabled avoiding further scheduler activity on it and we're
1537 * being very careful to re-start the picking loop.
1538 */
1539 lockdep_unpin_lock(&rq->lock);
1540 pull_rt_task(rq);
1541 lockdep_pin_lock(&rq->lock);
1542 /*
1543 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1544 * means a dl or stop task can slip in, in which case we need
1545 * to re-start task selection.
1546 */
1547 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1548 rq->dl.dl_nr_running))
1549 return RETRY_TASK;
1550 }
1551
1552 /*
1553 * We may dequeue prev's rt_rq in put_prev_task().
1554 * So, we update time before rt_nr_running check.
1555 */
1556 if (prev->sched_class == &rt_sched_class)
1557 update_curr_rt(rq);
1558
1559 if (!rt_rq->rt_queued)
1560 return NULL;
1561
1562 put_prev_task(rq, prev);
1563
1564 p = _pick_next_task_rt(rq);
1565
1566 /* The running task is never eligible for pushing */
1567 dequeue_pushable_task(rq, p);
1568
1569 queue_push_tasks(rq);
1570
1571 return p;
1572}
1573
1574static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1575{
1576 update_curr_rt(rq);
1577
1578 /*
1579 * The previous task needs to be made eligible for pushing
1580 * if it is still active
1581 */
1582 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1583 enqueue_pushable_task(rq, p);
1584}
1585
1586#ifdef CONFIG_SMP
1587
1588/* Only try algorithms three times */
1589#define RT_MAX_TRIES 3
1590
1591static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1592{
1593 if (!task_running(rq, p) &&
1594 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1595 return 1;
1596 return 0;
1597}
1598
1599/*
1600 * Return the highest pushable rq's task, which is suitable to be executed
1601 * on the cpu, NULL otherwise
1602 */
1603static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1604{
1605 struct plist_head *head = &rq->rt.pushable_tasks;
1606 struct task_struct *p;
1607
1608 if (!has_pushable_tasks(rq))
1609 return NULL;
1610
1611 plist_for_each_entry(p, head, pushable_tasks) {
1612 if (pick_rt_task(rq, p, cpu))
1613 return p;
1614 }
1615
1616 return NULL;
1617}
1618
1619static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1620
1621static int find_lowest_rq(struct task_struct *task)
1622{
1623 struct sched_domain *sd;
1624 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1625 int this_cpu = smp_processor_id();
1626 int cpu = task_cpu(task);
1627
1628 /* Make sure the mask is initialized first */
1629 if (unlikely(!lowest_mask))
1630 return -1;
1631
1632 if (task->nr_cpus_allowed == 1)
1633 return -1; /* No other targets possible */
1634
1635 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1636 return -1; /* No targets found */
1637
1638 /*
1639 * At this point we have built a mask of cpus representing the
1640 * lowest priority tasks in the system. Now we want to elect
1641 * the best one based on our affinity and topology.
1642 *
1643 * We prioritize the last cpu that the task executed on since
1644 * it is most likely cache-hot in that location.
1645 */
1646 if (cpumask_test_cpu(cpu, lowest_mask))
1647 return cpu;
1648
1649 /*
1650 * Otherwise, we consult the sched_domains span maps to figure
1651 * out which cpu is logically closest to our hot cache data.
1652 */
1653 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1654 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1655
1656 rcu_read_lock();
1657 for_each_domain(cpu, sd) {
1658 if (sd->flags & SD_WAKE_AFFINE) {
1659 int best_cpu;
1660
1661 /*
1662 * "this_cpu" is cheaper to preempt than a
1663 * remote processor.
1664 */
1665 if (this_cpu != -1 &&
1666 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1667 rcu_read_unlock();
1668 return this_cpu;
1669 }
1670
1671 best_cpu = cpumask_first_and(lowest_mask,
1672 sched_domain_span(sd));
1673 if (best_cpu < nr_cpu_ids) {
1674 rcu_read_unlock();
1675 return best_cpu;
1676 }
1677 }
1678 }
1679 rcu_read_unlock();
1680
1681 /*
1682 * And finally, if there were no matches within the domains
1683 * just give the caller *something* to work with from the compatible
1684 * locations.
1685 */
1686 if (this_cpu != -1)
1687 return this_cpu;
1688
1689 cpu = cpumask_any(lowest_mask);
1690 if (cpu < nr_cpu_ids)
1691 return cpu;
1692 return -1;
1693}
1694
1695/* Will lock the rq it finds */
1696static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1697{
1698 struct rq *lowest_rq = NULL;
1699 int tries;
1700 int cpu;
1701
1702 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1703 cpu = find_lowest_rq(task);
1704
1705 if ((cpu == -1) || (cpu == rq->cpu))
1706 break;
1707
1708 lowest_rq = cpu_rq(cpu);
1709
1710 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1711 /*
1712 * Target rq has tasks of equal or higher priority,
1713 * retrying does not release any lock and is unlikely
1714 * to yield a different result.
1715 */
1716 lowest_rq = NULL;
1717 break;
1718 }
1719
1720 /* if the prio of this runqueue changed, try again */
1721 if (double_lock_balance(rq, lowest_rq)) {
1722 /*
1723 * We had to unlock the run queue. In
1724 * the mean time, task could have
1725 * migrated already or had its affinity changed.
1726 * Also make sure that it wasn't scheduled on its rq.
1727 */
1728 if (unlikely(task_rq(task) != rq ||
1729 !cpumask_test_cpu(lowest_rq->cpu,
1730 tsk_cpus_allowed(task)) ||
1731 task_running(rq, task) ||
1732 !rt_task(task) ||
1733 !task_on_rq_queued(task))) {
1734
1735 double_unlock_balance(rq, lowest_rq);
1736 lowest_rq = NULL;
1737 break;
1738 }
1739 }
1740
1741 /* If this rq is still suitable use it. */
1742 if (lowest_rq->rt.highest_prio.curr > task->prio)
1743 break;
1744
1745 /* try again */
1746 double_unlock_balance(rq, lowest_rq);
1747 lowest_rq = NULL;
1748 }
1749
1750 return lowest_rq;
1751}
1752
1753static struct task_struct *pick_next_pushable_task(struct rq *rq)
1754{
1755 struct task_struct *p;
1756
1757 if (!has_pushable_tasks(rq))
1758 return NULL;
1759
1760 p = plist_first_entry(&rq->rt.pushable_tasks,
1761 struct task_struct, pushable_tasks);
1762
1763 BUG_ON(rq->cpu != task_cpu(p));
1764 BUG_ON(task_current(rq, p));
1765 BUG_ON(p->nr_cpus_allowed <= 1);
1766
1767 BUG_ON(!task_on_rq_queued(p));
1768 BUG_ON(!rt_task(p));
1769
1770 return p;
1771}
1772
1773/*
1774 * If the current CPU has more than one RT task, see if the non
1775 * running task can migrate over to a CPU that is running a task
1776 * of lesser priority.
1777 */
1778static int push_rt_task(struct rq *rq)
1779{
1780 struct task_struct *next_task;
1781 struct rq *lowest_rq;
1782 int ret = 0;
1783
1784 if (!rq->rt.overloaded)
1785 return 0;
1786
1787 next_task = pick_next_pushable_task(rq);
1788 if (!next_task)
1789 return 0;
1790
1791retry:
1792 if (unlikely(next_task == rq->curr)) {
1793 WARN_ON(1);
1794 return 0;
1795 }
1796
1797 /*
1798 * It's possible that the next_task slipped in of
1799 * higher priority than current. If that's the case
1800 * just reschedule current.
1801 */
1802 if (unlikely(next_task->prio < rq->curr->prio)) {
1803 resched_curr(rq);
1804 return 0;
1805 }
1806
1807 /* We might release rq lock */
1808 get_task_struct(next_task);
1809
1810 /* find_lock_lowest_rq locks the rq if found */
1811 lowest_rq = find_lock_lowest_rq(next_task, rq);
1812 if (!lowest_rq) {
1813 struct task_struct *task;
1814 /*
1815 * find_lock_lowest_rq releases rq->lock
1816 * so it is possible that next_task has migrated.
1817 *
1818 * We need to make sure that the task is still on the same
1819 * run-queue and is also still the next task eligible for
1820 * pushing.
1821 */
1822 task = pick_next_pushable_task(rq);
1823 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1824 /*
1825 * The task hasn't migrated, and is still the next
1826 * eligible task, but we failed to find a run-queue
1827 * to push it to. Do not retry in this case, since
1828 * other cpus will pull from us when ready.
1829 */
1830 goto out;
1831 }
1832
1833 if (!task)
1834 /* No more tasks, just exit */
1835 goto out;
1836
1837 /*
1838 * Something has shifted, try again.
1839 */
1840 put_task_struct(next_task);
1841 next_task = task;
1842 goto retry;
1843 }
1844
1845 deactivate_task(rq, next_task, 0);
1846 set_task_cpu(next_task, lowest_rq->cpu);
1847 activate_task(lowest_rq, next_task, 0);
1848 ret = 1;
1849
1850 resched_curr(lowest_rq);
1851
1852 double_unlock_balance(rq, lowest_rq);
1853
1854out:
1855 put_task_struct(next_task);
1856
1857 return ret;
1858}
1859
1860static void push_rt_tasks(struct rq *rq)
1861{
1862 /* push_rt_task will return true if it moved an RT */
1863 while (push_rt_task(rq))
1864 ;
1865}
1866
1867#ifdef HAVE_RT_PUSH_IPI
1868/*
1869 * The search for the next cpu always starts at rq->cpu and ends
1870 * when we reach rq->cpu again. It will never return rq->cpu.
1871 * This returns the next cpu to check, or nr_cpu_ids if the loop
1872 * is complete.
1873 *
1874 * rq->rt.push_cpu holds the last cpu returned by this function,
1875 * or if this is the first instance, it must hold rq->cpu.
1876 */
1877static int rto_next_cpu(struct rq *rq)
1878{
1879 int prev_cpu = rq->rt.push_cpu;
1880 int cpu;
1881
1882 cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
1883
1884 /*
1885 * If the previous cpu is less than the rq's CPU, then it already
1886 * passed the end of the mask, and has started from the beginning.
1887 * We end if the next CPU is greater or equal to rq's CPU.
1888 */
1889 if (prev_cpu < rq->cpu) {
1890 if (cpu >= rq->cpu)
1891 return nr_cpu_ids;
1892
1893 } else if (cpu >= nr_cpu_ids) {
1894 /*
1895 * We passed the end of the mask, start at the beginning.
1896 * If the result is greater or equal to the rq's CPU, then
1897 * the loop is finished.
1898 */
1899 cpu = cpumask_first(rq->rd->rto_mask);
1900 if (cpu >= rq->cpu)
1901 return nr_cpu_ids;
1902 }
1903 rq->rt.push_cpu = cpu;
1904
1905 /* Return cpu to let the caller know if the loop is finished or not */
1906 return cpu;
1907}
1908
1909static int find_next_push_cpu(struct rq *rq)
1910{
1911 struct rq *next_rq;
1912 int cpu;
1913
1914 while (1) {
1915 cpu = rto_next_cpu(rq);
1916 if (cpu >= nr_cpu_ids)
1917 break;
1918 next_rq = cpu_rq(cpu);
1919
1920 /* Make sure the next rq can push to this rq */
1921 if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
1922 break;
1923 }
1924
1925 return cpu;
1926}
1927
1928#define RT_PUSH_IPI_EXECUTING 1
1929#define RT_PUSH_IPI_RESTART 2
1930
1931static void tell_cpu_to_push(struct rq *rq)
1932{
1933 int cpu;
1934
1935 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1936 raw_spin_lock(&rq->rt.push_lock);
1937 /* Make sure it's still executing */
1938 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1939 /*
1940 * Tell the IPI to restart the loop as things have
1941 * changed since it started.
1942 */
1943 rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
1944 raw_spin_unlock(&rq->rt.push_lock);
1945 return;
1946 }
1947 raw_spin_unlock(&rq->rt.push_lock);
1948 }
1949
1950 /* When here, there's no IPI going around */
1951
1952 rq->rt.push_cpu = rq->cpu;
1953 cpu = find_next_push_cpu(rq);
1954 if (cpu >= nr_cpu_ids)
1955 return;
1956
1957 rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
1958
1959 irq_work_queue_on(&rq->rt.push_work, cpu);
1960}
1961
1962/* Called from hardirq context */
1963static void try_to_push_tasks(void *arg)
1964{
1965 struct rt_rq *rt_rq = arg;
1966 struct rq *rq, *src_rq;
1967 int this_cpu;
1968 int cpu;
1969
1970 this_cpu = rt_rq->push_cpu;
1971
1972 /* Paranoid check */
1973 BUG_ON(this_cpu != smp_processor_id());
1974
1975 rq = cpu_rq(this_cpu);
1976 src_rq = rq_of_rt_rq(rt_rq);
1977
1978again:
1979 if (has_pushable_tasks(rq)) {
1980 raw_spin_lock(&rq->lock);
1981 push_rt_task(rq);
1982 raw_spin_unlock(&rq->lock);
1983 }
1984
1985 /* Pass the IPI to the next rt overloaded queue */
1986 raw_spin_lock(&rt_rq->push_lock);
1987 /*
1988 * If the source queue changed since the IPI went out,
1989 * we need to restart the search from that CPU again.
1990 */
1991 if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
1992 rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
1993 rt_rq->push_cpu = src_rq->cpu;
1994 }
1995
1996 cpu = find_next_push_cpu(src_rq);
1997
1998 if (cpu >= nr_cpu_ids)
1999 rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
2000 raw_spin_unlock(&rt_rq->push_lock);
2001
2002 if (cpu >= nr_cpu_ids)
2003 return;
2004
2005 /*
2006 * It is possible that a restart caused this CPU to be
2007 * chosen again. Don't bother with an IPI, just see if we
2008 * have more to push.
2009 */
2010 if (unlikely(cpu == rq->cpu))
2011 goto again;
2012
2013 /* Try the next RT overloaded CPU */
2014 irq_work_queue_on(&rt_rq->push_work, cpu);
2015}
2016
2017static void push_irq_work_func(struct irq_work *work)
2018{
2019 struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
2020
2021 try_to_push_tasks(rt_rq);
2022}
2023#endif /* HAVE_RT_PUSH_IPI */
2024
2025static void pull_rt_task(struct rq *this_rq)
2026{
2027 int this_cpu = this_rq->cpu, cpu;
2028 bool resched = false;
2029 struct task_struct *p;
2030 struct rq *src_rq;
2031
2032 if (likely(!rt_overloaded(this_rq)))
2033 return;
2034
2035 /*
2036 * Match the barrier from rt_set_overloaded; this guarantees that if we
2037 * see overloaded we must also see the rto_mask bit.
2038 */
2039 smp_rmb();
2040
2041#ifdef HAVE_RT_PUSH_IPI
2042 if (sched_feat(RT_PUSH_IPI)) {
2043 tell_cpu_to_push(this_rq);
2044 return;
2045 }
2046#endif
2047
2048 for_each_cpu(cpu, this_rq->rd->rto_mask) {
2049 if (this_cpu == cpu)
2050 continue;
2051
2052 src_rq = cpu_rq(cpu);
2053
2054 /*
2055 * Don't bother taking the src_rq->lock if the next highest
2056 * task is known to be lower-priority than our current task.
2057 * This may look racy, but if this value is about to go
2058 * logically higher, the src_rq will push this task away.
2059 * And if its going logically lower, we do not care
2060 */
2061 if (src_rq->rt.highest_prio.next >=
2062 this_rq->rt.highest_prio.curr)
2063 continue;
2064
2065 /*
2066 * We can potentially drop this_rq's lock in
2067 * double_lock_balance, and another CPU could
2068 * alter this_rq
2069 */
2070 double_lock_balance(this_rq, src_rq);
2071
2072 /*
2073 * We can pull only a task, which is pushable
2074 * on its rq, and no others.
2075 */
2076 p = pick_highest_pushable_task(src_rq, this_cpu);
2077
2078 /*
2079 * Do we have an RT task that preempts
2080 * the to-be-scheduled task?
2081 */
2082 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2083 WARN_ON(p == src_rq->curr);
2084 WARN_ON(!task_on_rq_queued(p));
2085
2086 /*
2087 * There's a chance that p is higher in priority
2088 * than what's currently running on its cpu.
2089 * This is just that p is wakeing up and hasn't
2090 * had a chance to schedule. We only pull
2091 * p if it is lower in priority than the
2092 * current task on the run queue
2093 */
2094 if (p->prio < src_rq->curr->prio)
2095 goto skip;
2096
2097 resched = true;
2098
2099 deactivate_task(src_rq, p, 0);
2100 set_task_cpu(p, this_cpu);
2101 activate_task(this_rq, p, 0);
2102 /*
2103 * We continue with the search, just in
2104 * case there's an even higher prio task
2105 * in another runqueue. (low likelihood
2106 * but possible)
2107 */
2108 }
2109skip:
2110 double_unlock_balance(this_rq, src_rq);
2111 }
2112
2113 if (resched)
2114 resched_curr(this_rq);
2115}
2116
2117/*
2118 * If we are not running and we are not going to reschedule soon, we should
2119 * try to push tasks away now
2120 */
2121static void task_woken_rt(struct rq *rq, struct task_struct *p)
2122{
2123 if (!task_running(rq, p) &&
2124 !test_tsk_need_resched(rq->curr) &&
2125 p->nr_cpus_allowed > 1 &&
2126 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2127 (rq->curr->nr_cpus_allowed < 2 ||
2128 rq->curr->prio <= p->prio))
2129 push_rt_tasks(rq);
2130}
2131
2132/* Assumes rq->lock is held */
2133static void rq_online_rt(struct rq *rq)
2134{
2135 if (rq->rt.overloaded)
2136 rt_set_overload(rq);
2137
2138 __enable_runtime(rq);
2139
2140 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2141}
2142
2143/* Assumes rq->lock is held */
2144static void rq_offline_rt(struct rq *rq)
2145{
2146 if (rq->rt.overloaded)
2147 rt_clear_overload(rq);
2148
2149 __disable_runtime(rq);
2150
2151 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2152}
2153
2154/*
2155 * When switch from the rt queue, we bring ourselves to a position
2156 * that we might want to pull RT tasks from other runqueues.
2157 */
2158static void switched_from_rt(struct rq *rq, struct task_struct *p)
2159{
2160 /*
2161 * If there are other RT tasks then we will reschedule
2162 * and the scheduling of the other RT tasks will handle
2163 * the balancing. But if we are the last RT task
2164 * we may need to handle the pulling of RT tasks
2165 * now.
2166 */
2167 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2168 return;
2169
2170 queue_pull_task(rq);
2171}
2172
2173void __init init_sched_rt_class(void)
2174{
2175 unsigned int i;
2176
2177 for_each_possible_cpu(i) {
2178 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2179 GFP_KERNEL, cpu_to_node(i));
2180 }
2181}
2182#endif /* CONFIG_SMP */
2183
2184/*
2185 * When switching a task to RT, we may overload the runqueue
2186 * with RT tasks. In this case we try to push them off to
2187 * other runqueues.
2188 */
2189static void switched_to_rt(struct rq *rq, struct task_struct *p)
2190{
2191 /*
2192 * If we are already running, then there's nothing
2193 * that needs to be done. But if we are not running
2194 * we may need to preempt the current running task.
2195 * If that current running task is also an RT task
2196 * then see if we can move to another run queue.
2197 */
2198 if (task_on_rq_queued(p) && rq->curr != p) {
2199#ifdef CONFIG_SMP
2200 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2201 queue_push_tasks(rq);
2202#else
2203 if (p->prio < rq->curr->prio)
2204 resched_curr(rq);
2205#endif /* CONFIG_SMP */
2206 }
2207}
2208
2209/*
2210 * Priority of the task has changed. This may cause
2211 * us to initiate a push or pull.
2212 */
2213static void
2214prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2215{
2216 if (!task_on_rq_queued(p))
2217 return;
2218
2219 if (rq->curr == p) {
2220#ifdef CONFIG_SMP
2221 /*
2222 * If our priority decreases while running, we
2223 * may need to pull tasks to this runqueue.
2224 */
2225 if (oldprio < p->prio)
2226 queue_pull_task(rq);
2227
2228 /*
2229 * If there's a higher priority task waiting to run
2230 * then reschedule.
2231 */
2232 if (p->prio > rq->rt.highest_prio.curr)
2233 resched_curr(rq);
2234#else
2235 /* For UP simply resched on drop of prio */
2236 if (oldprio < p->prio)
2237 resched_curr(rq);
2238#endif /* CONFIG_SMP */
2239 } else {
2240 /*
2241 * This task is not running, but if it is
2242 * greater than the current running task
2243 * then reschedule.
2244 */
2245 if (p->prio < rq->curr->prio)
2246 resched_curr(rq);
2247 }
2248}
2249
2250static void watchdog(struct rq *rq, struct task_struct *p)
2251{
2252 unsigned long soft, hard;
2253
2254 /* max may change after cur was read, this will be fixed next tick */
2255 soft = task_rlimit(p, RLIMIT_RTTIME);
2256 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2257
2258 if (soft != RLIM_INFINITY) {
2259 unsigned long next;
2260
2261 if (p->rt.watchdog_stamp != jiffies) {
2262 p->rt.timeout++;
2263 p->rt.watchdog_stamp = jiffies;
2264 }
2265
2266 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2267 if (p->rt.timeout > next)
2268 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2269 }
2270}
2271
2272static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2273{
2274 struct sched_rt_entity *rt_se = &p->rt;
2275
2276 update_curr_rt(rq);
2277
2278 watchdog(rq, p);
2279
2280 /*
2281 * RR tasks need a special form of timeslice management.
2282 * FIFO tasks have no timeslices.
2283 */
2284 if (p->policy != SCHED_RR)
2285 return;
2286
2287 if (--p->rt.time_slice)
2288 return;
2289
2290 p->rt.time_slice = sched_rr_timeslice;
2291
2292 /*
2293 * Requeue to the end of queue if we (and all of our ancestors) are not
2294 * the only element on the queue
2295 */
2296 for_each_sched_rt_entity(rt_se) {
2297 if (rt_se->run_list.prev != rt_se->run_list.next) {
2298 requeue_task_rt(rq, p, 0);
2299 resched_curr(rq);
2300 return;
2301 }
2302 }
2303}
2304
2305static void set_curr_task_rt(struct rq *rq)
2306{
2307 struct task_struct *p = rq->curr;
2308
2309 p->se.exec_start = rq_clock_task(rq);
2310
2311 /* The running task is never eligible for pushing */
2312 dequeue_pushable_task(rq, p);
2313}
2314
2315static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2316{
2317 /*
2318 * Time slice is 0 for SCHED_FIFO tasks
2319 */
2320 if (task->policy == SCHED_RR)
2321 return sched_rr_timeslice;
2322 else
2323 return 0;
2324}
2325
2326const struct sched_class rt_sched_class = {
2327 .next = &fair_sched_class,
2328 .enqueue_task = enqueue_task_rt,
2329 .dequeue_task = dequeue_task_rt,
2330 .yield_task = yield_task_rt,
2331
2332 .check_preempt_curr = check_preempt_curr_rt,
2333
2334 .pick_next_task = pick_next_task_rt,
2335 .put_prev_task = put_prev_task_rt,
2336
2337#ifdef CONFIG_SMP
2338 .select_task_rq = select_task_rq_rt,
2339
2340 .set_cpus_allowed = set_cpus_allowed_common,
2341 .rq_online = rq_online_rt,
2342 .rq_offline = rq_offline_rt,
2343 .task_woken = task_woken_rt,
2344 .switched_from = switched_from_rt,
2345#endif
2346
2347 .set_curr_task = set_curr_task_rt,
2348 .task_tick = task_tick_rt,
2349
2350 .get_rr_interval = get_rr_interval_rt,
2351
2352 .prio_changed = prio_changed_rt,
2353 .switched_to = switched_to_rt,
2354
2355 .update_curr = update_curr_rt,
2356};
2357
2358#ifdef CONFIG_SCHED_DEBUG
2359extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2360
2361void print_rt_stats(struct seq_file *m, int cpu)
2362{
2363 rt_rq_iter_t iter;
2364 struct rt_rq *rt_rq;
2365
2366 rcu_read_lock();
2367 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2368 print_rt_rq(m, cpu, rt_rq);
2369 rcu_read_unlock();
2370}
2371#endif /* CONFIG_SCHED_DEBUG */