1/*
2 * Deadline Scheduling Class (SCHED_DEADLINE)
3 *
4 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
5 *
 6 * Tasks that periodically execute their instances for less than their
 7 * runtime won't miss any of their deadlines.
 8 * Tasks that are not periodic or sporadic or that try to execute more
9 * than their reserved bandwidth will be slowed down (and may potentially
10 * miss some of their deadlines), and won't affect any other task.
11 *
12 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
13 * Juri Lelli <juri.lelli@gmail.com>,
14 * Michael Trimarchi <michael@amarulasolutions.com>,
15 * Fabio Checconi <fchecconi@gmail.com>
16 */
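/*
 * Illustrative usage sketch (not part of the scheduler code itself): a
 * userspace task typically asks for a -deadline reservation via the
 * sched_setattr() syscall, passing runtime, deadline and period in
 * nanoseconds. The numbers below are only an example (10ms of runtime
 * every 100ms, i.e. a 10% bandwidth reservation):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	= 100 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *	syscall(SYS_sched_setattr, 0, &attr, 0);	/* pid 0 == calling thread */
 *
 * See Documentation/scheduler/sched-deadline.txt for the full interface.
 */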
17#include "sched.h"
18
19#include <linux/slab.h>
20
21struct dl_bandwidth def_dl_bandwidth;
22
23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24{
25 return container_of(dl_se, struct task_struct, dl);
26}
27
28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29{
30 return container_of(dl_rq, struct rq, dl);
31}
32
33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34{
35 struct task_struct *p = dl_task_of(dl_se);
36 struct rq *rq = task_rq(p);
37
38 return &rq->dl;
39}
40
41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42{
43 return !RB_EMPTY_NODE(&dl_se->rb_node);
44}
45
46static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
47{
48 struct sched_dl_entity *dl_se = &p->dl;
49
50 return dl_rq->rb_leftmost == &dl_se->rb_node;
51}
52
53void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
54{
55 raw_spin_lock_init(&dl_b->dl_runtime_lock);
56 dl_b->dl_period = period;
57 dl_b->dl_runtime = runtime;
58}
59
60void init_dl_bw(struct dl_bw *dl_b)
61{
62 raw_spin_lock_init(&dl_b->lock);
63 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
64 if (global_rt_runtime() == RUNTIME_INF)
65 dl_b->bw = -1;
66 else
67 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
68 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
69 dl_b->total_bw = 0;
70}
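/*
 * Example for init_dl_bw() above (illustrative numbers): with the default
 * rt period of 1s and rt runtime of 0.95s, to_ratio() expresses the
 * bandwidth as a 20-bit fixed point fraction, so dl_b->bw ends up at
 * roughly 0.95 << 20 ~= 996147.
 */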
71
72void init_dl_rq(struct dl_rq *dl_rq)
73{
74 dl_rq->rb_root = RB_ROOT;
75
76#ifdef CONFIG_SMP
77 /* zero means no -deadline tasks */
78 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
79
80 dl_rq->dl_nr_migratory = 0;
81 dl_rq->overloaded = 0;
82 dl_rq->pushable_dl_tasks_root = RB_ROOT;
83#else
84 init_dl_bw(&dl_rq->dl_bw);
85#endif
86}
87
88#ifdef CONFIG_SMP
89
90static inline int dl_overloaded(struct rq *rq)
91{
92 return atomic_read(&rq->rd->dlo_count);
93}
94
95static inline void dl_set_overload(struct rq *rq)
96{
97 if (!rq->online)
98 return;
99
100 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
101 /*
102 * Must be visible before the overload count is
103 * set (as in sched_rt.c).
104 *
105 * Matched by the barrier in pull_dl_task().
106 */
107 smp_wmb();
108 atomic_inc(&rq->rd->dlo_count);
109}
110
111static inline void dl_clear_overload(struct rq *rq)
112{
113 if (!rq->online)
114 return;
115
116 atomic_dec(&rq->rd->dlo_count);
117 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
118}
119
120static void update_dl_migration(struct dl_rq *dl_rq)
121{
122 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
123 if (!dl_rq->overloaded) {
124 dl_set_overload(rq_of_dl_rq(dl_rq));
125 dl_rq->overloaded = 1;
126 }
127 } else if (dl_rq->overloaded) {
128 dl_clear_overload(rq_of_dl_rq(dl_rq));
129 dl_rq->overloaded = 0;
130 }
131}
132
133static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
134{
135 struct task_struct *p = dl_task_of(dl_se);
136
137 if (p->nr_cpus_allowed > 1)
138 dl_rq->dl_nr_migratory++;
139
140 update_dl_migration(dl_rq);
141}
142
143static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
144{
145 struct task_struct *p = dl_task_of(dl_se);
146
147 if (p->nr_cpus_allowed > 1)
148 dl_rq->dl_nr_migratory--;
149
150 update_dl_migration(dl_rq);
151}
152
153/*
154 * The list of pushable -deadline task is not a plist, like in
155 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
156 */
157static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
158{
159 struct dl_rq *dl_rq = &rq->dl;
160 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
161 struct rb_node *parent = NULL;
162 struct task_struct *entry;
163 int leftmost = 1;
164
165 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
166
167 while (*link) {
168 parent = *link;
169 entry = rb_entry(parent, struct task_struct,
170 pushable_dl_tasks);
171 if (dl_entity_preempt(&p->dl, &entry->dl))
172 link = &parent->rb_left;
173 else {
174 link = &parent->rb_right;
175 leftmost = 0;
176 }
177 }
178
179 if (leftmost) {
180 dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
181 dl_rq->earliest_dl.next = p->dl.deadline;
182 }
183
184 rb_link_node(&p->pushable_dl_tasks, parent, link);
185 rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
186}
187
188static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
189{
190 struct dl_rq *dl_rq = &rq->dl;
191
192 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
193 return;
194
195 if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
196 struct rb_node *next_node;
197
198 next_node = rb_next(&p->pushable_dl_tasks);
199 dl_rq->pushable_dl_tasks_leftmost = next_node;
200 if (next_node) {
201 dl_rq->earliest_dl.next = rb_entry(next_node,
202 struct task_struct, pushable_dl_tasks)->dl.deadline;
203 }
204 }
205
206 rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
207 RB_CLEAR_NODE(&p->pushable_dl_tasks);
208}
209
210static inline int has_pushable_dl_tasks(struct rq *rq)
211{
212 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
213}
214
215static int push_dl_task(struct rq *rq);
216
217static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
218{
219 return dl_task(prev);
220}
221
222static DEFINE_PER_CPU(struct callback_head, dl_push_head);
223static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
224
225static void push_dl_tasks(struct rq *);
226static void pull_dl_task(struct rq *);
227
228static inline void queue_push_tasks(struct rq *rq)
229{
230 if (!has_pushable_dl_tasks(rq))
231 return;
232
233 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
234}
235
236static inline void queue_pull_task(struct rq *rq)
237{
238 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
239}
240
241static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
242
243static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
244{
245 struct rq *later_rq = NULL;
246 bool fallback = false;
247
248 later_rq = find_lock_later_rq(p, rq);
249
250 if (!later_rq) {
251 int cpu;
252
253 /*
254 * If we cannot preempt any rq, fall back to pick any
255 * online cpu.
256 */
257 fallback = true;
258 cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
259 if (cpu >= nr_cpu_ids) {
260 /*
261 * Failed to find any suitable cpu.
262 * The task will never come back!
263 */
264 BUG_ON(dl_bandwidth_enabled());
265
266 /*
267 * If admission control is disabled we
268 * try a little harder to let the task
269 * run.
270 */
271 cpu = cpumask_any(cpu_active_mask);
272 }
273 later_rq = cpu_rq(cpu);
274 double_lock_balance(rq, later_rq);
275 }
276
277 /*
278 * By now the task is replenished and enqueued; migrate it.
279 */
280 deactivate_task(rq, p, 0);
281 set_task_cpu(p, later_rq->cpu);
282 activate_task(later_rq, p, 0);
283
284 if (!fallback)
285 resched_curr(later_rq);
286
287 double_unlock_balance(later_rq, rq);
288
289 return later_rq;
290}
291
292#else
293
294static inline
295void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
296{
297}
298
299static inline
300void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
301{
302}
303
304static inline
305void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
306{
307}
308
309static inline
310void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
311{
312}
313
314static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
315{
316 return false;
317}
318
319static inline void pull_dl_task(struct rq *rq)
320{
321}
322
323static inline void queue_push_tasks(struct rq *rq)
324{
325}
326
327static inline void queue_pull_task(struct rq *rq)
328{
329}
330#endif /* CONFIG_SMP */
331
332static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
333static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
334static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
335 int flags);
336
337/*
338 * We are being explicitly informed that a new instance is starting,
339 * and this means that:
340 * - the absolute deadline of the entity has to be placed at
341 * current time + relative deadline;
342 * - the runtime of the entity has to be set to the maximum value.
343 *
344 * The capability of specifying such an event is useful whenever a -deadline
345 * entity wants to (try to!) synchronize its behaviour with the scheduler's,
346 * and to (try to!) reconcile itself with its own scheduling
347 * parameters.
348 */
349static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
350 struct sched_dl_entity *pi_se)
351{
352 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
353 struct rq *rq = rq_of_dl_rq(dl_rq);
354
355 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
356
357 /*
358 * We are racing with the deadline timer. So, do nothing because
359 * the deadline timer handler will take care of properly recharging
360 * the runtime and postponing the deadline
361 */
362 if (dl_se->dl_throttled)
363 return;
364
365 /*
366 * We use the regular wall clock time to set deadlines in the
367 * future; in fact, we must consider execution overheads (time
368 * spent on hardirq context, etc.).
369 */
370 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
371 dl_se->runtime = pi_se->dl_runtime;
372}
373
374/*
375 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
376 * possibility of an entity lasting more than what it declared, and thus
377 * exhausting its runtime.
378 *
379 * Here we are interested in making runtime overrun possible, but we do
380 * not want an entity that is misbehaving to affect the scheduling of all
381 * other entities.
382 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
383 * is used, in order to confine each entity within its own bandwidth.
384 *
385 * This function deals exactly with that, and ensures that when the runtime
386 * of an entity is replenished, its deadline is also postponed. That ensures
387 * the overrunning entity can't interfere with other entities in the system and
388 * can't make them miss their deadlines. Reasons why this kind of overrun
389 * could happen are, typically, an entity voluntarily trying to exceed its
390 * runtime, or having underestimated it during sched_setattr().
391 */
392static void replenish_dl_entity(struct sched_dl_entity *dl_se,
393 struct sched_dl_entity *pi_se)
394{
395 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
396 struct rq *rq = rq_of_dl_rq(dl_rq);
397
398 BUG_ON(pi_se->dl_runtime <= 0);
399
400 /*
401 * This could be the case for a !-dl task that is boosted.
402 * Just go with full inherited parameters.
403 */
404 if (dl_se->dl_deadline == 0) {
405 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
406 dl_se->runtime = pi_se->dl_runtime;
407 }
408
409 if (dl_se->dl_yielded && dl_se->runtime > 0)
410 dl_se->runtime = 0;
411
412 /*
413 * We keep moving the deadline away until we get some
414 * available runtime for the entity. This ensures correct
415 * handling of situations where the runtime overrun is
416 * arbitrarily large.
417 */
418 while (dl_se->runtime <= 0) {
419 dl_se->deadline += pi_se->dl_period;
420 dl_se->runtime += pi_se->dl_runtime;
421 }
422
423 /*
424 * At this point, the deadline really should be "in
425 * the future" with respect to rq->clock. If it's
426 * not, we are, for some reason, lagging too much!
427 * Anyway, after having warned userspace about that,
428 * we still try to keep things running by
429 * resetting the deadline and the budget of the
430 * entity.
431 */
432 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
433 printk_deferred_once("sched: DL replenish lagged too much\n");
434 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
435 dl_se->runtime = pi_se->dl_runtime;
436 }
437
438 if (dl_se->dl_yielded)
439 dl_se->dl_yielded = 0;
440 if (dl_se->dl_throttled)
441 dl_se->dl_throttled = 0;
442}
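/*
 * Worked example for replenish_dl_entity() above (illustrative numbers):
 * with dl_runtime = 10ms and dl_period = 100ms, an entity replenished
 * while its runtime sits at -25ms loops three times (-25ms -> -15ms ->
 * -5ms -> +5ms) and has its deadline pushed 3 periods into the future,
 * so an arbitrarily large overrun is paid back by skipping whole periods
 * rather than by stealing bandwidth from other entities.
 */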
443
444/*
445 * Here we check if --at time t-- an entity (which is probably being
446 * [re]activated or, in general, enqueued) can use its remaining runtime
447 * and its current deadline _without_ exceeding the bandwidth it is
448 * assigned (function returns true if it can't). We are in fact applying
449 * one of the CBS rules: when a task wakes up, if the residual runtime
450 * over residual deadline fits within the allocated bandwidth, then we
451 * can keep the current (absolute) deadline and residual budget without
452 * disrupting the schedulability of the system. Otherwise, we should
453 * refill the runtime and set the deadline a period in the future,
454 * because keeping the current (absolute) deadline of the task would
455 * result in breaking guarantees promised to other tasks (refer to
456 * Documentation/scheduler/sched-deadline.txt for more information).
457 *
458 * This function returns true if:
459 *
460 * runtime / (deadline - t) > dl_runtime / dl_period ,
461 *
462 * IOW we can't recycle current parameters.
463 *
464 * Notice that the bandwidth check is done against the period. For
465 * tasks with deadline equal to period this is the same as using
466 * dl_deadline instead of dl_period in the equation above.
467 */
468static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
469 struct sched_dl_entity *pi_se, u64 t)
470{
471 u64 left, right;
472
473 /*
474 * left and right are the two sides of the equation above,
475 * after a bit of shuffling to use multiplications instead
476 * of divisions.
477 *
478 * Note that none of the time values involved in the two
479 * multiplications are absolute: dl_deadline and dl_runtime
480 * are the relative deadline and the maximum runtime of each
481 * instance, runtime is the runtime left for the last instance
482 * and (deadline - t), since t is rq->clock, is the time left
483 * to the (absolute) deadline. Even if overflowing the u64 type
484 * is very unlikely to occur in both cases, here we scale down
485 * as we want to avoid that risk at all. Scaling down by 10
486 * means that we reduce granularity to 1us. We are fine with it,
487 * since this is only a true/false check and, anyway, thinking
488 * of anything below microsecond resolution is actually fiction
489 * (but still we want to give the user that illusion >;).
490 */
491 left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
492 right = ((dl_se->deadline - t) >> DL_SCALE) *
493 (pi_se->dl_runtime >> DL_SCALE);
494
495 return dl_time_before(right, left);
496}
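/*
 * Worked example for dl_entity_overflow() above (illustrative numbers,
 * ignoring the DL_SCALE shift): dl_runtime = 10ms, dl_period = 100ms
 * (a 10% reservation), and at wakeup runtime = 4ms with
 * (deadline - t) = 10ms. Then left = 100 * 4 = 400 and
 * right = 10 * 10 = 100; right < left, so the function returns true:
 * running 4ms within the next 10ms would mean 40% bandwidth, hence the
 * deadline and runtime must be reset rather than reused.
 */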
497
498/*
499 * When a -deadline entity is queued back on the runqueue, its runtime and
500 * deadline might need updating.
501 *
502 * The policy here is that we update the deadline of the entity only if:
503 * - the current deadline is in the past,
504 * - using the remaining runtime with the current deadline would make
505 * the entity exceed its bandwidth.
506 */
507static void update_dl_entity(struct sched_dl_entity *dl_se,
508 struct sched_dl_entity *pi_se)
509{
510 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
511 struct rq *rq = rq_of_dl_rq(dl_rq);
512
513 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
514 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
515 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
516 dl_se->runtime = pi_se->dl_runtime;
517 }
518}
519
520/*
521 * If the entity depleted all its runtime, and if we want it to sleep
522 * while waiting for some new execution time to become available, we
523 * set the bandwidth enforcement timer to the replenishment instant
524 * and try to activate it.
525 *
526 * Notice that it is important for the caller to know if the timer
527 * actually started or not (i.e., the replenishment instant is in
528 * the future or in the past).
529 */
530static int start_dl_timer(struct task_struct *p)
531{
532 struct sched_dl_entity *dl_se = &p->dl;
533 struct hrtimer *timer = &dl_se->dl_timer;
534 struct rq *rq = task_rq(p);
535 ktime_t now, act;
536 s64 delta;
537
538 lockdep_assert_held(&rq->lock);
539
540 /*
541 * We want the timer to fire at the deadline, but considering
542 * that it is actually coming from rq->clock and not from
543 * hrtimer's time base reading.
544 */
545 act = ns_to_ktime(dl_se->deadline);
546 now = hrtimer_cb_get_time(timer);
547 delta = ktime_to_ns(now) - rq_clock(rq);
548 act = ktime_add_ns(act, delta);
549
550 /*
551 * If the expiry time already passed, e.g., because the value
552 * chosen as the deadline is too small, don't even try to
553 * start the timer in the past!
554 */
555 if (ktime_us_delta(act, now) < 0)
556 return 0;
557
558 /*
559 * !enqueued will guarantee another callback; even if one is already in
560 * progress. This ensures a balanced {get,put}_task_struct().
561 *
562 * The race against __run_timer() clearing the enqueued state is
563 * harmless because we're holding task_rq()->lock, therefore the timer
564 * expiring after we've done the check will wait on its task_rq_lock()
565 * and observe our state.
566 */
567 if (!hrtimer_is_queued(timer)) {
568 get_task_struct(p);
569 hrtimer_start(timer, act, HRTIMER_MODE_ABS);
570 }
571
572 return 1;
573}
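/*
 * Example for start_dl_timer() above (illustrative numbers): if the
 * replenishment instant dl_se->deadline is 1000ms on the rq clock, while
 * rq_clock() currently reads 990ms and the hrtimer base reads 992ms, then
 * delta = 2ms and the timer is armed at 1002ms on the hrtimer base, i.e.
 * 10ms from now on either clock.
 */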
574
575/*
576 * This is the bandwidth enforcement timer callback. If here, we know
577 * a task is not on its dl_rq, since the fact that the timer was running
578 * means the task is throttled and needs a runtime replenishment.
579 *
580 * However, what we actually do depends on the fact the task is active,
581 * (it is on its rq) or has been removed from there by a call to
582 * dequeue_task_dl(). In the former case we must issue the runtime
583 * replenishment and add the task back to the dl_rq; in the latter, we just
584 * do nothing but clearing dl_throttled, so that runtime and deadline
585 * updating (and the queueing back to dl_rq) will be done by the
586 * next call to enqueue_task_dl().
587 */
588static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
589{
590 struct sched_dl_entity *dl_se = container_of(timer,
591 struct sched_dl_entity,
592 dl_timer);
593 struct task_struct *p = dl_task_of(dl_se);
594 unsigned long flags;
595 struct rq *rq;
596
597 rq = task_rq_lock(p, &flags);
598
599 /*
600 * The task might have changed its scheduling policy to something
601 * different from SCHED_DEADLINE (through switched_from_dl()).
602 */
603 if (!dl_task(p)) {
604 __dl_clear_params(p);
605 goto unlock;
606 }
607
608 /*
609 * The task might have been boosted by someone else and might be in the
610 * boosting/deboosting path; in that case it is not throttled.
611 */
612 if (dl_se->dl_boosted)
613 goto unlock;
614
615 /*
616 * Spurious timer due to start_dl_timer() race; or we already received
617 * a replenishment from rt_mutex_setprio().
618 */
619 if (!dl_se->dl_throttled)
620 goto unlock;
621
622 sched_clock_tick();
623 update_rq_clock(rq);
624
625 /*
626 * If the throttle happened during sched-out; like:
627 *
628 * schedule()
629 * deactivate_task()
630 * dequeue_task_dl()
631 * update_curr_dl()
632 * start_dl_timer()
633 * __dequeue_task_dl()
634 * prev->on_rq = 0;
635 *
636 * We can be both throttled and !queued. Replenish the counter
637 * but do not enqueue -- wait for our wakeup to do that.
638 */
639 if (!task_on_rq_queued(p)) {
640 replenish_dl_entity(dl_se, dl_se);
641 goto unlock;
642 }
643
644 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
645 if (dl_task(rq->curr))
646 check_preempt_curr_dl(rq, p, 0);
647 else
648 resched_curr(rq);
649
650#ifdef CONFIG_SMP
651 /*
652 * Perform balancing operations here; after the replenishments. We
653 * cannot drop rq->lock before this, otherwise the assertion in
654 * start_dl_timer() about not missing updates is not true.
655 *
656 * If we find that the rq the task was on is no longer available, we
657 * need to select a new rq.
658 *
659 * XXX figure out if select_task_rq_dl() deals with offline cpus.
660 */
661 if (unlikely(!rq->online))
662 rq = dl_task_offline_migration(rq, p);
663
664 /*
665 * Queueing this task back might have overloaded rq, check if we need
666 * to kick someone away.
667 */
668 if (has_pushable_dl_tasks(rq)) {
669 /*
670 * Nothing relies on rq->lock after this, so it's safe to drop
671 * rq->lock.
672 */
673 lockdep_unpin_lock(&rq->lock);
674 push_dl_task(rq);
675 lockdep_pin_lock(&rq->lock);
676 }
677#endif
678
679unlock:
680 task_rq_unlock(rq, p, &flags);
681
682 /*
683 * This can free the task_struct, including this hrtimer, do not touch
684 * anything related to that after this.
685 */
686 put_task_struct(p);
687
688 return HRTIMER_NORESTART;
689}
690
691void init_dl_task_timer(struct sched_dl_entity *dl_se)
692{
693 struct hrtimer *timer = &dl_se->dl_timer;
694
695 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
696 timer->function = dl_task_timer;
697}
698
699static
700int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
701{
702 return (dl_se->runtime <= 0);
703}
704
705extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
706
707/*
708 * Update the current task's runtime statistics (provided it is still
709 * a -deadline task and has not been removed from the dl_rq).
710 */
711static void update_curr_dl(struct rq *rq)
712{
713 struct task_struct *curr = rq->curr;
714 struct sched_dl_entity *dl_se = &curr->dl;
715 u64 delta_exec;
716
717 if (!dl_task(curr) || !on_dl_rq(dl_se))
718 return;
719
720 /* Kick cpufreq (see the comment in linux/cpufreq.h). */
721 if (cpu_of(rq) == smp_processor_id())
722 cpufreq_trigger_update(rq_clock(rq));
723
724 /*
725 * Consumed budget is computed considering the time as
726 * observed by schedulable tasks (excluding time spent
727 * in hardirq context, etc.). Deadlines are instead
728 * computed using hard walltime. This seems to be the more
729 * natural solution, but the full ramifications of this
730 * approach need further study.
731 */
732 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
733 if (unlikely((s64)delta_exec <= 0)) {
734 if (unlikely(dl_se->dl_yielded))
735 goto throttle;
736 return;
737 }
738
739 schedstat_set(curr->se.statistics.exec_max,
740 max(curr->se.statistics.exec_max, delta_exec));
741
742 curr->se.sum_exec_runtime += delta_exec;
743 account_group_exec_runtime(curr, delta_exec);
744
745 curr->se.exec_start = rq_clock_task(rq);
746 cpuacct_charge(curr, delta_exec);
747
748 sched_rt_avg_update(rq, delta_exec);
749
750 dl_se->runtime -= delta_exec;
751
752throttle:
753 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
754 dl_se->dl_throttled = 1;
755 __dequeue_task_dl(rq, curr, 0);
756 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
757 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
758
759 if (!is_leftmost(curr, &rq->dl))
760 resched_curr(rq);
761 }
762
763 /*
764 * Because -- for now -- we share the rt bandwidth, we need to
765 * account our runtime there too, otherwise actual rt tasks
766 * would be able to exceed the shared quota.
767 *
768 * Account to the root rt group for now.
769 *
770 * The solution we're working towards is having the RT groups scheduled
771 * using deadline servers -- however there's a few nasties to figure
772 * out before that can happen.
773 */
774 if (rt_bandwidth_enabled()) {
775 struct rt_rq *rt_rq = &rq->rt;
776
777 raw_spin_lock(&rt_rq->rt_runtime_lock);
778 /*
779 * We'll let actual RT tasks worry about the overflow here, we
780 * have our own CBS to keep us in line; only account when RT
781 * bandwidth is relevant.
782 */
783 if (sched_rt_bandwidth_account(rt_rq))
784 rt_rq->rt_time += delta_exec;
785 raw_spin_unlock(&rt_rq->rt_runtime_lock);
786 }
787}
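/*
 * Example for update_curr_dl() above (illustrative numbers): if 5ms of
 * rq_clock_task() time elapsed since exec_start, 5ms are charged against
 * dl_se->runtime; time spent in hardirq context is not charged, as the
 * comment above explains. Once the runtime drops to zero or below, the
 * task is throttled and dequeued, and the dl_timer is armed at the
 * current absolute deadline, where the budget will be replenished.
 */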
788
789#ifdef CONFIG_SMP
790
791static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
792{
793 struct rq *rq = rq_of_dl_rq(dl_rq);
794
795 if (dl_rq->earliest_dl.curr == 0 ||
796 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
797 dl_rq->earliest_dl.curr = deadline;
798 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
799 }
800}
801
802static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
803{
804 struct rq *rq = rq_of_dl_rq(dl_rq);
805
806 /*
807 * Since we may have removed our earliest (and/or next earliest)
808 * task we must recompute them.
809 */
810 if (!dl_rq->dl_nr_running) {
811 dl_rq->earliest_dl.curr = 0;
812 dl_rq->earliest_dl.next = 0;
813 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
814 } else {
815 struct rb_node *leftmost = dl_rq->rb_leftmost;
816 struct sched_dl_entity *entry;
817
818 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
819 dl_rq->earliest_dl.curr = entry->deadline;
820 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
821 }
822}
823
824#else
825
826static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
827static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
828
829#endif /* CONFIG_SMP */
830
831static inline
832void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
833{
834 int prio = dl_task_of(dl_se)->prio;
835 u64 deadline = dl_se->deadline;
836
837 WARN_ON(!dl_prio(prio));
838 dl_rq->dl_nr_running++;
839 add_nr_running(rq_of_dl_rq(dl_rq), 1);
840
841 inc_dl_deadline(dl_rq, deadline);
842 inc_dl_migration(dl_se, dl_rq);
843}
844
845static inline
846void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
847{
848 int prio = dl_task_of(dl_se)->prio;
849
850 WARN_ON(!dl_prio(prio));
851 WARN_ON(!dl_rq->dl_nr_running);
852 dl_rq->dl_nr_running--;
853 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
854
855 dec_dl_deadline(dl_rq, dl_se->deadline);
856 dec_dl_migration(dl_se, dl_rq);
857}
858
859static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
860{
861 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
862 struct rb_node **link = &dl_rq->rb_root.rb_node;
863 struct rb_node *parent = NULL;
864 struct sched_dl_entity *entry;
865 int leftmost = 1;
866
867 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
868
869 while (*link) {
870 parent = *link;
871 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
872 if (dl_time_before(dl_se->deadline, entry->deadline))
873 link = &parent->rb_left;
874 else {
875 link = &parent->rb_right;
876 leftmost = 0;
877 }
878 }
879
880 if (leftmost)
881 dl_rq->rb_leftmost = &dl_se->rb_node;
882
883 rb_link_node(&dl_se->rb_node, parent, link);
884 rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
885
886 inc_dl_tasks(dl_se, dl_rq);
887}
888
889static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
890{
891 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
892
893 if (RB_EMPTY_NODE(&dl_se->rb_node))
894 return;
895
896 if (dl_rq->rb_leftmost == &dl_se->rb_node) {
897 struct rb_node *next_node;
898
899 next_node = rb_next(&dl_se->rb_node);
900 dl_rq->rb_leftmost = next_node;
901 }
902
903 rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
904 RB_CLEAR_NODE(&dl_se->rb_node);
905
906 dec_dl_tasks(dl_se, dl_rq);
907}
908
909static void
910enqueue_dl_entity(struct sched_dl_entity *dl_se,
911 struct sched_dl_entity *pi_se, int flags)
912{
913 BUG_ON(on_dl_rq(dl_se));
914
915 /*
916 * If this is a wakeup or a new instance, the scheduling
917 * parameters of the task might need updating. Otherwise,
918 * we want a replenishment of its runtime.
919 */
920 if (flags & ENQUEUE_WAKEUP)
921 update_dl_entity(dl_se, pi_se);
922 else if (flags & ENQUEUE_REPLENISH)
923 replenish_dl_entity(dl_se, pi_se);
924
925 __enqueue_dl_entity(dl_se);
926}
927
928static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
929{
930 __dequeue_dl_entity(dl_se);
931}
932
933static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
934{
935 struct task_struct *pi_task = rt_mutex_get_top_task(p);
936 struct sched_dl_entity *pi_se = &p->dl;
937
938 /*
939 * Use the scheduling parameters of the top pi-waiter
940 * task if we have one and its (absolute) deadline is
941 * earlier than ours; otherwise we keep our runtime and
942 * deadline.
943 */
944 if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
945 pi_se = &pi_task->dl;
946 } else if (!dl_prio(p->normal_prio)) {
947 /*
948 * Special case in which we have a !SCHED_DEADLINE task
949 * that is going to be deboosted, but exceeds its
950 * runtime while doing so. No point in replenishing
951 * it, as it's going to return to its original
952 * scheduling class after this.
953 */
954 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
955 return;
956 }
957
958 /*
959 * If p is throttled, we do nothing. In fact, if it exhausted
960 * its budget it needs a replenishment and, since it now is on
961 * its rq, the bandwidth timer callback (which clearly has not
962 * run yet) will take care of this.
963 */
964 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
965 return;
966
967 enqueue_dl_entity(&p->dl, pi_se, flags);
968
969 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
970 enqueue_pushable_dl_task(rq, p);
971}
972
973static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
974{
975 dequeue_dl_entity(&p->dl);
976 dequeue_pushable_dl_task(rq, p);
977}
978
979static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
980{
981 update_curr_dl(rq);
982 __dequeue_task_dl(rq, p, flags);
983}
984
985/*
986 * Yield task semantic for -deadline tasks is:
987 *
988 * get off the CPU until our next instance, with
989 * a new runtime. This is of little use now, since we
990 * don't have a bandwidth reclaiming mechanism. Anyway,
991 * bandwidth reclaiming is planned for the future, and
992 * yield_task_dl will indicate that some spare budget
993 * is available for other task instances to use.
994 */
995static void yield_task_dl(struct rq *rq)
996{
997 /*
998 * We make the task go to sleep until its current deadline by
999 * forcing its runtime to zero. This way, update_curr_dl() stops
1000 * it and the bandwidth timer will wake it up and will give it
1001 * new scheduling parameters (thanks to dl_yielded=1).
1002 */
1003 rq->curr->dl.dl_yielded = 1;
1004
1005 update_rq_clock(rq);
1006 update_curr_dl(rq);
1007 /*
1008 * Tell update_rq_clock() that we've just updated,
1009 * so we don't do microscopic update in schedule()
1010 * and double the fastpath cost.
1011 */
1012 rq_clock_skip_update(rq, true);
1013}
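/*
 * Example for yield_task_dl() above (illustrative numbers): a task with
 * 6ms of budget left that calls sched_yield() gives that budget up; it is
 * throttled right away and the dl_timer wakes it at its current absolute
 * deadline, at which point replenish_dl_entity() hands it a full runtime
 * and a deadline one period further in the future.
 */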
1014
1015#ifdef CONFIG_SMP
1016
1017static int find_later_rq(struct task_struct *task);
1018
1019static int
1020select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1021{
1022 struct task_struct *curr;
1023 struct rq *rq;
1024
1025 if (sd_flag != SD_BALANCE_WAKE)
1026 goto out;
1027
1028 rq = cpu_rq(cpu);
1029
1030 rcu_read_lock();
1031 curr = READ_ONCE(rq->curr); /* unlocked access */
1032
1033 /*
1034 * If we are dealing with a -deadline task, we must
1035 * decide where to wake it up.
1036 * If it has a later deadline and the current task
1037 * on this rq can't move (provided the waking task
1038 * can!) we prefer to send it somewhere else. On the
1039 * other hand, if it has a shorter deadline, we
1040 * try to make it stay here; it might be important.
1041 */
1042 if (unlikely(dl_task(curr)) &&
1043 (curr->nr_cpus_allowed < 2 ||
1044 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1045 (p->nr_cpus_allowed > 1)) {
1046 int target = find_later_rq(p);
1047
1048 if (target != -1 &&
1049 (dl_time_before(p->dl.deadline,
1050 cpu_rq(target)->dl.earliest_dl.curr) ||
1051 (cpu_rq(target)->dl.dl_nr_running == 0)))
1052 cpu = target;
1053 }
1054 rcu_read_unlock();
1055
1056out:
1057 return cpu;
1058}
1059
1060static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1061{
1062 /*
1063 * Current can't be migrated, useless to reschedule,
1064 * let's hope p can move out.
1065 */
1066 if (rq->curr->nr_cpus_allowed == 1 ||
1067 cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1068 return;
1069
1070 /*
1071 * p is migratable, so let's not schedule it and
1072 * see if it is pushed or pulled somewhere else.
1073 */
1074 if (p->nr_cpus_allowed != 1 &&
1075 cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1076 return;
1077
1078 resched_curr(rq);
1079}
1080
1081#endif /* CONFIG_SMP */
1082
1083/*
1084 * Only called when both the current and waking task are -deadline
1085 * tasks.
1086 */
1087static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1088 int flags)
1089{
1090 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1091 resched_curr(rq);
1092 return;
1093 }
1094
1095#ifdef CONFIG_SMP
1096 /*
1097 * In the unlikely case current and p have the same deadline
1098 * let us try to decide what's the best thing to do...
1099 */
1100 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1101 !test_tsk_need_resched(rq->curr))
1102 check_preempt_equal_dl(rq, p);
1103#endif /* CONFIG_SMP */
1104}
1105
1106#ifdef CONFIG_SCHED_HRTICK
1107static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1108{
1109 hrtick_start(rq, p->dl.runtime);
1110}
1111#else /* !CONFIG_SCHED_HRTICK */
1112static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1113{
1114}
1115#endif
1116
1117static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1118 struct dl_rq *dl_rq)
1119{
1120 struct rb_node *left = dl_rq->rb_leftmost;
1121
1122 if (!left)
1123 return NULL;
1124
1125 return rb_entry(left, struct sched_dl_entity, rb_node);
1126}
1127
1128struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
1129{
1130 struct sched_dl_entity *dl_se;
1131 struct task_struct *p;
1132 struct dl_rq *dl_rq;
1133
1134 dl_rq = &rq->dl;
1135
1136 if (need_pull_dl_task(rq, prev)) {
1137 /*
1138 * This is OK, because current is on_cpu, which avoids it being
1139 * picked for load-balance and preemption/IRQs are still
1140 * disabled avoiding further scheduler activity on it and we're
1141 * being very careful to re-start the picking loop.
1142 */
1143 lockdep_unpin_lock(&rq->lock);
1144 pull_dl_task(rq);
1145 lockdep_pin_lock(&rq->lock);
1146 /*
1147 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1148 * means a stop task can slip in, in which case we need to
1149 * re-start task selection.
1150 */
1151 if (rq->stop && task_on_rq_queued(rq->stop))
1152 return RETRY_TASK;
1153 }
1154
1155 /*
1156 * When prev is DL, we may throttle it in put_prev_task().
1157 * So, we update time before we check for dl_nr_running.
1158 */
1159 if (prev->sched_class == &dl_sched_class)
1160 update_curr_dl(rq);
1161
1162 if (unlikely(!dl_rq->dl_nr_running))
1163 return NULL;
1164
1165 put_prev_task(rq, prev);
1166
1167 dl_se = pick_next_dl_entity(rq, dl_rq);
1168 BUG_ON(!dl_se);
1169
1170 p = dl_task_of(dl_se);
1171 p->se.exec_start = rq_clock_task(rq);
1172
1173 /* Running task will never be pushed. */
1174 dequeue_pushable_dl_task(rq, p);
1175
1176 if (hrtick_enabled(rq))
1177 start_hrtick_dl(rq, p);
1178
1179 queue_push_tasks(rq);
1180
1181 return p;
1182}
1183
1184static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1185{
1186 update_curr_dl(rq);
1187
1188 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1189 enqueue_pushable_dl_task(rq, p);
1190}
1191
1192static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1193{
1194 update_curr_dl(rq);
1195
1196 /*
1197 * Even when we have runtime, update_curr_dl() might have resulted in us
1198 * not being the leftmost task anymore. In that case NEED_RESCHED will
1199 * be set and schedule() will start a new hrtick for the next task.
1200 */
1201 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1202 is_leftmost(p, &rq->dl))
1203 start_hrtick_dl(rq, p);
1204}
1205
1206static void task_fork_dl(struct task_struct *p)
1207{
1208 /*
1209 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1210 * sched_fork()
1211 */
1212}
1213
1214static void task_dead_dl(struct task_struct *p)
1215{
1216 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1217
1218 /*
1219 * Since we are TASK_DEAD we won't slip out of the domain!
1220 */
1221 raw_spin_lock_irq(&dl_b->lock);
1222 /* XXX we should retain the bw until 0-lag */
1223 dl_b->total_bw -= p->dl.dl_bw;
1224 raw_spin_unlock_irq(&dl_b->lock);
1225}
1226
1227static void set_curr_task_dl(struct rq *rq)
1228{
1229 struct task_struct *p = rq->curr;
1230
1231 p->se.exec_start = rq_clock_task(rq);
1232
1233 /* You can't push away the running task */
1234 dequeue_pushable_dl_task(rq, p);
1235}
1236
1237#ifdef CONFIG_SMP
1238
1239/* Only try algorithms three times */
1240#define DL_MAX_TRIES 3
1241
1242static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1243{
1244 if (!task_running(rq, p) &&
1245 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1246 return 1;
1247 return 0;
1248}
1249
1250/*
1251 * Return the earliest-deadline pushable task on this rq that is suitable
1252 * to be executed on the given CPU, or NULL if there is none:
1253 */
1254static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1255{
1256 struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1257 struct task_struct *p = NULL;
1258
1259 if (!has_pushable_dl_tasks(rq))
1260 return NULL;
1261
1262next_node:
1263 if (next_node) {
1264 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1265
1266 if (pick_dl_task(rq, p, cpu))
1267 return p;
1268
1269 next_node = rb_next(next_node);
1270 goto next_node;
1271 }
1272
1273 return NULL;
1274}
1275
1276static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1277
1278static int find_later_rq(struct task_struct *task)
1279{
1280 struct sched_domain *sd;
1281 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1282 int this_cpu = smp_processor_id();
1283 int best_cpu, cpu = task_cpu(task);
1284
1285 /* Make sure the mask is initialized first */
1286 if (unlikely(!later_mask))
1287 return -1;
1288
1289 if (task->nr_cpus_allowed == 1)
1290 return -1;
1291
1292 /*
1293 * We have to consider system topology and task affinity
1294 * first, then we can look for a suitable cpu.
1295 */
1296 best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1297 task, later_mask);
1298 if (best_cpu == -1)
1299 return -1;
1300
1301 /*
1302 * If we are here, some target has been found,
1303 * the most suitable of which is cached in best_cpu.
1304 * That is, among the runqueues whose current tasks
1305 * have later deadlines than this task's, the rq
1306 * with the latest one.
1307 *
1308 * Now we check how well this matches with task's
1309 * affinity and system topology.
1310 *
1311 * The last cpu where the task ran is our first
1312 * guess, since it is most likely cache-hot there.
1313 */
1314 if (cpumask_test_cpu(cpu, later_mask))
1315 return cpu;
1316 /*
1317 * Check if this_cpu is to be skipped (i.e., it is
1318 * not in the mask) or not.
1319 */
1320 if (!cpumask_test_cpu(this_cpu, later_mask))
1321 this_cpu = -1;
1322
1323 rcu_read_lock();
1324 for_each_domain(cpu, sd) {
1325 if (sd->flags & SD_WAKE_AFFINE) {
1326
1327 /*
1328 * If possible, preempting this_cpu is
1329 * cheaper than migrating.
1330 */
1331 if (this_cpu != -1 &&
1332 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1333 rcu_read_unlock();
1334 return this_cpu;
1335 }
1336
1337 /*
1338 * Last chance: if best_cpu is valid and is
1339 * in the mask, that becomes our choice.
1340 */
1341 if (best_cpu < nr_cpu_ids &&
1342 cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1343 rcu_read_unlock();
1344 return best_cpu;
1345 }
1346 }
1347 }
1348 rcu_read_unlock();
1349
1350 /*
1351 * At this point, all our guesses failed, we just return
1352 * 'something', and let the caller sort the things out.
1353 */
1354 if (this_cpu != -1)
1355 return this_cpu;
1356
1357 cpu = cpumask_any(later_mask);
1358 if (cpu < nr_cpu_ids)
1359 return cpu;
1360
1361 return -1;
1362}
1363
1364/* Locks the rq it finds */
1365static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1366{
1367 struct rq *later_rq = NULL;
1368 int tries;
1369 int cpu;
1370
1371 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1372 cpu = find_later_rq(task);
1373
1374 if ((cpu == -1) || (cpu == rq->cpu))
1375 break;
1376
1377 later_rq = cpu_rq(cpu);
1378
1379 if (later_rq->dl.dl_nr_running &&
1380 !dl_time_before(task->dl.deadline,
1381 later_rq->dl.earliest_dl.curr)) {
1382 /*
1383 * Target rq has tasks of equal or earlier deadline,
1384 * retrying does not release any lock and is unlikely
1385 * to yield a different result.
1386 */
1387 later_rq = NULL;
1388 break;
1389 }
1390
1391 /* Retry if something changed. */
1392 if (double_lock_balance(rq, later_rq)) {
1393 if (unlikely(task_rq(task) != rq ||
1394 !cpumask_test_cpu(later_rq->cpu,
1395 &task->cpus_allowed) ||
1396 task_running(rq, task) ||
1397 !dl_task(task) ||
1398 !task_on_rq_queued(task))) {
1399 double_unlock_balance(rq, later_rq);
1400 later_rq = NULL;
1401 break;
1402 }
1403 }
1404
1405 /*
1406 * If the rq we found has no -deadline task, or
1407 * its earliest one has a later deadline than our
1408 * task, the rq is a good one.
1409 */
1410 if (!later_rq->dl.dl_nr_running ||
1411 dl_time_before(task->dl.deadline,
1412 later_rq->dl.earliest_dl.curr))
1413 break;
1414
1415 /* Otherwise we try again. */
1416 double_unlock_balance(rq, later_rq);
1417 later_rq = NULL;
1418 }
1419
1420 return later_rq;
1421}
1422
1423static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1424{
1425 struct task_struct *p;
1426
1427 if (!has_pushable_dl_tasks(rq))
1428 return NULL;
1429
1430 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1431 struct task_struct, pushable_dl_tasks);
1432
1433 BUG_ON(rq->cpu != task_cpu(p));
1434 BUG_ON(task_current(rq, p));
1435 BUG_ON(p->nr_cpus_allowed <= 1);
1436
1437 BUG_ON(!task_on_rq_queued(p));
1438 BUG_ON(!dl_task(p));
1439
1440 return p;
1441}
1442
1443/*
1444 * See if the non-running -deadline tasks on this rq
1445 * can be sent to some other CPU where they can preempt
1446 * and start executing.
1447 */
1448static int push_dl_task(struct rq *rq)
1449{
1450 struct task_struct *next_task;
1451 struct rq *later_rq;
1452 int ret = 0;
1453
1454 if (!rq->dl.overloaded)
1455 return 0;
1456
1457 next_task = pick_next_pushable_dl_task(rq);
1458 if (!next_task)
1459 return 0;
1460
1461retry:
1462 if (unlikely(next_task == rq->curr)) {
1463 WARN_ON(1);
1464 return 0;
1465 }
1466
1467 /*
1468 * If next_task preempts rq->curr, and rq->curr
1469 * can move away, it makes sense to just reschedule
1470 * without going further in pushing next_task.
1471 */
1472 if (dl_task(rq->curr) &&
1473 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1474 rq->curr->nr_cpus_allowed > 1) {
1475 resched_curr(rq);
1476 return 0;
1477 }
1478
1479 /* We might release rq lock */
1480 get_task_struct(next_task);
1481
1482 /* Will lock the rq it'll find */
1483 later_rq = find_lock_later_rq(next_task, rq);
1484 if (!later_rq) {
1485 struct task_struct *task;
1486
1487 /*
1488 * We must check all this again, since
1489 * find_lock_later_rq releases rq->lock and it is
1490 * then possible that next_task has migrated.
1491 */
1492 task = pick_next_pushable_dl_task(rq);
1493 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1494 /*
1495 * The task is still there. We don't try
1496 * again, some other cpu will pull it when ready.
1497 */
1498 goto out;
1499 }
1500
1501 if (!task)
1502 /* No more tasks */
1503 goto out;
1504
1505 put_task_struct(next_task);
1506 next_task = task;
1507 goto retry;
1508 }
1509
1510 deactivate_task(rq, next_task, 0);
1511 set_task_cpu(next_task, later_rq->cpu);
1512 activate_task(later_rq, next_task, 0);
1513 ret = 1;
1514
1515 resched_curr(later_rq);
1516
1517 double_unlock_balance(rq, later_rq);
1518
1519out:
1520 put_task_struct(next_task);
1521
1522 return ret;
1523}
1524
1525static void push_dl_tasks(struct rq *rq)
1526{
1527 /* push_dl_task() will return true if it moved a -deadline task */
1528 while (push_dl_task(rq))
1529 ;
1530}
1531
1532static void pull_dl_task(struct rq *this_rq)
1533{
1534 int this_cpu = this_rq->cpu, cpu;
1535 struct task_struct *p;
1536 bool resched = false;
1537 struct rq *src_rq;
1538 u64 dmin = LONG_MAX;
1539
1540 if (likely(!dl_overloaded(this_rq)))
1541 return;
1542
1543 /*
1544 * Match the barrier from dl_set_overload(); this guarantees that if we
1545 * see overloaded we must also see the dlo_mask bit.
1546 */
1547 smp_rmb();
1548
1549 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1550 if (this_cpu == cpu)
1551 continue;
1552
1553 src_rq = cpu_rq(cpu);
1554
1555 /*
1556 * It looks racy, and it is! However, as in sched_rt.c,
1557 * we are fine with this.
1558 */
1559 if (this_rq->dl.dl_nr_running &&
1560 dl_time_before(this_rq->dl.earliest_dl.curr,
1561 src_rq->dl.earliest_dl.next))
1562 continue;
1563
1564 /* Might drop this_rq->lock */
1565 double_lock_balance(this_rq, src_rq);
1566
1567 /*
1568 * If there are no more pullable tasks on the
1569 * rq, we're done with it.
1570 */
1571 if (src_rq->dl.dl_nr_running <= 1)
1572 goto skip;
1573
1574 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1575
1576 /*
1577 * We found a task to be pulled if:
1578 * - it preempts our current (if there's one),
1579 * - it will preempt the last one we pulled (if any).
1580 */
1581 if (p && dl_time_before(p->dl.deadline, dmin) &&
1582 (!this_rq->dl.dl_nr_running ||
1583 dl_time_before(p->dl.deadline,
1584 this_rq->dl.earliest_dl.curr))) {
1585 WARN_ON(p == src_rq->curr);
1586 WARN_ON(!task_on_rq_queued(p));
1587
1588 /*
1589 * We only pull p if it has a later deadline than the
1590 * current task of its runqueue; otherwise p is about to
1591 * run there anyway, so skip it.
1591 */
1592 if (dl_time_before(p->dl.deadline,
1593 src_rq->curr->dl.deadline))
1594 goto skip;
1595
1596 resched = true;
1597
1598 deactivate_task(src_rq, p, 0);
1599 set_task_cpu(p, this_cpu);
1600 activate_task(this_rq, p, 0);
1601 dmin = p->dl.deadline;
1602
1603 /* Is there any other task even earlier? */
1604 }
1605skip:
1606 double_unlock_balance(this_rq, src_rq);
1607 }
1608
1609 if (resched)
1610 resched_curr(this_rq);
1611}
1612
1613/*
1614 * Since the task is not running and a reschedule is not going to happen
1615 * anytime soon on its runqueue, we try pushing it away now.
1616 */
1617static void task_woken_dl(struct rq *rq, struct task_struct *p)
1618{
1619 if (!task_running(rq, p) &&
1620 !test_tsk_need_resched(rq->curr) &&
1621 p->nr_cpus_allowed > 1 &&
1622 dl_task(rq->curr) &&
1623 (rq->curr->nr_cpus_allowed < 2 ||
1624 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1625 push_dl_tasks(rq);
1626 }
1627}
1628
1629static void set_cpus_allowed_dl(struct task_struct *p,
1630 const struct cpumask *new_mask)
1631{
1632 struct root_domain *src_rd;
1633 struct rq *rq;
1634
1635 BUG_ON(!dl_task(p));
1636
1637 rq = task_rq(p);
1638 src_rd = rq->rd;
1639 /*
1640 * Migrating a SCHED_DEADLINE task between exclusive
1641 * cpusets (different root_domains) entails a bandwidth
1642 * update. We already made space for us in the destination
1643 * domain (see cpuset_can_attach()).
1644 */
1645 if (!cpumask_intersects(src_rd->span, new_mask)) {
1646 struct dl_bw *src_dl_b;
1647
1648 src_dl_b = dl_bw_of(cpu_of(rq));
1649 /*
1650 * We now free resources of the root_domain we are migrating
1651 * off. In the worst case, sched_setattr() may temporarily fail
1652 * until we complete the update.
1653 */
1654 raw_spin_lock(&src_dl_b->lock);
1655 __dl_clear(src_dl_b, p->dl.dl_bw);
1656 raw_spin_unlock(&src_dl_b->lock);
1657 }
1658
1659 set_cpus_allowed_common(p, new_mask);
1660}
1661
1662/* Assumes rq->lock is held */
1663static void rq_online_dl(struct rq *rq)
1664{
1665 if (rq->dl.overloaded)
1666 dl_set_overload(rq);
1667
1668 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1669 if (rq->dl.dl_nr_running > 0)
1670 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1671}
1672
1673/* Assumes rq->lock is held */
1674static void rq_offline_dl(struct rq *rq)
1675{
1676 if (rq->dl.overloaded)
1677 dl_clear_overload(rq);
1678
1679 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1680 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1681}
1682
1683void __init init_sched_dl_class(void)
1684{
1685 unsigned int i;
1686
1687 for_each_possible_cpu(i)
1688 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1689 GFP_KERNEL, cpu_to_node(i));
1690}
1691
1692#endif /* CONFIG_SMP */
1693
1694static void switched_from_dl(struct rq *rq, struct task_struct *p)
1695{
1696 /*
1697 * Start the deadline timer; if we switch back to dl before this we'll
1698 * continue consuming our current CBS slice. If we stay outside of
1699 * SCHED_DEADLINE until the deadline passes, the timer will reset the
1700 * task.
1701 */
1702 if (!start_dl_timer(p))
1703 __dl_clear_params(p);
1704
1705 /*
1706 * Since this might be the only -deadline task on the rq,
1707 * this is the right place to try to pull some other one
1708 * from an overloaded cpu, if any.
1709 */
1710 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1711 return;
1712
1713 queue_pull_task(rq);
1714}
1715
1716/*
1717 * When switching to -deadline, we may overload the rq, then
1718 * we try to push someone off, if possible.
1719 */
1720static void switched_to_dl(struct rq *rq, struct task_struct *p)
1721{
1722 if (dl_time_before(p->dl.deadline, rq_clock(rq)))
1723 setup_new_dl_entity(&p->dl, &p->dl);
1724
1725 if (task_on_rq_queued(p) && rq->curr != p) {
1726#ifdef CONFIG_SMP
1727 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
1728 queue_push_tasks(rq);
1729#else
1730 if (dl_task(rq->curr))
1731 check_preempt_curr_dl(rq, p, 0);
1732 else
1733 resched_curr(rq);
1734#endif
1735 }
1736}
1737
1738/*
1739 * If the scheduling parameters of a -deadline task changed,
1740 * a push or pull operation might be needed.
1741 */
1742static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1743 int oldprio)
1744{
1745 if (task_on_rq_queued(p) || rq->curr == p) {
1746#ifdef CONFIG_SMP
1747 /*
1748 * This might be too much, but unfortunately
1749 * we don't have the old deadline value, and
1750 * we can't tell whether the task is increasing
1751 * or lowering its prio, so...
1752 */
1753 if (!rq->dl.overloaded)
1754 queue_pull_task(rq);
1755
1756 /*
1757 * If we now have an earlier deadline task than p,
1758 * then reschedule, provided p is still on this
1759 * runqueue.
1760 */
1761 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
1762 resched_curr(rq);
1763#else
1764 /*
1765 * Again, we don't know if p has an earlier
1766 * or later deadline, so let's blindly set a
1767 * (maybe not needed) rescheduling point.
1768 */
1769 resched_curr(rq);
1770#endif /* CONFIG_SMP */
1771 }
1772}
1773
1774const struct sched_class dl_sched_class = {
1775 .next = &rt_sched_class,
1776 .enqueue_task = enqueue_task_dl,
1777 .dequeue_task = dequeue_task_dl,
1778 .yield_task = yield_task_dl,
1779
1780 .check_preempt_curr = check_preempt_curr_dl,
1781
1782 .pick_next_task = pick_next_task_dl,
1783 .put_prev_task = put_prev_task_dl,
1784
1785#ifdef CONFIG_SMP
1786 .select_task_rq = select_task_rq_dl,
1787 .set_cpus_allowed = set_cpus_allowed_dl,
1788 .rq_online = rq_online_dl,
1789 .rq_offline = rq_offline_dl,
1790 .task_woken = task_woken_dl,
1791#endif
1792
1793 .set_curr_task = set_curr_task_dl,
1794 .task_tick = task_tick_dl,
1795 .task_fork = task_fork_dl,
1796 .task_dead = task_dead_dl,
1797
1798 .prio_changed = prio_changed_dl,
1799 .switched_from = switched_from_dl,
1800 .switched_to = switched_to_dl,
1801
1802 .update_curr = update_curr_dl,
1803};
1804
1805#ifdef CONFIG_SCHED_DEBUG
1806extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1807
1808void print_dl_stats(struct seq_file *m, int cpu)
1809{
1810 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1811}
1812#endif /* CONFIG_SCHED_DEBUG */
1/*
2 * Deadline Scheduling Class (SCHED_DEADLINE)
3 *
4 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
5 *
6 * Tasks that periodically executes their instances for less than their
7 * runtime won't miss any of their deadlines.
8 * Tasks that are not periodic or sporadic or that tries to execute more
9 * than their reserved bandwidth will be slowed down (and may potentially
10 * miss some of their deadlines), and won't affect any other task.
11 *
12 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
13 * Juri Lelli <juri.lelli@gmail.com>,
14 * Michael Trimarchi <michael@amarulasolutions.com>,
15 * Fabio Checconi <fchecconi@gmail.com>
16 */
17#include "sched.h"
18
19#include <linux/slab.h>
20
21struct dl_bandwidth def_dl_bandwidth;
22
23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24{
25 return container_of(dl_se, struct task_struct, dl);
26}
27
28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29{
30 return container_of(dl_rq, struct rq, dl);
31}
32
33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34{
35 struct task_struct *p = dl_task_of(dl_se);
36 struct rq *rq = task_rq(p);
37
38 return &rq->dl;
39}
40
41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42{
43 return !RB_EMPTY_NODE(&dl_se->rb_node);
44}
45
46static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
47{
48 struct sched_dl_entity *dl_se = &p->dl;
49
50 return dl_rq->rb_leftmost == &dl_se->rb_node;
51}
52
53void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
54{
55 raw_spin_lock_init(&dl_b->dl_runtime_lock);
56 dl_b->dl_period = period;
57 dl_b->dl_runtime = runtime;
58}
59
60void init_dl_bw(struct dl_bw *dl_b)
61{
62 raw_spin_lock_init(&dl_b->lock);
63 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
64 if (global_rt_runtime() == RUNTIME_INF)
65 dl_b->bw = -1;
66 else
67 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
68 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
69 dl_b->total_bw = 0;
70}
71
72void init_dl_rq(struct dl_rq *dl_rq)
73{
74 dl_rq->rb_root = RB_ROOT;
75
76#ifdef CONFIG_SMP
77 /* zero means no -deadline tasks */
78 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
79
80 dl_rq->dl_nr_migratory = 0;
81 dl_rq->overloaded = 0;
82 dl_rq->pushable_dl_tasks_root = RB_ROOT;
83#else
84 init_dl_bw(&dl_rq->dl_bw);
85#endif
86}
87
88#ifdef CONFIG_SMP
89
90static inline int dl_overloaded(struct rq *rq)
91{
92 return atomic_read(&rq->rd->dlo_count);
93}
94
95static inline void dl_set_overload(struct rq *rq)
96{
97 if (!rq->online)
98 return;
99
100 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
101 /*
102 * Must be visible before the overload count is
103 * set (as in sched_rt.c).
104 *
105 * Matched by the barrier in pull_dl_task().
106 */
107 smp_wmb();
108 atomic_inc(&rq->rd->dlo_count);
109}
110
111static inline void dl_clear_overload(struct rq *rq)
112{
113 if (!rq->online)
114 return;
115
116 atomic_dec(&rq->rd->dlo_count);
117 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
118}
119
120static void update_dl_migration(struct dl_rq *dl_rq)
121{
122 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
123 if (!dl_rq->overloaded) {
124 dl_set_overload(rq_of_dl_rq(dl_rq));
125 dl_rq->overloaded = 1;
126 }
127 } else if (dl_rq->overloaded) {
128 dl_clear_overload(rq_of_dl_rq(dl_rq));
129 dl_rq->overloaded = 0;
130 }
131}
132
133static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
134{
135 struct task_struct *p = dl_task_of(dl_se);
136
137 if (tsk_nr_cpus_allowed(p) > 1)
138 dl_rq->dl_nr_migratory++;
139
140 update_dl_migration(dl_rq);
141}
142
143static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
144{
145 struct task_struct *p = dl_task_of(dl_se);
146
147 if (tsk_nr_cpus_allowed(p) > 1)
148 dl_rq->dl_nr_migratory--;
149
150 update_dl_migration(dl_rq);
151}
152
153/*
154 * The list of pushable -deadline task is not a plist, like in
155 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
156 */
157static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
158{
159 struct dl_rq *dl_rq = &rq->dl;
160 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
161 struct rb_node *parent = NULL;
162 struct task_struct *entry;
163 int leftmost = 1;
164
165 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
166
167 while (*link) {
168 parent = *link;
169 entry = rb_entry(parent, struct task_struct,
170 pushable_dl_tasks);
171 if (dl_entity_preempt(&p->dl, &entry->dl))
172 link = &parent->rb_left;
173 else {
174 link = &parent->rb_right;
175 leftmost = 0;
176 }
177 }
178
179 if (leftmost) {
180 dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
181 dl_rq->earliest_dl.next = p->dl.deadline;
182 }
183
184 rb_link_node(&p->pushable_dl_tasks, parent, link);
185 rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
186}
187
188static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
189{
190 struct dl_rq *dl_rq = &rq->dl;
191
192 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
193 return;
194
195 if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
196 struct rb_node *next_node;
197
198 next_node = rb_next(&p->pushable_dl_tasks);
199 dl_rq->pushable_dl_tasks_leftmost = next_node;
200 if (next_node) {
201 dl_rq->earliest_dl.next = rb_entry(next_node,
202 struct task_struct, pushable_dl_tasks)->dl.deadline;
203 }
204 }
205
206 rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
207 RB_CLEAR_NODE(&p->pushable_dl_tasks);
208}
209
210static inline int has_pushable_dl_tasks(struct rq *rq)
211{
212 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
213}
214
215static int push_dl_task(struct rq *rq);
216
217static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
218{
219 return dl_task(prev);
220}
221
222static DEFINE_PER_CPU(struct callback_head, dl_push_head);
223static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
224
225static void push_dl_tasks(struct rq *);
226static void pull_dl_task(struct rq *);
227
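/*
 * Push/pull work is deferred via balance callbacks, so it runs at a point
 * where it is safe to drop this rq's lock and acquire other runqueues'
 * locks.
 */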
228static inline void queue_push_tasks(struct rq *rq)
229{
230 if (!has_pushable_dl_tasks(rq))
231 return;
232
233 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
234}
235
236static inline void queue_pull_task(struct rq *rq)
237{
238 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
239}
240
241static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
242
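/*
 * Called from the bandwidth enforcement timer when the task's runqueue has
 * gone offline: pick a runqueue that can take @p, move it there, and return
 * the destination rq locked (the old rq's lock is released).
 */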
243static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
244{
245 struct rq *later_rq = NULL;
246
247 later_rq = find_lock_later_rq(p, rq);
248 if (!later_rq) {
249 int cpu;
250
251 /*
252 * If we cannot preempt any rq, fall back to pick any
253 * online cpu.
254 */
255 cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
256 if (cpu >= nr_cpu_ids) {
257 /*
258 * Failed to find any suitable cpu.
259 * The task will never come back!
260 */
261 BUG_ON(dl_bandwidth_enabled());
262
263 /*
264 * If admission control is disabled we
265 * try a little harder to let the task
266 * run.
267 */
268 cpu = cpumask_any(cpu_active_mask);
269 }
270 later_rq = cpu_rq(cpu);
271 double_lock_balance(rq, later_rq);
272 }
273
274 set_task_cpu(p, later_rq->cpu);
275 double_unlock_balance(later_rq, rq);
276
277 return later_rq;
278}
279
280#else
281
282static inline
283void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
284{
285}
286
287static inline
288void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
289{
290}
291
292static inline
293void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
294{
295}
296
297static inline
298void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
299{
300}
301
302static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
303{
304 return false;
305}
306
307static inline void pull_dl_task(struct rq *rq)
308{
309}
310
311static inline void queue_push_tasks(struct rq *rq)
312{
313}
314
315static inline void queue_pull_task(struct rq *rq)
316{
317}
318#endif /* CONFIG_SMP */
319
320static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
321static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
322static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
323 int flags);
324
325/*
326 * We are being explicitly informed that a new instance is starting,
327 * and this means that:
328 * - the absolute deadline of the entity has to be placed at
329 * current time + relative deadline;
330 * - the runtime of the entity has to be set to the maximum value.
331 *
332 * The capability of specifying such an event is useful whenever a -deadline
333 * entity wants to (try to!) synchronize its behaviour with the scheduler's
334 * one, and to (try to!) reconcile itself with its own scheduling
335 * parameters.
336 */
337static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
338{
339 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
340 struct rq *rq = rq_of_dl_rq(dl_rq);
341
342 WARN_ON(dl_se->dl_boosted);
343 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
344
345 /*
346 * We are racing with the deadline timer. So, do nothing because
347 * the deadline timer handler will take care of properly recharging
348 * the runtime and postponing the deadline.
349 */
350 if (dl_se->dl_throttled)
351 return;
352
353 /*
354 * We use the regular wall clock time to set deadlines in the
355 * future; in fact, we must consider execution overheads (time
356 * spent on hardirq context, etc.).
357 */
358 dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
359 dl_se->runtime = dl_se->dl_runtime;
360}
361
362/*
363 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
364 * possibility of an entity lasting longer than what it declared, and thus
365 * exhausting its runtime.
366 *
367 * Here we are interested in making runtime overrun possible, but we do
368 * not want an entity which is misbehaving to affect the scheduling of all
369 * other entities.
370 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
371 * is used, in order to confine each entity within its own bandwidth.
372 *
373 * This function deals exactly with that, and ensures that when the runtime
374 * of an entity is replenished, its deadline is also postponed. That ensures
375 * the overrunning entity can't interfere with other entities in the system and
376 * can't make them miss their deadlines. Reasons why this kind of overrun
377 * could happen are, typically, an entity voluntarily trying to run beyond its
378 * runtime, or having underestimated it during sched_setattr().
379 */
380static void replenish_dl_entity(struct sched_dl_entity *dl_se,
381 struct sched_dl_entity *pi_se)
382{
383 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
384 struct rq *rq = rq_of_dl_rq(dl_rq);
385
386 BUG_ON(pi_se->dl_runtime <= 0);
387
388 /*
389 * This could be the case for a !-dl task that is boosted.
390 * Just go with full inherited parameters.
391 */
392 if (dl_se->dl_deadline == 0) {
393 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
394 dl_se->runtime = pi_se->dl_runtime;
395 }
396
397 if (dl_se->dl_yielded && dl_se->runtime > 0)
398 dl_se->runtime = 0;
399
400 /*
401 * We keep moving the deadline away until we get some
402 * available runtime for the entity. This ensures correct
403 * handling of situations where the runtime overrun is
404 * arbitrarily large.
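 *
 * For instance (illustrative numbers only): with dl_runtime = 10ms and
 * dl_period = 100ms, an entity that reaches runtime = -25ms gets three
 * replenishments from the loop below, ending with runtime = 5ms and a
 * deadline pushed 300ms further out.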
405 */
406 while (dl_se->runtime <= 0) {
407 dl_se->deadline += pi_se->dl_period;
408 dl_se->runtime += pi_se->dl_runtime;
409 }
410
411 /*
412 * At this point, the deadline really should be "in
413 * the future" with respect to rq->clock. If it's
414 * not, we are, for some reason, lagging too much!
415 * Anyway, after having warned userspace about that,
416 * we still try to keep things running by
417 * resetting the deadline and the budget of the
418 * entity.
419 */
420 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
421 printk_deferred_once("sched: DL replenish lagged too much\n");
422 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
423 dl_se->runtime = pi_se->dl_runtime;
424 }
425
426 if (dl_se->dl_yielded)
427 dl_se->dl_yielded = 0;
428 if (dl_se->dl_throttled)
429 dl_se->dl_throttled = 0;
430}
431
432/*
433 * Here we check if --at time t-- an entity (which is probably being
434 * [re]activated or, in general, enqueued) can use its remaining runtime
435 * and its current deadline _without_ exceeding the bandwidth it is
436 * assigned (function returns true if it can't). We are in fact applying
437 * one of the CBS rules: when a task wakes up, if the residual runtime
438 * over residual deadline fits within the allocated bandwidth, then we
439 * can keep the current (absolute) deadline and residual budget without
440 * disrupting the schedulability of the system. Otherwise, we should
441 * refill the runtime and set the deadline a period in the future,
442 * because keeping the current (absolute) deadline of the task would
443 * result in breaking guarantees promised to other tasks (refer to
444 * Documentation/scheduler/sched-deadline.txt for more information).
445 *
446 * This function returns true if:
447 *
448 * runtime / (deadline - t) > dl_runtime / dl_period ,
449 *
450 * IOW we can't recycle current parameters.
451 *
452 * Notice that the bandwidth check is done against the period. For
453 * tasks with deadline equal to period this is the same as using
454 * dl_deadline instead of dl_period in the equation above.
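 *
 * For example (illustrative numbers only): a task with dl_runtime = 10ms
 * and dl_period = 100ms (10% bandwidth) that wakes with 5ms of runtime
 * left and 20ms to its current deadline would need 5/20 = 25% of a CPU,
 * more than its 10% reservation, so the check returns true and
 * update_dl_entity() refills the runtime and moves the deadline.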
455 */
456static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
457 struct sched_dl_entity *pi_se, u64 t)
458{
459 u64 left, right;
460
461 /*
462 * left and right are the two sides of the equation above,
463 * after a bit of shuffling to use multiplications instead
464 * of divisions.
465 *
466 * Note that none of the time values involved in the two
467 * multiplications are absolute: dl_deadline and dl_runtime
468 * are the relative deadline and the maximum runtime of each
469 * instance, runtime is the runtime left for the last instance
470 * and (deadline - t), since t is rq->clock, is the time left
471 * to the (absolute) deadline. Even if overflowing the u64 type
472 * is very unlikely to occur in both cases, here we scale down
473 * as we want to avoid that risk at all. Scaling down by 10
474 * means that we reduce granularity to 1us. We are fine with it,
475 * since this is only a true/false check and, anyway, thinking
476 * of anything below microseconds resolution is actually fiction
477 * (but still we want to give the user that illusion >;).
478 */
479 left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
480 right = ((dl_se->deadline - t) >> DL_SCALE) *
481 (pi_se->dl_runtime >> DL_SCALE);
482
483 return dl_time_before(right, left);
484}
485
486/*
487 * When a -deadline entity is queued back on the runqueue, its runtime and
488 * deadline might need updating.
489 *
490 * The policy here is that we update the deadline of the entity only if:
491 * - the current deadline is in the past,
492 * - using the remaining runtime with the current deadline would make
493 * the entity exceed its bandwidth.
494 */
495static void update_dl_entity(struct sched_dl_entity *dl_se,
496 struct sched_dl_entity *pi_se)
497{
498 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
499 struct rq *rq = rq_of_dl_rq(dl_rq);
500
501 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
502 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
503 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
504 dl_se->runtime = pi_se->dl_runtime;
505 }
506}
507
508/*
509 * If the entity depleted all its runtime, and if we want it to sleep
510 * while waiting for some new execution time to become available, we
511 * set the bandwidth enforcement timer to the replenishment instant
512 * and try to activate it.
513 *
514 * Notice that it is important for the caller to know if the timer
515 * actually started or not (i.e., the replenishment instant is in
516 * the future or in the past).
517 */
518static int start_dl_timer(struct task_struct *p)
519{
520 struct sched_dl_entity *dl_se = &p->dl;
521 struct hrtimer *timer = &dl_se->dl_timer;
522 struct rq *rq = task_rq(p);
523 ktime_t now, act;
524 s64 delta;
525
526 lockdep_assert_held(&rq->lock);
527
528 /*
529 * We want the timer to fire at the deadline, but the deadline
530 * value is expressed in rq->clock time, not in the hrtimer's
531 * time base, so we must translate between the two.
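 * Concretely: act = deadline + (hrtimer_now - rq_clock(rq)), which
 * preserves the remaining time to the deadline when expressed in the
 * hrtimer's time base.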
532 */
533 act = ns_to_ktime(dl_se->deadline);
534 now = hrtimer_cb_get_time(timer);
535 delta = ktime_to_ns(now) - rq_clock(rq);
536 act = ktime_add_ns(act, delta);
537
538 /*
539 * If the expiry time already passed, e.g., because the value
540 * chosen as the deadline is too small, don't even try to
541 * start the timer in the past!
542 */
543 if (ktime_us_delta(act, now) < 0)
544 return 0;
545
546 /*
547 * !enqueued will guarantee another callback, even if one is already in
548 * progress. This ensures a balanced {get,put}_task_struct().
549 *
550 * The race against __run_timer() clearing the enqueued state is
551 * harmless because we're holding task_rq()->lock, therefore the timer
552 * expiring after we've done the check will wait on its task_rq_lock()
553 * and observe our state.
554 */
555 if (!hrtimer_is_queued(timer)) {
556 get_task_struct(p);
557 hrtimer_start(timer, act, HRTIMER_MODE_ABS);
558 }
559
560 return 1;
561}
562
563/*
564 * This is the bandwidth enforcement timer callback. If here, we know
565 * a task is not on its dl_rq, since the fact that the timer was running
566 * means the task is throttled and needs a runtime replenishment.
567 *
568 * However, what we actually do depends on whether the task is still
569 * active (it is on its rq) or has been removed from there by a call to
570 * dequeue_task_dl(). In the former case we must issue the runtime
571 * replenishment and add the task back to the dl_rq; in the latter, we
572 * replenish the runtime but leave the task dequeued, so that the
573 * queueing back to the dl_rq will be done by the next wakeup, via
574 * enqueue_task_dl().
575 */
576static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
577{
578 struct sched_dl_entity *dl_se = container_of(timer,
579 struct sched_dl_entity,
580 dl_timer);
581 struct task_struct *p = dl_task_of(dl_se);
582 struct rq_flags rf;
583 struct rq *rq;
584
585 rq = task_rq_lock(p, &rf);
586
587 /*
588 * The task might have changed its scheduling policy to something
589 * different than SCHED_DEADLINE (through switched_from_dl()).
590 */
591 if (!dl_task(p)) {
592 __dl_clear_params(p);
593 goto unlock;
594 }
595
596 /*
597 * The task might have been boosted by someone else and might be in the
598 * boosting/deboosting path; it's not throttled.
599 */
600 if (dl_se->dl_boosted)
601 goto unlock;
602
603 /*
604 * Spurious timer due to start_dl_timer() race; or we already received
605 * a replenishment from rt_mutex_setprio().
606 */
607 if (!dl_se->dl_throttled)
608 goto unlock;
609
610 sched_clock_tick();
611 update_rq_clock(rq);
612
613 /*
614 * If the throttle happened during sched-out; like:
615 *
616 * schedule()
617 * deactivate_task()
618 * dequeue_task_dl()
619 * update_curr_dl()
620 * start_dl_timer()
621 * __dequeue_task_dl()
622 * prev->on_rq = 0;
623 *
624 * We can be both throttled and !queued. Replenish the counter
625 * but do not enqueue -- wait for our wakeup to do that.
626 */
627 if (!task_on_rq_queued(p)) {
628 replenish_dl_entity(dl_se, dl_se);
629 goto unlock;
630 }
631
632#ifdef CONFIG_SMP
633 if (unlikely(!rq->online)) {
634 /*
635 * If the runqueue is no longer available, migrate the
636 * task elsewhere. This necessarily changes rq.
637 */
638 lockdep_unpin_lock(&rq->lock, rf.cookie);
639 rq = dl_task_offline_migration(rq, p);
640 rf.cookie = lockdep_pin_lock(&rq->lock);
641
642 /*
643 * Now that the task has been migrated to the new RQ and we
644 * have that locked, proceed as normal and enqueue the task
645 * there.
646 */
647 }
648#endif
649
650 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
651 if (dl_task(rq->curr))
652 check_preempt_curr_dl(rq, p, 0);
653 else
654 resched_curr(rq);
655
656#ifdef CONFIG_SMP
657 /*
658 * Queueing this task back might have overloaded rq, check if we need
659 * to kick someone away.
660 */
661 if (has_pushable_dl_tasks(rq)) {
662 /*
663 * Nothing relies on rq->lock after this, so it's safe to drop
664 * rq->lock.
665 */
666 lockdep_unpin_lock(&rq->lock, rf.cookie);
667 push_dl_task(rq);
668 lockdep_repin_lock(&rq->lock, rf.cookie);
669 }
670#endif
671
672unlock:
673 task_rq_unlock(rq, p, &rf);
674
675 /*
676 * This can free the task_struct, including this hrtimer; do not touch
677 * anything related to that after this.
678 */
679 put_task_struct(p);
680
681 return HRTIMER_NORESTART;
682}
683
684void init_dl_task_timer(struct sched_dl_entity *dl_se)
685{
686 struct hrtimer *timer = &dl_se->dl_timer;
687
688 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
689 timer->function = dl_task_timer;
690}
691
692static
693int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
694{
695 return (dl_se->runtime <= 0);
696}
697
698extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
699
700/*
701 * Update the current task's runtime statistics (provided it is still
702 * a -deadline task and has not been removed from the dl_rq).
703 */
704static void update_curr_dl(struct rq *rq)
705{
706 struct task_struct *curr = rq->curr;
707 struct sched_dl_entity *dl_se = &curr->dl;
708 u64 delta_exec;
709
710 if (!dl_task(curr) || !on_dl_rq(dl_se))
711 return;
712
713 /*
714 * Consumed budget is computed considering the time as
715 * observed by schedulable tasks (excluding time spent
716 * in hardirq context, etc.). Deadlines are instead
717 * computed using hard walltime. This seems to be the more
718 * natural solution, but the full ramifications of this
719 * approach need further study.
720 */
721 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
722 if (unlikely((s64)delta_exec <= 0)) {
723 if (unlikely(dl_se->dl_yielded))
724 goto throttle;
725 return;
726 }
727
728 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
729 cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_DL);
730
731 schedstat_set(curr->se.statistics.exec_max,
732 max(curr->se.statistics.exec_max, delta_exec));
733
734 curr->se.sum_exec_runtime += delta_exec;
735 account_group_exec_runtime(curr, delta_exec);
736
737 curr->se.exec_start = rq_clock_task(rq);
738 cpuacct_charge(curr, delta_exec);
739
740 sched_rt_avg_update(rq, delta_exec);
741
742 dl_se->runtime -= delta_exec;
743
744throttle:
745 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
746 dl_se->dl_throttled = 1;
747 __dequeue_task_dl(rq, curr, 0);
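		/*
		 * If the task is boosted (deadline inheritance) or the
		 * replenishment instant is already in the past, replenish
		 * and re-enqueue right away instead of waiting for the
		 * bandwidth timer.
		 */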
748 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
749 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
750
751 if (!is_leftmost(curr, &rq->dl))
752 resched_curr(rq);
753 }
754
755 /*
756 * Because -- for now -- we share the rt bandwidth, we need to
757 * account our runtime there too, otherwise actual rt tasks
758 * would be able to exceed the shared quota.
759 *
760 * Account to the root rt group for now.
761 *
762 * The solution we're working towards is having the RT groups scheduled
763 * using deadline servers -- however there's a few nasties to figure
764 * out before that can happen.
765 */
766 if (rt_bandwidth_enabled()) {
767 struct rt_rq *rt_rq = &rq->rt;
768
769 raw_spin_lock(&rt_rq->rt_runtime_lock);
770 /*
771 * We'll let actual RT tasks worry about the overflow here; we
772 * have our own CBS to keep us in line. Only account when RT
773 * bandwidth is relevant.
774 */
775 if (sched_rt_bandwidth_account(rt_rq))
776 rt_rq->rt_time += delta_exec;
777 raw_spin_unlock(&rt_rq->rt_runtime_lock);
778 }
779}
780
781#ifdef CONFIG_SMP
782
783static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
784{
785 struct rq *rq = rq_of_dl_rq(dl_rq);
786
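	/*
	 * If this deadline is earlier than the rq's current earliest one,
	 * record it and propagate it to the root_domain's cpudl structure,
	 * which cpudl_find() consults when looking for a later runqueue.
	 */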
787 if (dl_rq->earliest_dl.curr == 0 ||
788 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
789 dl_rq->earliest_dl.curr = deadline;
790 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
791 }
792}
793
794static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
795{
796 struct rq *rq = rq_of_dl_rq(dl_rq);
797
798 /*
799 * Since we may have removed our earliest (and/or next earliest)
800 * task we must recompute them.
801 */
802 if (!dl_rq->dl_nr_running) {
803 dl_rq->earliest_dl.curr = 0;
804 dl_rq->earliest_dl.next = 0;
805 cpudl_clear(&rq->rd->cpudl, rq->cpu);
806 } else {
807 struct rb_node *leftmost = dl_rq->rb_leftmost;
808 struct sched_dl_entity *entry;
809
810 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
811 dl_rq->earliest_dl.curr = entry->deadline;
812 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
813 }
814}
815
816#else
817
818static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
819static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
820
821#endif /* CONFIG_SMP */
822
823static inline
824void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
825{
826 int prio = dl_task_of(dl_se)->prio;
827 u64 deadline = dl_se->deadline;
828
829 WARN_ON(!dl_prio(prio));
830 dl_rq->dl_nr_running++;
831 add_nr_running(rq_of_dl_rq(dl_rq), 1);
832
833 inc_dl_deadline(dl_rq, deadline);
834 inc_dl_migration(dl_se, dl_rq);
835}
836
837static inline
838void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
839{
840 int prio = dl_task_of(dl_se)->prio;
841
842 WARN_ON(!dl_prio(prio));
843 WARN_ON(!dl_rq->dl_nr_running);
844 dl_rq->dl_nr_running--;
845 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
846
847 dec_dl_deadline(dl_rq, dl_se->deadline);
848 dec_dl_migration(dl_se, dl_rq);
849}
850
851static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
852{
853 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
854 struct rb_node **link = &dl_rq->rb_root.rb_node;
855 struct rb_node *parent = NULL;
856 struct sched_dl_entity *entry;
857 int leftmost = 1;
858
859 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
860
861 while (*link) {
862 parent = *link;
863 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
864 if (dl_time_before(dl_se->deadline, entry->deadline))
865 link = &parent->rb_left;
866 else {
867 link = &parent->rb_right;
868 leftmost = 0;
869 }
870 }
871
872 if (leftmost)
873 dl_rq->rb_leftmost = &dl_se->rb_node;
874
875 rb_link_node(&dl_se->rb_node, parent, link);
876 rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
877
878 inc_dl_tasks(dl_se, dl_rq);
879}
880
881static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
882{
883 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
884
885 if (RB_EMPTY_NODE(&dl_se->rb_node))
886 return;
887
888 if (dl_rq->rb_leftmost == &dl_se->rb_node) {
889 struct rb_node *next_node;
890
891 next_node = rb_next(&dl_se->rb_node);
892 dl_rq->rb_leftmost = next_node;
893 }
894
895 rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
896 RB_CLEAR_NODE(&dl_se->rb_node);
897
898 dec_dl_tasks(dl_se, dl_rq);
899}
900
901static void
902enqueue_dl_entity(struct sched_dl_entity *dl_se,
903 struct sched_dl_entity *pi_se, int flags)
904{
905 BUG_ON(on_dl_rq(dl_se));
906
907 /*
908 * If this is a wakeup or a new instance, the scheduling
909 * parameters of the task might need updating. Otherwise,
910 * we want a replenishment of its runtime.
911 */
912 if (flags & ENQUEUE_WAKEUP)
913 update_dl_entity(dl_se, pi_se);
914 else if (flags & ENQUEUE_REPLENISH)
915 replenish_dl_entity(dl_se, pi_se);
916
917 __enqueue_dl_entity(dl_se);
918}
919
920static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
921{
922 __dequeue_dl_entity(dl_se);
923}
924
925static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
926{
927 struct task_struct *pi_task = rt_mutex_get_top_task(p);
928 struct sched_dl_entity *pi_se = &p->dl;
929
930 /*
931 * Use the scheduling parameters of the top pi-waiter
932 * task if we have one and its (absolute) deadline is
933 * smaller than ours; otherwise we keep our runtime and
934 * deadline.
935 */
936 if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
937 pi_se = &pi_task->dl;
938 } else if (!dl_prio(p->normal_prio)) {
939 /*
940 * Special case in which we have a !SCHED_DEADLINE task
941 * that is going to be deboosted, but exceeds its
942 * runtime while doing so. No point in replenishing
943 * it, as it's going to return to its original
944 * scheduling class after this.
945 */
946 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
947 return;
948 }
949
950 /*
951 * If p is throttled, we do nothing. In fact, if it exhausted
952 * its budget it needs a replenishment and, since it now is on
953 * its rq, the bandwidth timer callback (which clearly has not
954 * run yet) will take care of this.
955 */
956 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
957 return;
958
959 enqueue_dl_entity(&p->dl, pi_se, flags);
960
961 if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
962 enqueue_pushable_dl_task(rq, p);
963}
964
965static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
966{
967 dequeue_dl_entity(&p->dl);
968 dequeue_pushable_dl_task(rq, p);
969}
970
971static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
972{
973 update_curr_dl(rq);
974 __dequeue_task_dl(rq, p, flags);
975}
976
977/*
978 * Yield task semantic for -deadline tasks is:
979 *
980 * get off the CPU until our next instance, with
981 * a new runtime. This is of little use now, since we
982 * don't have a bandwidth reclaiming mechanism. Anyway,
983 * bandwidth reclaiming is planned for the future, and
984 * yield_task_dl will indicate that some spare budget
985 * is available for other task instances to use.
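 *
 * In practice, a -deadline task calling sched_yield() gives up whatever
 * is left of its current budget and sleeps until its next replenishment.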
986 */
987static void yield_task_dl(struct rq *rq)
988{
989 /*
990 * We make the task go to sleep until its current deadline by
991 * forcing its runtime to zero. This way, update_curr_dl() stops
992 * it and the bandwidth timer will wake it up and will give it
993 * new scheduling parameters (thanks to dl_yielded=1).
994 */
995 rq->curr->dl.dl_yielded = 1;
996
997 update_rq_clock(rq);
998 update_curr_dl(rq);
999 /*
1000 * Tell update_rq_clock() that we've just updated,
1001 * so we don't do microscopic update in schedule()
1002 * and double the fastpath cost.
1003 */
1004 rq_clock_skip_update(rq, true);
1005}
1006
1007#ifdef CONFIG_SMP
1008
1009static int find_later_rq(struct task_struct *task);
1010
1011static int
1012select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1013{
1014 struct task_struct *curr;
1015 struct rq *rq;
1016
1017 if (sd_flag != SD_BALANCE_WAKE)
1018 goto out;
1019
1020 rq = cpu_rq(cpu);
1021
1022 rcu_read_lock();
1023 curr = READ_ONCE(rq->curr); /* unlocked access */
1024
1025 /*
1026 * If we are dealing with a -deadline task, we must
1027 * decide where to wake it up.
1028 * If it has a later deadline and the current task
1029 * on this rq can't move (provided the waking task
1030 * can!) we prefer to send it somewhere else. On the
1031 * other hand, if it has a shorter deadline, we
1032 * try to make it stay here: it might be important.
1033 */
1034 if (unlikely(dl_task(curr)) &&
1035 (tsk_nr_cpus_allowed(curr) < 2 ||
1036 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1037 (tsk_nr_cpus_allowed(p) > 1)) {
1038 int target = find_later_rq(p);
1039
1040 if (target != -1 &&
1041 (dl_time_before(p->dl.deadline,
1042 cpu_rq(target)->dl.earliest_dl.curr) ||
1043 (cpu_rq(target)->dl.dl_nr_running == 0)))
1044 cpu = target;
1045 }
1046 rcu_read_unlock();
1047
1048out:
1049 return cpu;
1050}
1051
1052static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1053{
1054 /*
1055 * Current can't be migrated, so rescheduling is useless;
1056 * let's hope p can move out.
1057 */
1058 if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
1059 cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1060 return;
1061
1062 /*
1063 * p is migratable, so let's not schedule it and
1064 * see if it is pushed or pulled somewhere else.
1065 */
1066 if (tsk_nr_cpus_allowed(p) != 1 &&
1067 cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1068 return;
1069
1070 resched_curr(rq);
1071}
1072
1073#endif /* CONFIG_SMP */
1074
1075/*
1076 * Only called when both the current and waking task are -deadline
1077 * tasks.
1078 */
1079static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1080 int flags)
1081{
1082 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1083 resched_curr(rq);
1084 return;
1085 }
1086
1087#ifdef CONFIG_SMP
1088 /*
1089 * In the unlikely case current and p have the same deadline
1090 * let us try to decide what's the best thing to do...
1091 */
1092 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1093 !test_tsk_need_resched(rq->curr))
1094 check_preempt_equal_dl(rq, p);
1095#endif /* CONFIG_SMP */
1096}
1097
1098#ifdef CONFIG_SCHED_HRTICK
1099static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1100{
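	/*
	 * Arm the hrtick to fire roughly when the remaining runtime would be
	 * consumed, so the budget is enforced at better than tick resolution.
	 */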
1101 hrtick_start(rq, p->dl.runtime);
1102}
1103#else /* !CONFIG_SCHED_HRTICK */
1104static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1105{
1106}
1107#endif
1108
1109static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1110 struct dl_rq *dl_rq)
1111{
1112 struct rb_node *left = dl_rq->rb_leftmost;
1113
1114 if (!left)
1115 return NULL;
1116
1117 return rb_entry(left, struct sched_dl_entity, rb_node);
1118}
1119
1120struct task_struct *
1121pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
1122{
1123 struct sched_dl_entity *dl_se;
1124 struct task_struct *p;
1125 struct dl_rq *dl_rq;
1126
1127 dl_rq = &rq->dl;
1128
1129 if (need_pull_dl_task(rq, prev)) {
1130 /*
1131 * This is OK, because current is on_cpu, which avoids it being
1132 * picked for load-balance; preemption/IRQs are still disabled,
1133 * avoiding further scheduler activity on it; and we're being
1134 * very careful to re-start the picking loop.
1135 */
1136 lockdep_unpin_lock(&rq->lock, cookie);
1137 pull_dl_task(rq);
1138 lockdep_repin_lock(&rq->lock, cookie);
1139 /*
1140 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1141 * means a stop task can slip in, in which case we need to
1142 * re-start task selection.
1143 */
1144 if (rq->stop && task_on_rq_queued(rq->stop))
1145 return RETRY_TASK;
1146 }
1147
1148 /*
1149 * When prev is DL, we may throttle it in put_prev_task().
1150 * So, we update time before we check for dl_nr_running.
1151 */
1152 if (prev->sched_class == &dl_sched_class)
1153 update_curr_dl(rq);
1154
1155 if (unlikely(!dl_rq->dl_nr_running))
1156 return NULL;
1157
1158 put_prev_task(rq, prev);
1159
1160 dl_se = pick_next_dl_entity(rq, dl_rq);
1161 BUG_ON(!dl_se);
1162
1163 p = dl_task_of(dl_se);
1164 p->se.exec_start = rq_clock_task(rq);
1165
1166 /* Running task will never be pushed. */
1167 dequeue_pushable_dl_task(rq, p);
1168
1169 if (hrtick_enabled(rq))
1170 start_hrtick_dl(rq, p);
1171
1172 queue_push_tasks(rq);
1173
1174 return p;
1175}
1176
1177static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1178{
1179 update_curr_dl(rq);
1180
1181 if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1)
1182 enqueue_pushable_dl_task(rq, p);
1183}
1184
1185static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1186{
1187 update_curr_dl(rq);
1188
1189 /*
1190 * Even when we have runtime, update_curr_dl() might have resulted in us
1191 * not being the leftmost task anymore. In that case NEED_RESCHED will
1192 * be set and schedule() will start a new hrtick for the next task.
1193 */
1194 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1195 is_leftmost(p, &rq->dl))
1196 start_hrtick_dl(rq, p);
1197}
1198
1199static void task_fork_dl(struct task_struct *p)
1200{
1201 /*
1202 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1203 * sched_fork().
1204 */
1205}
1206
1207static void task_dead_dl(struct task_struct *p)
1208{
1209 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1210
1211 /*
1212 * Since we are TASK_DEAD we won't slip out of the domain!
1213 */
1214 raw_spin_lock_irq(&dl_b->lock);
1215 /* XXX we should retain the bw until 0-lag */
1216 dl_b->total_bw -= p->dl.dl_bw;
1217 raw_spin_unlock_irq(&dl_b->lock);
1218}
1219
1220static void set_curr_task_dl(struct rq *rq)
1221{
1222 struct task_struct *p = rq->curr;
1223
1224 p->se.exec_start = rq_clock_task(rq);
1225
1226 /* You can't push away the running task */
1227 dequeue_pushable_dl_task(rq, p);
1228}
1229
1230#ifdef CONFIG_SMP
1231
1232/* Only try algorithms three times */
1233#define DL_MAX_TRIES 3
1234
1235static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1236{
1237 if (!task_running(rq, p) &&
1238 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1239 return 1;
1240 return 0;
1241}
1242
1243/*
1244 * Return the earliest-deadline pushable task of this rq which is suitable
1245 * to be executed on the given CPU, or NULL if there is none:
1246 */
1247static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1248{
1249 struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1250 struct task_struct *p = NULL;
1251
1252 if (!has_pushable_dl_tasks(rq))
1253 return NULL;
1254
1255next_node:
1256 if (next_node) {
1257 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1258
1259 if (pick_dl_task(rq, p, cpu))
1260 return p;
1261
1262 next_node = rb_next(next_node);
1263 goto next_node;
1264 }
1265
1266 return NULL;
1267}
1268
1269static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1270
1271static int find_later_rq(struct task_struct *task)
1272{
1273 struct sched_domain *sd;
1274 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1275 int this_cpu = smp_processor_id();
1276 int best_cpu, cpu = task_cpu(task);
1277
1278 /* Make sure the mask is initialized first */
1279 if (unlikely(!later_mask))
1280 return -1;
1281
1282 if (tsk_nr_cpus_allowed(task) == 1)
1283 return -1;
1284
1285 /*
1286 * We have to consider system topology and task affinity
1287 * first, then we can look for a suitable cpu.
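 *
 * The preference order below is: the task's previous cpu, then (walking
 * the previous cpu's sched domains) this_cpu if it is wake-affine, then
 * cpudl's best_cpu, and finally this_cpu or any cpu left in later_mask.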
1288 */
1289 best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1290 task, later_mask);
1291 if (best_cpu == -1)
1292 return -1;
1293
1294 /*
1295 * If we are here, some target has been found,
1296 * the most suitable of which is cached in best_cpu.
1297 * This is, among the runqueues where the current tasks
1298 * have later deadlines than the task's one, the rq
1299 * with the latest possible one.
1300 *
1301 * Now we check how well this matches with task's
1302 * affinity and system topology.
1303 *
1304 * The last cpu where the task ran is our first
1305 * guess, since it is most likely cache-hot there.
1306 */
1307 if (cpumask_test_cpu(cpu, later_mask))
1308 return cpu;
1309 /*
1310 * Check if this_cpu is to be skipped (i.e., it is
1311 * not in the mask) or not.
1312 */
1313 if (!cpumask_test_cpu(this_cpu, later_mask))
1314 this_cpu = -1;
1315
1316 rcu_read_lock();
1317 for_each_domain(cpu, sd) {
1318 if (sd->flags & SD_WAKE_AFFINE) {
1319
1320 /*
1321 * If possible, preempting this_cpu is
1322 * cheaper than migrating.
1323 */
1324 if (this_cpu != -1 &&
1325 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1326 rcu_read_unlock();
1327 return this_cpu;
1328 }
1329
1330 /*
1331 * Last chance: if best_cpu is valid and is
1332 * in the mask, that becomes our choice.
1333 */
1334 if (best_cpu < nr_cpu_ids &&
1335 cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1336 rcu_read_unlock();
1337 return best_cpu;
1338 }
1339 }
1340 }
1341 rcu_read_unlock();
1342
1343 /*
1344 * At this point, all our guesses failed, we just return
1345 * 'something', and let the caller sort things out.
1346 */
1347 if (this_cpu != -1)
1348 return this_cpu;
1349
1350 cpu = cpumask_any(later_mask);
1351 if (cpu < nr_cpu_ids)
1352 return cpu;
1353
1354 return -1;
1355}
1356
1357/* Locks the rq it finds */
1358static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1359{
1360 struct rq *later_rq = NULL;
1361 int tries;
1362 int cpu;
1363
1364 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1365 cpu = find_later_rq(task);
1366
1367 if ((cpu == -1) || (cpu == rq->cpu))
1368 break;
1369
1370 later_rq = cpu_rq(cpu);
1371
1372 if (later_rq->dl.dl_nr_running &&
1373 !dl_time_before(task->dl.deadline,
1374 later_rq->dl.earliest_dl.curr)) {
1375 /*
1376 * Target rq has tasks of equal or earlier deadline,
1377 * retrying does not release any lock and is unlikely
1378 * to yield a different result.
1379 */
1380 later_rq = NULL;
1381 break;
1382 }
1383
1384 /* Retry if something changed. */
1385 if (double_lock_balance(rq, later_rq)) {
1386 if (unlikely(task_rq(task) != rq ||
1387 !cpumask_test_cpu(later_rq->cpu,
1388 tsk_cpus_allowed(task)) ||
1389 task_running(rq, task) ||
1390 !dl_task(task) ||
1391 !task_on_rq_queued(task))) {
1392 double_unlock_balance(rq, later_rq);
1393 later_rq = NULL;
1394 break;
1395 }
1396 }
1397
1398 /*
1399 * If the rq we found has no -deadline task, or
1400 * its earliest one has a later deadline than our
1401 * task, the rq is a good one.
1402 */
1403 if (!later_rq->dl.dl_nr_running ||
1404 dl_time_before(task->dl.deadline,
1405 later_rq->dl.earliest_dl.curr))
1406 break;
1407
1408 /* Otherwise we try again. */
1409 double_unlock_balance(rq, later_rq);
1410 later_rq = NULL;
1411 }
1412
1413 return later_rq;
1414}
1415
1416static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1417{
1418 struct task_struct *p;
1419
1420 if (!has_pushable_dl_tasks(rq))
1421 return NULL;
1422
1423 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1424 struct task_struct, pushable_dl_tasks);
1425
1426 BUG_ON(rq->cpu != task_cpu(p));
1427 BUG_ON(task_current(rq, p));
1428 BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
1429
1430 BUG_ON(!task_on_rq_queued(p));
1431 BUG_ON(!dl_task(p));
1432
1433 return p;
1434}
1435
1436/*
1437 * See if the non-running -deadline tasks on this rq
1438 * can be sent to some other CPU where they can preempt
1439 * and start executing.
1440 */
1441static int push_dl_task(struct rq *rq)
1442{
1443 struct task_struct *next_task;
1444 struct rq *later_rq;
1445 int ret = 0;
1446
1447 if (!rq->dl.overloaded)
1448 return 0;
1449
1450 next_task = pick_next_pushable_dl_task(rq);
1451 if (!next_task)
1452 return 0;
1453
1454retry:
1455 if (unlikely(next_task == rq->curr)) {
1456 WARN_ON(1);
1457 return 0;
1458 }
1459
1460 /*
1461 * If next_task preempts rq->curr, and rq->curr
1462 * can move away, it makes sense to just reschedule
1463 * without going further in pushing next_task.
1464 */
1465 if (dl_task(rq->curr) &&
1466 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1467 tsk_nr_cpus_allowed(rq->curr) > 1) {
1468 resched_curr(rq);
1469 return 0;
1470 }
1471
1472 /* We might release rq lock */
1473 get_task_struct(next_task);
1474
1475 /* Will lock the rq it'll find */
1476 later_rq = find_lock_later_rq(next_task, rq);
1477 if (!later_rq) {
1478 struct task_struct *task;
1479
1480 /*
1481 * We must check all this again, since
1482 * find_lock_later_rq releases rq->lock and it is
1483 * then possible that next_task has migrated.
1484 */
1485 task = pick_next_pushable_dl_task(rq);
1486 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1487 /*
1488 * The task is still there. We don't try
1489 * again; some other cpu will pull it when ready.
1490 */
1491 goto out;
1492 }
1493
1494 if (!task)
1495 /* No more tasks */
1496 goto out;
1497
1498 put_task_struct(next_task);
1499 next_task = task;
1500 goto retry;
1501 }
1502
1503 deactivate_task(rq, next_task, 0);
1504 set_task_cpu(next_task, later_rq->cpu);
1505 activate_task(later_rq, next_task, 0);
1506 ret = 1;
1507
1508 resched_curr(later_rq);
1509
1510 double_unlock_balance(rq, later_rq);
1511
1512out:
1513 put_task_struct(next_task);
1514
1515 return ret;
1516}
1517
1518static void push_dl_tasks(struct rq *rq)
1519{
1520 /* push_dl_task() will return true if it moved a -deadline task */
1521 while (push_dl_task(rq))
1522 ;
1523}
1524
1525static void pull_dl_task(struct rq *this_rq)
1526{
1527 int this_cpu = this_rq->cpu, cpu;
1528 struct task_struct *p;
1529 bool resched = false;
1530 struct rq *src_rq;
1531 u64 dmin = LONG_MAX;
1532
1533 if (likely(!dl_overloaded(this_rq)))
1534 return;
1535
1536 /*
1537 * Match the barrier from dl_set_overload(); this guarantees that if we
1538 * see overloaded we must also see the dlo_mask bit.
1539 */
1540 smp_rmb();
1541
1542 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1543 if (this_cpu == cpu)
1544 continue;
1545
1546 src_rq = cpu_rq(cpu);
1547
1548 /*
1549 * It looks racy, and it is! However, as in sched_rt.c,
1550 * we are fine with this.
1551 */
1552 if (this_rq->dl.dl_nr_running &&
1553 dl_time_before(this_rq->dl.earliest_dl.curr,
1554 src_rq->dl.earliest_dl.next))
1555 continue;
1556
1557 /* Might drop this_rq->lock */
1558 double_lock_balance(this_rq, src_rq);
1559
1560 /*
1561 * If there are no more pullable tasks on the
1562 * rq, we're done with it.
1563 */
1564 if (src_rq->dl.dl_nr_running <= 1)
1565 goto skip;
1566
1567 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1568
1569 /*
1570 * We found a task to be pulled if:
1571 * - it preempts our current (if there's one),
1572 * - it will preempt the last one we pulled (if any).
1573 */
1574 if (p && dl_time_before(p->dl.deadline, dmin) &&
1575 (!this_rq->dl.dl_nr_running ||
1576 dl_time_before(p->dl.deadline,
1577 this_rq->dl.earliest_dl.curr))) {
1578 WARN_ON(p == src_rq->curr);
1579 WARN_ON(!task_on_rq_queued(p));
1580
1581 /*
1582 * We pull only if p does not preempt the current task of its
1583 * runqueue: if it did, it would be about to run there anyway.
1584 */
1585 if (dl_time_before(p->dl.deadline,
1586 src_rq->curr->dl.deadline))
1587 goto skip;
1588
1589 resched = true;
1590
1591 deactivate_task(src_rq, p, 0);
1592 set_task_cpu(p, this_cpu);
1593 activate_task(this_rq, p, 0);
1594 dmin = p->dl.deadline;
1595
1596 /* Is there any other task even earlier? */
1597 }
1598skip:
1599 double_unlock_balance(this_rq, src_rq);
1600 }
1601
1602 if (resched)
1603 resched_curr(this_rq);
1604}
1605
1606/*
1607 * Since the task is not running and a reschedule is not going to happen
1608 * anytime soon on its runqueue, we try pushing it away now.
1609 */
1610static void task_woken_dl(struct rq *rq, struct task_struct *p)
1611{
1612 if (!task_running(rq, p) &&
1613 !test_tsk_need_resched(rq->curr) &&
1614 tsk_nr_cpus_allowed(p) > 1 &&
1615 dl_task(rq->curr) &&
1616 (tsk_nr_cpus_allowed(rq->curr) < 2 ||
1617 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1618 push_dl_tasks(rq);
1619 }
1620}
1621
1622static void set_cpus_allowed_dl(struct task_struct *p,
1623 const struct cpumask *new_mask)
1624{
1625 struct root_domain *src_rd;
1626 struct rq *rq;
1627
1628 BUG_ON(!dl_task(p));
1629
1630 rq = task_rq(p);
1631 src_rd = rq->rd;
1632 /*
1633 * Migrating a SCHED_DEADLINE task between exclusive
1634 * cpusets (different root_domains) entails a bandwidth
1635 * update. We already made space for us in the destination
1636 * domain (see cpuset_can_attach()).
1637 */
1638 if (!cpumask_intersects(src_rd->span, new_mask)) {
1639 struct dl_bw *src_dl_b;
1640
1641 src_dl_b = dl_bw_of(cpu_of(rq));
1642 /*
1643 * We now free resources of the root_domain we are migrating
1644 * off. In the worst case, sched_setattr() may temporarily fail
1645 * until we complete the update.
1646 */
1647 raw_spin_lock(&src_dl_b->lock);
1648 __dl_clear(src_dl_b, p->dl.dl_bw);
1649 raw_spin_unlock(&src_dl_b->lock);
1650 }
1651
1652 set_cpus_allowed_common(p, new_mask);
1653}
1654
1655/* Assumes rq->lock is held */
1656static void rq_online_dl(struct rq *rq)
1657{
1658 if (rq->dl.overloaded)
1659 dl_set_overload(rq);
1660
1661 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1662 if (rq->dl.dl_nr_running > 0)
1663 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
1664}
1665
1666/* Assumes rq->lock is held */
1667static void rq_offline_dl(struct rq *rq)
1668{
1669 if (rq->dl.overloaded)
1670 dl_clear_overload(rq);
1671
1672 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1673 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1674}
1675
1676void __init init_sched_dl_class(void)
1677{
1678 unsigned int i;
1679
1680 for_each_possible_cpu(i)
1681 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1682 GFP_KERNEL, cpu_to_node(i));
1683}
1684
1685#endif /* CONFIG_SMP */
1686
1687static void switched_from_dl(struct rq *rq, struct task_struct *p)
1688{
1689 /*
1690 * Start the deadline timer; if we switch back to dl before this we'll
1691 * continue consuming our current CBS slice. If we stay outside of
1692 * SCHED_DEADLINE until the deadline passes, the timer will reset the
1693 * task.
1694 */
1695 if (!start_dl_timer(p))
1696 __dl_clear_params(p);
1697
1698 /*
1699 * Since this might be the only -deadline task on the rq,
1700 * this is the right place to try to pull some other one
1701 * from an overloaded cpu, if any.
1702 */
1703 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1704 return;
1705
1706 queue_pull_task(rq);
1707}
1708
1709/*
1710 * When switching to -deadline, we may overload the rq, so
1711 * we try to push someone off, if possible.
1712 */
1713static void switched_to_dl(struct rq *rq, struct task_struct *p)
1714{
1715
1716 /* If p is not queued we will update its parameters at next wakeup. */
1717 if (!task_on_rq_queued(p))
1718 return;
1719
1720 /*
1721 * If p is boosted we already updated its params in
1722 * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH),
1723 * so p's deadline is now already after rq_clock(rq).
1724 */
1725 if (dl_time_before(p->dl.deadline, rq_clock(rq)))
1726 setup_new_dl_entity(&p->dl);
1727
1728 if (rq->curr != p) {
1729#ifdef CONFIG_SMP
1730 if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
1731 queue_push_tasks(rq);
1732#endif
1733 if (dl_task(rq->curr))
1734 check_preempt_curr_dl(rq, p, 0);
1735 else
1736 resched_curr(rq);
1737 }
1738}
1739
1740/*
1741 * If the scheduling parameters of a -deadline task changed,
1742 * a push or pull operation might be needed.
1743 */
1744static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1745 int oldprio)
1746{
1747 if (task_on_rq_queued(p) || rq->curr == p) {
1748#ifdef CONFIG_SMP
1749 /*
1750 * This might be too much, but unfortunately
1751 * we don't have the old deadline value, and
1752 * we can't tell whether the task is increasing
1753 * or lowering its prio, so...
1754 */
1755 if (!rq->dl.overloaded)
1756 queue_pull_task(rq);
1757
1758 /*
1759 * If we now have an earlier deadline task than p,
1760 * then reschedule, provided p is still on this
1761 * runqueue.
1762 */
1763 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
1764 resched_curr(rq);
1765#else
1766 /*
1767 * Again, we don't know if p has an earlier
1768 * or later deadline, so let's blindly set a
1769 * (maybe not needed) rescheduling point.
1770 */
1771 resched_curr(rq);
1772#endif /* CONFIG_SMP */
1773 }
1774}
1775
1776const struct sched_class dl_sched_class = {
1777 .next = &rt_sched_class,
1778 .enqueue_task = enqueue_task_dl,
1779 .dequeue_task = dequeue_task_dl,
1780 .yield_task = yield_task_dl,
1781
1782 .check_preempt_curr = check_preempt_curr_dl,
1783
1784 .pick_next_task = pick_next_task_dl,
1785 .put_prev_task = put_prev_task_dl,
1786
1787#ifdef CONFIG_SMP
1788 .select_task_rq = select_task_rq_dl,
1789 .set_cpus_allowed = set_cpus_allowed_dl,
1790 .rq_online = rq_online_dl,
1791 .rq_offline = rq_offline_dl,
1792 .task_woken = task_woken_dl,
1793#endif
1794
1795 .set_curr_task = set_curr_task_dl,
1796 .task_tick = task_tick_dl,
1797 .task_fork = task_fork_dl,
1798 .task_dead = task_dead_dl,
1799
1800 .prio_changed = prio_changed_dl,
1801 .switched_from = switched_from_dl,
1802 .switched_to = switched_to_dl,
1803
1804 .update_curr = update_curr_dl,
1805};
1806
1807#ifdef CONFIG_SCHED_DEBUG
1808extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1809
1810void print_dl_stats(struct seq_file *m, int cpu)
1811{
1812 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1813}
1814#endif /* CONFIG_SCHED_DEBUG */