v4.10.11
 
   1/*
   2 * Deadline Scheduling Class (SCHED_DEADLINE)
   3 *
   4 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
   5 *
   6 * Tasks that periodically execute their instances for less than their
   7 * runtime won't miss any of their deadlines.
   8 * Tasks that are not periodic or sporadic, or that try to execute more
   9 * than their reserved bandwidth will be slowed down (and may potentially
  10 * miss some of their deadlines), and won't affect any other task.
  11 *
  12 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
  13 *                    Juri Lelli <juri.lelli@gmail.com>,
  14 *                    Michael Trimarchi <michael@amarulasolutions.com>,
  15 *                    Fabio Checconi <fchecconi@gmail.com>
  16 */
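/*
 * Example (illustrative numbers only): a task with runtime = 10ms and
 * deadline = period = 100ms reserves 10ms of CPU time in every 100ms
 * window, i.e. a bandwidth of 0.1. On a single CPU, EDF can admit such
 * reservations as long as their bandwidths sum to at most 1, e.g. ten
 * tasks with these parameters.
 */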
  17#include "sched.h"
  18
  19#include <linux/slab.h>
  20
  21struct dl_bandwidth def_dl_bandwidth;
  22
  23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
  24{
  25	return container_of(dl_se, struct task_struct, dl);
  26}
  27
  28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
  29{
  30	return container_of(dl_rq, struct rq, dl);
  31}
  32
  33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
  34{
  35	struct task_struct *p = dl_task_of(dl_se);
  36	struct rq *rq = task_rq(p);
  37
  38	return &rq->dl;
  39}
  40
  41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
  42{
  43	return !RB_EMPTY_NODE(&dl_se->rb_node);
  44}
  45
  46static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
  47{
  48	struct sched_dl_entity *dl_se = &p->dl;
  49
  50	return dl_rq->rb_leftmost == &dl_se->rb_node;
  51}
  52
  53void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
  54{
  55	raw_spin_lock_init(&dl_b->dl_runtime_lock);
  56	dl_b->dl_period = period;
  57	dl_b->dl_runtime = runtime;
  58}
  59
  60void init_dl_bw(struct dl_bw *dl_b)
  61{
  62	raw_spin_lock_init(&dl_b->lock);
  63	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
  64	if (global_rt_runtime() == RUNTIME_INF)
  65		dl_b->bw = -1;
  66	else
  67		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
  68	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
  69	dl_b->total_bw = 0;
  70}
  71
  72void init_dl_rq(struct dl_rq *dl_rq)
  73{
  74	dl_rq->rb_root = RB_ROOT;
  75
  76#ifdef CONFIG_SMP
  77	/* zero means no -deadline tasks */
  78	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
  79
  80	dl_rq->dl_nr_migratory = 0;
  81	dl_rq->overloaded = 0;
  82	dl_rq->pushable_dl_tasks_root = RB_ROOT;
  83#else
  84	init_dl_bw(&dl_rq->dl_bw);
  85#endif
  86}
  87
  88#ifdef CONFIG_SMP
  89
  90static inline int dl_overloaded(struct rq *rq)
  91{
  92	return atomic_read(&rq->rd->dlo_count);
  93}
  94
  95static inline void dl_set_overload(struct rq *rq)
  96{
  97	if (!rq->online)
  98		return;
  99
 100	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
 101	/*
 102	 * Must be visible before the overload count is
 103	 * set (as in sched_rt.c).
 104	 *
 105	 * Matched by the barrier in pull_dl_task().
 106	 */
 107	smp_wmb();
 108	atomic_inc(&rq->rd->dlo_count);
 109}
 110
 111static inline void dl_clear_overload(struct rq *rq)
 112{
 113	if (!rq->online)
 114		return;
 115
 116	atomic_dec(&rq->rd->dlo_count);
 117	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
 118}
 119
 120static void update_dl_migration(struct dl_rq *dl_rq)
 121{
 122	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 123		if (!dl_rq->overloaded) {
 124			dl_set_overload(rq_of_dl_rq(dl_rq));
 125			dl_rq->overloaded = 1;
 126		}
 127	} else if (dl_rq->overloaded) {
 128		dl_clear_overload(rq_of_dl_rq(dl_rq));
 129		dl_rq->overloaded = 0;
 130	}
 131}
 132
 133static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 134{
 135	struct task_struct *p = dl_task_of(dl_se);
 136
 137	if (tsk_nr_cpus_allowed(p) > 1)
 138		dl_rq->dl_nr_migratory++;
 139
 140	update_dl_migration(dl_rq);
 141}
 142
 143static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 144{
 145	struct task_struct *p = dl_task_of(dl_se);
 146
 147	if (tsk_nr_cpus_allowed(p) > 1)
 148		dl_rq->dl_nr_migratory--;
 149
 150	update_dl_migration(dl_rq);
 151}
 152
 153/*
 154 * The list of pushable -deadline tasks is not a plist, like in
 155 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
 156 */
 157static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 158{
 159	struct dl_rq *dl_rq = &rq->dl;
 160	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
 161	struct rb_node *parent = NULL;
 162	struct task_struct *entry;
 163	int leftmost = 1;
 164
 165	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
 166
 167	while (*link) {
 168		parent = *link;
 169		entry = rb_entry(parent, struct task_struct,
 170				 pushable_dl_tasks);
 171		if (dl_entity_preempt(&p->dl, &entry->dl))
 172			link = &parent->rb_left;
 173		else {
 174			link = &parent->rb_right;
 175			leftmost = 0;
 176		}
 177	}
 178
 179	if (leftmost) {
 180		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
 181		dl_rq->earliest_dl.next = p->dl.deadline;
 182	}
 183
 184	rb_link_node(&p->pushable_dl_tasks, parent, link);
 185	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 186}
 187
 188static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 189{
 190	struct dl_rq *dl_rq = &rq->dl;
 191
 192	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
 193		return;
 194
 195	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
 196		struct rb_node *next_node;
 197
 198		next_node = rb_next(&p->pushable_dl_tasks);
 199		dl_rq->pushable_dl_tasks_leftmost = next_node;
 200		if (next_node) {
 201			dl_rq->earliest_dl.next = rb_entry(next_node,
 202				struct task_struct, pushable_dl_tasks)->dl.deadline;
 203		}
 204	}
 205
 206	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 207	RB_CLEAR_NODE(&p->pushable_dl_tasks);
 208}
 209
 210static inline int has_pushable_dl_tasks(struct rq *rq)
 211{
 212	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
 213}
 214
 215static int push_dl_task(struct rq *rq);
 216
 217static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 218{
 219	return dl_task(prev);
 220}
 221
 222static DEFINE_PER_CPU(struct callback_head, dl_push_head);
 223static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
 224
 225static void push_dl_tasks(struct rq *);
 226static void pull_dl_task(struct rq *);
 227
 228static inline void queue_push_tasks(struct rq *rq)
 229{
 230	if (!has_pushable_dl_tasks(rq))
 231		return;
 232
 233	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
 234}
 235
 236static inline void queue_pull_task(struct rq *rq)
 237{
 238	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
 239}
 240
 241static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 242
 243static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 244{
 245	struct rq *later_rq = NULL;
 246
 247	later_rq = find_lock_later_rq(p, rq);
 248	if (!later_rq) {
 249		int cpu;
 250
 251		/*
 252		 * If we cannot preempt any rq, fall back to pick any
 253		 * online cpu.
 254		 */
 255		cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
 256		if (cpu >= nr_cpu_ids) {
 257			/*
 258			 * Failed to find any suitable cpu.
 259			 * The task will never come back!
 260			 */
 261			BUG_ON(dl_bandwidth_enabled());
 262
 263			/*
 264			 * If admission control is disabled we
 265			 * try a little harder to let the task
 266			 * run.
 267			 */
 268			cpu = cpumask_any(cpu_active_mask);
 269		}
 270		later_rq = cpu_rq(cpu);
 271		double_lock_balance(rq, later_rq);
 272	}
 273
 274	set_task_cpu(p, later_rq->cpu);
 275	double_unlock_balance(later_rq, rq);
 276
 277	return later_rq;
 278}
 279
 280#else
 281
 282static inline
 283void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 284{
 285}
 286
 287static inline
 288void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 289{
 290}
 291
 292static inline
 293void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 294{
 295}
 296
 297static inline
 298void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 299{
 300}
 301
 302static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 303{
 304	return false;
 305}
 306
 307static inline void pull_dl_task(struct rq *rq)
 308{
 309}
 310
 311static inline void queue_push_tasks(struct rq *rq)
 312{
 313}
 314
 315static inline void queue_pull_task(struct rq *rq)
 316{
 317}
 318#endif /* CONFIG_SMP */
 319
 320static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 321static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 322static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
 323				  int flags);
 324
 325/*
 326 * We are being explicitly informed that a new instance is starting,
 327 * and this means that:
 328 *  - the absolute deadline of the entity has to be placed at
 329 *    current time + relative deadline;
 330 *  - the runtime of the entity has to be set to the maximum value.
 331 *
 332 * The capability of specifying such an event is useful whenever a -deadline
 333 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 334 * one, and to (try to!) reconcile itself with its own scheduling
 335 * parameters.
 336 */
 337static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
 338{
 339	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 340	struct rq *rq = rq_of_dl_rq(dl_rq);
 341
 342	WARN_ON(dl_se->dl_boosted);
 343	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
 344
 345	/*
 346	 * We are racing with the deadline timer. So, do nothing because
 347	 * the deadline timer handler will take care of properly recharging
 348	 * the runtime and postponing the deadline
 349	 */
 350	if (dl_se->dl_throttled)
 351		return;
 352
 353	/*
 354	 * We use the regular wall clock time to set deadlines in the
 355	 * future; in fact, we must consider execution overheads (time
 356	 * spent on hardirq context, etc.).
 357	 */
 358	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
 359	dl_se->runtime = dl_se->dl_runtime;
 360}
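/*
 * Example (illustrative numbers only): if a new instance starts at
 * rq_clock(rq) = 500ms with dl_deadline = 100ms and dl_runtime = 10ms,
 * the function above sets the absolute deadline to 600ms and the
 * remaining runtime to the full 10ms.
 */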
 361
 362/*
 363 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 364 * possibility of an entity lasting more than what it declared, and thus
 365 * exhausting its runtime.
 366 *
 367 * Here we are interested in making runtime overrun possible, but we do
 368 * not want an entity that is misbehaving to affect the scheduling of all
 369 * other entities.
 370 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 371 * is used, in order to confine each entity within its own bandwidth.
 372 *
 373 * This function deals exactly with that, and ensures that when the runtime
 374 * of an entity is replenished, its deadline is also postponed. That ensures
 375 * the overrunning entity can't interfere with other entities in the system and
 376 * can't make them miss their deadlines. Reasons why this kind of overrun
 377 * could happen are, typically, an entity voluntarily trying to exceed its
 378 * runtime, or having underestimated it during sched_setattr().
 379 */
 380static void replenish_dl_entity(struct sched_dl_entity *dl_se,
 381				struct sched_dl_entity *pi_se)
 382{
 383	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 384	struct rq *rq = rq_of_dl_rq(dl_rq);
 385
 386	BUG_ON(pi_se->dl_runtime <= 0);
 387
 388	/*
 389	 * This could be the case for a !-dl task that is boosted.
 390	 * Just go with full inherited parameters.
 391	 */
 392	if (dl_se->dl_deadline == 0) {
 393		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 394		dl_se->runtime = pi_se->dl_runtime;
 395	}
 396
 397	if (dl_se->dl_yielded && dl_se->runtime > 0)
 398		dl_se->runtime = 0;
 399
 400	/*
 401	 * We keep moving the deadline away until we get some
 402	 * available runtime for the entity. This ensures correct
 403	 * handling of situations where the runtime overrun is
 404	 * arbitrarily large.
 405	 */
 406	while (dl_se->runtime <= 0) {
 407		dl_se->deadline += pi_se->dl_period;
 408		dl_se->runtime += pi_se->dl_runtime;
 409	}
 410
 411	/*
 412	 * At this point, the deadline really should be "in
 413	 * the future" with respect to rq->clock. If it's
 414	 * not, we are, for some reason, lagging too much!
 415	 * Anyway, after having warned userspace about that,
 416	 * we still try to keep things running by
 417	 * resetting the deadline and the budget of the
 418	 * entity.
 419	 */
 420	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
 421		printk_deferred_once("sched: DL replenish lagged too much\n");
 422		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 423		dl_se->runtime = pi_se->dl_runtime;
 424	}
 425
 426	if (dl_se->dl_yielded)
 427		dl_se->dl_yielded = 0;
 428	if (dl_se->dl_throttled)
 429		dl_se->dl_throttled = 0;
 430}
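/*
 * Example of the replenishment loop above (illustrative numbers only):
 * with dl_period = 100ms, dl_runtime = 10ms and an overrun that left
 * runtime = -25ms, the loop iterates three times, ending with
 * runtime = -25 + 3*10 = 5ms and the deadline pushed 3*100 = 300ms
 * further out; an arbitrarily large overrun is thus paid for by
 * postponing the deadline instead of disturbing other entities.
 */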
 431
 432/*
 433 * Here we check if --at time t-- an entity (which is probably being
 434 * [re]activated or, in general, enqueued) can use its remaining runtime
 435 * and its current deadline _without_ exceeding the bandwidth it is
 436 * assigned (function returns true if it can't). We are in fact applying
 437 * one of the CBS rules: when a task wakes up, if the residual runtime
 438 * over residual deadline fits within the allocated bandwidth, then we
 439 * can keep the current (absolute) deadline and residual budget without
 440 * disrupting the schedulability of the system. Otherwise, we should
 441 * refill the runtime and set the deadline a period in the future,
 442 * because keeping the current (absolute) deadline of the task would
 443 * result in breaking guarantees promised to other tasks (refer to
 444 * Documentation/scheduler/sched-deadline.txt for more information).
 445 *
 446 * This function returns true if:
 447 *
 448 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 449 *
 450 * IOW we can't recycle current parameters.
 451 *
 452 * Notice that the bandwidth check is done against the period. For
 453 * tasks with deadline equal to period this is the same as using
 454 * dl_deadline instead of dl_period in the equation above.
 455 */
 456static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 457			       struct sched_dl_entity *pi_se, u64 t)
 458{
 459	u64 left, right;
 460
 461	/*
 462	 * left and right are the two sides of the equation above,
 463	 * after a bit of shuffling to use multiplications instead
 464	 * of divisions.
 465	 *
 466	 * Note that none of the time values involved in the two
 467	 * multiplications are absolute: dl_deadline and dl_runtime
 468	 * are the relative deadline and the maximum runtime of each
 469	 * instance, runtime is the runtime left for the last instance
 470	 * and (deadline - t), since t is rq->clock, is the time left
 471	 * to the (absolute) deadline. Even if overflowing the u64 type
 472	 * is very unlikely to occur in both cases, here we scale down
 473	 * as we want to avoid that risk at all. Scaling down by 10
 474	 * means that we reduce granularity to 1us. We are fine with it,
 475	 * since this is only a true/false check and, anyway, thinking
 476	 * of anything below microsecond resolution is actually fiction
 477	 * (but still we want to give the user that illusion >;).
 478	 */
 479	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
 480	right = ((dl_se->deadline - t) >> DL_SCALE) *
 481		(pi_se->dl_runtime >> DL_SCALE);
 482
 483	return dl_time_before(right, left);
 484}
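/*
 * Example (illustrative numbers only, DL_SCALE shifts left out since they
 * only coarsen the granularity): with dl_runtime = 10ms and dl_period =
 * 100ms (bandwidth 0.1), a task waking with 6ms of residual runtime and
 * 30ms left to its old deadline would need 6/30 = 0.2 of the CPU, i.e.
 * left = 100 * 6 = 600 > right = 30 * 10 = 300, so the function returns
 * true and the caller must pick a fresh deadline and runtime.
 */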
 485
 486/*
 487 * When a -deadline entity is queued back on the runqueue, its runtime and
 488 * deadline might need updating.
 489 *
 490 * The policy here is that we update the deadline of the entity only if:
 491 *  - the current deadline is in the past,
 492 *  - using the remaining runtime with the current deadline would make
 493 *    the entity exceed its bandwidth.
 494 */
 495static void update_dl_entity(struct sched_dl_entity *dl_se,
 496			     struct sched_dl_entity *pi_se)
 497{
 498	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 499	struct rq *rq = rq_of_dl_rq(dl_rq);
 500
 501	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 502	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
 503		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 504		dl_se->runtime = pi_se->dl_runtime;
 505	}
 506}
 507
 508/*
 509 * If the entity depleted all its runtime, and if we want it to sleep
 510 * while waiting for some new execution time to become available, we
 511 * set the bandwidth enforcement timer to the replenishment instant
 512 * and try to activate it.
 513 *
 514 * Notice that it is important for the caller to know if the timer
 515 * actually started or not (i.e., the replenishment instant is in
 516 * the future or in the past).
 517 */
 518static int start_dl_timer(struct task_struct *p)
 519{
 520	struct sched_dl_entity *dl_se = &p->dl;
 521	struct hrtimer *timer = &dl_se->dl_timer;
 522	struct rq *rq = task_rq(p);
 523	ktime_t now, act;
 524	s64 delta;
 525
 526	lockdep_assert_held(&rq->lock);
 527
 528	/*
 529	 * We want the timer to fire at the deadline, but considering
 530	 * that it is actually coming from rq->clock and not from
 531	 * hrtimer's time base reading.
 532	 */
 533	act = ns_to_ktime(dl_se->deadline);
 534	now = hrtimer_cb_get_time(timer);
 535	delta = ktime_to_ns(now) - rq_clock(rq);
 536	act = ktime_add_ns(act, delta);
 537
 538	/*
 539	 * If the expiry time already passed, e.g., because the value
 540	 * chosen as the deadline is too small, don't even try to
 541	 * start the timer in the past!
 542	 */
 543	if (ktime_us_delta(act, now) < 0)
 544		return 0;
 545
 546	/*
 547	 * !enqueued will guarantee another callback; even if one is already in
 548	 * progress. This ensures a balanced {get,put}_task_struct().
 549	 *
 550	 * The race against __run_timer() clearing the enqueued state is
 551	 * harmless because we're holding task_rq()->lock, therefore the timer
 552	 * expiring after we've done the check will wait on its task_rq_lock()
 553	 * and observe our state.
 554	 */
 555	if (!hrtimer_is_queued(timer)) {
 556		get_task_struct(p);
 557		hrtimer_start(timer, act, HRTIMER_MODE_ABS);
 558	}
 559
 560	return 1;
 561}
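/*
 * Example of the clock-domain conversion above (illustrative numbers
 * only): if dl_se->deadline = 1,000,000ns on the rq clock, rq_clock(rq) =
 * 940,000ns and the hrtimer base reads 950,000ns, then delta = 10,000ns
 * and act = 1,010,000ns, i.e. the timer is armed 60,000ns in the future,
 * which is exactly how far the deadline is from rq_clock(rq).
 */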
 562
 563/*
 564 * This is the bandwidth enforcement timer callback. If here, we know
 565 * a task is not on its dl_rq, since the fact that the timer was running
 566 * means the task is throttled and needs a runtime replenishment.
 567 *
 568 * However, what we actually do depends on the fact the task is active,
 569 * (it is on its rq) or has been removed from there by a call to
 570 * dequeue_task_dl(). In the former case we must issue the runtime
 571 * replenishment and add the task back to the dl_rq; in the latter, we just
 572 * do nothing but clearing dl_throttled, so that runtime and deadline
 573 * updating (and the queueing back to dl_rq) will be done by the
 574 * next call to enqueue_task_dl().
 575 */
 576static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 577{
 578	struct sched_dl_entity *dl_se = container_of(timer,
 579						     struct sched_dl_entity,
 580						     dl_timer);
 581	struct task_struct *p = dl_task_of(dl_se);
 582	struct rq_flags rf;
 583	struct rq *rq;
 584
 585	rq = task_rq_lock(p, &rf);
 586
 587	/*
 588	 * The task might have changed its scheduling policy to something
 589	 * different than SCHED_DEADLINE (through switched_from_dl()).
 590	 */
 591	if (!dl_task(p)) {
 592		__dl_clear_params(p);
 593		goto unlock;
 594	}
 595
 596	/*
 597	 * The task might have been boosted by someone else and might be in the
 598	 * boosting/deboosting path; it's not throttled.
 599	 */
 600	if (dl_se->dl_boosted)
 601		goto unlock;
 602
 603	/*
 604	 * Spurious timer due to start_dl_timer() race; or we already received
 605	 * a replenishment from rt_mutex_setprio().
 606	 */
 607	if (!dl_se->dl_throttled)
 608		goto unlock;
 609
 610	sched_clock_tick();
 611	update_rq_clock(rq);
 612
 613	/*
 614	 * If the throttle happened during sched-out; like:
 615	 *
 616	 *   schedule()
 617	 *     deactivate_task()
 618	 *       dequeue_task_dl()
 619	 *         update_curr_dl()
 620	 *           start_dl_timer()
 621	 *         __dequeue_task_dl()
 622	 *     prev->on_rq = 0;
 623	 *
 624	 * We can be both throttled and !queued. Replenish the counter
 625	 * but do not enqueue -- wait for our wakeup to do that.
 626	 */
 627	if (!task_on_rq_queued(p)) {
 628		replenish_dl_entity(dl_se, dl_se);
 629		goto unlock;
 630	}
 631
 632#ifdef CONFIG_SMP
 633	if (unlikely(!rq->online)) {
 634		/*
 635		 * If the runqueue is no longer available, migrate the
 636		 * task elsewhere. This necessarily changes rq.
 637		 */
 638		lockdep_unpin_lock(&rq->lock, rf.cookie);
 639		rq = dl_task_offline_migration(rq, p);
 640		rf.cookie = lockdep_pin_lock(&rq->lock);
 641
 642		/*
 643		 * Now that the task has been migrated to the new RQ and we
 644		 * have that locked, proceed as normal and enqueue the task
 645		 * there.
 646		 */
 647	}
 648#endif
 649
 650	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
 651	if (dl_task(rq->curr))
 652		check_preempt_curr_dl(rq, p, 0);
 653	else
 654		resched_curr(rq);
 655
 656#ifdef CONFIG_SMP
 657	/*
 658	 * Queueing this task back might have overloaded rq, check if we need
 659	 * to kick someone away.
 660	 */
 661	if (has_pushable_dl_tasks(rq)) {
 662		/*
 663		 * Nothing relies on rq->lock after this, so it's safe to drop
 664		 * rq->lock.
 665		 */
 666		lockdep_unpin_lock(&rq->lock, rf.cookie);
 667		push_dl_task(rq);
 668		lockdep_repin_lock(&rq->lock, rf.cookie);
 669	}
 670#endif
 671
 672unlock:
 673	task_rq_unlock(rq, p, &rf);
 674
 675	/*
 676	 * This can free the task_struct, including this hrtimer, do not touch
 677	 * anything related to that after this.
 678	 */
 679	put_task_struct(p);
 680
 681	return HRTIMER_NORESTART;
 682}
 683
 684void init_dl_task_timer(struct sched_dl_entity *dl_se)
 685{
 686	struct hrtimer *timer = &dl_se->dl_timer;
 687
 688	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 689	timer->function = dl_task_timer;
 690}
 691
 692static
 693int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
 694{
 695	return (dl_se->runtime <= 0);
 696}
 697
 698extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
 699
 700/*
 701 * Update the current task's runtime statistics (provided it is still
 702 * a -deadline task and has not been removed from the dl_rq).
 703 */
 704static void update_curr_dl(struct rq *rq)
 705{
 706	struct task_struct *curr = rq->curr;
 707	struct sched_dl_entity *dl_se = &curr->dl;
 708	u64 delta_exec;
 709
 710	if (!dl_task(curr) || !on_dl_rq(dl_se))
 711		return;
 712
 713	/*
 714	 * Consumed budget is computed considering the time as
 715	 * observed by schedulable tasks (excluding time spent
 716	 * in hardirq context, etc.). Deadlines are instead
 717	 * computed using hard walltime. This seems to be the more
 718	 * natural solution, but the full ramifications of this
 719	 * approach need further study.
 720	 */
 721	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
 722	if (unlikely((s64)delta_exec <= 0)) {
 723		if (unlikely(dl_se->dl_yielded))
 724			goto throttle;
 725		return;
 726	}
 727
 728	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
 729	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_DL);
 730
 731	schedstat_set(curr->se.statistics.exec_max,
 732		      max(curr->se.statistics.exec_max, delta_exec));
 733
 734	curr->se.sum_exec_runtime += delta_exec;
 735	account_group_exec_runtime(curr, delta_exec);
 736
 737	curr->se.exec_start = rq_clock_task(rq);
 738	cpuacct_charge(curr, delta_exec);
 739
 740	sched_rt_avg_update(rq, delta_exec);
 741
 742	dl_se->runtime -= delta_exec;
 743
 744throttle:
 745	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
 746		dl_se->dl_throttled = 1;
 747		__dequeue_task_dl(rq, curr, 0);
 748		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
 749			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
 750
 751		if (!is_leftmost(curr, &rq->dl))
 752			resched_curr(rq);
 753	}
 754
 755	/*
 756	 * Because -- for now -- we share the rt bandwidth, we need to
 757	 * account our runtime there too, otherwise actual rt tasks
 758	 * would be able to exceed the shared quota.
 759	 *
 760	 * Account to the root rt group for now.
 761	 *
 762	 * The solution we're working towards is having the RT groups scheduled
 763	 * using deadline servers -- however there's a few nasties to figure
 764	 * out before that can happen.
 765	 */
 766	if (rt_bandwidth_enabled()) {
 767		struct rt_rq *rt_rq = &rq->rt;
 768
 769		raw_spin_lock(&rt_rq->rt_runtime_lock);
 770		/*
 771		 * We'll let actual RT tasks worry about the overflow here, we
 772		 * have our own CBS to keep us inline; only account when RT
 773		 * bandwidth is relevant.
 774		 */
 775		if (sched_rt_bandwidth_account(rt_rq))
 776			rt_rq->rt_time += delta_exec;
 777		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 778	}
 779}
 780
 781#ifdef CONFIG_SMP
 782
 783static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 784{
 785	struct rq *rq = rq_of_dl_rq(dl_rq);
 786
 787	if (dl_rq->earliest_dl.curr == 0 ||
 788	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
 789		dl_rq->earliest_dl.curr = deadline;
 790		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
 791	}
 792}
 793
 794static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 795{
 796	struct rq *rq = rq_of_dl_rq(dl_rq);
 797
 798	/*
 799	 * Since we may have removed our earliest (and/or next earliest)
 800	 * task we must recompute them.
 801	 */
 802	if (!dl_rq->dl_nr_running) {
 803		dl_rq->earliest_dl.curr = 0;
 804		dl_rq->earliest_dl.next = 0;
 805		cpudl_clear(&rq->rd->cpudl, rq->cpu);
 806	} else {
 807		struct rb_node *leftmost = dl_rq->rb_leftmost;
 808		struct sched_dl_entity *entry;
 809
 810		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
 811		dl_rq->earliest_dl.curr = entry->deadline;
 812		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
 813	}
 814}
 815
 816#else
 817
 818static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
 819static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
 820
 821#endif /* CONFIG_SMP */
 822
 823static inline
 824void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 825{
 826	int prio = dl_task_of(dl_se)->prio;
 827	u64 deadline = dl_se->deadline;
 828
 829	WARN_ON(!dl_prio(prio));
 830	dl_rq->dl_nr_running++;
 831	add_nr_running(rq_of_dl_rq(dl_rq), 1);
 832
 833	inc_dl_deadline(dl_rq, deadline);
 834	inc_dl_migration(dl_se, dl_rq);
 835}
 836
 837static inline
 838void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 839{
 840	int prio = dl_task_of(dl_se)->prio;
 841
 842	WARN_ON(!dl_prio(prio));
 843	WARN_ON(!dl_rq->dl_nr_running);
 844	dl_rq->dl_nr_running--;
 845	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
 846
 847	dec_dl_deadline(dl_rq, dl_se->deadline);
 848	dec_dl_migration(dl_se, dl_rq);
 849}
 850
 851static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
 852{
 853	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 854	struct rb_node **link = &dl_rq->rb_root.rb_node;
 855	struct rb_node *parent = NULL;
 856	struct sched_dl_entity *entry;
 857	int leftmost = 1;
 858
 859	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
 860
 861	while (*link) {
 862		parent = *link;
 863		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
 864		if (dl_time_before(dl_se->deadline, entry->deadline))
 865			link = &parent->rb_left;
 866		else {
 867			link = &parent->rb_right;
 868			leftmost = 0;
 869		}
 870	}
 871
 872	if (leftmost)
 873		dl_rq->rb_leftmost = &dl_se->rb_node;
 874
 875	rb_link_node(&dl_se->rb_node, parent, link);
 876	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
 877
 878	inc_dl_tasks(dl_se, dl_rq);
 879}
 880
 881static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
 882{
 883	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 884
 885	if (RB_EMPTY_NODE(&dl_se->rb_node))
 886		return;
 887
 888	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
 889		struct rb_node *next_node;
 890
 891		next_node = rb_next(&dl_se->rb_node);
 892		dl_rq->rb_leftmost = next_node;
 893	}
 894
 895	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
 896	RB_CLEAR_NODE(&dl_se->rb_node);
 897
 898	dec_dl_tasks(dl_se, dl_rq);
 899}
 900
 901static void
 902enqueue_dl_entity(struct sched_dl_entity *dl_se,
 903		  struct sched_dl_entity *pi_se, int flags)
 904{
 905	BUG_ON(on_dl_rq(dl_se));
 906
 907	/*
 908	 * If this is a wakeup or a new instance, the scheduling
 909	 * parameters of the task might need updating. Otherwise,
 910	 * we want a replenishment of its runtime.
 911	 */
 912	if (flags & ENQUEUE_WAKEUP)
 913		update_dl_entity(dl_se, pi_se);
 914	else if (flags & ENQUEUE_REPLENISH)
 915		replenish_dl_entity(dl_se, pi_se);
 916
 917	__enqueue_dl_entity(dl_se);
 918}
 919
 920static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
 921{
 922	__dequeue_dl_entity(dl_se);
 923}
 924
 925static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 926{
 927	struct task_struct *pi_task = rt_mutex_get_top_task(p);
 928	struct sched_dl_entity *pi_se = &p->dl;
 929
 930	/*
 931	 * Use the scheduling parameters of the top pi-waiter
 932	 * task if we have one and its (absolute) deadline is
 933	 * smaller than ours; otherwise we keep our runtime and
 934	 * deadline.
 935	 */
 936	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
 937		pi_se = &pi_task->dl;
 938	} else if (!dl_prio(p->normal_prio)) {
 939		/*
 940		 * Special case in which we have a !SCHED_DEADLINE task
 941		 * that is going to be deboosted, but exceeds its
 942		 * runtime while doing so. No point in replenishing
 943		 * it, as it's going to return to its original
 944		 * scheduling class after this.
 945		 */
 946		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
 947		return;
 948	}
 949
 950	/*
 951	 * If p is throttled, we do nothing. In fact, if it exhausted
 952	 * its budget it needs a replenishment and, since it now is on
 953	 * its rq, the bandwidth timer callback (which clearly has not
 954	 * run yet) will take care of this.
 955	 */
 956	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
 957		return;
 958
 959	enqueue_dl_entity(&p->dl, pi_se, flags);
 960
 961	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
 962		enqueue_pushable_dl_task(rq, p);
 963}
 964
 965static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 966{
 967	dequeue_dl_entity(&p->dl);
 968	dequeue_pushable_dl_task(rq, p);
 969}
 970
 971static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 972{
 973	update_curr_dl(rq);
 974	__dequeue_task_dl(rq, p, flags);
 975}
 976
 977/*
 978 * Yield task semantic for -deadline tasks is:
 979 *
 980 *   get off the CPU until our next instance, with
 981 *   a new runtime. This is of little use now, since we
 982 *   don't have a bandwidth reclaiming mechanism. Anyway,
 983 *   bandwidth reclaiming is planned for the future, and
 984 *   yield_task_dl will indicate that some spare budget
 985 *   is available for other task instances to use.
 986 */
 987static void yield_task_dl(struct rq *rq)
 988{
 989	/*
 990	 * We make the task go to sleep until its current deadline by
 991	 * forcing its runtime to zero. This way, update_curr_dl() stops
 992	 * it and the bandwidth timer will wake it up and will give it
 993	 * new scheduling parameters (thanks to dl_yielded=1).
 994	 */
 995	rq->curr->dl.dl_yielded = 1;
 996
 997	update_rq_clock(rq);
 998	update_curr_dl(rq);
 999	/*
1000	 * Tell update_rq_clock() that we've just updated,
1001	 * so we don't do microscopic update in schedule()
1002	 * and double the fastpath cost.
1003	 */
1004	rq_clock_skip_update(rq, true);
1005}
1006
1007#ifdef CONFIG_SMP
1008
1009static int find_later_rq(struct task_struct *task);
1010
1011static int
1012select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1013{
1014	struct task_struct *curr;
1015	struct rq *rq;
1016
1017	if (sd_flag != SD_BALANCE_WAKE)
1018		goto out;
1019
1020	rq = cpu_rq(cpu);
1021
1022	rcu_read_lock();
1023	curr = READ_ONCE(rq->curr); /* unlocked access */
1024
1025	/*
1026	 * If we are dealing with a -deadline task, we must
1027	 * decide where to wake it up.
1028	 * If it has a later deadline and the current task
1029	 * on this rq can't move (provided the waking task
1030	 * can!) we prefer to send it somewhere else. On the
1031	 * other hand, if it has a shorter deadline, we
1032	 * try to make it stay here, as it might be important.
1033	 */
1034	if (unlikely(dl_task(curr)) &&
1035	    (tsk_nr_cpus_allowed(curr) < 2 ||
1036	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
1037	    (tsk_nr_cpus_allowed(p) > 1)) {
1038		int target = find_later_rq(p);
1039
1040		if (target != -1 &&
1041				(dl_time_before(p->dl.deadline,
1042					cpu_rq(target)->dl.earliest_dl.curr) ||
1043				(cpu_rq(target)->dl.dl_nr_running == 0)))
1044			cpu = target;
1045	}
1046	rcu_read_unlock();
1047
1048out:
1049	return cpu;
1050}
1051
1052static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1053{
1054	/*
1055	 * Current can't be migrated, useless to reschedule,
1056	 * let's hope p can move out.
1057	 */
1058	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
1059	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1060		return;
1061
1062	/*
1063	 * p is migratable, so let's not schedule it and
1064	 * see if it is pushed or pulled somewhere else.
1065	 */
1066	if (tsk_nr_cpus_allowed(p) != 1 &&
1067	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1068		return;
1069
1070	resched_curr(rq);
1071}
1072
1073#endif /* CONFIG_SMP */
1074
1075/*
1076 * Only called when both the current and waking task are -deadline
1077 * tasks.
1078 */
1079static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1080				  int flags)
1081{
1082	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1083		resched_curr(rq);
1084		return;
1085	}
1086
1087#ifdef CONFIG_SMP
1088	/*
1089	 * In the unlikely case current and p have the same deadline
1090	 * let us try to decide what's the best thing to do...
1091	 */
1092	if ((p->dl.deadline == rq->curr->dl.deadline) &&
1093	    !test_tsk_need_resched(rq->curr))
1094		check_preempt_equal_dl(rq, p);
1095#endif /* CONFIG_SMP */
1096}
1097
1098#ifdef CONFIG_SCHED_HRTICK
1099static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1100{
1101	hrtick_start(rq, p->dl.runtime);
1102}
1103#else /* !CONFIG_SCHED_HRTICK */
1104static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1105{
1106}
1107#endif
1108
1109static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1110						   struct dl_rq *dl_rq)
1111{
1112	struct rb_node *left = dl_rq->rb_leftmost;
1113
1114	if (!left)
1115		return NULL;
1116
1117	return rb_entry(left, struct sched_dl_entity, rb_node);
1118}
1119
1120struct task_struct *
1121pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
1122{
1123	struct sched_dl_entity *dl_se;
1124	struct task_struct *p;
1125	struct dl_rq *dl_rq;
1126
1127	dl_rq = &rq->dl;
1128
1129	if (need_pull_dl_task(rq, prev)) {
1130		/*
1131		 * This is OK, because current is on_cpu, which avoids it being
1132		 * picked for load-balance and preemption/IRQs are still
1133		 * disabled avoiding further scheduler activity on it and we're
1134		 * being very careful to re-start the picking loop.
1135		 */
1136		lockdep_unpin_lock(&rq->lock, cookie);
1137		pull_dl_task(rq);
1138		lockdep_repin_lock(&rq->lock, cookie);
1139		/*
1140		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1141		 * means a stop task can slip in, in which case we need to
1142		 * re-start task selection.
1143		 */
1144		if (rq->stop && task_on_rq_queued(rq->stop))
1145			return RETRY_TASK;
1146	}
1147
1148	/*
1149	 * When prev is DL, we may throttle it in put_prev_task().
1150	 * So, we update time before we check for dl_nr_running.
1151	 */
1152	if (prev->sched_class == &dl_sched_class)
1153		update_curr_dl(rq);
1154
1155	if (unlikely(!dl_rq->dl_nr_running))
1156		return NULL;
1157
1158	put_prev_task(rq, prev);
1159
1160	dl_se = pick_next_dl_entity(rq, dl_rq);
1161	BUG_ON(!dl_se);
1162
1163	p = dl_task_of(dl_se);
1164	p->se.exec_start = rq_clock_task(rq);
1165
1166	/* Running task will never be pushed. */
1167	dequeue_pushable_dl_task(rq, p);
1168
1169	if (hrtick_enabled(rq))
1170		start_hrtick_dl(rq, p);
1171
1172	queue_push_tasks(rq);
1173
1174	return p;
1175}
1176
1177static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1178{
1179	update_curr_dl(rq);
1180
1181	if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1)
1182		enqueue_pushable_dl_task(rq, p);
1183}
1184
1185static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1186{
1187	update_curr_dl(rq);
1188
1189	/*
1190	 * Even when we have runtime, update_curr_dl() might have resulted in us
1191	 * not being the leftmost task anymore. In that case NEED_RESCHED will
1192	 * be set and schedule() will start a new hrtick for the next task.
1193	 */
1194	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1195	    is_leftmost(p, &rq->dl))
1196		start_hrtick_dl(rq, p);
1197}
1198
1199static void task_fork_dl(struct task_struct *p)
1200{
1201	/*
1202	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1203	 * sched_fork()
1204	 */
1205}
1206
1207static void task_dead_dl(struct task_struct *p)
1208{
1209	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1210
1211	/*
1212	 * Since we are TASK_DEAD we won't slip out of the domain!
1213	 */
1214	raw_spin_lock_irq(&dl_b->lock);
1215	/* XXX we should retain the bw until 0-lag */
1216	dl_b->total_bw -= p->dl.dl_bw;
1217	raw_spin_unlock_irq(&dl_b->lock);
1218}
1219
1220static void set_curr_task_dl(struct rq *rq)
1221{
1222	struct task_struct *p = rq->curr;
1223
1224	p->se.exec_start = rq_clock_task(rq);
1225
1226	/* You can't push away the running task */
1227	dequeue_pushable_dl_task(rq, p);
1228}
1229
1230#ifdef CONFIG_SMP
1231
1232/* Only try algorithms three times */
1233#define DL_MAX_TRIES 3
1234
1235static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1236{
1237	if (!task_running(rq, p) &&
1238	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1239		return 1;
1240	return 0;
1241}
1242
1243/*
1244 * Return the earliest pushable rq's task, which is suitable to be executed
1245 * on the CPU, or NULL otherwise.
1246 */
1247static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1248{
1249	struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1250	struct task_struct *p = NULL;
1251
1252	if (!has_pushable_dl_tasks(rq))
1253		return NULL;
1254
1255next_node:
1256	if (next_node) {
1257		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1258
1259		if (pick_dl_task(rq, p, cpu))
1260			return p;
1261
1262		next_node = rb_next(next_node);
1263		goto next_node;
1264	}
1265
1266	return NULL;
1267}
1268
1269static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1270
1271static int find_later_rq(struct task_struct *task)
1272{
1273	struct sched_domain *sd;
1274	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1275	int this_cpu = smp_processor_id();
1276	int best_cpu, cpu = task_cpu(task);
1277
1278	/* Make sure the mask is initialized first */
1279	if (unlikely(!later_mask))
1280		return -1;
1281
1282	if (tsk_nr_cpus_allowed(task) == 1)
1283		return -1;
1284
1285	/*
1286	 * We have to consider system topology and task affinity
1287	 * first, then we can look for a suitable cpu.
1288	 */
1289	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1290			task, later_mask);
1291	if (best_cpu == -1)
1292		return -1;
1293
1294	/*
1295	 * If we are here, some target has been found,
1296	 * the most suitable of which is cached in best_cpu.
1297	 * This is, among the runqueues where the current tasks
1298	 * have later deadlines than the task's one, the rq
1299	 * with the latest possible one.
1300	 *
1301	 * Now we check how well this matches with task's
1302	 * affinity and system topology.
1303	 *
1304	 * The last cpu where the task ran is our first
1305	 * guess, since it is most likely cache-hot there.
1306	 */
1307	if (cpumask_test_cpu(cpu, later_mask))
1308		return cpu;
1309	/*
1310	 * Check if this_cpu is to be skipped (i.e., it is
1311	 * not in the mask) or not.
1312	 */
1313	if (!cpumask_test_cpu(this_cpu, later_mask))
1314		this_cpu = -1;
1315
1316	rcu_read_lock();
1317	for_each_domain(cpu, sd) {
1318		if (sd->flags & SD_WAKE_AFFINE) {
1319
1320			/*
1321			 * If possible, preempting this_cpu is
1322			 * cheaper than migrating.
1323			 */
1324			if (this_cpu != -1 &&
1325			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1326				rcu_read_unlock();
1327				return this_cpu;
1328			}
1329
1330			/*
1331			 * Last chance: if best_cpu is valid and is
1332			 * in the mask, that becomes our choice.
1333			 */
1334			if (best_cpu < nr_cpu_ids &&
1335			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1336				rcu_read_unlock();
1337				return best_cpu;
1338			}
1339		}
1340	}
1341	rcu_read_unlock();
1342
1343	/*
1344	 * At this point, all our guesses failed, we just return
1345	 * 'something', and let the caller sort the things out.
1346	 */
1347	if (this_cpu != -1)
1348		return this_cpu;
1349
1350	cpu = cpumask_any(later_mask);
1351	if (cpu < nr_cpu_ids)
1352		return cpu;
1353
1354	return -1;
1355}
1356
1357/* Locks the rq it finds */
1358static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1359{
1360	struct rq *later_rq = NULL;
1361	int tries;
1362	int cpu;
1363
1364	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1365		cpu = find_later_rq(task);
1366
1367		if ((cpu == -1) || (cpu == rq->cpu))
1368			break;
1369
1370		later_rq = cpu_rq(cpu);
1371
1372		if (later_rq->dl.dl_nr_running &&
1373		    !dl_time_before(task->dl.deadline,
1374					later_rq->dl.earliest_dl.curr)) {
1375			/*
1376			 * Target rq has tasks of equal or earlier deadline,
1377			 * retrying does not release any lock and is unlikely
1378			 * to yield a different result.
1379			 */
1380			later_rq = NULL;
1381			break;
1382		}
1383
1384		/* Retry if something changed. */
1385		if (double_lock_balance(rq, later_rq)) {
1386			if (unlikely(task_rq(task) != rq ||
1387				     !cpumask_test_cpu(later_rq->cpu,
1388						       tsk_cpus_allowed(task)) ||
1389				     task_running(rq, task) ||
1390				     !dl_task(task) ||
1391				     !task_on_rq_queued(task))) {
1392				double_unlock_balance(rq, later_rq);
1393				later_rq = NULL;
1394				break;
1395			}
1396		}
1397
1398		/*
1399		 * If the rq we found has no -deadline task, or
1400		 * its earliest one has a later deadline than our
1401		 * task, the rq is a good one.
1402		 */
1403		if (!later_rq->dl.dl_nr_running ||
1404		    dl_time_before(task->dl.deadline,
1405				   later_rq->dl.earliest_dl.curr))
1406			break;
1407
1408		/* Otherwise we try again. */
1409		double_unlock_balance(rq, later_rq);
1410		later_rq = NULL;
1411	}
1412
1413	return later_rq;
1414}
1415
1416static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1417{
1418	struct task_struct *p;
1419
1420	if (!has_pushable_dl_tasks(rq))
1421		return NULL;
1422
1423	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1424		     struct task_struct, pushable_dl_tasks);
1425
1426	BUG_ON(rq->cpu != task_cpu(p));
1427	BUG_ON(task_current(rq, p));
1428	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
1429
1430	BUG_ON(!task_on_rq_queued(p));
1431	BUG_ON(!dl_task(p));
1432
1433	return p;
1434}
1435
1436/*
1437 * See if the non running -deadline tasks on this rq
1438 * can be sent to some other CPU where they can preempt
1439 * and start executing.
1440 */
1441static int push_dl_task(struct rq *rq)
1442{
1443	struct task_struct *next_task;
1444	struct rq *later_rq;
1445	int ret = 0;
1446
1447	if (!rq->dl.overloaded)
1448		return 0;
1449
1450	next_task = pick_next_pushable_dl_task(rq);
1451	if (!next_task)
1452		return 0;
1453
1454retry:
1455	if (unlikely(next_task == rq->curr)) {
1456		WARN_ON(1);
1457		return 0;
1458	}
1459
1460	/*
1461	 * If next_task preempts rq->curr, and rq->curr
1462	 * can move away, it makes sense to just reschedule
1463	 * without going further in pushing next_task.
1464	 */
1465	if (dl_task(rq->curr) &&
1466	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1467	    tsk_nr_cpus_allowed(rq->curr) > 1) {
1468		resched_curr(rq);
1469		return 0;
1470	}
1471
1472	/* We might release rq lock */
1473	get_task_struct(next_task);
1474
1475	/* Will lock the rq it'll find */
1476	later_rq = find_lock_later_rq(next_task, rq);
1477	if (!later_rq) {
1478		struct task_struct *task;
1479
1480		/*
1481		 * We must check all this again, since
1482		 * find_lock_later_rq releases rq->lock and it is
1483		 * then possible that next_task has migrated.
1484		 */
1485		task = pick_next_pushable_dl_task(rq);
1486		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1487			/*
1488			 * The task is still there. We don't try
1489			 * again, some other cpu will pull it when ready.
1490			 */
1491			goto out;
1492		}
1493
1494		if (!task)
1495			/* No more tasks */
1496			goto out;
1497
1498		put_task_struct(next_task);
1499		next_task = task;
1500		goto retry;
1501	}
1502
1503	deactivate_task(rq, next_task, 0);
1504	set_task_cpu(next_task, later_rq->cpu);
1505	activate_task(later_rq, next_task, 0);
1506	ret = 1;
1507
1508	resched_curr(later_rq);
1509
1510	double_unlock_balance(rq, later_rq);
1511
1512out:
1513	put_task_struct(next_task);
1514
1515	return ret;
1516}
1517
1518static void push_dl_tasks(struct rq *rq)
1519{
1520	/* push_dl_task() will return true if it moved a -deadline task */
1521	while (push_dl_task(rq))
1522		;
1523}
1524
1525static void pull_dl_task(struct rq *this_rq)
1526{
1527	int this_cpu = this_rq->cpu, cpu;
1528	struct task_struct *p;
1529	bool resched = false;
1530	struct rq *src_rq;
1531	u64 dmin = LONG_MAX;
1532
1533	if (likely(!dl_overloaded(this_rq)))
1534		return;
1535
1536	/*
1537	 * Match the barrier from dl_set_overload(); this guarantees that if we
1538	 * see overloaded we must also see the dlo_mask bit.
1539	 */
1540	smp_rmb();
1541
1542	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1543		if (this_cpu == cpu)
1544			continue;
1545
1546		src_rq = cpu_rq(cpu);
1547
1548		/*
1549		 * It looks racy, and it is! However, as in sched_rt.c,
1550		 * we are fine with this.
1551		 */
1552		if (this_rq->dl.dl_nr_running &&
1553		    dl_time_before(this_rq->dl.earliest_dl.curr,
1554				   src_rq->dl.earliest_dl.next))
1555			continue;
1556
1557		/* Might drop this_rq->lock */
1558		double_lock_balance(this_rq, src_rq);
1559
1560		/*
1561		 * If there are no more pullable tasks on the
1562		 * rq, we're done with it.
1563		 */
1564		if (src_rq->dl.dl_nr_running <= 1)
1565			goto skip;
1566
1567		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1568
1569		/*
1570		 * We found a task to be pulled if:
1571		 *  - it preempts our current (if there's one),
1572		 *  - it will preempt the last one we pulled (if any).
1573		 */
1574		if (p && dl_time_before(p->dl.deadline, dmin) &&
1575		    (!this_rq->dl.dl_nr_running ||
1576		     dl_time_before(p->dl.deadline,
1577				    this_rq->dl.earliest_dl.curr))) {
1578			WARN_ON(p == src_rq->curr);
1579			WARN_ON(!task_on_rq_queued(p));
1580
1581			/*
1582			 * Then we pull iff p has actually an earlier
1583			 * deadline than the current task of its runqueue.
1584			 */
1585			if (dl_time_before(p->dl.deadline,
1586					   src_rq->curr->dl.deadline))
1587				goto skip;
1588
1589			resched = true;
1590
1591			deactivate_task(src_rq, p, 0);
1592			set_task_cpu(p, this_cpu);
1593			activate_task(this_rq, p, 0);
1594			dmin = p->dl.deadline;
1595
1596			/* Is there any other task even earlier? */
1597		}
1598skip:
1599		double_unlock_balance(this_rq, src_rq);
1600	}
1601
1602	if (resched)
1603		resched_curr(this_rq);
1604}
1605
1606/*
1607 * Since the task is not running and a reschedule is not going to happen
1608 * anytime soon on its runqueue, we try pushing it away now.
1609 */
1610static void task_woken_dl(struct rq *rq, struct task_struct *p)
1611{
1612	if (!task_running(rq, p) &&
1613	    !test_tsk_need_resched(rq->curr) &&
1614	    tsk_nr_cpus_allowed(p) > 1 &&
1615	    dl_task(rq->curr) &&
1616	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
1617	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1618		push_dl_tasks(rq);
1619	}
1620}
1621
1622static void set_cpus_allowed_dl(struct task_struct *p,
1623				const struct cpumask *new_mask)
1624{
1625	struct root_domain *src_rd;
1626	struct rq *rq;
1627
1628	BUG_ON(!dl_task(p));
1629
1630	rq = task_rq(p);
1631	src_rd = rq->rd;
1632	/*
1633	 * Migrating a SCHED_DEADLINE task between exclusive
1634	 * cpusets (different root_domains) entails a bandwidth
1635	 * update. We already made space for us in the destination
1636	 * domain (see cpuset_can_attach()).
1637	 */
1638	if (!cpumask_intersects(src_rd->span, new_mask)) {
1639		struct dl_bw *src_dl_b;
1640
1641		src_dl_b = dl_bw_of(cpu_of(rq));
1642		/*
1643		 * We now free resources of the root_domain we are migrating
1644		 * off. In the worst case, sched_setattr() may temporarily fail
1645		 * until we complete the update.
1646		 */
1647		raw_spin_lock(&src_dl_b->lock);
1648		__dl_clear(src_dl_b, p->dl.dl_bw);
1649		raw_spin_unlock(&src_dl_b->lock);
1650	}
1651
1652	set_cpus_allowed_common(p, new_mask);
1653}
1654
1655/* Assumes rq->lock is held */
1656static void rq_online_dl(struct rq *rq)
1657{
1658	if (rq->dl.overloaded)
1659		dl_set_overload(rq);
1660
1661	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1662	if (rq->dl.dl_nr_running > 0)
1663		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
1664}
1665
1666/* Assumes rq->lock is held */
1667static void rq_offline_dl(struct rq *rq)
1668{
1669	if (rq->dl.overloaded)
1670		dl_clear_overload(rq);
1671
1672	cpudl_clear(&rq->rd->cpudl, rq->cpu);
1673	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1674}
1675
1676void __init init_sched_dl_class(void)
1677{
1678	unsigned int i;
1679
1680	for_each_possible_cpu(i)
1681		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1682					GFP_KERNEL, cpu_to_node(i));
1683}
1684
1685#endif /* CONFIG_SMP */
1686
1687static void switched_from_dl(struct rq *rq, struct task_struct *p)
1688{
1689	/*
1690	 * Start the deadline timer; if we switch back to dl before this we'll
1691	 * continue consuming our current CBS slice. If we stay outside of
1692	 * SCHED_DEADLINE until the deadline passes, the timer will reset the
1693	 * task.
1694	 */
1695	if (!start_dl_timer(p))
1696		__dl_clear_params(p);
1697
1698	/*
1699	 * Since this might be the only -deadline task on the rq,
1700	 * this is the right place to try to pull some other one
1701	 * from an overloaded cpu, if any.
1702	 */
1703	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1704		return;
1705
1706	queue_pull_task(rq);
1707}
1708
1709/*
1710 * When switching to -deadline, we may overload the rq, then
1711 * we try to push someone off, if possible.
1712 */
1713static void switched_to_dl(struct rq *rq, struct task_struct *p)
1714{
1715
1716	/* If p is not queued we will update its parameters at next wakeup. */
1717	if (!task_on_rq_queued(p))
1718		return;
1719
1720	/*
1721	 * If p is boosted we already updated its params in
1722	 * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH),
1723	 * p's deadline being now already after rq_clock(rq).
1724	 */
1725	if (dl_time_before(p->dl.deadline, rq_clock(rq)))
1726		setup_new_dl_entity(&p->dl);
1727
1728	if (rq->curr != p) {
1729#ifdef CONFIG_SMP
1730		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
1731			queue_push_tasks(rq);
1732#endif
1733		if (dl_task(rq->curr))
1734			check_preempt_curr_dl(rq, p, 0);
1735		else
1736			resched_curr(rq);
1737	}
1738}
1739
1740/*
1741 * If the scheduling parameters of a -deadline task changed,
1742 * a push or pull operation might be needed.
1743 */
1744static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1745			    int oldprio)
1746{
1747	if (task_on_rq_queued(p) || rq->curr == p) {
1748#ifdef CONFIG_SMP
1749		/*
1750		 * This might be too much, but unfortunately
1751		 * we don't have the old deadline value, and
1752		 * we can't argue if the task is increasing
1753		 * or lowering its prio, so...
1754		 */
1755		if (!rq->dl.overloaded)
1756			queue_pull_task(rq);
1757
1758		/*
1759		 * If we now have an earlier deadline task than p,
1760		 * then reschedule, provided p is still on this
1761		 * runqueue.
1762		 */
1763		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
1764			resched_curr(rq);
1765#else
1766		/*
1767		 * Again, we don't know if p has an earlier
1768		 * or later deadline, so let's blindly set a
1769		 * (maybe not needed) rescheduling point.
1770		 */
1771		resched_curr(rq);
1772#endif /* CONFIG_SMP */
1773	}
1774}
1775
1776const struct sched_class dl_sched_class = {
1777	.next			= &rt_sched_class,
1778	.enqueue_task		= enqueue_task_dl,
1779	.dequeue_task		= dequeue_task_dl,
1780	.yield_task		= yield_task_dl,
1781
1782	.check_preempt_curr	= check_preempt_curr_dl,
1783
1784	.pick_next_task		= pick_next_task_dl,
1785	.put_prev_task		= put_prev_task_dl,
1786
1787#ifdef CONFIG_SMP
1788	.select_task_rq		= select_task_rq_dl,
1789	.set_cpus_allowed       = set_cpus_allowed_dl,
1790	.rq_online              = rq_online_dl,
1791	.rq_offline             = rq_offline_dl,
1792	.task_woken		= task_woken_dl,
1793#endif
1794
1795	.set_curr_task		= set_curr_task_dl,
1796	.task_tick		= task_tick_dl,
1797	.task_fork              = task_fork_dl,
1798	.task_dead		= task_dead_dl,
1799
1800	.prio_changed           = prio_changed_dl,
1801	.switched_from		= switched_from_dl,
1802	.switched_to		= switched_to_dl,
1803
1804	.update_curr		= update_curr_dl,
1805};
1806
1807#ifdef CONFIG_SCHED_DEBUG
1808extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1809
1810void print_dl_stats(struct seq_file *m, int cpu)
1811{
1812	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1813}
1814#endif /* CONFIG_SCHED_DEBUG */
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Deadline Scheduling Class (SCHED_DEADLINE)
   4 *
   5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
   6 *
    7 * Tasks that periodically execute their instances for less than their
    8 * runtime won't miss any of their deadlines.
    9 * Tasks that are not periodic or sporadic, or that try to execute more
  10 * than their reserved bandwidth will be slowed down (and may potentially
  11 * miss some of their deadlines), and won't affect any other task.
  12 *
  13 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
  14 *                    Juri Lelli <juri.lelli@gmail.com>,
  15 *                    Michael Trimarchi <michael@amarulasolutions.com>,
  16 *                    Fabio Checconi <fchecconi@gmail.com>
  17 */
  18#include "sched.h"
  19#include "pelt.h"
  20
  21struct dl_bandwidth def_dl_bandwidth;
  22
  23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
  24{
  25	return container_of(dl_se, struct task_struct, dl);
  26}
  27
  28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
  29{
  30	return container_of(dl_rq, struct rq, dl);
  31}
  32
  33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
  34{
  35	struct task_struct *p = dl_task_of(dl_se);
  36	struct rq *rq = task_rq(p);
  37
  38	return &rq->dl;
  39}
  40
  41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
  42{
  43	return !RB_EMPTY_NODE(&dl_se->rb_node);
  44}
  45
  46#ifdef CONFIG_SMP
  47static inline struct dl_bw *dl_bw_of(int i)
  48{
  49	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
  50			 "sched RCU must be held");
  51	return &cpu_rq(i)->rd->dl_bw;
  52}
  53
  54static inline int dl_bw_cpus(int i)
  55{
  56	struct root_domain *rd = cpu_rq(i)->rd;
  57	int cpus = 0;
  58
  59	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
  60			 "sched RCU must be held");
  61	for_each_cpu_and(i, rd->span, cpu_active_mask)
  62		cpus++;
  63
  64	return cpus;
  65}
  66#else
  67static inline struct dl_bw *dl_bw_of(int i)
  68{
  69	return &cpu_rq(i)->dl.dl_bw;
  70}
  71
  72static inline int dl_bw_cpus(int i)
  73{
  74	return 1;
  75}
  76#endif
  77
  78static inline
  79void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
  80{
  81	u64 old = dl_rq->running_bw;
  82
  83	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
  84	dl_rq->running_bw += dl_bw;
  85	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
  86	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
  87	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
  88	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
  89}
  90
  91static inline
  92void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
  93{
  94	u64 old = dl_rq->running_bw;
  95
  96	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
  97	dl_rq->running_bw -= dl_bw;
  98	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
  99	if (dl_rq->running_bw > old)
 100		dl_rq->running_bw = 0;
 101	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
 102	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
 103}
 104
 105static inline
 106void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
 107{
 108	u64 old = dl_rq->this_bw;
 109
 110	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
 111	dl_rq->this_bw += dl_bw;
 112	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
 113}
 114
 115static inline
 116void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
 117{
 118	u64 old = dl_rq->this_bw;
 119
 120	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
 121	dl_rq->this_bw -= dl_bw;
 122	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
 123	if (dl_rq->this_bw > old)
 124		dl_rq->this_bw = 0;
 125	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
 126}
 127
 128static inline
 129void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 130{
 131	if (!dl_entity_is_special(dl_se))
 132		__add_rq_bw(dl_se->dl_bw, dl_rq);
 133}
 134
 135static inline
 136void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 137{
 138	if (!dl_entity_is_special(dl_se))
 139		__sub_rq_bw(dl_se->dl_bw, dl_rq);
 140}
 141
 142static inline
 143void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 144{
 145	if (!dl_entity_is_special(dl_se))
 146		__add_running_bw(dl_se->dl_bw, dl_rq);
 147}
 148
 149static inline
 150void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 151{
 152	if (!dl_entity_is_special(dl_se))
 153		__sub_running_bw(dl_se->dl_bw, dl_rq);
 154}
 155
 156void dl_change_utilization(struct task_struct *p, u64 new_bw)
 157{
 158	struct rq *rq;
 159
 160	BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
 161
 162	if (task_on_rq_queued(p))
 163		return;
 164
 165	rq = task_rq(p);
 166	if (p->dl.dl_non_contending) {
 167		sub_running_bw(&p->dl, &rq->dl);
 168		p->dl.dl_non_contending = 0;
 169		/*
 170		 * If the timer handler is currently running and the
 171		 * timer cannot be cancelled, inactive_task_timer()
 172		 * will see that dl_non_contending is not set, and
 173		 * will not touch the rq's active utilization,
 174		 * so we are still safe.
 175		 */
 176		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
 177			put_task_struct(p);
 178	}
 179	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
 180	__add_rq_bw(new_bw, &rq->dl);
 181}
 182
 183/*
 184 * The utilization of a task cannot be immediately removed from
 185 * the rq active utilization (running_bw) when the task blocks.
 186 * Instead, we have to wait for the so called "0-lag time".
 187 *
 188 * If a task blocks before the "0-lag time", a timer (the inactive
 189 * timer) is armed, and running_bw is decreased when the timer
 190 * fires.
 191 *
 192 * If the task wakes up again before the inactive timer fires,
 193 * the timer is cancelled, whereas if the task wakes up after the
 194 * inactive timer fired (and running_bw has been decreased) the
 195 * task's utilization has to be added to running_bw again.
 196 * A flag in the deadline scheduling entity (dl_non_contending)
 197 * is used to avoid race conditions between the inactive timer handler
 198 * and task wakeups.
 199 *
 200 * The following diagram shows how running_bw is updated. A task is
 201 * "ACTIVE" when its utilization contributes to running_bw; an
 202 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 203 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 204 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 205 * time already passed, which does not contribute to running_bw anymore.
 206 *                              +------------------+
 207 *             wakeup           |    ACTIVE        |
 208 *          +------------------>+   contending     |
 209 *          | add_running_bw    |                  |
 210 *          |                   +----+------+------+
 211 *          |                        |      ^
 212 *          |                dequeue |      |
 213 * +--------+-------+                |      |
 214 * |                |   t >= 0-lag   |      | wakeup
 215 * |    INACTIVE    |<---------------+      |
 216 * |                | sub_running_bw |      |
 217 * +--------+-------+                |      |
 218 *          ^                        |      |
 219 *          |              t < 0-lag |      |
 220 *          |                        |      |
 221 *          |                        V      |
 222 *          |                   +----+------+------+
 223 *          | sub_running_bw    |    ACTIVE        |
 224 *          +-------------------+                  |
 225 *            inactive timer    |  non contending  |
 226 *            fired             +------------------+
 227 *
 228 * The task_non_contending() function is invoked when a task
 229 * blocks, and checks if the 0-lag time already passed or
 230 * not (in the first case, it directly updates running_bw;
 231 * in the second case, it arms the inactive timer).
 232 *
 233 * The task_contending() function is invoked when a task wakes
 234 * up, and checks if the task is still in the "ACTIVE non contending"
 235 * state or not (in the second case, it updates running_bw).
 236 */
 237static void task_non_contending(struct task_struct *p)
 238{
 239	struct sched_dl_entity *dl_se = &p->dl;
 240	struct hrtimer *timer = &dl_se->inactive_timer;
 241	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 242	struct rq *rq = rq_of_dl_rq(dl_rq);
 243	s64 zerolag_time;
 244
 245	/*
 246	 * If this is a non-deadline task that has been boosted,
 247	 * do nothing
 248	 */
 249	if (dl_se->dl_runtime == 0)
 250		return;
 251
 252	if (dl_entity_is_special(dl_se))
 253		return;
 254
 255	WARN_ON(dl_se->dl_non_contending);
 256
 257	zerolag_time = dl_se->deadline -
 258		 div64_long((dl_se->runtime * dl_se->dl_period),
 259			dl_se->dl_runtime);
 260
 261	/*
 262	 * Using relative times instead of the absolute "0-lag time"
 263	 * allows us to simplify the code
 264	 */
 265	zerolag_time -= rq_clock(rq);
 266
 267	/*
 268	 * If the "0-lag time" already passed, decrease the active
 269	 * utilization now, instead of starting a timer
 270	 */
 271	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
 272		if (dl_task(p))
 273			sub_running_bw(dl_se, dl_rq);
 274		if (!dl_task(p) || p->state == TASK_DEAD) {
 275			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
 276
 277			if (p->state == TASK_DEAD)
 278				sub_rq_bw(&p->dl, &rq->dl);
 279			raw_spin_lock(&dl_b->lock);
 280			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 281			__dl_clear_params(p);
 282			raw_spin_unlock(&dl_b->lock);
 283		}
 284
 285		return;
 286	}
 287
 288	dl_se->dl_non_contending = 1;
 289	get_task_struct(p);
 290	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
 291}
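/*
 * Illustrative, hedged example (editor's sketch, not part of the kernel
 * sources): how the zero-lag instant computed above behaves for some
 * hypothetical parameters, all in nanoseconds like the fields used here.
 *
 *   dl_runtime = 10 ms, dl_period = 100 ms,
 *   remaining runtime = 3 ms, absolute deadline = rq_clock + 40 ms
 *
 *   zerolag = deadline - runtime * dl_period / dl_runtime
 *           = (rq_clock + 40 ms) - 3 ms * 100 / 10
 *           = rq_clock + 10 ms
 *
 * so the inactive timer is armed 10 ms into the future; with a larger
 * remaining runtime (say 5 ms) the result would already be in the past
 * and running_bw would be decreased immediately, as in the branch above.
 * A self-contained userspace version of the same arithmetic:
 */
#if 0	/* editor's example only, never compiled */
#include <stdint.h>

static int64_t zerolag_rel_ns(uint64_t deadline, int64_t runtime,
			      uint64_t dl_period, uint64_t dl_runtime,
			      uint64_t now)
{
	/* relative 0-lag time; negative means it has already passed */
	return (int64_t)(deadline - (runtime * dl_period) / dl_runtime) -
	       (int64_t)now;
}
#endif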
 292
 293static void task_contending(struct sched_dl_entity *dl_se, int flags)
 294{
 295	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 296
 297	/*
 298	 * If this is a non-deadline task that has been boosted,
 299	 * do nothing
 300	 */
 301	if (dl_se->dl_runtime == 0)
 302		return;
 303
 304	if (flags & ENQUEUE_MIGRATED)
 305		add_rq_bw(dl_se, dl_rq);
 306
 307	if (dl_se->dl_non_contending) {
 308		dl_se->dl_non_contending = 0;
 309		/*
 310		 * If the timer handler is currently running and the
 311		 * timer cannot be cancelled, inactive_task_timer()
 312		 * will see that dl_non_contending is not set, and
 313		 * will not touch the rq's active utilization,
 314		 * so we are still safe.
 315		 */
 316		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
 317			put_task_struct(dl_task_of(dl_se));
 318	} else {
 319		/*
 320		 * Since "dl_non_contending" is not set, the
 321		 * task's utilization has already been removed from
 322		 * active utilization (either when the task blocked,
 323		 * or when the "inactive timer" fired).
 324		 * So, add it back.
 325		 */
 326		add_running_bw(dl_se, dl_rq);
 327	}
 328}
 329
 330static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 331{
 332	struct sched_dl_entity *dl_se = &p->dl;
 333
 334	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
 335}
 336
 337void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
 338{
 339	raw_spin_lock_init(&dl_b->dl_runtime_lock);
 340	dl_b->dl_period = period;
 341	dl_b->dl_runtime = runtime;
 342}
 343
 344void init_dl_bw(struct dl_bw *dl_b)
 345{
 346	raw_spin_lock_init(&dl_b->lock);
 347	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
 348	if (global_rt_runtime() == RUNTIME_INF)
 349		dl_b->bw = -1;
 350	else
 351		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
 352	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
 353	dl_b->total_bw = 0;
 354}
 355
 356void init_dl_rq(struct dl_rq *dl_rq)
 357{
 358	dl_rq->root = RB_ROOT_CACHED;
 359
 360#ifdef CONFIG_SMP
 361	/* zero means no -deadline tasks */
 362	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
 363
 364	dl_rq->dl_nr_migratory = 0;
 365	dl_rq->overloaded = 0;
 366	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
 367#else
 368	init_dl_bw(&dl_rq->dl_bw);
 369#endif
 370
 371	dl_rq->running_bw = 0;
 372	dl_rq->this_bw = 0;
 373	init_dl_rq_bw_ratio(dl_rq);
 374}
 375
 376#ifdef CONFIG_SMP
 377
 378static inline int dl_overloaded(struct rq *rq)
 379{
 380	return atomic_read(&rq->rd->dlo_count);
 381}
 382
 383static inline void dl_set_overload(struct rq *rq)
 384{
 385	if (!rq->online)
 386		return;
 387
 388	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
 389	/*
 390	 * Must be visible before the overload count is
 391	 * set (as in sched_rt.c).
 392	 *
 393	 * Matched by the barrier in pull_dl_task().
 394	 */
 395	smp_wmb();
 396	atomic_inc(&rq->rd->dlo_count);
 397}
 398
 399static inline void dl_clear_overload(struct rq *rq)
 400{
 401	if (!rq->online)
 402		return;
 403
 404	atomic_dec(&rq->rd->dlo_count);
 405	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
 406}
 407
 408static void update_dl_migration(struct dl_rq *dl_rq)
 409{
 410	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 411		if (!dl_rq->overloaded) {
 412			dl_set_overload(rq_of_dl_rq(dl_rq));
 413			dl_rq->overloaded = 1;
 414		}
 415	} else if (dl_rq->overloaded) {
 416		dl_clear_overload(rq_of_dl_rq(dl_rq));
 417		dl_rq->overloaded = 0;
 418	}
 419}
 420
 421static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 422{
 423	struct task_struct *p = dl_task_of(dl_se);
 424
 425	if (p->nr_cpus_allowed > 1)
 426		dl_rq->dl_nr_migratory++;
 427
 428	update_dl_migration(dl_rq);
 429}
 430
 431static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 432{
 433	struct task_struct *p = dl_task_of(dl_se);
 434
 435	if (p->nr_cpus_allowed > 1)
 436		dl_rq->dl_nr_migratory--;
 437
 438	update_dl_migration(dl_rq);
 439}
 440
 441/*
 442 * The list of pushable -deadline task is not a plist, like in
 443 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 444 */
 445static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 446{
 447	struct dl_rq *dl_rq = &rq->dl;
 448	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
 449	struct rb_node *parent = NULL;
 450	struct task_struct *entry;
 451	bool leftmost = true;
 452
 453	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
 454
 455	while (*link) {
 456		parent = *link;
 457		entry = rb_entry(parent, struct task_struct,
 458				 pushable_dl_tasks);
 459		if (dl_entity_preempt(&p->dl, &entry->dl))
 460			link = &parent->rb_left;
 461		else {
 462			link = &parent->rb_right;
 463			leftmost = false;
 464		}
 465	}
 466
 467	if (leftmost)
 468		dl_rq->earliest_dl.next = p->dl.deadline;
 469
 470	rb_link_node(&p->pushable_dl_tasks, parent, link);
 471	rb_insert_color_cached(&p->pushable_dl_tasks,
 472			       &dl_rq->pushable_dl_tasks_root, leftmost);
 473}
 474
 475static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 476{
 477	struct dl_rq *dl_rq = &rq->dl;
 478
 479	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
 480		return;
 481
 482	if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
 483		struct rb_node *next_node;
 484
 485		next_node = rb_next(&p->pushable_dl_tasks);
 486		if (next_node) {
 487			dl_rq->earliest_dl.next = rb_entry(next_node,
 488				struct task_struct, pushable_dl_tasks)->dl.deadline;
 489		}
 490	}
 491
 492	rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 493	RB_CLEAR_NODE(&p->pushable_dl_tasks);
 494}
 495
 496static inline int has_pushable_dl_tasks(struct rq *rq)
 497{
 498	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
 499}
 500
 501static int push_dl_task(struct rq *rq);
 502
 503static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 504{
 505	return dl_task(prev);
 506}
 507
 508static DEFINE_PER_CPU(struct callback_head, dl_push_head);
 509static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
 510
 511static void push_dl_tasks(struct rq *);
 512static void pull_dl_task(struct rq *);
 513
 514static inline void deadline_queue_push_tasks(struct rq *rq)
 515{
 516	if (!has_pushable_dl_tasks(rq))
 517		return;
 518
 519	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
 520}
 521
 522static inline void deadline_queue_pull_task(struct rq *rq)
 523{
 524	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
 525}
 526
 527static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 528
 529static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 530{
 531	struct rq *later_rq = NULL;
 532	struct dl_bw *dl_b;
 533
 534	later_rq = find_lock_later_rq(p, rq);
 535	if (!later_rq) {
 536		int cpu;
 537
 538		/*
 539		 * If we cannot preempt any rq, fall back to pick any
 540		 * online CPU:
 541		 */
 542		cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
 543		if (cpu >= nr_cpu_ids) {
 544			/*
 545			 * Failed to find any suitable CPU.
 546			 * The task will never come back!
 547			 */
 548			BUG_ON(dl_bandwidth_enabled());
 549
 550			/*
 551			 * If admission control is disabled we
 552			 * try a little harder to let the task
 553			 * run.
 554			 */
 555			cpu = cpumask_any(cpu_active_mask);
 556		}
 557		later_rq = cpu_rq(cpu);
 558		double_lock_balance(rq, later_rq);
 559	}
 560
 561	if (p->dl.dl_non_contending || p->dl.dl_throttled) {
 562		/*
 563		 * Inactive timer is armed (or callback is running, but
 564		 * waiting for us to release rq locks). In any case, when it
 565		 * will fire (or continue), it will see running_bw of this
 566		 * task migrated to later_rq (and correctly handle it).
 567		 */
 568		sub_running_bw(&p->dl, &rq->dl);
 569		sub_rq_bw(&p->dl, &rq->dl);
 570
 571		add_rq_bw(&p->dl, &later_rq->dl);
 572		add_running_bw(&p->dl, &later_rq->dl);
 573	} else {
 574		sub_rq_bw(&p->dl, &rq->dl);
 575		add_rq_bw(&p->dl, &later_rq->dl);
 576	}
 577
 578	/*
 579	 * And we finally need to fixup root_domain(s) bandwidth accounting,
 580	 * since p is still hanging out in the old (now moved to default) root
 581	 * domain.
 582	 */
 583	dl_b = &rq->rd->dl_bw;
 584	raw_spin_lock(&dl_b->lock);
 585	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
 586	raw_spin_unlock(&dl_b->lock);
 587
 588	dl_b = &later_rq->rd->dl_bw;
 589	raw_spin_lock(&dl_b->lock);
 590	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
 591	raw_spin_unlock(&dl_b->lock);
 592
 593	set_task_cpu(p, later_rq->cpu);
 594	double_unlock_balance(later_rq, rq);
 595
 596	return later_rq;
 597}
 598
 599#else
 600
 601static inline
 602void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 603{
 604}
 605
 606static inline
 607void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 608{
 609}
 610
 611static inline
 612void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 613{
 614}
 615
 616static inline
 617void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 618{
 619}
 620
 621static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 622{
 623	return false;
 624}
 625
 626static inline void pull_dl_task(struct rq *rq)
 627{
 628}
 629
 630static inline void deadline_queue_push_tasks(struct rq *rq)
 631{
 632}
 633
 634static inline void deadline_queue_pull_task(struct rq *rq)
 635{
 636}
 637#endif /* CONFIG_SMP */
 638
 639static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 640static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 641static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
 642
 643/*
 644 * We are being explicitly informed that a new instance is starting,
 645 * and this means that:
 646 *  - the absolute deadline of the entity has to be placed at
 647 *    current time + relative deadline;
 648 *  - the runtime of the entity has to be set to the maximum value.
 649 *
 650 * The capability of specifying such an event is useful whenever a
 651 * -deadline entity wants to (try to!) synchronize its behaviour with
 652 * that of the scheduler, and to (try to!) reconcile itself with its
 653 * own scheduling parameters.
 654 */
 655static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
 656{
 657	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 658	struct rq *rq = rq_of_dl_rq(dl_rq);
 659
 660	WARN_ON(dl_se->dl_boosted);
 661	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
 662
 663	/*
 664	 * We are racing with the deadline timer. So, do nothing because
 665	 * the deadline timer handler will take care of properly recharging
 666	 * the runtime and postponing the deadline
 667	 */
 668	if (dl_se->dl_throttled)
 669		return;
 670
 671	/*
 672	 * We use the regular wall clock time to set deadlines in the
 673	 * future; in fact, we must consider execution overheads (time
 674	 * spent on hardirq context, etc.).
 675	 */
 676	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
 677	dl_se->runtime = dl_se->dl_runtime;
 678}
 679
 680/*
 681 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 682 * possibility of an entity lasting more than what it declared, and thus
 683 * exhausting its runtime.
 684 *
 685 * Here we are interested in making runtime overrun possible, but we do
 686 * not want a misbehaving entity to affect the scheduling of all
 687 * other entities.
 688 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 689 * is used, in order to confine each entity within its own bandwidth.
 690 *
 691 * This function deals exactly with that, and ensures that when the runtime
 692 * of an entity is replenished, its deadline is also postponed. That ensures
 693 * the overrunning entity can't interfere with other entities in the system
 694 * and can't make them miss their deadlines. Reasons why this kind of overrun
 695 * could happen are, typically, an entity voluntarily trying to exceed its
 696 * runtime, or having underestimated it during sched_setattr().
 697 */
 698static void replenish_dl_entity(struct sched_dl_entity *dl_se,
 699				struct sched_dl_entity *pi_se)
 700{
 701	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 702	struct rq *rq = rq_of_dl_rq(dl_rq);
 703
 704	BUG_ON(pi_se->dl_runtime <= 0);
 705
 706	/*
 707	 * This could be the case for a !-dl task that is boosted.
 708	 * Just go with full inherited parameters.
 709	 */
 710	if (dl_se->dl_deadline == 0) {
 711		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 712		dl_se->runtime = pi_se->dl_runtime;
 713	}
 714
 715	if (dl_se->dl_yielded && dl_se->runtime > 0)
 716		dl_se->runtime = 0;
 717
 718	/*
 719	 * We keep moving the deadline away until we get some
 720	 * available runtime for the entity. This ensures correct
 721	 * handling of situations where the runtime overrun is
 722	 * arbitrarily large.
 723	 */
 724	while (dl_se->runtime <= 0) {
 725		dl_se->deadline += pi_se->dl_period;
 726		dl_se->runtime += pi_se->dl_runtime;
 727	}
 728
 729	/*
 730	 * At this point, the deadline really should be "in
 731	 * the future" with respect to rq->clock. If it's
 732	 * not, we are, for some reason, lagging too much!
 733	 * Anyway, after having warned userspace about that,
 734	 * we still try to keep things running by
 735	 * resetting the deadline and the budget of the
 736	 * entity.
 737	 */
 738	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
 739		printk_deferred_once("sched: DL replenish lagged too much\n");
 740		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 741		dl_se->runtime = pi_se->dl_runtime;
 742	}
 743
 744	if (dl_se->dl_yielded)
 745		dl_se->dl_yielded = 0;
 746	if (dl_se->dl_throttled)
 747		dl_se->dl_throttled = 0;
 748}
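/*
 * Editor's worked example (hypothetical numbers, not from this file): a
 * task with dl_runtime = 5 ms and dl_period = 20 ms overran and reaches
 * this point with runtime = -7 ms.  The loop above then does:
 *
 *   runtime = -7 ms + 5 ms = -2 ms, deadline += 20 ms
 *   runtime = -2 ms + 5 ms =  3 ms, deadline += 20 ms   -> stop
 *
 * i.e. the deadline is postponed by two full periods before the entity
 * gets a positive budget again, which is exactly what keeps an
 * arbitrarily large overrun from hurting the other entities.
 */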
 749
 750/*
 751 * Here we check if --at time t-- an entity (which is probably being
 752 * [re]activated or, in general, enqueued) can use its remaining runtime
 753 * and its current deadline _without_ exceeding the bandwidth it is
 754 * assigned (function returns true if it can't). We are in fact applying
 755 * one of the CBS rules: when a task wakes up, if the residual runtime
 756 * over residual deadline fits within the allocated bandwidth, then we
 757 * can keep the current (absolute) deadline and residual budget without
 758 * disrupting the schedulability of the system. Otherwise, we should
 759 * refill the runtime and set the deadline a period in the future,
 760 * because keeping the current (absolute) deadline of the task would
 761 * result in breaking guarantees promised to other tasks (refer to
 762 * Documentation/scheduler/sched-deadline.rst for more information).
 763 *
 764 * This function returns true if:
 765 *
 766 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 767 *
 768 * IOW we can't recycle current parameters.
 769 *
 770 * Notice that the bandwidth check is done against the deadline. For
 771 * tasks with deadline equal to period this is the same as using
 772 * dl_period instead of dl_deadline in the equation above.
 773 */
 774static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 775			       struct sched_dl_entity *pi_se, u64 t)
 776{
 777	u64 left, right;
 778
 779	/*
 780	 * left and right are the two sides of the equation above,
 781	 * after a bit of shuffling to use multiplications instead
 782	 * of divisions.
 783	 *
 784	 * Note that none of the time values involved in the two
 785	 * multiplications are absolute: dl_deadline and dl_runtime
 786	 * are the relative deadline and the maximum runtime of each
 787	 * instance, runtime is the runtime left for the last instance
 788	 * and (deadline - t), since t is rq->clock, is the time left
 789	 * to the (absolute) deadline. Even if overflowing the u64 type
 790	 * is very unlikely to occur in both cases, here we scale down
 791	 * as we want to avoid that risk at all. Scaling down by 10
 792	 * means that we reduce granularity to 1us. We are fine with it,
 793	 * since this is only a true/false check and, anyway, thinking
 794	 * of anything below microseconds resolution is actually fiction
 795	 * (but still we want to give the user that illusion >;).
 796	 */
 797	left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
 798	right = ((dl_se->deadline - t) >> DL_SCALE) *
 799		(pi_se->dl_runtime >> DL_SCALE);
 800
 801	return dl_time_before(right, left);
 802}
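/*
 * Editor's sketch (hypothetical values, not part of the kernel sources):
 * with dl_runtime = 10 ms, dl_deadline = 100 ms, remaining runtime = 4 ms
 * and (deadline - t) = 30 ms, the cross-multiplied check above gives
 *
 *   left  = dl_deadline * runtime        = 100 ms * 4 ms
 *   right = (deadline - t) * dl_runtime  =  30 ms * 10 ms
 *
 * so left > right: 4/30 > 10/100, the old parameters would exceed the
 * reserved bandwidth and the caller must refill the runtime and push the
 * deadline (or, for constrained tasks, apply the revised wakeup rule
 * below).  A standalone version of the check, using the same DL_SCALE
 * style shift (assumed to be 10 here) and a plain comparison instead of
 * the wrap-safe dl_time_before():
 */
#if 0	/* editor's example only, never compiled */
#include <stdbool.h>
#include <stdint.h>

#define EX_DL_SCALE	10	/* reduce granularity to ~1us, as above */

static bool ex_dl_entity_overflow(uint64_t dl_deadline, uint64_t dl_runtime,
				  uint64_t runtime, uint64_t deadline,
				  uint64_t t)
{
	uint64_t left  = (dl_deadline >> EX_DL_SCALE) *
			 (runtime >> EX_DL_SCALE);
	uint64_t right = ((deadline - t) >> EX_DL_SCALE) *
			 (dl_runtime >> EX_DL_SCALE);

	return right < left;	/* true: can't recycle current parameters */
}
#endif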
 803
 804/*
 805 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 806 * re-initializing the task's runtime and deadline, the revised wakeup
 807 * rule adjusts the task's runtime so that the task does not overrun
 808 * its density.
 809 *
 810 * Reasoning: a task may overrun the density if:
 811 *    runtime / (deadline - t) > dl_runtime / dl_deadline
 812 *
 813 * Therefore, runtime can be adjusted to:
 814 *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
 815 *
 816 * In this way, the runtime will be equal to the maximum density
 817 * the task can use without breaking any rule.
 818 *
 819 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 820 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 821 */
 822static void
 823update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
 824{
 825	u64 laxity = dl_se->deadline - rq_clock(rq);
 826
 827	/*
 828	 * If the task has deadline < period, and the deadline is in the past,
 829	 * it should already be throttled before this check.
 830	 *
 831	 * See update_dl_entity() comments for further details.
 832	 */
 833	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
 834
 835	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
 836}
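/*
 * Editor's worked example (hypothetical numbers): continuing the case
 * sketched after dl_entity_overflow() above, dl_runtime = 10 ms and
 * dl_deadline = 100 ms give a density of 0.1, stored roughly as
 * dl_density = (dl_runtime << BW_SHIFT) / dl_deadline.  With a laxity
 * (deadline - rq_clock) of 30 ms the assignment above yields
 *
 *   runtime = (dl_density * 30 ms) >> BW_SHIFT = 3 ms
 *
 * so the task may consume at most 3 ms before its deadline instead of
 * the 4 ms it had left, keeping it within runtime / deadline.
 */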
 837
 838/*
 839 * Regarding the deadline, a task with implicit deadline has a relative
 840 * deadline == relative period. A task with constrained deadline has a
 841 * relative deadline <= relative period.
 842 *
 843 * We support constrained deadline tasks. However, there are some restrictions
 844 * applied only for tasks which do not have an implicit deadline. See
 845 * update_dl_entity() for more about such restrictions.
 846 *
 847 * dl_is_implicit() returns true if the task has an implicit deadline.
 848 */
 849static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
 850{
 851	return dl_se->dl_deadline == dl_se->dl_period;
 852}
 853
 854/*
 855 * When a deadline entity is placed in the runqueue, its runtime and deadline
 856 * might need to be updated. This is done by a CBS wake up rule. There are two
 857 * different rules: 1) the original CBS; and 2) the Revisited CBS.
 858 *
 859 * When the task is starting a new period, the Original CBS is used. In this
 860 * case, the runtime is replenished and a new absolute deadline is set.
 861 *
 862 * When a task is queued before the beginning of the next period, using the
 863 * remaining runtime and deadline could make the entity overflow; see
 864 * dl_entity_overflow() for more about runtime overflow. When such a case
 865 * is detected, the runtime and deadline need to be updated.
 866 *
 867 * If the task has an implicit deadline, i.e., deadline == period, the Original
 868 * CBS is applied: the runtime is replenished and a new absolute deadline is
 869 * set, as in the previous cases.
 870 *
 871 * However, the Original CBS does not work properly for tasks with
 872 * deadline < period, which are said to have a constrained deadline. By
 873 * applying the Original CBS, a constrained deadline task would be able to run
 874 * runtime/deadline in a period. With deadline < period, the task would
 875 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 876 *
 877 * In order to prevent this misbehaviour, the Revisited CBS is used for
 878 * constrained deadline tasks when a runtime overflow is detected. In the
 879 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
 880 * the remaining runtime of the task is reduced to avoid runtime overflow.
 881 * Please refer to the comments in the update_dl_revised_wakeup() function
 882 * for more about the Revisited CBS rule.
 883 */
 884static void update_dl_entity(struct sched_dl_entity *dl_se,
 885			     struct sched_dl_entity *pi_se)
 886{
 887	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 888	struct rq *rq = rq_of_dl_rq(dl_rq);
 889
 890	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 891	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
 892
 893		if (unlikely(!dl_is_implicit(dl_se) &&
 894			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
 895			     !dl_se->dl_boosted)){
 896			update_dl_revised_wakeup(dl_se, rq);
 897			return;
 898		}
 899
 900		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 901		dl_se->runtime = pi_se->dl_runtime;
 902	}
 903}
 904
 905static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
 906{
 907	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
 908}
 909
 910/*
 911 * If the entity depleted all its runtime, and if we want it to sleep
 912 * while waiting for some new execution time to become available, we
 913 * set the bandwidth replenishment timer to the replenishment instant
 914 * and try to activate it.
 915 *
 916 * Notice that it is important for the caller to know if the timer
 917 * actually started or not (i.e., the replenishment instant is in
 918 * the future or in the past).
 919 */
 920static int start_dl_timer(struct task_struct *p)
 921{
 922	struct sched_dl_entity *dl_se = &p->dl;
 923	struct hrtimer *timer = &dl_se->dl_timer;
 924	struct rq *rq = task_rq(p);
 925	ktime_t now, act;
 926	s64 delta;
 927
 928	lockdep_assert_held(&rq->lock);
 929
 930	/*
 931	 * We want the timer to fire at the deadline, but consider
 932	 * that the deadline is expressed in rq->clock time, not in
 933	 * the hrtimer's time base.
 934	 */
 935	act = ns_to_ktime(dl_next_period(dl_se));
 936	now = hrtimer_cb_get_time(timer);
 937	delta = ktime_to_ns(now) - rq_clock(rq);
 938	act = ktime_add_ns(act, delta);
 939
 940	/*
 941	 * If the expiry time already passed, e.g., because the value
 942	 * chosen as the deadline is too small, don't even try to
 943	 * start the timer in the past!
 944	 */
 945	if (ktime_us_delta(act, now) < 0)
 946		return 0;
 947
 948	/*
 949	 * !enqueued will guarantee another callback; even if one is already in
 950	 * progress. This ensures a balanced {get,put}_task_struct().
 951	 *
 952	 * The race against __run_timer() clearing the enqueued state is
 953	 * harmless because we're holding task_rq()->lock, therefore the timer
 954	 * expiring after we've done the check will wait on its task_rq_lock()
 955	 * and observe our state.
 956	 */
 957	if (!hrtimer_is_queued(timer)) {
 958		get_task_struct(p);
 959		hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
 960	}
 961
 962	return 1;
 963}
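/*
 * Editor's worked example (hypothetical numbers): the replenishment
 * instant is first taken in rq_clock time and then translated into the
 * hrtimer base by adding the current offset between the two clocks.
 * Say dl_next_period() = 1,000,000 ns (rq_clock domain), the hrtimer
 * base reads 995,000 ns and rq_clock(rq) is 990,000 ns:
 *
 *   delta = 995,000 - 990,000 = 5,000 ns
 *   act   = 1,000,000 + 5,000 = 1,005,000 ns
 *
 * i.e. the timer is armed 10,000 ns after "now" in the hrtimer base,
 * the same distance the replenishment instant is ahead of rq_clock.
 */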
 964
 965/*
 966 * This is the bandwidth enforcement timer callback. If here, we know
 967 * a task is not on its dl_rq, since the fact that the timer was running
 968 * means the task is throttled and needs a runtime replenishment.
 969 *
 970 * However, what we actually do depends on whether the task is active
 971 * (i.e. it is on its rq) or has been removed from there by a call to
 972 * dequeue_task_dl(). In the former case we must issue the runtime
 973 * replenishment and add the task back to the dl_rq; in the latter, we just
 974 * do nothing but clearing dl_throttled, so that runtime and deadline
 975 * updating (and the queueing back to dl_rq) will be done by the
 976 * next call to enqueue_task_dl().
 977 */
 978static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 979{
 980	struct sched_dl_entity *dl_se = container_of(timer,
 981						     struct sched_dl_entity,
 982						     dl_timer);
 983	struct task_struct *p = dl_task_of(dl_se);
 984	struct rq_flags rf;
 985	struct rq *rq;
 986
 987	rq = task_rq_lock(p, &rf);
 988
 989	/*
 990	 * The task might have changed its scheduling policy to something
 991	 * different than SCHED_DEADLINE (through switched_from_dl()).
 992	 */
 993	if (!dl_task(p))
 994		goto unlock;
 995
 996	/*
 997	 * The task might have been boosted by someone else and might be in the
 998	 * boosting/deboosting path; it's not throttled.
 999	 */
1000	if (dl_se->dl_boosted)
1001		goto unlock;
1002
1003	/*
1004	 * Spurious timer due to start_dl_timer() race; or we already received
1005	 * a replenishment from rt_mutex_setprio().
1006	 */
1007	if (!dl_se->dl_throttled)
1008		goto unlock;
1009
1010	sched_clock_tick();
1011	update_rq_clock(rq);
1012
1013	/*
1014	 * If the throttle happened during sched-out; like:
1015	 *
1016	 *   schedule()
1017	 *     deactivate_task()
1018	 *       dequeue_task_dl()
1019	 *         update_curr_dl()
1020	 *           start_dl_timer()
1021	 *         __dequeue_task_dl()
1022	 *     prev->on_rq = 0;
1023	 *
1024	 * We can be both throttled and !queued. Replenish the counter
1025	 * but do not enqueue -- wait for our wakeup to do that.
1026	 */
1027	if (!task_on_rq_queued(p)) {
1028		replenish_dl_entity(dl_se, dl_se);
1029		goto unlock;
1030	}
1031
1032#ifdef CONFIG_SMP
1033	if (unlikely(!rq->online)) {
1034		/*
1035		 * If the runqueue is no longer available, migrate the
1036		 * task elsewhere. This necessarily changes rq.
1037		 */
1038		lockdep_unpin_lock(&rq->lock, rf.cookie);
1039		rq = dl_task_offline_migration(rq, p);
1040		rf.cookie = lockdep_pin_lock(&rq->lock);
1041		update_rq_clock(rq);
1042
1043		/*
1044		 * Now that the task has been migrated to the new RQ and we
1045		 * have that locked, proceed as normal and enqueue the task
1046		 * there.
1047		 */
1048	}
1049#endif
1050
1051	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1052	if (dl_task(rq->curr))
1053		check_preempt_curr_dl(rq, p, 0);
1054	else
1055		resched_curr(rq);
1056
1057#ifdef CONFIG_SMP
1058	/*
1059	 * Queueing this task back might have overloaded rq, check if we need
1060	 * to kick someone away.
1061	 */
1062	if (has_pushable_dl_tasks(rq)) {
1063		/*
1064		 * Nothing relies on rq->lock after this, so it's safe to drop
1065		 * rq->lock.
1066		 */
1067		rq_unpin_lock(rq, &rf);
1068		push_dl_task(rq);
1069		rq_repin_lock(rq, &rf);
1070	}
1071#endif
1072
1073unlock:
1074	task_rq_unlock(rq, p, &rf);
1075
1076	/*
1077	 * This can free the task_struct, including this hrtimer, do not touch
1078	 * anything related to that after this.
1079	 */
1080	put_task_struct(p);
1081
1082	return HRTIMER_NORESTART;
1083}
1084
1085void init_dl_task_timer(struct sched_dl_entity *dl_se)
1086{
1087	struct hrtimer *timer = &dl_se->dl_timer;
1088
1089	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1090	timer->function = dl_task_timer;
1091}
1092
1093/*
1094 * During the activation, CBS checks if it can reuse the current task's
1095 * runtime and period. If the deadline of the task is in the past, CBS
1096 * cannot use the runtime, and so it replenishes the task. This rule
1097 * works fine for implicit deadline tasks (deadline == period), and the
1098 * CBS was designed for implicit deadline tasks. However, a task with
1099 * constrained deadline (deadline < period) might be awakened after the
1100 * deadline, but before the next period. In this case, replenishing the
1101 * task would allow it to run for runtime / deadline. As in this case
1102 * deadline < period, CBS enables a task to run for more than the
1103 * runtime / period. In a very loaded system, this can cause a domino
1104 * effect, making other tasks miss their deadlines.
1105 *
1106 * To avoid this problem, in the activation of a constrained deadline
1107 * task after the deadline but before the next period, throttle the
1108 * task and set the replenishing timer to the beginning of the next period,
1109 * unless it is boosted.
1110 */
1111static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1112{
1113	struct task_struct *p = dl_task_of(dl_se);
1114	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1115
1116	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1117	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1118		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1119			return;
1120		dl_se->dl_throttled = 1;
1121		if (dl_se->runtime > 0)
1122			dl_se->runtime = 0;
1123	}
1124}
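/*
 * Editor's worked example (hypothetical parameters): a constrained task
 * with dl_runtime = 2 ms, dl_deadline = 5 ms, dl_period = 10 ms whose
 * current period started at t = 0 has its absolute deadline at t = 5 ms
 * and its next period at dl_next_period() = 5 - 5 + 10 = 10 ms.  If it
 * wakes at t = 7 ms, a plain replenishment would let it run 2 ms before
 * t = 10 ms and another 2 ms right after, i.e. well above its 2/10
 * bandwidth; the code above instead throttles it until t = 10 ms.
 */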
1125
1126static
1127int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1128{
1129	return (dl_se->runtime <= 0);
1130}
1131
1132extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1133
1134/*
1135 * This function implements the GRUB accounting rule:
1136 * according to the GRUB reclaiming algorithm, the runtime is
1137 * not decreased as "dq = -dt", but as
1138 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1139 * where u is the utilization of the task, Umax is the maximum reclaimable
1140 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1141 * as the difference between the "total runqueue utilization" and the
1142 * runqueue active utilization, and Uextra is the (per runqueue) extra
1143 * reclaimable utilization.
1144 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
1145 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
1146 * BW_SHIFT.
1147 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
1148 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1149 * Since delta is a 64 bit variable, to have an overflow its value
1150 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
1151 * So, overflow is not an issue here.
1152 */
1153static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1154{
1155	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1156	u64 u_act;
1157	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1158
1159	/*
1160	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1161	 * we compare u_inact + rq->dl.extra_bw with
1162	 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1163	 * u_inact + rq->dl.extra_bw can be larger than
1164	 * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative,
1165	 * leading to wrong results)
1166	 */
1167	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1168		u_act = u_act_min;
1169	else
1170		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1171
1172	return (delta * u_act) >> BW_SHIFT;
1173}
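/*
 * Editor's worked example (hypothetical utilizations, expressed as
 * fractions of BW_UNIT): a task with u = 0.25 on a runqueue with
 * this_bw = 0.5, running_bw = 0.4, extra_bw = 0.05 and an assumed
 * 1/Umax ratio such that (u * bw_ratio) >> RATIO_SHIFT = 0.26 gives
 *
 *   u_inact            = 0.5 - 0.4 = 0.1
 *   u_act_min          = 0.26
 *   u_inact + extra_bw = 0.15  <=  1 - u_act_min = 0.74
 *   u_act              = 1 - 0.1 - 0.05 = 0.85
 *
 * so the task is charged 0.85 ns of runtime per ns it actually runs,
 * reclaiming most of the bandwidth left unused by the other entities.
 */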
1174
1175/*
1176 * Update the current task's runtime statistics (provided it is still
1177 * a -deadline task and has not been removed from the dl_rq).
1178 */
1179static void update_curr_dl(struct rq *rq)
1180{
1181	struct task_struct *curr = rq->curr;
1182	struct sched_dl_entity *dl_se = &curr->dl;
1183	u64 delta_exec, scaled_delta_exec;
1184	int cpu = cpu_of(rq);
1185	u64 now;
1186
1187	if (!dl_task(curr) || !on_dl_rq(dl_se))
1188		return;
1189
1190	/*
1191	 * Consumed budget is computed considering the time as
1192	 * observed by schedulable tasks (excluding time spent
1193	 * in hardirq context, etc.). Deadlines are instead
1194	 * computed using hard walltime. This seems to be the more
1195	 * natural solution, but the full ramifications of this
1196	 * approach need further study.
1197	 */
1198	now = rq_clock_task(rq);
1199	delta_exec = now - curr->se.exec_start;
1200	if (unlikely((s64)delta_exec <= 0)) {
1201		if (unlikely(dl_se->dl_yielded))
1202			goto throttle;
1203		return;
1204	}
1205
1206	schedstat_set(curr->se.statistics.exec_max,
1207		      max(curr->se.statistics.exec_max, delta_exec));
1208
1209	curr->se.sum_exec_runtime += delta_exec;
1210	account_group_exec_runtime(curr, delta_exec);
1211
1212	curr->se.exec_start = now;
1213	cgroup_account_cputime(curr, delta_exec);
1214
1215	if (dl_entity_is_special(dl_se))
1216		return;
1217
1218	/*
1219	 * For tasks that participate in GRUB, we implement GRUB-PA: the
1220	 * spare reclaimed bandwidth is used to clock down frequency.
1221	 *
1222	 * For the others, we still need to scale reservation parameters
1223	 * according to current frequency and CPU maximum capacity.
1224	 */
1225	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1226		scaled_delta_exec = grub_reclaim(delta_exec,
1227						 rq,
1228						 &curr->dl);
1229	} else {
1230		unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1231		unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1232
1233		scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1234		scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1235	}
1236
1237	dl_se->runtime -= scaled_delta_exec;
1238
1239throttle:
1240	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1241		dl_se->dl_throttled = 1;
1242
1243		/* If requested, inform the user about runtime overruns. */
1244		if (dl_runtime_exceeded(dl_se) &&
1245		    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1246			dl_se->dl_overrun = 1;
1247
1248		__dequeue_task_dl(rq, curr, 0);
1249		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
1250			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1251
1252		if (!is_leftmost(curr, &rq->dl))
1253			resched_curr(rq);
1254	}
1255
1256	/*
1257	 * Because -- for now -- we share the rt bandwidth, we need to
1258	 * account our runtime there too, otherwise actual rt tasks
1259	 * would be able to exceed the shared quota.
1260	 *
1261	 * Account to the root rt group for now.
1262	 *
1263	 * The solution we're working towards is having the RT groups scheduled
1264	 * using deadline servers -- however there are a few nasties to figure
1265	 * out before that can happen.
1266	 */
1267	if (rt_bandwidth_enabled()) {
1268		struct rt_rq *rt_rq = &rq->rt;
1269
1270		raw_spin_lock(&rt_rq->rt_runtime_lock);
1271		/*
1272		 * We'll let actual RT tasks worry about the overflow here, we
1273		 * have our own CBS to keep us inline; only account when RT
1274		 * bandwidth is relevant.
1275		 */
1276		if (sched_rt_bandwidth_account(rt_rq))
1277			rt_rq->rt_time += delta_exec;
1278		raw_spin_unlock(&rt_rq->rt_runtime_lock);
1279	}
1280}
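/*
 * Editor's sketch of the non-reclaiming branch above (hypothetical
 * values; cap_scale() is assumed to be the usual
 * "(x * scale) >> SCHED_CAPACITY_SHIFT" helper with scales in 0..1024):
 * running for delta_exec = 1,000,000 ns at half the maximum frequency
 * (scale_freq = 512) on a full-capacity CPU (scale_cpu = 1024) charges
 *
 *   scaled_delta_exec = (((1,000,000 * 512) >> 10) * 1024) >> 10
 *                     = 500,000 ns
 *
 * of runtime, so a reservation sized for the fastest operating point is
 * not silently eroded when the CPU runs slower.
 */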
1281
1282static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1283{
1284	struct sched_dl_entity *dl_se = container_of(timer,
1285						     struct sched_dl_entity,
1286						     inactive_timer);
1287	struct task_struct *p = dl_task_of(dl_se);
1288	struct rq_flags rf;
1289	struct rq *rq;
1290
1291	rq = task_rq_lock(p, &rf);
1292
1293	sched_clock_tick();
1294	update_rq_clock(rq);
1295
1296	if (!dl_task(p) || p->state == TASK_DEAD) {
1297		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1298
1299		if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
1300			sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1301			sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1302			dl_se->dl_non_contending = 0;
1303		}
1304
1305		raw_spin_lock(&dl_b->lock);
1306		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1307		raw_spin_unlock(&dl_b->lock);
1308		__dl_clear_params(p);
1309
1310		goto unlock;
1311	}
1312	if (dl_se->dl_non_contending == 0)
1313		goto unlock;
1314
1315	sub_running_bw(dl_se, &rq->dl);
1316	dl_se->dl_non_contending = 0;
1317unlock:
1318	task_rq_unlock(rq, p, &rf);
1319	put_task_struct(p);
1320
1321	return HRTIMER_NORESTART;
1322}
1323
1324void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1325{
1326	struct hrtimer *timer = &dl_se->inactive_timer;
1327
1328	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1329	timer->function = inactive_task_timer;
1330}
1331
1332#ifdef CONFIG_SMP
1333
1334static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1335{
1336	struct rq *rq = rq_of_dl_rq(dl_rq);
1337
1338	if (dl_rq->earliest_dl.curr == 0 ||
1339	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1340		dl_rq->earliest_dl.curr = deadline;
1341		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1342	}
1343}
1344
1345static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1346{
1347	struct rq *rq = rq_of_dl_rq(dl_rq);
1348
1349	/*
1350	 * Since we may have removed our earliest (and/or next earliest)
1351	 * task we must recompute them.
1352	 */
1353	if (!dl_rq->dl_nr_running) {
1354		dl_rq->earliest_dl.curr = 0;
1355		dl_rq->earliest_dl.next = 0;
1356		cpudl_clear(&rq->rd->cpudl, rq->cpu);
1357	} else {
1358		struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1359		struct sched_dl_entity *entry;
1360
1361		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1362		dl_rq->earliest_dl.curr = entry->deadline;
1363		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1364	}
1365}
1366
1367#else
1368
1369static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1370static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1371
1372#endif /* CONFIG_SMP */
1373
1374static inline
1375void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1376{
1377	int prio = dl_task_of(dl_se)->prio;
1378	u64 deadline = dl_se->deadline;
1379
1380	WARN_ON(!dl_prio(prio));
1381	dl_rq->dl_nr_running++;
1382	add_nr_running(rq_of_dl_rq(dl_rq), 1);
1383
1384	inc_dl_deadline(dl_rq, deadline);
1385	inc_dl_migration(dl_se, dl_rq);
1386}
1387
1388static inline
1389void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1390{
1391	int prio = dl_task_of(dl_se)->prio;
1392
1393	WARN_ON(!dl_prio(prio));
1394	WARN_ON(!dl_rq->dl_nr_running);
1395	dl_rq->dl_nr_running--;
1396	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1397
1398	dec_dl_deadline(dl_rq, dl_se->deadline);
1399	dec_dl_migration(dl_se, dl_rq);
1400}
1401
1402static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1403{
1404	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1405	struct rb_node **link = &dl_rq->root.rb_root.rb_node;
1406	struct rb_node *parent = NULL;
1407	struct sched_dl_entity *entry;
1408	int leftmost = 1;
1409
1410	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1411
1412	while (*link) {
1413		parent = *link;
1414		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
1415		if (dl_time_before(dl_se->deadline, entry->deadline))
1416			link = &parent->rb_left;
1417		else {
1418			link = &parent->rb_right;
1419			leftmost = 0;
1420		}
1421	}
1422
1423	rb_link_node(&dl_se->rb_node, parent, link);
1424	rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
1425
1426	inc_dl_tasks(dl_se, dl_rq);
1427}
1428
1429static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1430{
1431	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1432
1433	if (RB_EMPTY_NODE(&dl_se->rb_node))
1434		return;
1435
1436	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1437	RB_CLEAR_NODE(&dl_se->rb_node);
1438
1439	dec_dl_tasks(dl_se, dl_rq);
1440}
1441
1442static void
1443enqueue_dl_entity(struct sched_dl_entity *dl_se,
1444		  struct sched_dl_entity *pi_se, int flags)
1445{
1446	BUG_ON(on_dl_rq(dl_se));
1447
1448	/*
1449	 * If this is a wakeup or a new instance, the scheduling
1450	 * parameters of the task might need updating. Otherwise,
1451	 * we want a replenishment of its runtime.
1452	 */
1453	if (flags & ENQUEUE_WAKEUP) {
1454		task_contending(dl_se, flags);
1455		update_dl_entity(dl_se, pi_se);
1456	} else if (flags & ENQUEUE_REPLENISH) {
1457		replenish_dl_entity(dl_se, pi_se);
1458	} else if ((flags & ENQUEUE_RESTORE) &&
1459		  dl_time_before(dl_se->deadline,
1460				 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1461		setup_new_dl_entity(dl_se);
1462	}
1463
1464	__enqueue_dl_entity(dl_se);
1465}
1466
1467static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1468{
1469	__dequeue_dl_entity(dl_se);
1470}
1471
1472static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1473{
1474	struct task_struct *pi_task = rt_mutex_get_top_task(p);
1475	struct sched_dl_entity *pi_se = &p->dl;
1476
1477	/*
1478	 * Use the scheduling parameters of the top pi-waiter task if:
1479	 * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
1480	 * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
1481	 *   smaller than our deadline OR we are a !SCHED_DEADLINE task getting
1482	 *   boosted due to a SCHED_DEADLINE pi-waiter).
1483	 * Otherwise we keep our runtime and deadline.
1484	 */
1485	if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
1486		pi_se = &pi_task->dl;
1487	} else if (!dl_prio(p->normal_prio)) {
1488		/*
1489		 * Special case in which we have a !SCHED_DEADLINE task
1490		 * that is going to be deboosted, but exceeds its
1491		 * runtime while doing so. No point in replenishing
1492		 * it, as it's going to return to its original
1493		 * scheduling class after this.
1494		 */
1495		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1496		return;
1497	}
1498
1499	/*
1500	 * Check if a constrained deadline task was activated
1501	 * after the deadline but before the next period.
1502	 * If that is the case, the task will be throttled and
1503	 * the replenishment timer will be set to the next period.
1504	 */
1505	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1506		dl_check_constrained_dl(&p->dl);
1507
1508	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1509		add_rq_bw(&p->dl, &rq->dl);
1510		add_running_bw(&p->dl, &rq->dl);
1511	}
1512
1513	/*
1514	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
1515	 * its budget it needs a replenishment and, since it now is on
1516	 * its rq, the bandwidth timer callback (which clearly has not
1517	 * run yet) will take care of this.
1518	 * However, the active utilization does not depend on the fact
1519	 * that the task is on the runqueue or not (but depends on the
1520	 * task's state - in GRUB parlance, "inactive" vs "active contending").
1521	 * In other words, even if a task is throttled its utilization must
1522	 * be counted in the active utilization; hence, we need to call
1523	 * add_running_bw().
1524	 */
1525	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1526		if (flags & ENQUEUE_WAKEUP)
1527			task_contending(&p->dl, flags);
1528
1529		return;
1530	}
1531
1532	enqueue_dl_entity(&p->dl, pi_se, flags);
1533
1534	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1535		enqueue_pushable_dl_task(rq, p);
1536}
1537
1538static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1539{
1540	dequeue_dl_entity(&p->dl);
1541	dequeue_pushable_dl_task(rq, p);
1542}
1543
1544static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1545{
1546	update_curr_dl(rq);
1547	__dequeue_task_dl(rq, p, flags);
1548
1549	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1550		sub_running_bw(&p->dl, &rq->dl);
1551		sub_rq_bw(&p->dl, &rq->dl);
1552	}
1553
1554	/*
1555	 * This check allows us to start the inactive timer (or to immediately
1556	 * decrease the active utilization, if needed) in two cases:
1557	 * when the task blocks and when it is terminating
1558	 * (p->state == TASK_DEAD). We can handle the two cases in the same
1559	 * way, because from GRUB's point of view the same thing is happening
1560	 * (the task moves from "active contending" to "active non contending"
1561	 * or "inactive")
1562	 */
1563	if (flags & DEQUEUE_SLEEP)
1564		task_non_contending(p);
1565}
1566
1567/*
1568 * Yield task semantic for -deadline tasks is:
1569 *
1570 *   get off the CPU until our next instance, with
1571 *   a new runtime. This is of little use now, since we
1572 *   don't have a bandwidth reclaiming mechanism. Anyway,
1573 *   bandwidth reclaiming is planned for the future, and
1574 *   yield_task_dl will indicate that some spare budget
1575 *   is available for other task instances to use.
1576 */
1577static void yield_task_dl(struct rq *rq)
1578{
1579	/*
1580	 * We make the task go to sleep until its current deadline by
1581	 * forcing its runtime to zero. This way, update_curr_dl() stops
1582	 * it and the bandwidth timer will wake it up and will give it
1583	 * new scheduling parameters (thanks to dl_yielded=1).
1584	 */
1585	rq->curr->dl.dl_yielded = 1;
1586
1587	update_rq_clock(rq);
1588	update_curr_dl(rq);
1589	/*
1590	 * Tell update_rq_clock() that we've just updated,
1591	 * so we don't do microscopic update in schedule()
1592	 * and double the fastpath cost.
1593	 */
1594	rq_clock_skip_update(rq);
1595}
1596
1597#ifdef CONFIG_SMP
1598
1599static int find_later_rq(struct task_struct *task);
1600
1601static int
1602select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1603{
1604	struct task_struct *curr;
1605	struct rq *rq;
1606
1607	if (sd_flag != SD_BALANCE_WAKE)
1608		goto out;
1609
1610	rq = cpu_rq(cpu);
1611
1612	rcu_read_lock();
1613	curr = READ_ONCE(rq->curr); /* unlocked access */
1614
1615	/*
1616	 * If we are dealing with a -deadline task, we must
1617	 * decide where to wake it up.
1618	 * If it has a later deadline and the current task
1619	 * on this rq can't move (provided the waking task
1620	 * can!) we prefer to send it somewhere else. On the
1621	 * other hand, if it has a shorter deadline, we
1622	 * try to make it stay here; it might be important.
1623	 */
1624	if (unlikely(dl_task(curr)) &&
1625	    (curr->nr_cpus_allowed < 2 ||
1626	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
1627	    (p->nr_cpus_allowed > 1)) {
1628		int target = find_later_rq(p);
1629
1630		if (target != -1 &&
1631				(dl_time_before(p->dl.deadline,
1632					cpu_rq(target)->dl.earliest_dl.curr) ||
1633				(cpu_rq(target)->dl.dl_nr_running == 0)))
1634			cpu = target;
1635	}
1636	rcu_read_unlock();
1637
1638out:
1639	return cpu;
1640}
1641
1642static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1643{
1644	struct rq *rq;
1645
1646	if (p->state != TASK_WAKING)
1647		return;
1648
1649	rq = task_rq(p);
1650	/*
1651	 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1652	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1653	 * rq->lock is not... So, lock it
1654	 */
1655	raw_spin_lock(&rq->lock);
1656	if (p->dl.dl_non_contending) {
1657		sub_running_bw(&p->dl, &rq->dl);
1658		p->dl.dl_non_contending = 0;
1659		/*
1660		 * If the timer handler is currently running and the
1661		 * timer cannot be cancelled, inactive_task_timer()
1662		 * will see that dl_non_contending is not set, and
1663		 * will not touch the rq's active utilization,
1664		 * so we are still safe.
1665		 */
1666		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1667			put_task_struct(p);
1668	}
1669	sub_rq_bw(&p->dl, &rq->dl);
1670	raw_spin_unlock(&rq->lock);
1671}
1672
1673static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1674{
1675	/*
1676	 * Current can't be migrated, useless to reschedule,
1677	 * let's hope p can move out.
1678	 */
1679	if (rq->curr->nr_cpus_allowed == 1 ||
1680	    !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1681		return;
1682
1683	/*
1684	 * p is migratable, so let's not schedule it and
1685	 * see if it is pushed or pulled somewhere else.
1686	 */
1687	if (p->nr_cpus_allowed != 1 &&
1688	    cpudl_find(&rq->rd->cpudl, p, NULL))
1689		return;
1690
1691	resched_curr(rq);
1692}
1693
1694static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1695{
1696	if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1697		/*
1698		 * This is OK, because current is on_cpu, which avoids it being
1699		 * picked for load-balance and preemption/IRQs are still
1700		 * disabled avoiding further scheduler activity on it and we've
1701		 * not yet started the picking loop.
1702		 */
1703		rq_unpin_lock(rq, rf);
1704		pull_dl_task(rq);
1705		rq_repin_lock(rq, rf);
1706	}
1707
1708	return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1709}
1710#endif /* CONFIG_SMP */
1711
1712/*
1713 * Only called when both the current and waking task are -deadline
1714 * tasks.
1715 */
1716static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1717				  int flags)
1718{
1719	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1720		resched_curr(rq);
1721		return;
1722	}
1723
1724#ifdef CONFIG_SMP
1725	/*
1726	 * In the unlikely case current and p have the same deadline
1727	 * let us try to decide what's the best thing to do...
1728	 */
1729	if ((p->dl.deadline == rq->curr->dl.deadline) &&
1730	    !test_tsk_need_resched(rq->curr))
1731		check_preempt_equal_dl(rq, p);
1732#endif /* CONFIG_SMP */
1733}
1734
1735#ifdef CONFIG_SCHED_HRTICK
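/*
 * Program the high-resolution tick to fire once the task's remaining runtime
 * has elapsed, so throttling in update_curr_dl() does not have to wait for
 * the next regular tick.
 */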
1736static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1737{
1738	hrtick_start(rq, p->dl.runtime);
1739}
1740#else /* !CONFIG_SCHED_HRTICK */
1741static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1742{
1743}
1744#endif
1745
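/*
 * @p is becoming the running task of @rq: stamp its exec_start, take it off
 * the pushable tree (the running task cannot be pushed away), possibly arm
 * the hrtick for its remaining runtime, and queue a push of the remaining
 * pushable tasks.
 */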
1746static void set_next_task_dl(struct rq *rq, struct task_struct *p)
1747{
1748	p->se.exec_start = rq_clock_task(rq);
1749
1750	/* You can't push away the running task */
1751	dequeue_pushable_dl_task(rq, p);
1752
1753	if (hrtick_enabled(rq))
1754		start_hrtick_dl(rq, p);
1755
1756	if (rq->curr->sched_class != &dl_sched_class)
1757		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1758
1759	deadline_queue_push_tasks(rq);
1760}
1761
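/*
 * The leftmost node of the deadline-ordered rb-tree is the entity with the
 * earliest deadline, i.e. the one that EDF must run next.
 */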
1762static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1763						   struct dl_rq *dl_rq)
1764{
1765	struct rb_node *left = rb_first_cached(&dl_rq->root);
1766
1767	if (!left)
1768		return NULL;
1769
1770	return rb_entry(left, struct sched_dl_entity, rb_node);
1771}
1772
1773static struct task_struct *
1774pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1775{
1776	struct sched_dl_entity *dl_se;
1777	struct dl_rq *dl_rq = &rq->dl;
1778	struct task_struct *p;
1779
1780	WARN_ON_ONCE(prev || rf);
1781
1782	if (!sched_dl_runnable(rq))
1783		return NULL;
1784
1785	dl_se = pick_next_dl_entity(rq, dl_rq);
1786	BUG_ON(!dl_se);
1787	p = dl_task_of(dl_se);
1788	set_next_task_dl(rq, p);
1789	return p;
1790}
1791
1792static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1793{
1794	update_curr_dl(rq);
1795
1796	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1797	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1798		enqueue_pushable_dl_task(rq, p);
1799}
1800
1801/*
1802 * scheduler tick hitting a task of our scheduling class.
1803 *
1804 * NOTE: This function can be called remotely by the tick offload that
1805 * goes along full dynticks. Therefore no local assumption can be made
1806 * and everything must be accessed through the @rq and @curr passed in
1807 * parameters.
1808 */
1809static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1810{
1811	update_curr_dl(rq);
1812
1813	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1814	/*
1815	 * Even when we have runtime, update_curr_dl() might have resulted in us
1816	 * not being the leftmost task anymore. In that case NEED_RESCHED will
1817	 * be set and schedule() will start a new hrtick for the next task.
1818	 */
1819	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1820	    is_leftmost(p, &rq->dl))
1821		start_hrtick_dl(rq, p);
1822}
1823
1824static void task_fork_dl(struct task_struct *p)
1825{
1826	/*
1827	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1828	 * sched_fork()
1829	 */
1830}
1831
1832#ifdef CONFIG_SMP
1833
1834/* Only try algorithms three times */
1835#define DL_MAX_TRIES 3
1836
1837static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1838{
1839	if (!task_running(rq, p) &&
1840	    cpumask_test_cpu(cpu, p->cpus_ptr))
1841		return 1;
1842	return 0;
1843}
1844
1845/*
1846 * Return the earliest pushable task on this rq that is suitable to be
1847 * executed on the given CPU, or NULL if there is none:
1848 */
1849static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1850{
1851	struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1852	struct task_struct *p = NULL;
1853
1854	if (!has_pushable_dl_tasks(rq))
1855		return NULL;
1856
1857next_node:
1858	if (next_node) {
1859		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1860
1861		if (pick_dl_task(rq, p, cpu))
1862			return p;
1863
1864		next_node = rb_next(next_node);
1865		goto next_node;
1866	}
1867
1868	return NULL;
1869}
1870
1871static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1872
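/*
 * Find a CPU on which @task could run and whose earliest deadline is later
 * than @task's (or which has no -deadline tasks queued). Preference order:
 * the task's current CPU if it is in the candidate mask (likely cache-hot),
 * then this CPU if it shares a SD_WAKE_AFFINE domain with that CPU, then the
 * first candidate inside such a domain, and finally any candidate CPU.
 * Returns -1 if no suitable CPU exists.
 */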
1873static int find_later_rq(struct task_struct *task)
1874{
1875	struct sched_domain *sd;
1876	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1877	int this_cpu = smp_processor_id();
1878	int cpu = task_cpu(task);
1879
1880	/* Make sure the mask is initialized first */
1881	if (unlikely(!later_mask))
1882		return -1;
1883
1884	if (task->nr_cpus_allowed == 1)
1885		return -1;
1886
1887	/*
1888	 * We have to consider system topology and task affinity
1889	 * first, then we can look for a suitable CPU.
1890	 */
1891	if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1892		return -1;
1893
1894	/*
1895	 * If we are here, some targets have been found, including
1896	 * the most suitable one: among the runqueues whose current
1897	 * tasks have a later deadline than the task's, it is the rq
1898	 * with the latest possible deadline.
1899	 *
1900	 * Now we check how well this matches with task's
1901	 * affinity and system topology.
1902	 *
1903	 * The last CPU where the task ran is our first
1904	 * guess, since it is most likely cache-hot there.
1905	 */
1906	if (cpumask_test_cpu(cpu, later_mask))
1907		return cpu;
1908	/*
1909	 * Check if this_cpu is to be skipped (i.e., it is
1910	 * not in the mask) or not.
1911	 */
1912	if (!cpumask_test_cpu(this_cpu, later_mask))
1913		this_cpu = -1;
1914
1915	rcu_read_lock();
1916	for_each_domain(cpu, sd) {
1917		if (sd->flags & SD_WAKE_AFFINE) {
1918			int best_cpu;
1919
1920			/*
1921			 * If possible, preempting this_cpu is
1922			 * cheaper than migrating.
1923			 */
1924			if (this_cpu != -1 &&
1925			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1926				rcu_read_unlock();
1927				return this_cpu;
1928			}
1929
1930			best_cpu = cpumask_first_and(later_mask,
1931							sched_domain_span(sd));
1932			/*
1933			 * Last chance: if a CPU that is in both later_mask
1934			 * and the current sd span is valid, that becomes our
1935			 * choice. Of course, the latest possible CPU is
1936			 * already under consideration through later_mask.
1937			 */
1938			if (best_cpu < nr_cpu_ids) {
1939				rcu_read_unlock();
1940				return best_cpu;
1941			}
1942		}
1943	}
1944	rcu_read_unlock();
1945
1946	/*
1947	 * At this point, all our guesses failed, we just return
1948	 * 'something', and let the caller sort the things out.
1949	 */
1950	if (this_cpu != -1)
1951		return this_cpu;
1952
1953	cpu = cpumask_any(later_mask);
1954	if (cpu < nr_cpu_ids)
1955		return cpu;
1956
1957	return -1;
1958}
1959
1960/* Locks the rq it finds */
1961static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1962{
1963	struct rq *later_rq = NULL;
1964	int tries;
1965	int cpu;
1966
1967	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1968		cpu = find_later_rq(task);
1969
1970		if ((cpu == -1) || (cpu == rq->cpu))
1971			break;
1972
1973		later_rq = cpu_rq(cpu);
1974
1975		if (later_rq->dl.dl_nr_running &&
1976		    !dl_time_before(task->dl.deadline,
1977					later_rq->dl.earliest_dl.curr)) {
1978			/*
1979			 * Target rq has tasks of equal or earlier deadline;
1980			 * retrying does not release any lock and is unlikely
1981			 * to yield a different result.
1982			 */
1983			later_rq = NULL;
1984			break;
1985		}
1986
1987		/* Retry if something changed. */
1988		if (double_lock_balance(rq, later_rq)) {
1989			if (unlikely(task_rq(task) != rq ||
1990				     !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
1991				     task_running(rq, task) ||
1992				     !dl_task(task) ||
1993				     !task_on_rq_queued(task))) {
1994				double_unlock_balance(rq, later_rq);
1995				later_rq = NULL;
1996				break;
1997			}
1998		}
1999
2000		/*
2001		 * If the rq we found has no -deadline task, or
2002		 * its earliest one has a later deadline than our
2003		 * task, the rq is a good one.
2004		 */
2005		if (!later_rq->dl.dl_nr_running ||
2006		    dl_time_before(task->dl.deadline,
2007				   later_rq->dl.earliest_dl.curr))
2008			break;
2009
2010		/* Otherwise we try again. */
2011		double_unlock_balance(rq, later_rq);
2012		later_rq = NULL;
2013	}
2014
2015	return later_rq;
2016}
2017
2018static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2019{
2020	struct task_struct *p;
2021
2022	if (!has_pushable_dl_tasks(rq))
2023		return NULL;
2024
2025	p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2026		     struct task_struct, pushable_dl_tasks);
2027
2028	BUG_ON(rq->cpu != task_cpu(p));
2029	BUG_ON(task_current(rq, p));
2030	BUG_ON(p->nr_cpus_allowed <= 1);
2031
2032	BUG_ON(!task_on_rq_queued(p));
2033	BUG_ON(!dl_task(p));
2034
2035	return p;
2036}
2037
2038/*
2039 * See if the non-running -deadline tasks on this rq
2040 * can be sent to some other CPU where they can preempt
2041 * and start executing.
2042 */
2043static int push_dl_task(struct rq *rq)
2044{
2045	struct task_struct *next_task;
2046	struct rq *later_rq;
2047	int ret = 0;
2048
2049	if (!rq->dl.overloaded)
2050		return 0;
2051
2052	next_task = pick_next_pushable_dl_task(rq);
2053	if (!next_task)
2054		return 0;
2055
2056retry:
2057	if (WARN_ON(next_task == rq->curr))
2058		return 0;
2059
2060	/*
2061	 * If next_task preempts rq->curr, and rq->curr
2062	 * can move away, it makes sense to just reschedule
2063	 * without going further in pushing next_task.
2064	 */
2065	if (dl_task(rq->curr) &&
2066	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2067	    rq->curr->nr_cpus_allowed > 1) {
2068		resched_curr(rq);
2069		return 0;
2070	}
2071
2072	/* We might release rq lock */
2073	get_task_struct(next_task);
2074
2075	/* Will lock the rq it'll find */
2076	later_rq = find_lock_later_rq(next_task, rq);
2077	if (!later_rq) {
2078		struct task_struct *task;
2079
2080		/*
2081		 * We must check all this again, since
2082		 * find_lock_later_rq releases rq->lock and it is
2083		 * then possible that next_task has migrated.
2084		 */
2085		task = pick_next_pushable_dl_task(rq);
2086		if (task == next_task) {
2087			/*
2088			 * The task is still there. We don't try
2089			 * again, some other CPU will pull it when ready.
2090			 */
2091			goto out;
2092		}
2093
2094		if (!task)
2095			/* No more tasks */
2096			goto out;
2097
2098		put_task_struct(next_task);
2099		next_task = task;
2100		goto retry;
2101	}
2102
2103	deactivate_task(rq, next_task, 0);
2104	set_task_cpu(next_task, later_rq->cpu);
2105
2106	/*
2107	 * Update the later_rq clock here, because the clock is used
2108	 * by the cpufreq_update_util() inside __add_running_bw().
2109	 */
2110	update_rq_clock(later_rq);
2111	activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2112	ret = 1;
2113
2114	resched_curr(later_rq);
2115
2116	double_unlock_balance(rq, later_rq);
2117
2118out:
2119	put_task_struct(next_task);
2120
2121	return ret;
2122}
2123
2124static void push_dl_tasks(struct rq *rq)
2125{
2126	/* push_dl_task() will return true if it moved a -deadline task */
2127	while (push_dl_task(rq))
2128		;
2129}
2130
2131static void pull_dl_task(struct rq *this_rq)
2132{
2133	int this_cpu = this_rq->cpu, cpu;
2134	struct task_struct *p;
2135	bool resched = false;
2136	struct rq *src_rq;
2137	u64 dmin = LONG_MAX;
2138
2139	if (likely(!dl_overloaded(this_rq)))
2140		return;
2141
2142	/*
2143	 * Match the barrier from dl_set_overload(); this guarantees that if we
2144	 * see overloaded we must also see the dlo_mask bit.
2145	 */
2146	smp_rmb();
2147
2148	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2149		if (this_cpu == cpu)
2150			continue;
2151
2152		src_rq = cpu_rq(cpu);
2153
2154		/*
2155		 * It looks racy, and it is! However, as in sched_rt.c,
2156		 * we are fine with this.
2157		 */
2158		if (this_rq->dl.dl_nr_running &&
2159		    dl_time_before(this_rq->dl.earliest_dl.curr,
2160				   src_rq->dl.earliest_dl.next))
2161			continue;
2162
2163		/* Might drop this_rq->lock */
2164		double_lock_balance(this_rq, src_rq);
2165
2166		/*
2167		 * If there are no more pullable tasks on the
2168		 * rq, we're done with it.
2169		 */
2170		if (src_rq->dl.dl_nr_running <= 1)
2171			goto skip;
2172
2173		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2174
2175		/*
2176		 * We found a task to be pulled if:
2177		 *  - it preempts our current (if there's one),
2178		 *  - it will preempt the last one we pulled (if any).
2179		 */
2180		if (p && dl_time_before(p->dl.deadline, dmin) &&
2181		    (!this_rq->dl.dl_nr_running ||
2182		     dl_time_before(p->dl.deadline,
2183				    this_rq->dl.earliest_dl.curr))) {
2184			WARN_ON(p == src_rq->curr);
2185			WARN_ON(!task_on_rq_queued(p));
2186
2187			/*
2188			 * Skip p if its deadline is earlier than that of the
2189			 * task currently running on its runqueue: p is then
			 * probably just waking up and about to run there.
2190			 */
2191			if (dl_time_before(p->dl.deadline,
2192					   src_rq->curr->dl.deadline))
2193				goto skip;
2194
2195			resched = true;
2196
2197			deactivate_task(src_rq, p, 0);
2198			set_task_cpu(p, this_cpu);
2199			activate_task(this_rq, p, 0);
2200			dmin = p->dl.deadline;
2201
2202			/* Is there any other task even earlier? */
2203		}
2204skip:
2205		double_unlock_balance(this_rq, src_rq);
2206	}
2207
2208	if (resched)
2209		resched_curr(this_rq);
2210}
2211
2212/*
2213 * Since the task is not running and a reschedule is not going to happen
2214 * anytime soon on its runqueue, we try pushing it away now.
2215 */
2216static void task_woken_dl(struct rq *rq, struct task_struct *p)
2217{
2218	if (!task_running(rq, p) &&
2219	    !test_tsk_need_resched(rq->curr) &&
2220	    p->nr_cpus_allowed > 1 &&
2221	    dl_task(rq->curr) &&
2222	    (rq->curr->nr_cpus_allowed < 2 ||
2223	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2224		push_dl_tasks(rq);
2225	}
2226}
2227
2228static void set_cpus_allowed_dl(struct task_struct *p,
2229				const struct cpumask *new_mask)
2230{
2231	struct root_domain *src_rd;
2232	struct rq *rq;
2233
2234	BUG_ON(!dl_task(p));
2235
2236	rq = task_rq(p);
2237	src_rd = rq->rd;
2238	/*
2239	 * Migrating a SCHED_DEADLINE task between exclusive
2240	 * cpusets (different root_domains) entails a bandwidth
2241	 * update. We already made space for us in the destination
2242	 * domain (see cpuset_can_attach()).
2243	 */
2244	if (!cpumask_intersects(src_rd->span, new_mask)) {
2245		struct dl_bw *src_dl_b;
2246
2247		src_dl_b = dl_bw_of(cpu_of(rq));
2248		/*
2249		 * We now free resources of the root_domain we are migrating
2250		 * off. In the worst case, sched_setattr() may temporarily fail
2251		 * until we complete the update.
2252		 */
2253		raw_spin_lock(&src_dl_b->lock);
2254		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2255		raw_spin_unlock(&src_dl_b->lock);
2256	}
2257
2258	set_cpus_allowed_common(p, new_mask);
2259}
2260
2261/* Assumes rq->lock is held */
2262static void rq_online_dl(struct rq *rq)
2263{
2264	if (rq->dl.overloaded)
2265		dl_set_overload(rq);
2266
2267	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2268	if (rq->dl.dl_nr_running > 0)
2269		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2270}
2271
2272/* Assumes rq->lock is held */
2273static void rq_offline_dl(struct rq *rq)
2274{
2275	if (rq->dl.overloaded)
2276		dl_clear_overload(rq);
2277
2278	cpudl_clear(&rq->rd->cpudl, rq->cpu);
2279	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2280}
2281
2282void __init init_sched_dl_class(void)
2283{
2284	unsigned int i;
2285
2286	for_each_possible_cpu(i)
2287		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2288					GFP_KERNEL, cpu_to_node(i));
2289}
2290
2291void dl_add_task_root_domain(struct task_struct *p)
2292{
2293	struct rq_flags rf;
2294	struct rq *rq;
2295	struct dl_bw *dl_b;
2296
2297	rq = task_rq_lock(p, &rf);
2298	if (!dl_task(p))
2299		goto unlock;
2300
2301	dl_b = &rq->rd->dl_bw;
2302	raw_spin_lock(&dl_b->lock);
2303
2304	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2305
2306	raw_spin_unlock(&dl_b->lock);
2307
2308unlock:
2309	task_rq_unlock(rq, p, &rf);
2310}
2311
2312void dl_clear_root_domain(struct root_domain *rd)
2313{
2314	unsigned long flags;
2315
2316	raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2317	rd->dl_bw.total_bw = 0;
2318	raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2319}
2320
2321#endif /* CONFIG_SMP */
2322
2323static void switched_from_dl(struct rq *rq, struct task_struct *p)
2324{
2325	/*
2326	 * task_non_contending() can start the "inactive timer" (if the 0-lag
2327	 * time is in the future). If the task switches back to dl before
2328	 * the "inactive timer" fires, it can continue to consume its current
2329	 * runtime using its current deadline. If it stays outside of
2330	 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2331	 * will reset the task parameters.
2332	 */
2333	if (task_on_rq_queued(p) && p->dl.dl_runtime)
2334		task_non_contending(p);
2335
2336	if (!task_on_rq_queued(p)) {
2337		/*
2338		 * Inactive timer is armed. However, p is leaving DEADLINE and
2339		 * might migrate away from this rq while continuing to run on
2340		 * some other class. We need to remove its contribution from
2341		 * this rq running_bw now, or sub_rq_bw (below) will complain.
2342		 */
2343		if (p->dl.dl_non_contending)
2344			sub_running_bw(&p->dl, &rq->dl);
2345		sub_rq_bw(&p->dl, &rq->dl);
2346	}
2347
2348	/*
2349	 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2350	 * at the 0-lag time, because the task could have been migrated
2351	 * in the meantime while running as SCHED_OTHER.
2352	 */
2353	if (p->dl.dl_non_contending)
2354		p->dl.dl_non_contending = 0;
2355
2356	/*
2357	 * Since this might be the only -deadline task on the rq,
2358	 * this is the right place to try to pull some other one
2359	 * from an overloaded CPU, if any.
2360	 */
2361	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2362		return;
2363
2364	deadline_queue_pull_task(rq);
2365}
2366
2367/*
2368 * When switching to -deadline, we may overload the rq, then
2369 * we try to push someone off, if possible.
2370 */
2371static void switched_to_dl(struct rq *rq, struct task_struct *p)
2372{
2373	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2374		put_task_struct(p);
2375
2376	/* If p is not queued we will update its parameters at next wakeup. */
2377	if (!task_on_rq_queued(p)) {
2378		add_rq_bw(&p->dl, &rq->dl);
2379
2380		return;
2381	}
2382
2383	if (rq->curr != p) {
2384#ifdef CONFIG_SMP
2385		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2386			deadline_queue_push_tasks(rq);
2387#endif
2388		if (dl_task(rq->curr))
2389			check_preempt_curr_dl(rq, p, 0);
2390		else
2391			resched_curr(rq);
2392	}
2393}
2394
2395/*
2396 * If the scheduling parameters of a -deadline task changed,
2397 * a push or pull operation might be needed.
2398 */
2399static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2400			    int oldprio)
2401{
2402	if (task_on_rq_queued(p) || rq->curr == p) {
2403#ifdef CONFIG_SMP
2404		/*
2405		 * This might be too much, but unfortunately
2406		 * we don't have the old deadline value, and
2407		 * we can't tell whether the task is raising
2408		 * or lowering its prio, so...
2409		 */
2410		if (!rq->dl.overloaded)
2411			deadline_queue_pull_task(rq);
2412
2413		/*
2414		 * If we now have an earlier deadline task than p,
2415		 * then reschedule, provided p is still on this
2416		 * runqueue.
2417		 */
2418		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2419			resched_curr(rq);
2420#else
2421		/*
2422		 * Again, we don't know if p has an earlier
2423		 * or later deadline, so let's blindly set a
2424		 * (maybe not needed) rescheduling point.
2425		 */
2426		resched_curr(rq);
2427#endif /* CONFIG_SMP */
2428	}
2429}
2430
2431const struct sched_class dl_sched_class = {
2432	.next			= &rt_sched_class,
2433	.enqueue_task		= enqueue_task_dl,
2434	.dequeue_task		= dequeue_task_dl,
2435	.yield_task		= yield_task_dl,
2436
2437	.check_preempt_curr	= check_preempt_curr_dl,
2438
2439	.pick_next_task		= pick_next_task_dl,
2440	.put_prev_task		= put_prev_task_dl,
2441	.set_next_task		= set_next_task_dl,
2442
2443#ifdef CONFIG_SMP
2444	.balance		= balance_dl,
2445	.select_task_rq		= select_task_rq_dl,
2446	.migrate_task_rq	= migrate_task_rq_dl,
2447	.set_cpus_allowed       = set_cpus_allowed_dl,
2448	.rq_online              = rq_online_dl,
2449	.rq_offline             = rq_offline_dl,
2450	.task_woken		= task_woken_dl,
2451#endif
2452
2453	.task_tick		= task_tick_dl,
2454	.task_fork              = task_fork_dl,
2455
2456	.prio_changed           = prio_changed_dl,
2457	.switched_from		= switched_from_dl,
2458	.switched_to		= switched_to_dl,
2459
2460	.update_curr		= update_curr_dl,
2461};
2462
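/*
 * Called when the global rt runtime/period sysctls change: reject the new
 * values if the resulting deadline bandwidth (runtime/period) would be
 * smaller than what has already been admitted in some root_domain.
 */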
2463int sched_dl_global_validate(void)
2464{
2465	u64 runtime = global_rt_runtime();
2466	u64 period = global_rt_period();
2467	u64 new_bw = to_ratio(period, runtime);
2468	struct dl_bw *dl_b;
2469	int cpu, ret = 0;
2470	unsigned long flags;
2471
2472	/*
2473	 * Here we want to check that the bandwidth is not being set to a
2474	 * value smaller than the currently allocated bandwidth in
2475	 * any of the root_domains.
2476	 *
2477	 * FIXME: Cycling over all the CPUs is overkill, but simpler than
2478	 * cycling on root_domains... Discussion on different/better
2479	 * solutions is welcome!
2480	 */
2481	for_each_possible_cpu(cpu) {
2482		rcu_read_lock_sched();
2483		dl_b = dl_bw_of(cpu);
2484
2485		raw_spin_lock_irqsave(&dl_b->lock, flags);
2486		if (new_bw < dl_b->total_bw)
2487			ret = -EBUSY;
2488		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2489
2490		rcu_read_unlock_sched();
2491
2492		if (ret)
2493			break;
2494	}
2495
2496	return ret;
2497}
2498
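/*
 * dl_rq->extra_bw caches the maximum deadline bandwidth allowed by the
 * global rt limits (runtime/period in BW_SHIFT fixed point), and
 * dl_rq->bw_ratio its reciprocal in RATIO_SHIFT fixed point, as used by the
 * GRUB reclaiming code. As a worked example, assuming the default limits of
 * 950000us runtime every 1000000us and BW_SHIFT == 20, RATIO_SHIFT == 8:
 * extra_bw ~= 0.95 * 2^20 = 996147 and bw_ratio ~= (1/0.95) * 2^8 = 269.
 */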
2499void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2500{
2501	if (global_rt_runtime() == RUNTIME_INF) {
2502		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2503		dl_rq->extra_bw = 1 << BW_SHIFT;
2504	} else {
2505		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2506			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2507		dl_rq->extra_bw = to_ratio(global_rt_period(),
2508						    global_rt_runtime());
2509	}
2510}
2511
2512void sched_dl_do_global(void)
2513{
2514	u64 new_bw = -1;
2515	struct dl_bw *dl_b;
2516	int cpu;
2517	unsigned long flags;
2518
2519	def_dl_bandwidth.dl_period = global_rt_period();
2520	def_dl_bandwidth.dl_runtime = global_rt_runtime();
2521
2522	if (global_rt_runtime() != RUNTIME_INF)
2523		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2524
2525	/*
2526	 * FIXME: As above...
2527	 */
2528	for_each_possible_cpu(cpu) {
2529		rcu_read_lock_sched();
2530		dl_b = dl_bw_of(cpu);
2531
2532		raw_spin_lock_irqsave(&dl_b->lock, flags);
2533		dl_b->bw = new_bw;
2534		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2535
2536		rcu_read_unlock_sched();
2537		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2538	}
2539}
2540
2541/*
2542 * We must be sure that accepting a new task (or allowing changing the
2543 * parameters of an existing one) is consistent with the bandwidth
2544 * constraints. If yes, this function also accordingly updates the currently
2545 * allocated bandwidth to reflect the new situation.
2546 *
2547 * This function is called while holding p's rq->lock.
2548 */
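/*
 * As a rough worked example (values hypothetical): a task requesting
 * sched_runtime = 10ms over sched_period = 100ms gets
 * new_bw = to_ratio(100ms, 10ms) ~= 0.10 in BW_SHIFT fixed point, and the
 * request is admitted only if the root_domain's total_bw, plus new_bw and
 * minus whatever bandwidth the task already holds, still fits within
 * cpus * dl_b->bw.
 */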
2549int sched_dl_overflow(struct task_struct *p, int policy,
2550		      const struct sched_attr *attr)
2551{
2552	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2553	u64 period = attr->sched_period ?: attr->sched_deadline;
2554	u64 runtime = attr->sched_runtime;
2555	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2556	int cpus, err = -1;
2557
2558	if (attr->sched_flags & SCHED_FLAG_SUGOV)
2559		return 0;
2560
2561	/* !deadline task may carry old deadline bandwidth */
2562	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2563		return 0;
2564
2565	/*
2566	 * Whether a task enters, leaves, or stays -deadline but changes
2567	 * its parameters, we may need to update the total allocated
2568	 * bandwidth of the container accordingly.
2569	 */
2570	raw_spin_lock(&dl_b->lock);
2571	cpus = dl_bw_cpus(task_cpu(p));
2572	if (dl_policy(policy) && !task_has_dl_policy(p) &&
2573	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2574		if (hrtimer_active(&p->dl.inactive_timer))
2575			__dl_sub(dl_b, p->dl.dl_bw, cpus);
2576		__dl_add(dl_b, new_bw, cpus);
2577		err = 0;
2578	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
2579		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2580		/*
2581		 * XXX this is slightly incorrect: when the task
2582		 * utilization decreases, we should delay the total
2583		 * utilization change until the task's 0-lag point.
2584		 * But this would require setting the task's "inactive
2585		 * timer" when the task is not inactive.
2586		 */
2587		__dl_sub(dl_b, p->dl.dl_bw, cpus);
2588		__dl_add(dl_b, new_bw, cpus);
2589		dl_change_utilization(p, new_bw);
2590		err = 0;
2591	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2592		/*
2593		 * Do not decrease the total deadline utilization here,
2594		 * switched_from_dl() will take care to do it at the correct
2595		 * (0-lag) time.
2596		 */
2597		err = 0;
2598	}
2599	raw_spin_unlock(&dl_b->lock);
2600
2601	return err;
2602}
2603
2604/*
2605 * This function initializes the sched_dl_entity of a task that is
2606 * becoming SCHED_DEADLINE.
2607 *
2608 * Only the static values are considered here, the actual runtime and the
2609 * absolute deadline will be properly calculated when the task is enqueued
2610 * for the first time with its new policy.
2611 */
2612void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2613{
2614	struct sched_dl_entity *dl_se = &p->dl;
2615
2616	dl_se->dl_runtime = attr->sched_runtime;
2617	dl_se->dl_deadline = attr->sched_deadline;
2618	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2619	dl_se->flags = attr->sched_flags;
2620	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2621	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2622}
2623
2624void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2625{
2626	struct sched_dl_entity *dl_se = &p->dl;
2627
2628	attr->sched_priority = p->rt_priority;
2629	attr->sched_runtime = dl_se->dl_runtime;
2630	attr->sched_deadline = dl_se->dl_deadline;
2631	attr->sched_period = dl_se->dl_period;
2632	attr->sched_flags = dl_se->flags;
2633}
2634
2635/*
2636 * This function validates the new parameters of a -deadline task.
2637 * We require the deadline to be non-zero and greater than or equal
2638 * to the runtime, and the period to be either zero or greater than
2639 * or equal to the deadline. Furthermore, we have to be sure that
2640 * user parameters are above the internal resolution of 1us (we
2641 * check sched_runtime only since it is always the smaller one) and
2642 * below 2^63 ns (we have to check both sched_deadline and
2643 * sched_period, as the latter can be zero).
2644 */
2645bool __checkparam_dl(const struct sched_attr *attr)
2646{
2647	/* special dl tasks don't actually use any parameter */
2648	if (attr->sched_flags & SCHED_FLAG_SUGOV)
2649		return true;
2650
2651	/* deadline != 0 */
2652	if (attr->sched_deadline == 0)
2653		return false;
2654
2655	/*
2656	 * Since we truncate DL_SCALE bits, make sure we're at least
2657	 * that big.
2658	 */
2659	if (attr->sched_runtime < (1ULL << DL_SCALE))
2660		return false;
2661
2662	/*
2663	 * Since we use the MSB for wrap-around and sign issues, make
2664	 * sure it's not set (mind that period can be equal to zero).
2665	 */
2666	if (attr->sched_deadline & (1ULL << 63) ||
2667	    attr->sched_period & (1ULL << 63))
2668		return false;
2669
2670	/* runtime <= deadline <= period (if period != 0) */
2671	if ((attr->sched_period != 0 &&
2672	     attr->sched_period < attr->sched_deadline) ||
2673	    attr->sched_deadline < attr->sched_runtime)
2674		return false;
2675
2676	return true;
2677}
2678
2679/*
2680 * This function clears the sched_dl_entity static params.
2681 */
2682void __dl_clear_params(struct task_struct *p)
2683{
2684	struct sched_dl_entity *dl_se = &p->dl;
2685
2686	dl_se->dl_runtime		= 0;
2687	dl_se->dl_deadline		= 0;
2688	dl_se->dl_period		= 0;
2689	dl_se->flags			= 0;
2690	dl_se->dl_bw			= 0;
2691	dl_se->dl_density		= 0;
2692
2693	dl_se->dl_throttled		= 0;
2694	dl_se->dl_yielded		= 0;
2695	dl_se->dl_non_contending	= 0;
2696	dl_se->dl_overrun		= 0;
2697}
2698
2699bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2700{
2701	struct sched_dl_entity *dl_se = &p->dl;
2702
2703	if (dl_se->dl_runtime != attr->sched_runtime ||
2704	    dl_se->dl_deadline != attr->sched_deadline ||
2705	    dl_se->dl_period != attr->sched_period ||
2706	    dl_se->flags != attr->sched_flags)
2707		return true;
2708
2709	return false;
2710}
2711
2712#ifdef CONFIG_SMP
2713int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2714{
2715	unsigned int dest_cpu;
2716	struct dl_bw *dl_b;
2717	bool overflow;
2718	int cpus, ret;
2719	unsigned long flags;
2720
2721	dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
2722
2723	rcu_read_lock_sched();
2724	dl_b = dl_bw_of(dest_cpu);
2725	raw_spin_lock_irqsave(&dl_b->lock, flags);
2726	cpus = dl_bw_cpus(dest_cpu);
2727	overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
2728	if (overflow) {
2729		ret = -EBUSY;
2730	} else {
2731		/*
2732		 * We reserve space for this task in the destination
2733		 * root_domain, as we can't fail after this point.
2734		 * We will free resources in the source root_domain
2735		 * later on (see set_cpus_allowed_dl()).
2736		 */
2737		__dl_add(dl_b, p->dl.dl_bw, cpus);
2738		ret = 0;
2739	}
2740	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2741	rcu_read_unlock_sched();
2742
2743	return ret;
2744}
2745
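/*
 * Before a cpuset's CPU mask is shrunk, verify that the deadline bandwidth
 * already reserved in the current set still fits in the trial set, i.e.
 * that bw * nr_trial_cpus >= total_bw.
 */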
2746int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2747				 const struct cpumask *trial)
2748{
2749	int ret = 1, trial_cpus;
2750	struct dl_bw *cur_dl_b;
2751	unsigned long flags;
2752
2753	rcu_read_lock_sched();
2754	cur_dl_b = dl_bw_of(cpumask_any(cur));
2755	trial_cpus = cpumask_weight(trial);
2756
2757	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2758	if (cur_dl_b->bw != -1 &&
2759	    cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2760		ret = 0;
2761	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2762	rcu_read_unlock_sched();
2763
2764	return ret;
2765}
2766
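/*
 * Used on the CPU hotplug path: returns true if the deadline bandwidth
 * reserved in @cpu's root_domain no longer fits in the remaining active
 * CPUs, in which case the CPU cannot be safely taken offline.
 */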
2767bool dl_cpu_busy(unsigned int cpu)
2768{
2769	unsigned long flags;
2770	struct dl_bw *dl_b;
2771	bool overflow;
2772	int cpus;
2773
2774	rcu_read_lock_sched();
2775	dl_b = dl_bw_of(cpu);
2776	raw_spin_lock_irqsave(&dl_b->lock, flags);
2777	cpus = dl_bw_cpus(cpu);
2778	overflow = __dl_overflow(dl_b, cpus, 0, 0);
2779	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2780	rcu_read_unlock_sched();
2781
2782	return overflow;
2783}
2784#endif
2785
2786#ifdef CONFIG_SCHED_DEBUG
2787void print_dl_stats(struct seq_file *m, int cpu)
2788{
2789	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2790}
2791#endif /* CONFIG_SCHED_DEBUG */