Linux v4.10.11: kernel/sched/deadline.c
 
   1/*
   2 * Deadline Scheduling Class (SCHED_DEADLINE)
   3 *
   4 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
   5 *
   6 * Tasks that periodically execute their instances for less than their
   7 * runtime won't miss any of their deadlines.
   8 * Tasks that are not periodic or sporadic or that try to execute more
   9 * than their reserved bandwidth will be slowed down (and may potentially
  10 * miss some of their deadlines), and won't affect any other task.
  11 *
  12 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
  13 *                    Juri Lelli <juri.lelli@gmail.com>,
  14 *                    Michael Trimarchi <michael@amarulasolutions.com>,
  15 *                    Fabio Checconi <fchecconi@gmail.com>
  16 */
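/*
 * Editor's note: an illustrative userspace sketch, not part of deadline.c,
 * showing how a task would request the kind of reservation described in the
 * comment above (runtime/deadline/period) via sched_setattr(). Many libcs
 * ship no wrapper for this syscall, so the uapi struct layout is mirrored
 * locally and the raw syscall number is used; verify both against your
 * system's headers. The 10ms/30ms/100ms numbers are arbitrary examples.
 */
#include <linux/types.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

struct sched_attr_example {		/* mirrors uapi struct sched_attr */
	__u32 size;
	__u32 sched_policy;
	__u64 sched_flags;
	__s32 sched_nice;
	__u32 sched_priority;
	__u64 sched_runtime;		/* worst-case execution time, ns */
	__u64 sched_deadline;		/* relative deadline, ns */
	__u64 sched_period;		/* period, ns */
};

int main(void)
{
	struct sched_attr_example attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10 * 1000 * 1000,	/* 10ms every instance */
		.sched_deadline	= 30 * 1000 * 1000,	/* due within 30ms */
		.sched_period	= 100 * 1000 * 1000,	/* one instance per 100ms */
	};

	/* pid 0 means "current task"; the flags argument must be 0 */
	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		perror("sched_setattr");

	return 0;
}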
  17#include "sched.h"
  18
  19#include <linux/slab.h>
  20
  21struct dl_bandwidth def_dl_bandwidth;
  22
  23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
  24{
  25	return container_of(dl_se, struct task_struct, dl);
  26}
  27
  28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
  29{
  30	return container_of(dl_rq, struct rq, dl);
  31}
  32
  33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
  34{
  35	struct task_struct *p = dl_task_of(dl_se);
  36	struct rq *rq = task_rq(p);
  37
  38	return &rq->dl;
  39}
  40
  41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
  42{
  43	return !RB_EMPTY_NODE(&dl_se->rb_node);
  44}
  45
  46static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
  47{
  48	struct sched_dl_entity *dl_se = &p->dl;
  49
  50	return dl_rq->rb_leftmost == &dl_se->rb_node;
  51}
  52
  53void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
  54{
  55	raw_spin_lock_init(&dl_b->dl_runtime_lock);
  56	dl_b->dl_period = period;
  57	dl_b->dl_runtime = runtime;
  58}
  59
  60void init_dl_bw(struct dl_bw *dl_b)
  61{
  62	raw_spin_lock_init(&dl_b->lock);
  63	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
  64	if (global_rt_runtime() == RUNTIME_INF)
  65		dl_b->bw = -1;
  66	else
  67		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
  68	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
  69	dl_b->total_bw = 0;
  70}
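/*
 * Editor's note: a worked instance of the value computed above, assuming
 * to_ratio() scales runtime/period by 2^20 as in this kernel's
 * kernel/sched/core.c. With the default global limits
 * sched_rt_runtime_us = 950000 and sched_rt_period_us = 1000000:
 *
 *   dl_b->bw = (950000 << 20) / 1000000 = 996147, i.e. ~0.95 * 2^20
 *
 * so -deadline tasks can be admitted until their summed utilization
 * reaches roughly 95%; total_bw starts at 0 and grows at admission time.
 */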
  71
  72void init_dl_rq(struct dl_rq *dl_rq)
  73{
  74	dl_rq->rb_root = RB_ROOT;
  75
  76#ifdef CONFIG_SMP
  77	/* zero means no -deadline tasks */
  78	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
  79
  80	dl_rq->dl_nr_migratory = 0;
  81	dl_rq->overloaded = 0;
  82	dl_rq->pushable_dl_tasks_root = RB_ROOT;
  83#else
  84	init_dl_bw(&dl_rq->dl_bw);
  85#endif
  86}
  87
  88#ifdef CONFIG_SMP
  89
  90static inline int dl_overloaded(struct rq *rq)
  91{
  92	return atomic_read(&rq->rd->dlo_count);
  93}
  94
  95static inline void dl_set_overload(struct rq *rq)
  96{
  97	if (!rq->online)
  98		return;
  99
 100	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
 101	/*
 102	 * Must be visible before the overload count is
 103	 * set (as in sched_rt.c).
 104	 *
 105	 * Matched by the barrier in pull_dl_task().
 106	 */
 107	smp_wmb();
 108	atomic_inc(&rq->rd->dlo_count);
 109}
 110
 111static inline void dl_clear_overload(struct rq *rq)
 112{
 113	if (!rq->online)
 114		return;
 115
 116	atomic_dec(&rq->rd->dlo_count);
 117	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
 118}
 119
 120static void update_dl_migration(struct dl_rq *dl_rq)
 121{
 122	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 123		if (!dl_rq->overloaded) {
 124			dl_set_overload(rq_of_dl_rq(dl_rq));
 125			dl_rq->overloaded = 1;
 126		}
 127	} else if (dl_rq->overloaded) {
 128		dl_clear_overload(rq_of_dl_rq(dl_rq));
 129		dl_rq->overloaded = 0;
 130	}
 131}
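/*
 * Editor's note: a quick illustration of the condition above (not part of
 * the original file). With three queued -deadline tasks, two of which are
 * allowed on more than one CPU, dl_nr_migratory == 2 and dl_nr_running == 3,
 * so the rq is flagged overloaded and advertised in rd->dlo_mask; once it
 * is back to a single task, or none of them can migrate, the flag is
 * cleared again.
 */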
 132
 133static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 134{
 135	struct task_struct *p = dl_task_of(dl_se);
 136
 137	if (tsk_nr_cpus_allowed(p) > 1)
 138		dl_rq->dl_nr_migratory++;
 139
 140	update_dl_migration(dl_rq);
 141}
 142
 143static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 144{
 145	struct task_struct *p = dl_task_of(dl_se);
 146
 147	if (tsk_nr_cpus_allowed(p) > 1)
 148		dl_rq->dl_nr_migratory--;
 149
 150	update_dl_migration(dl_rq);
 151}
 152
 153/*
  154 * The list of pushable -deadline tasks is not a plist, as it is in
  155 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
 156 */
 157static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 158{
 159	struct dl_rq *dl_rq = &rq->dl;
 160	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
 161	struct rb_node *parent = NULL;
 162	struct task_struct *entry;
 163	int leftmost = 1;
 164
 165	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
 166
 167	while (*link) {
 168		parent = *link;
 169		entry = rb_entry(parent, struct task_struct,
 170				 pushable_dl_tasks);
 171		if (dl_entity_preempt(&p->dl, &entry->dl))
 172			link = &parent->rb_left;
 173		else {
 174			link = &parent->rb_right;
 175			leftmost = 0;
 176		}
 177	}
 178
 179	if (leftmost) {
 180		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
 181		dl_rq->earliest_dl.next = p->dl.deadline;
 182	}
 183
 184	rb_link_node(&p->pushable_dl_tasks, parent, link);
 185	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 186}
 187
 188static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 189{
 190	struct dl_rq *dl_rq = &rq->dl;
 191
 192	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
 193		return;
 194
 195	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
 196		struct rb_node *next_node;
 197
 198		next_node = rb_next(&p->pushable_dl_tasks);
 199		dl_rq->pushable_dl_tasks_leftmost = next_node;
 200		if (next_node) {
 201			dl_rq->earliest_dl.next = rb_entry(next_node,
 202				struct task_struct, pushable_dl_tasks)->dl.deadline;
 203		}
 204	}
 205
 206	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 207	RB_CLEAR_NODE(&p->pushable_dl_tasks);
 208}
 209
 210static inline int has_pushable_dl_tasks(struct rq *rq)
 211{
 212	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
 213}
 214
 215static int push_dl_task(struct rq *rq);
 216
 217static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 218{
 219	return dl_task(prev);
 220}
 221
 222static DEFINE_PER_CPU(struct callback_head, dl_push_head);
 223static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
 224
 225static void push_dl_tasks(struct rq *);
 226static void pull_dl_task(struct rq *);
 227
 228static inline void queue_push_tasks(struct rq *rq)
 229{
 230	if (!has_pushable_dl_tasks(rq))
 231		return;
 232
 233	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
 234}
 235
 236static inline void queue_pull_task(struct rq *rq)
 237{
 238	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
 239}
 240
 241static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 242
 243static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 244{
 245	struct rq *later_rq = NULL;
 246
 247	later_rq = find_lock_later_rq(p, rq);
 248	if (!later_rq) {
 249		int cpu;
 250
 251		/*
 252		 * If we cannot preempt any rq, fall back to pick any
 253		 * online cpu.
 254		 */
 255		cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
 256		if (cpu >= nr_cpu_ids) {
 257			/*
 258			 * Fail to find any suitable cpu.
 259			 * The task will never come back!
 260			 */
 261			BUG_ON(dl_bandwidth_enabled());
 262
 263			/*
 264			 * If admission control is disabled we
 265			 * try a little harder to let the task
 266			 * run.
 267			 */
 268			cpu = cpumask_any(cpu_active_mask);
 269		}
 270		later_rq = cpu_rq(cpu);
 271		double_lock_balance(rq, later_rq);
 272	}
 273
 274	set_task_cpu(p, later_rq->cpu);
 275	double_unlock_balance(later_rq, rq);
 276
 277	return later_rq;
 278}
 279
 280#else
 281
 282static inline
 283void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 284{
 285}
 286
 287static inline
 288void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 289{
 290}
 291
 292static inline
 293void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 294{
 295}
 296
 297static inline
 298void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 299{
 300}
 301
 302static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 303{
 304	return false;
 305}
 306
 307static inline void pull_dl_task(struct rq *rq)
 308{
 309}
 310
 311static inline void queue_push_tasks(struct rq *rq)
 312{
 313}
 314
 315static inline void queue_pull_task(struct rq *rq)
 316{
 317}
 318#endif /* CONFIG_SMP */
 319
 320static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 321static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 322static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
 323				  int flags);
 324
 325/*
 326 * We are being explicitly informed that a new instance is starting,
 327 * and this means that:
 328 *  - the absolute deadline of the entity has to be placed at
 329 *    current time + relative deadline;
 330 *  - the runtime of the entity has to be set to the maximum value.
 331 *
  332 * The capability of specifying such an event is useful whenever a -deadline
 333 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 334 * one, and to (try to!) reconcile itself with its own scheduling
 335 * parameters.
 336 */
 337static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
 338{
 339	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 340	struct rq *rq = rq_of_dl_rq(dl_rq);
 341
 342	WARN_ON(dl_se->dl_boosted);
 343	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
 344
 345	/*
 346	 * We are racing with the deadline timer. So, do nothing because
 347	 * the deadline timer handler will take care of properly recharging
 348	 * the runtime and postponing the deadline
 349	 */
 350	if (dl_se->dl_throttled)
 351		return;
 352
 353	/*
 354	 * We use the regular wall clock time to set deadlines in the
 355	 * future; in fact, we must consider execution overheads (time
 356	 * spent on hardirq context, etc.).
 357	 */
 358	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
 359	dl_se->runtime = dl_se->dl_runtime;
 360}
 361
 362/*
 363 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
  364 * possibility of an entity lasting more than what it declared, and thus
 365 * exhausting its runtime.
 366 *
 367 * Here we are interested in making runtime overrun possible, but we do
  368 * not want an entity that is misbehaving to affect the scheduling of all
 369 * other entities.
 370 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 371 * is used, in order to confine each entity within its own bandwidth.
 372 *
 373 * This function deals exactly with that, and ensures that when the runtime
  374 * of an entity is replenished, its deadline is also postponed. That ensures
  375 * the overrunning entity can't interfere with other entities in the system and
  376 * can't make them miss their deadlines. Typical reasons why this kind of overrun
  377 * could happen are an entity voluntarily trying to exceed its
  378 * runtime, or having underestimated it during sched_setattr().
 379 */
 380static void replenish_dl_entity(struct sched_dl_entity *dl_se,
 381				struct sched_dl_entity *pi_se)
 382{
 383	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 384	struct rq *rq = rq_of_dl_rq(dl_rq);
 385
 386	BUG_ON(pi_se->dl_runtime <= 0);
 387
 388	/*
 389	 * This could be the case for a !-dl task that is boosted.
 390	 * Just go with full inherited parameters.
 391	 */
 392	if (dl_se->dl_deadline == 0) {
 393		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 394		dl_se->runtime = pi_se->dl_runtime;
 395	}
 396
 397	if (dl_se->dl_yielded && dl_se->runtime > 0)
 398		dl_se->runtime = 0;
 399
 400	/*
 401	 * We keep moving the deadline away until we get some
 402	 * available runtime for the entity. This ensures correct
 403	 * handling of situations where the runtime overrun is
  404	 * arbitrarily large.
 405	 */
 406	while (dl_se->runtime <= 0) {
 407		dl_se->deadline += pi_se->dl_period;
 408		dl_se->runtime += pi_se->dl_runtime;
 409	}
 410
 411	/*
 412	 * At this point, the deadline really should be "in
 413	 * the future" with respect to rq->clock. If it's
 414	 * not, we are, for some reason, lagging too much!
  415	 * Anyway, after having warned userspace about that,
  416	 * we still try to keep things running by
 417	 * resetting the deadline and the budget of the
 418	 * entity.
 419	 */
 420	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
 421		printk_deferred_once("sched: DL replenish lagged too much\n");
 422		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 423		dl_se->runtime = pi_se->dl_runtime;
 424	}
 425
 426	if (dl_se->dl_yielded)
 427		dl_se->dl_yielded = 0;
 428	if (dl_se->dl_throttled)
 429		dl_se->dl_throttled = 0;
 430}
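/*
 * Editor's note: a worked run of the replenishment loop above, with
 * illustrative numbers only. Take dl_runtime = 5ms, dl_period = 10ms and an
 * entity that overran so badly that runtime == -12ms on entry:
 *
 *   iteration 1: deadline += 10ms, runtime = -12ms + 5ms = -7ms
 *   iteration 2: deadline += 10ms, runtime =  -7ms + 5ms = -2ms
 *   iteration 3: deadline += 10ms, runtime =  -2ms + 5ms = +3ms  -> done
 *
 * The entity resumes with 3ms of budget but its deadline has moved 30ms
 * further out, so its long-term bandwidth still cannot exceed 5ms/10ms no
 * matter how large the overrun was.
 */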
 431
 432/*
 433 * Here we check if --at time t-- an entity (which is probably being
 434 * [re]activated or, in general, enqueued) can use its remaining runtime
 435 * and its current deadline _without_ exceeding the bandwidth it is
 436 * assigned (function returns true if it can't). We are in fact applying
 437 * one of the CBS rules: when a task wakes up, if the residual runtime
 438 * over residual deadline fits within the allocated bandwidth, then we
 439 * can keep the current (absolute) deadline and residual budget without
 440 * disrupting the schedulability of the system. Otherwise, we should
 441 * refill the runtime and set the deadline a period in the future,
 442 * because keeping the current (absolute) deadline of the task would
 443 * result in breaking guarantees promised to other tasks (refer to
  444 * Documentation/scheduler/sched-deadline.txt for more information).
 445 *
 446 * This function returns true if:
 447 *
 448 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 449 *
 450 * IOW we can't recycle current parameters.
 451 *
 452 * Notice that the bandwidth check is done against the period. For
  453 * tasks with deadline equal to period this is the same as using
 454 * dl_deadline instead of dl_period in the equation above.
 455 */
 456static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 457			       struct sched_dl_entity *pi_se, u64 t)
 458{
 459	u64 left, right;
 460
 461	/*
 462	 * left and right are the two sides of the equation above,
 463	 * after a bit of shuffling to use multiplications instead
 464	 * of divisions.
 465	 *
 466	 * Note that none of the time values involved in the two
 467	 * multiplications are absolute: dl_deadline and dl_runtime
 468	 * are the relative deadline and the maximum runtime of each
 469	 * instance, runtime is the runtime left for the last instance
 470	 * and (deadline - t), since t is rq->clock, is the time left
 471	 * to the (absolute) deadline. Even if overflowing the u64 type
 472	 * is very unlikely to occur in both cases, here we scale down
 473	 * as we want to avoid that risk at all. Scaling down by 10
 474	 * means that we reduce granularity to 1us. We are fine with it,
 475	 * since this is only a true/false check and, anyway, thinking
  476	 * of anything below microsecond resolution is actually fiction
 477	 * (but still we want to give the user that illusion >;).
 478	 */
 479	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
 480	right = ((dl_se->deadline - t) >> DL_SCALE) *
 481		(pi_se->dl_runtime >> DL_SCALE);
 482
 483	return dl_time_before(right, left);
 484}
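/*
 * Editor's note: a worked instance of the check above (illustrative
 * numbers; the >> DL_SCALE scaling is ignored for readability). A task
 * with a 5ms/10ms reservation wakes up with 2ms of runtime left and 3ms
 * to its old absolute deadline:
 *
 *   runtime / (deadline - t) = 2/3  >  dl_runtime / dl_period = 1/2
 *
 * or, cross-multiplied as the code does it:
 *
 *   left  = dl_period * runtime         = 10 * 2 = 20
 *   right = (deadline - t) * dl_runtime =  3 * 5 = 15
 *
 * right < left, so this returns true and update_dl_entity() hands out a
 * fresh deadline and a full runtime instead of recycling the old pair.
 */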
 485
 486/*
 487 * When a -deadline entity is queued back on the runqueue, its runtime and
 488 * deadline might need updating.
 489 *
 490 * The policy here is that we update the deadline of the entity only if:
 491 *  - the current deadline is in the past,
 492 *  - using the remaining runtime with the current deadline would make
 493 *    the entity exceed its bandwidth.
 494 */
 495static void update_dl_entity(struct sched_dl_entity *dl_se,
 496			     struct sched_dl_entity *pi_se)
 497{
 498	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 499	struct rq *rq = rq_of_dl_rq(dl_rq);
 500
 501	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 502	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
 503		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 504		dl_se->runtime = pi_se->dl_runtime;
 505	}
 506}
 507
 508/*
 509 * If the entity depleted all its runtime, and if we want it to sleep
 510 * while waiting for some new execution time to become available, we
 511 * set the bandwidth enforcement timer to the replenishment instant
 512 * and try to activate it.
 513 *
 514 * Notice that it is important for the caller to know if the timer
 515 * actually started or not (i.e., the replenishment instant is in
 516 * the future or in the past).
 517 */
 518static int start_dl_timer(struct task_struct *p)
 519{
 520	struct sched_dl_entity *dl_se = &p->dl;
 521	struct hrtimer *timer = &dl_se->dl_timer;
 522	struct rq *rq = task_rq(p);
 523	ktime_t now, act;
 524	s64 delta;
 525
 526	lockdep_assert_held(&rq->lock);
 527
 528	/*
 529	 * We want the timer to fire at the deadline, but considering
 530	 * that it is actually coming from rq->clock and not from
 531	 * hrtimer's time base reading.
 532	 */
 533	act = ns_to_ktime(dl_se->deadline);
 534	now = hrtimer_cb_get_time(timer);
 535	delta = ktime_to_ns(now) - rq_clock(rq);
 536	act = ktime_add_ns(act, delta);
 537
 538	/*
 539	 * If the expiry time already passed, e.g., because the value
 540	 * chosen as the deadline is too small, don't even try to
 541	 * start the timer in the past!
 542	 */
 543	if (ktime_us_delta(act, now) < 0)
 544		return 0;
 545
 546	/*
 547	 * !enqueued will guarantee another callback; even if one is already in
 548	 * progress. This ensures a balanced {get,put}_task_struct().
 549	 *
 550	 * The race against __run_timer() clearing the enqueued state is
 551	 * harmless because we're holding task_rq()->lock, therefore the timer
 552	 * expiring after we've done the check will wait on its task_rq_lock()
 553	 * and observe our state.
 554	 */
 555	if (!hrtimer_is_queued(timer)) {
 556		get_task_struct(p);
 557		hrtimer_start(timer, act, HRTIMER_MODE_ABS);
 558	}
 559
 560	return 1;
 561}
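/*
 * Editor's note: illustrative numbers for the clock-base conversion above.
 * If the replenishment instant is dl_se->deadline = 500ms expressed in
 * rq_clock() terms, rq_clock(rq) currently reads 470ms and the hrtimer
 * base reads 471ms, then delta = 471ms - 470ms = 1ms and the timer is
 * armed at act = 501ms on the hrtimer base, i.e. still exactly 30ms in
 * the future, as intended.
 */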
 562
 563/*
 564 * This is the bandwidth enforcement timer callback. If here, we know
 565 * a task is not on its dl_rq, since the fact that the timer was running
 566 * means the task is throttled and needs a runtime replenishment.
 567 *
 568 * However, what we actually do depends on the fact the task is active,
 569 * (it is on its rq) or has been removed from there by a call to
 570 * dequeue_task_dl(). In the former case we must issue the runtime
 571 * replenishment and add the task back to the dl_rq; in the latter, we just
 572 * do nothing but clearing dl_throttled, so that runtime and deadline
 573 * updating (and the queueing back to dl_rq) will be done by the
 574 * next call to enqueue_task_dl().
 575 */
 576static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 577{
 578	struct sched_dl_entity *dl_se = container_of(timer,
 579						     struct sched_dl_entity,
 580						     dl_timer);
 581	struct task_struct *p = dl_task_of(dl_se);
 582	struct rq_flags rf;
 583	struct rq *rq;
 584
 585	rq = task_rq_lock(p, &rf);
 586
 587	/*
 588	 * The task might have changed its scheduling policy to something
 589	 * different than SCHED_DEADLINE (through switched_from_dl()).
 590	 */
 591	if (!dl_task(p)) {
 592		__dl_clear_params(p);
 593		goto unlock;
 594	}
 595
 596	/*
 597	 * The task might have been boosted by someone else and might be in the
  598	 * boosting/deboosting path, it's not throttled.
 599	 */
 600	if (dl_se->dl_boosted)
 601		goto unlock;
 602
 603	/*
 604	 * Spurious timer due to start_dl_timer() race; or we already received
 605	 * a replenishment from rt_mutex_setprio().
 606	 */
 607	if (!dl_se->dl_throttled)
 608		goto unlock;
 609
 610	sched_clock_tick();
 611	update_rq_clock(rq);
 612
 613	/*
 614	 * If the throttle happened during sched-out; like:
 615	 *
 616	 *   schedule()
 617	 *     deactivate_task()
 618	 *       dequeue_task_dl()
 619	 *         update_curr_dl()
 620	 *           start_dl_timer()
 621	 *         __dequeue_task_dl()
 622	 *     prev->on_rq = 0;
 623	 *
 624	 * We can be both throttled and !queued. Replenish the counter
 625	 * but do not enqueue -- wait for our wakeup to do that.
 626	 */
 627	if (!task_on_rq_queued(p)) {
 628		replenish_dl_entity(dl_se, dl_se);
 629		goto unlock;
 630	}
 631
 632#ifdef CONFIG_SMP
 633	if (unlikely(!rq->online)) {
 634		/*
 635		 * If the runqueue is no longer available, migrate the
 636		 * task elsewhere. This necessarily changes rq.
 637		 */
 638		lockdep_unpin_lock(&rq->lock, rf.cookie);
 639		rq = dl_task_offline_migration(rq, p);
 640		rf.cookie = lockdep_pin_lock(&rq->lock);
 641
 642		/*
 643		 * Now that the task has been migrated to the new RQ and we
 644		 * have that locked, proceed as normal and enqueue the task
 645		 * there.
 646		 */
 647	}
 648#endif
 649
 650	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
 651	if (dl_task(rq->curr))
 652		check_preempt_curr_dl(rq, p, 0);
 653	else
 654		resched_curr(rq);
 655
 656#ifdef CONFIG_SMP
 657	/*
 658	 * Queueing this task back might have overloaded rq, check if we need
 659	 * to kick someone away.
 660	 */
 661	if (has_pushable_dl_tasks(rq)) {
 662		/*
  663		 * Nothing relies on rq->lock after this, so it's safe to drop
 664		 * rq->lock.
 665		 */
 666		lockdep_unpin_lock(&rq->lock, rf.cookie);
 667		push_dl_task(rq);
 668		lockdep_repin_lock(&rq->lock, rf.cookie);
 669	}
 670#endif
 671
 672unlock:
 673	task_rq_unlock(rq, p, &rf);
 674
 675	/*
 676	 * This can free the task_struct, including this hrtimer, do not touch
 677	 * anything related to that after this.
 678	 */
 679	put_task_struct(p);
 680
 681	return HRTIMER_NORESTART;
 682}
 683
 684void init_dl_task_timer(struct sched_dl_entity *dl_se)
 685{
 686	struct hrtimer *timer = &dl_se->dl_timer;
 687
 688	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 689	timer->function = dl_task_timer;
 690}
 691
 692static
 693int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
 694{
 695	return (dl_se->runtime <= 0);
 696}
 697
 698extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
 699
 700/*
 701 * Update the current task's runtime statistics (provided it is still
 702 * a -deadline task and has not been removed from the dl_rq).
 703 */
 704static void update_curr_dl(struct rq *rq)
 705{
 706	struct task_struct *curr = rq->curr;
 707	struct sched_dl_entity *dl_se = &curr->dl;
 708	u64 delta_exec;
 709
 710	if (!dl_task(curr) || !on_dl_rq(dl_se))
 711		return;
 712
 713	/*
 714	 * Consumed budget is computed considering the time as
 715	 * observed by schedulable tasks (excluding time spent
 716	 * in hardirq context, etc.). Deadlines are instead
 717	 * computed using hard walltime. This seems to be the more
 718	 * natural solution, but the full ramifications of this
 719	 * approach need further study.
 720	 */
 721	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
 722	if (unlikely((s64)delta_exec <= 0)) {
 723		if (unlikely(dl_se->dl_yielded))
 724			goto throttle;
 725		return;
 726	}
 727
 728	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
 729	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_DL);
 730
 731	schedstat_set(curr->se.statistics.exec_max,
 732		      max(curr->se.statistics.exec_max, delta_exec));
 733
 734	curr->se.sum_exec_runtime += delta_exec;
 735	account_group_exec_runtime(curr, delta_exec);
 736
 737	curr->se.exec_start = rq_clock_task(rq);
 738	cpuacct_charge(curr, delta_exec);
 739
 740	sched_rt_avg_update(rq, delta_exec);
 741
 742	dl_se->runtime -= delta_exec;
 743
 744throttle:
 745	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
 746		dl_se->dl_throttled = 1;
 747		__dequeue_task_dl(rq, curr, 0);
 748		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
 749			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
 750
 751		if (!is_leftmost(curr, &rq->dl))
 752			resched_curr(rq);
 753	}
 754
 755	/*
 756	 * Because -- for now -- we share the rt bandwidth, we need to
 757	 * account our runtime there too, otherwise actual rt tasks
 758	 * would be able to exceed the shared quota.
 759	 *
 760	 * Account to the root rt group for now.
 761	 *
 762	 * The solution we're working towards is having the RT groups scheduled
 763	 * using deadline servers -- however there's a few nasties to figure
 764	 * out before that can happen.
 765	 */
 766	if (rt_bandwidth_enabled()) {
 767		struct rt_rq *rt_rq = &rq->rt;
 768
 769		raw_spin_lock(&rt_rq->rt_runtime_lock);
 770		/*
 771		 * We'll let actual RT tasks worry about the overflow here, we
 772		 * have our own CBS to keep us inline; only account when RT
 773		 * bandwidth is relevant.
 774		 */
 775		if (sched_rt_bandwidth_account(rt_rq))
 776			rt_rq->rt_time += delta_exec;
 777		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 778	}
 779}
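/*
 * Editor's note: a sketch of how the pieces above fit together over one
 * throttling cycle, in the common non-boosted case:
 *
 *   update_curr_dl():  runtime -= delta_exec and drops to <= 0
 *     -> dl_throttled = 1, the task is dequeued from the dl_rq and
 *        start_dl_timer() is armed at its current absolute deadline
 *   ... the task stays off the dl_rq until that instant ...
 *   dl_task_timer():   enqueue_task_dl(ENQUEUE_REPLENISH)
 *     -> replenish_dl_entity() refills the runtime and pushes the deadline
 *        one (or more) periods ahead, and the task competes under EDF again
 */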
 780
 781#ifdef CONFIG_SMP
 782
 783static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 784{
 785	struct rq *rq = rq_of_dl_rq(dl_rq);
 786
 787	if (dl_rq->earliest_dl.curr == 0 ||
 788	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
 789		dl_rq->earliest_dl.curr = deadline;
 790		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
 791	}
 792}
 793
 794static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 795{
 796	struct rq *rq = rq_of_dl_rq(dl_rq);
 797
 798	/*
 799	 * Since we may have removed our earliest (and/or next earliest)
 800	 * task we must recompute them.
 801	 */
 802	if (!dl_rq->dl_nr_running) {
 803		dl_rq->earliest_dl.curr = 0;
 804		dl_rq->earliest_dl.next = 0;
 805		cpudl_clear(&rq->rd->cpudl, rq->cpu);
 806	} else {
 807		struct rb_node *leftmost = dl_rq->rb_leftmost;
 808		struct sched_dl_entity *entry;
 809
 810		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
 811		dl_rq->earliest_dl.curr = entry->deadline;
 812		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
 813	}
 814}
 815
 816#else
 817
 818static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
 819static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
 820
 821#endif /* CONFIG_SMP */
 822
 823static inline
 824void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 825{
 826	int prio = dl_task_of(dl_se)->prio;
 827	u64 deadline = dl_se->deadline;
 828
 829	WARN_ON(!dl_prio(prio));
 830	dl_rq->dl_nr_running++;
 831	add_nr_running(rq_of_dl_rq(dl_rq), 1);
 832
 833	inc_dl_deadline(dl_rq, deadline);
 834	inc_dl_migration(dl_se, dl_rq);
 835}
 836
 837static inline
 838void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 839{
 840	int prio = dl_task_of(dl_se)->prio;
 841
 842	WARN_ON(!dl_prio(prio));
 843	WARN_ON(!dl_rq->dl_nr_running);
 844	dl_rq->dl_nr_running--;
 845	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
 846
 847	dec_dl_deadline(dl_rq, dl_se->deadline);
 848	dec_dl_migration(dl_se, dl_rq);
 849}
 850
 851static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
 852{
 853	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 854	struct rb_node **link = &dl_rq->rb_root.rb_node;
 855	struct rb_node *parent = NULL;
 856	struct sched_dl_entity *entry;
 857	int leftmost = 1;
 858
 859	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
 860
 861	while (*link) {
 862		parent = *link;
 863		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
 864		if (dl_time_before(dl_se->deadline, entry->deadline))
 865			link = &parent->rb_left;
 866		else {
 867			link = &parent->rb_right;
 868			leftmost = 0;
 869		}
 870	}
 871
 872	if (leftmost)
 873		dl_rq->rb_leftmost = &dl_se->rb_node;
 874
 875	rb_link_node(&dl_se->rb_node, parent, link);
 876	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
 877
 878	inc_dl_tasks(dl_se, dl_rq);
 879}
 880
 881static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
 882{
 883	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 884
 885	if (RB_EMPTY_NODE(&dl_se->rb_node))
 886		return;
 887
 888	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
 889		struct rb_node *next_node;
 890
 891		next_node = rb_next(&dl_se->rb_node);
 892		dl_rq->rb_leftmost = next_node;
 893	}
 894
 895	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
 896	RB_CLEAR_NODE(&dl_se->rb_node);
 897
 898	dec_dl_tasks(dl_se, dl_rq);
 899}
 900
 901static void
 902enqueue_dl_entity(struct sched_dl_entity *dl_se,
 903		  struct sched_dl_entity *pi_se, int flags)
 904{
 905	BUG_ON(on_dl_rq(dl_se));
 906
 907	/*
 908	 * If this is a wakeup or a new instance, the scheduling
 909	 * parameters of the task might need updating. Otherwise,
 910	 * we want a replenishment of its runtime.
 911	 */
 912	if (flags & ENQUEUE_WAKEUP)
 913		update_dl_entity(dl_se, pi_se);
 914	else if (flags & ENQUEUE_REPLENISH)
 915		replenish_dl_entity(dl_se, pi_se);
 916
 917	__enqueue_dl_entity(dl_se);
 918}
 919
 920static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
 921{
 922	__dequeue_dl_entity(dl_se);
 923}
 924
 925static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 926{
 927	struct task_struct *pi_task = rt_mutex_get_top_task(p);
 928	struct sched_dl_entity *pi_se = &p->dl;
 929
 930	/*
 931	 * Use the scheduling parameters of the top pi-waiter
 932	 * task if we have one and its (absolute) deadline is
 933	 * smaller than our one... OTW we keep our runtime and
 934	 * deadline.
 935	 */
 936	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
 937		pi_se = &pi_task->dl;
 938	} else if (!dl_prio(p->normal_prio)) {
 939		/*
 940		 * Special case in which we have a !SCHED_DEADLINE task
  941		 * that is going to be deboosted, but exceeds its
  942		 * runtime while doing so. No point in replenishing
  943		 * it, as it's going to return to its original
 944		 * scheduling class after this.
 945		 */
 946		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
 947		return;
 948	}
 949
 950	/*
 951	 * If p is throttled, we do nothing. In fact, if it exhausted
 952	 * its budget it needs a replenishment and, since it now is on
 953	 * its rq, the bandwidth timer callback (which clearly has not
 954	 * run yet) will take care of this.
 955	 */
 956	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
 957		return;
 958
 959	enqueue_dl_entity(&p->dl, pi_se, flags);
 960
 961	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
 962		enqueue_pushable_dl_task(rq, p);
 963}
 964
 965static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 966{
 967	dequeue_dl_entity(&p->dl);
 968	dequeue_pushable_dl_task(rq, p);
 969}
 970
 971static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 972{
 973	update_curr_dl(rq);
 974	__dequeue_task_dl(rq, p, flags);
 975}
 976
 977/*
 978 * Yield task semantic for -deadline tasks is:
 979 *
 980 *   get off from the CPU until our next instance, with
 981 *   a new runtime. This is of little use now, since we
 982 *   don't have a bandwidth reclaiming mechanism. Anyway,
 983 *   bandwidth reclaiming is planned for the future, and
 984 *   yield_task_dl will indicate that some spare budget
 985 *   is available for other task instances to use it.
 986 */
 987static void yield_task_dl(struct rq *rq)
 988{
 989	/*
 990	 * We make the task go to sleep until its current deadline by
 991	 * forcing its runtime to zero. This way, update_curr_dl() stops
 992	 * it and the bandwidth timer will wake it up and will give it
 993	 * new scheduling parameters (thanks to dl_yielded=1).
 994	 */
 995	rq->curr->dl.dl_yielded = 1;
 996
 997	update_rq_clock(rq);
 998	update_curr_dl(rq);
 999	/*
1000	 * Tell update_rq_clock() that we've just updated,
1001	 * so we don't do microscopic update in schedule()
1002	 * and double the fastpath cost.
1003	 */
1004	rq_clock_skip_update(rq, true);
1005}
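/*
 * Editor's note: a hedged userspace sketch of the yield semantic described
 * above; do_work() is a placeholder, not a real API. A periodic job that
 * finishes an instance early can call sched_yield() to hand back the rest
 * of this instance's budget: the task is throttled until its current
 * deadline and the bandwidth timer then wakes it with fresh parameters
 * (the dl_yielded path handled in replenish_dl_entity()).
 */
#include <sched.h>

extern void do_work(void);	/* placeholder: one instance's worth of work */

void periodic_job(void)
{
	for (;;) {
		do_work();
		sched_yield();	/* give up the rest of this instance's runtime */
	}
}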
1006
1007#ifdef CONFIG_SMP
1008
1009static int find_later_rq(struct task_struct *task);
1010
1011static int
1012select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1013{
1014	struct task_struct *curr;
1015	struct rq *rq;
1016
1017	if (sd_flag != SD_BALANCE_WAKE)
1018		goto out;
1019
1020	rq = cpu_rq(cpu);
1021
1022	rcu_read_lock();
1023	curr = READ_ONCE(rq->curr); /* unlocked access */
1024
1025	/*
1026	 * If we are dealing with a -deadline task, we must
1027	 * decide where to wake it up.
1028	 * If it has a later deadline and the current task
1029	 * on this rq can't move (provided the waking task
1030	 * can!) we prefer to send it somewhere else. On the
1031	 * other hand, if it has a shorter deadline, we
1032	 * try to make it stay here, it might be important.
1033	 */
1034	if (unlikely(dl_task(curr)) &&
1035	    (tsk_nr_cpus_allowed(curr) < 2 ||
1036	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
1037	    (tsk_nr_cpus_allowed(p) > 1)) {
1038		int target = find_later_rq(p);
1039
1040		if (target != -1 &&
1041				(dl_time_before(p->dl.deadline,
1042					cpu_rq(target)->dl.earliest_dl.curr) ||
1043				(cpu_rq(target)->dl.dl_nr_running == 0)))
1044			cpu = target;
1045	}
1046	rcu_read_unlock();
1047
1048out:
1049	return cpu;
1050}
1051
1052static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1053{
1054	/*
1055	 * Current can't be migrated, useless to reschedule,
1056	 * let's hope p can move out.
1057	 */
1058	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
1059	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1060		return;
1061
1062	/*
1063	 * p is migratable, so let's not schedule it and
1064	 * see if it is pushed or pulled somewhere else.
1065	 */
1066	if (tsk_nr_cpus_allowed(p) != 1 &&
1067	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1068		return;
1069
1070	resched_curr(rq);
1071}
1072
1073#endif /* CONFIG_SMP */
1074
1075/*
1076 * Only called when both the current and waking task are -deadline
1077 * tasks.
1078 */
1079static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1080				  int flags)
1081{
1082	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1083		resched_curr(rq);
1084		return;
1085	}
1086
1087#ifdef CONFIG_SMP
1088	/*
1089	 * In the unlikely case current and p have the same deadline
1090	 * let us try to decide what's the best thing to do...
1091	 */
1092	if ((p->dl.deadline == rq->curr->dl.deadline) &&
1093	    !test_tsk_need_resched(rq->curr))
1094		check_preempt_equal_dl(rq, p);
1095#endif /* CONFIG_SMP */
1096}
1097
1098#ifdef CONFIG_SCHED_HRTICK
1099static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1100{
1101	hrtick_start(rq, p->dl.runtime);
1102}
1103#else /* !CONFIG_SCHED_HRTICK */
1104static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1105{
1106}
1107#endif
1108
1109static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1110						   struct dl_rq *dl_rq)
1111{
1112	struct rb_node *left = dl_rq->rb_leftmost;
1113
1114	if (!left)
1115		return NULL;
1116
1117	return rb_entry(left, struct sched_dl_entity, rb_node);
1118}
1119
1120struct task_struct *
1121pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
1122{
1123	struct sched_dl_entity *dl_se;
1124	struct task_struct *p;
1125	struct dl_rq *dl_rq;
1126
1127	dl_rq = &rq->dl;
1128
1129	if (need_pull_dl_task(rq, prev)) {
1130		/*
1131		 * This is OK, because current is on_cpu, which avoids it being
1132		 * picked for load-balance and preemption/IRQs are still
1133		 * disabled avoiding further scheduler activity on it and we're
1134		 * being very careful to re-start the picking loop.
1135		 */
1136		lockdep_unpin_lock(&rq->lock, cookie);
1137		pull_dl_task(rq);
1138		lockdep_repin_lock(&rq->lock, cookie);
1139		/*
1140		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1141		 * means a stop task can slip in, in which case we need to
1142		 * re-start task selection.
1143		 */
1144		if (rq->stop && task_on_rq_queued(rq->stop))
1145			return RETRY_TASK;
1146	}
1147
1148	/*
1149	 * When prev is DL, we may throttle it in put_prev_task().
1150	 * So, we update time before we check for dl_nr_running.
1151	 */
1152	if (prev->sched_class == &dl_sched_class)
1153		update_curr_dl(rq);
1154
1155	if (unlikely(!dl_rq->dl_nr_running))
1156		return NULL;
1157
1158	put_prev_task(rq, prev);
1159
1160	dl_se = pick_next_dl_entity(rq, dl_rq);
1161	BUG_ON(!dl_se);
1162
1163	p = dl_task_of(dl_se);
1164	p->se.exec_start = rq_clock_task(rq);
1165
1166	/* Running task will never be pushed. */
 1167	dequeue_pushable_dl_task(rq, p);
1168
1169	if (hrtick_enabled(rq))
1170		start_hrtick_dl(rq, p);
1171
1172	queue_push_tasks(rq);
1173
1174	return p;
1175}
1176
1177static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1178{
1179	update_curr_dl(rq);
1180
1181	if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1)
1182		enqueue_pushable_dl_task(rq, p);
1183}
1184
1185static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1186{
1187	update_curr_dl(rq);
1188
1189	/*
1190	 * Even when we have runtime, update_curr_dl() might have resulted in us
1191	 * not being the leftmost task anymore. In that case NEED_RESCHED will
1192	 * be set and schedule() will start a new hrtick for the next task.
1193	 */
1194	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1195	    is_leftmost(p, &rq->dl))
1196		start_hrtick_dl(rq, p);
1197}
1198
1199static void task_fork_dl(struct task_struct *p)
1200{
1201	/*
1202	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1203	 * sched_fork()
1204	 */
1205}
1206
1207static void task_dead_dl(struct task_struct *p)
1208{
1209	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1210
1211	/*
1212	 * Since we are TASK_DEAD we won't slip out of the domain!
1213	 */
1214	raw_spin_lock_irq(&dl_b->lock);
1215	/* XXX we should retain the bw until 0-lag */
1216	dl_b->total_bw -= p->dl.dl_bw;
1217	raw_spin_unlock_irq(&dl_b->lock);
1218}
1219
1220static void set_curr_task_dl(struct rq *rq)
1221{
1222	struct task_struct *p = rq->curr;
1223
1224	p->se.exec_start = rq_clock_task(rq);
1225
1226	/* You can't push away the running task */
1227	dequeue_pushable_dl_task(rq, p);
1228}
1229
1230#ifdef CONFIG_SMP
1231
1232/* Only try algorithms three times */
1233#define DL_MAX_TRIES 3
1234
1235static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1236{
1237	if (!task_running(rq, p) &&
1238	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1239		return 1;
1240	return 0;
1241}
1242
1243/*
1244 * Return the earliest pushable rq's task, which is suitable to be executed
1245 * on the CPU, NULL otherwise:
1246 */
1247static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1248{
1249	struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1250	struct task_struct *p = NULL;
1251
1252	if (!has_pushable_dl_tasks(rq))
1253		return NULL;
1254
1255next_node:
1256	if (next_node) {
1257		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1258
1259		if (pick_dl_task(rq, p, cpu))
1260			return p;
1261
1262		next_node = rb_next(next_node);
1263		goto next_node;
1264	}
1265
1266	return NULL;
1267}
1268
1269static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1270
1271static int find_later_rq(struct task_struct *task)
1272{
1273	struct sched_domain *sd;
1274	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1275	int this_cpu = smp_processor_id();
1276	int best_cpu, cpu = task_cpu(task);
1277
1278	/* Make sure the mask is initialized first */
1279	if (unlikely(!later_mask))
1280		return -1;
1281
1282	if (tsk_nr_cpus_allowed(task) == 1)
1283		return -1;
1284
1285	/*
1286	 * We have to consider system topology and task affinity
1287	 * first, then we can look for a suitable cpu.
1288	 */
1289	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1290			task, later_mask);
1291	if (best_cpu == -1)
1292		return -1;
1293
1294	/*
1295	 * If we are here, some target has been found,
1296	 * the most suitable of which is cached in best_cpu.
1297	 * This is, among the runqueues where the current tasks
1298	 * have later deadlines than the task's one, the rq
1299	 * with the latest possible one.
1300	 *
1301	 * Now we check how well this matches with task's
1302	 * affinity and system topology.
1303	 *
 1304	 * The last cpu where the task ran is our first
1305	 * guess, since it is most likely cache-hot there.
1306	 */
1307	if (cpumask_test_cpu(cpu, later_mask))
1308		return cpu;
1309	/*
1310	 * Check if this_cpu is to be skipped (i.e., it is
1311	 * not in the mask) or not.
1312	 */
1313	if (!cpumask_test_cpu(this_cpu, later_mask))
1314		this_cpu = -1;
1315
1316	rcu_read_lock();
1317	for_each_domain(cpu, sd) {
1318		if (sd->flags & SD_WAKE_AFFINE) {
1319
1320			/*
1321			 * If possible, preempting this_cpu is
1322			 * cheaper than migrating.
1323			 */
1324			if (this_cpu != -1 &&
1325			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1326				rcu_read_unlock();
1327				return this_cpu;
1328			}
1329
1330			/*
1331			 * Last chance: if best_cpu is valid and is
1332			 * in the mask, that becomes our choice.
1333			 */
1334			if (best_cpu < nr_cpu_ids &&
1335			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1336				rcu_read_unlock();
1337				return best_cpu;
1338			}
1339		}
1340	}
1341	rcu_read_unlock();
1342
1343	/*
1344	 * At this point, all our guesses failed, we just return
1345	 * 'something', and let the caller sort the things out.
1346	 */
1347	if (this_cpu != -1)
1348		return this_cpu;
1349
1350	cpu = cpumask_any(later_mask);
1351	if (cpu < nr_cpu_ids)
1352		return cpu;
1353
1354	return -1;
1355}
1356
1357/* Locks the rq it finds */
1358static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1359{
1360	struct rq *later_rq = NULL;
1361	int tries;
1362	int cpu;
1363
1364	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1365		cpu = find_later_rq(task);
1366
1367		if ((cpu == -1) || (cpu == rq->cpu))
1368			break;
1369
1370		later_rq = cpu_rq(cpu);
1371
1372		if (later_rq->dl.dl_nr_running &&
1373		    !dl_time_before(task->dl.deadline,
1374					later_rq->dl.earliest_dl.curr)) {
1375			/*
1376			 * Target rq has tasks of equal or earlier deadline,
1377			 * retrying does not release any lock and is unlikely
1378			 * to yield a different result.
1379			 */
1380			later_rq = NULL;
1381			break;
1382		}
1383
1384		/* Retry if something changed. */
1385		if (double_lock_balance(rq, later_rq)) {
1386			if (unlikely(task_rq(task) != rq ||
1387				     !cpumask_test_cpu(later_rq->cpu,
1388						       tsk_cpus_allowed(task)) ||
1389				     task_running(rq, task) ||
1390				     !dl_task(task) ||
1391				     !task_on_rq_queued(task))) {
1392				double_unlock_balance(rq, later_rq);
1393				later_rq = NULL;
1394				break;
1395			}
1396		}
1397
1398		/*
1399		 * If the rq we found has no -deadline task, or
1400		 * its earliest one has a later deadline than our
1401		 * task, the rq is a good one.
1402		 */
1403		if (!later_rq->dl.dl_nr_running ||
1404		    dl_time_before(task->dl.deadline,
1405				   later_rq->dl.earliest_dl.curr))
1406			break;
1407
1408		/* Otherwise we try again. */
1409		double_unlock_balance(rq, later_rq);
1410		later_rq = NULL;
1411	}
1412
1413	return later_rq;
1414}
1415
1416static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1417{
1418	struct task_struct *p;
1419
1420	if (!has_pushable_dl_tasks(rq))
1421		return NULL;
1422
1423	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1424		     struct task_struct, pushable_dl_tasks);
1425
1426	BUG_ON(rq->cpu != task_cpu(p));
1427	BUG_ON(task_current(rq, p));
1428	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
1429
1430	BUG_ON(!task_on_rq_queued(p));
1431	BUG_ON(!dl_task(p));
1432
1433	return p;
1434}
1435
1436/*
 1437 * See if the non-running -deadline tasks on this rq
1438 * can be sent to some other CPU where they can preempt
1439 * and start executing.
1440 */
1441static int push_dl_task(struct rq *rq)
1442{
1443	struct task_struct *next_task;
1444	struct rq *later_rq;
1445	int ret = 0;
1446
1447	if (!rq->dl.overloaded)
1448		return 0;
1449
1450	next_task = pick_next_pushable_dl_task(rq);
1451	if (!next_task)
1452		return 0;
1453
1454retry:
1455	if (unlikely(next_task == rq->curr)) {
1456		WARN_ON(1);
1457		return 0;
1458	}
1459
1460	/*
1461	 * If next_task preempts rq->curr, and rq->curr
1462	 * can move away, it makes sense to just reschedule
1463	 * without going further in pushing next_task.
1464	 */
1465	if (dl_task(rq->curr) &&
1466	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1467	    tsk_nr_cpus_allowed(rq->curr) > 1) {
1468		resched_curr(rq);
1469		return 0;
1470	}
1471
1472	/* We might release rq lock */
1473	get_task_struct(next_task);
1474
1475	/* Will lock the rq it'll find */
1476	later_rq = find_lock_later_rq(next_task, rq);
1477	if (!later_rq) {
1478		struct task_struct *task;
1479
1480		/*
1481		 * We must check all this again, since
1482		 * find_lock_later_rq releases rq->lock and it is
1483		 * then possible that next_task has migrated.
1484		 */
1485		task = pick_next_pushable_dl_task(rq);
1486		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1487			/*
1488			 * The task is still there. We don't try
1489			 * again, some other cpu will pull it when ready.
1490			 */
1491			goto out;
1492		}
1493
1494		if (!task)
1495			/* No more tasks */
1496			goto out;
1497
1498		put_task_struct(next_task);
1499		next_task = task;
1500		goto retry;
1501	}
1502
1503	deactivate_task(rq, next_task, 0);
1504	set_task_cpu(next_task, later_rq->cpu);
1505	activate_task(later_rq, next_task, 0);
1506	ret = 1;
1507
1508	resched_curr(later_rq);
1509
1510	double_unlock_balance(rq, later_rq);
1511
1512out:
1513	put_task_struct(next_task);
1514
1515	return ret;
1516}
1517
1518static void push_dl_tasks(struct rq *rq)
1519{
1520	/* push_dl_task() will return true if it moved a -deadline task */
1521	while (push_dl_task(rq))
1522		;
1523}
1524
1525static void pull_dl_task(struct rq *this_rq)
1526{
1527	int this_cpu = this_rq->cpu, cpu;
1528	struct task_struct *p;
1529	bool resched = false;
1530	struct rq *src_rq;
1531	u64 dmin = LONG_MAX;
1532
1533	if (likely(!dl_overloaded(this_rq)))
1534		return;
1535
1536	/*
 1537	 * Match the barrier from dl_set_overload(); this guarantees that if we
1538	 * see overloaded we must also see the dlo_mask bit.
1539	 */
1540	smp_rmb();
1541
1542	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1543		if (this_cpu == cpu)
1544			continue;
1545
1546		src_rq = cpu_rq(cpu);
1547
1548		/*
 1549		 * It looks racy, and it is! However, as in sched_rt.c,
1550		 * we are fine with this.
1551		 */
1552		if (this_rq->dl.dl_nr_running &&
1553		    dl_time_before(this_rq->dl.earliest_dl.curr,
1554				   src_rq->dl.earliest_dl.next))
1555			continue;
1556
1557		/* Might drop this_rq->lock */
1558		double_lock_balance(this_rq, src_rq);
1559
1560		/*
1561		 * If there are no more pullable tasks on the
1562		 * rq, we're done with it.
1563		 */
1564		if (src_rq->dl.dl_nr_running <= 1)
1565			goto skip;
1566
1567		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1568
1569		/*
1570		 * We found a task to be pulled if:
1571		 *  - it preempts our current (if there's one),
1572		 *  - it will preempt the last one we pulled (if any).
1573		 */
1574		if (p && dl_time_before(p->dl.deadline, dmin) &&
1575		    (!this_rq->dl.dl_nr_running ||
1576		     dl_time_before(p->dl.deadline,
1577				    this_rq->dl.earliest_dl.curr))) {
1578			WARN_ON(p == src_rq->curr);
1579			WARN_ON(!task_on_rq_queued(p));
1580
1581			/*
1582			 * Then we pull iff p has actually an earlier
1583			 * deadline than the current task of its runqueue.
1584			 */
1585			if (dl_time_before(p->dl.deadline,
1586					   src_rq->curr->dl.deadline))
1587				goto skip;
1588
1589			resched = true;
1590
1591			deactivate_task(src_rq, p, 0);
1592			set_task_cpu(p, this_cpu);
1593			activate_task(this_rq, p, 0);
1594			dmin = p->dl.deadline;
1595
1596			/* Is there any other task even earlier? */
1597		}
1598skip:
1599		double_unlock_balance(this_rq, src_rq);
1600	}
1601
1602	if (resched)
1603		resched_curr(this_rq);
1604}
1605
1606/*
1607 * Since the task is not running and a reschedule is not going to happen
1608 * anytime soon on its runqueue, we try pushing it away now.
1609 */
1610static void task_woken_dl(struct rq *rq, struct task_struct *p)
1611{
1612	if (!task_running(rq, p) &&
1613	    !test_tsk_need_resched(rq->curr) &&
1614	    tsk_nr_cpus_allowed(p) > 1 &&
1615	    dl_task(rq->curr) &&
1616	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
1617	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1618		push_dl_tasks(rq);
1619	}
1620}
1621
1622static void set_cpus_allowed_dl(struct task_struct *p,
1623				const struct cpumask *new_mask)
1624{
1625	struct root_domain *src_rd;
1626	struct rq *rq;
1627
1628	BUG_ON(!dl_task(p));
1629
1630	rq = task_rq(p);
1631	src_rd = rq->rd;
1632	/*
1633	 * Migrating a SCHED_DEADLINE task between exclusive
1634	 * cpusets (different root_domains) entails a bandwidth
1635	 * update. We already made space for us in the destination
1636	 * domain (see cpuset_can_attach()).
1637	 */
1638	if (!cpumask_intersects(src_rd->span, new_mask)) {
1639		struct dl_bw *src_dl_b;
1640
1641		src_dl_b = dl_bw_of(cpu_of(rq));
1642		/*
1643		 * We now free resources of the root_domain we are migrating
 1644		 * off. In the worst case, sched_setattr() may temporarily fail
1645		 * until we complete the update.
1646		 */
1647		raw_spin_lock(&src_dl_b->lock);
1648		__dl_clear(src_dl_b, p->dl.dl_bw);
1649		raw_spin_unlock(&src_dl_b->lock);
1650	}
1651
1652	set_cpus_allowed_common(p, new_mask);
1653}
1654
1655/* Assumes rq->lock is held */
1656static void rq_online_dl(struct rq *rq)
1657{
1658	if (rq->dl.overloaded)
1659		dl_set_overload(rq);
1660
1661	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1662	if (rq->dl.dl_nr_running > 0)
1663		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
1664}
1665
1666/* Assumes rq->lock is held */
1667static void rq_offline_dl(struct rq *rq)
1668{
1669	if (rq->dl.overloaded)
1670		dl_clear_overload(rq);
1671
1672	cpudl_clear(&rq->rd->cpudl, rq->cpu);
1673	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1674}
1675
1676void __init init_sched_dl_class(void)
1677{
1678	unsigned int i;
1679
1680	for_each_possible_cpu(i)
1681		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1682					GFP_KERNEL, cpu_to_node(i));
1683}
1684
1685#endif /* CONFIG_SMP */
1686
1687static void switched_from_dl(struct rq *rq, struct task_struct *p)
1688{
1689	/*
1690	 * Start the deadline timer; if we switch back to dl before this we'll
1691	 * continue consuming our current CBS slice. If we stay outside of
1692	 * SCHED_DEADLINE until the deadline passes, the timer will reset the
1693	 * task.
1694	 */
1695	if (!start_dl_timer(p))
1696		__dl_clear_params(p);
1697
1698	/*
1699	 * Since this might be the only -deadline task on the rq,
1700	 * this is the right place to try to pull some other one
1701	 * from an overloaded cpu, if any.
1702	 */
1703	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1704		return;
1705
1706	queue_pull_task(rq);
1707}
1708
1709/*
1710 * When switching to -deadline, we may overload the rq, then
1711 * we try to push someone off, if possible.
1712 */
1713static void switched_to_dl(struct rq *rq, struct task_struct *p)
1714{
1715
1716	/* If p is not queued we will update its parameters at next wakeup. */
1717	if (!task_on_rq_queued(p))
1718		return;
1719
1720	/*
1721	 * If p is boosted we already updated its params in
1722	 * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH),
1723	 * p's deadline being now already after rq_clock(rq).
1724	 */
1725	if (dl_time_before(p->dl.deadline, rq_clock(rq)))
1726		setup_new_dl_entity(&p->dl);
1727
1728	if (rq->curr != p) {
1729#ifdef CONFIG_SMP
1730		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
1731			queue_push_tasks(rq);
1732#endif
1733		if (dl_task(rq->curr))
1734			check_preempt_curr_dl(rq, p, 0);
1735		else
1736			resched_curr(rq);
1737	}
1738}
1739
1740/*
1741 * If the scheduling parameters of a -deadline task changed,
1742 * a push or pull operation might be needed.
1743 */
1744static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1745			    int oldprio)
1746{
1747	if (task_on_rq_queued(p) || rq->curr == p) {
1748#ifdef CONFIG_SMP
1749		/*
1750		 * This might be too much, but unfortunately
1751		 * we don't have the old deadline value, and
1752		 * we can't argue if the task is increasing
1753		 * or lowering its prio, so...
1754		 */
1755		if (!rq->dl.overloaded)
1756			queue_pull_task(rq);
1757
1758		/*
 1759		 * If we now have an earlier deadline task than p,
1760		 * then reschedule, provided p is still on this
1761		 * runqueue.
1762		 */
1763		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
1764			resched_curr(rq);
1765#else
1766		/*
 1767		 * Again, we don't know if p has an earlier
1768		 * or later deadline, so let's blindly set a
1769		 * (maybe not needed) rescheduling point.
1770		 */
1771		resched_curr(rq);
1772#endif /* CONFIG_SMP */
1773	}
1774}
1775
1776const struct sched_class dl_sched_class = {
1777	.next			= &rt_sched_class,
1778	.enqueue_task		= enqueue_task_dl,
1779	.dequeue_task		= dequeue_task_dl,
1780	.yield_task		= yield_task_dl,
1781
1782	.check_preempt_curr	= check_preempt_curr_dl,
1783
1784	.pick_next_task		= pick_next_task_dl,
1785	.put_prev_task		= put_prev_task_dl,
1786
1787#ifdef CONFIG_SMP
1788	.select_task_rq		= select_task_rq_dl,
1789	.set_cpus_allowed       = set_cpus_allowed_dl,
1790	.rq_online              = rq_online_dl,
1791	.rq_offline             = rq_offline_dl,
1792	.task_woken		= task_woken_dl,
1793#endif
1794
1795	.set_curr_task		= set_curr_task_dl,
1796	.task_tick		= task_tick_dl,
1797	.task_fork              = task_fork_dl,
1798	.task_dead		= task_dead_dl,
1799
1800	.prio_changed           = prio_changed_dl,
1801	.switched_from		= switched_from_dl,
1802	.switched_to		= switched_to_dl,
1803
1804	.update_curr		= update_curr_dl,
1805};
1806
1807#ifdef CONFIG_SCHED_DEBUG
1808extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1809
1810void print_dl_stats(struct seq_file *m, int cpu)
1811{
1812	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1813}
1814#endif /* CONFIG_SCHED_DEBUG */
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Deadline Scheduling Class (SCHED_DEADLINE)
   4 *
   5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
   6 *
   7 * Tasks that periodically execute their instances for less than their
   8 * runtime won't miss any of their deadlines.
   9 * Tasks that are not periodic or sporadic or that try to execute more
  10 * than their reserved bandwidth will be slowed down (and may potentially
  11 * miss some of their deadlines), and won't affect any other task.
  12 *
  13 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
  14 *                    Juri Lelli <juri.lelli@gmail.com>,
  15 *                    Michael Trimarchi <michael@amarulasolutions.com>,
  16 *                    Fabio Checconi <fchecconi@gmail.com>
  17 */
  18#include "sched.h"
  19#include "pelt.h"
  20
  21struct dl_bandwidth def_dl_bandwidth;
  22
  23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
  24{
  25	return container_of(dl_se, struct task_struct, dl);
  26}
  27
  28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
  29{
  30	return container_of(dl_rq, struct rq, dl);
  31}
  32
  33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
  34{
  35	struct task_struct *p = dl_task_of(dl_se);
  36	struct rq *rq = task_rq(p);
  37
  38	return &rq->dl;
  39}
  40
  41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
  42{
  43	return !RB_EMPTY_NODE(&dl_se->rb_node);
  44}
  45
  46#ifdef CONFIG_RT_MUTEXES
  47static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
  48{
  49	return dl_se->pi_se;
  50}
  51
  52static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
  53{
  54	return pi_of(dl_se) != dl_se;
  55}
  56#else
  57static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
  58{
  59	return dl_se;
  60}
  61
  62static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
  63{
  64	return false;
  65}
  66#endif
  67
  68#ifdef CONFIG_SMP
  69static inline struct dl_bw *dl_bw_of(int i)
  70{
  71	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
  72			 "sched RCU must be held");
  73	return &cpu_rq(i)->rd->dl_bw;
  74}
  75
  76static inline int dl_bw_cpus(int i)
  77{
  78	struct root_domain *rd = cpu_rq(i)->rd;
  79	int cpus;
  80
  81	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
  82			 "sched RCU must be held");
  83
  84	if (cpumask_subset(rd->span, cpu_active_mask))
  85		return cpumask_weight(rd->span);
  86
  87	cpus = 0;
  88
  89	for_each_cpu_and(i, rd->span, cpu_active_mask)
  90		cpus++;
  91
  92	return cpus;
  93}
  94
  95static inline unsigned long __dl_bw_capacity(int i)
  96{
  97	struct root_domain *rd = cpu_rq(i)->rd;
  98	unsigned long cap = 0;
  99
 100	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 101			 "sched RCU must be held");
 102
 103	for_each_cpu_and(i, rd->span, cpu_active_mask)
 104		cap += capacity_orig_of(i);
 105
 106	return cap;
 107}
 108
 109/*
 110 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
 111 * of the CPU the task is running on rather than rd's \Sum CPU capacity.
 112 */
 113static inline unsigned long dl_bw_capacity(int i)
 114{
 115	if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
 116	    capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
 117		return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
 118	} else {
 119		return __dl_bw_capacity(i);
 120	}
 121}
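
A small, userspace-only sketch of the arithmetic above may help: on a symmetric system the capacity used for deadline admission control is simply the CPU count shifted by SCHED_CAPACITY_SHIFT, while the asymmetric path sums the per-CPU original capacities. The constants below (a shift of 10, 1024 per big CPU) are illustrative assumptions, not kernel code.

#include <stdio.h>

#define CAP_SHIFT 10			/* assumed SCHED_CAPACITY_SHIFT */

int main(void)
{
	unsigned long cpus = 4;			/* CPUs in the root domain */
	unsigned long sym  = cpus << CAP_SHIFT;	/* symmetric fast path */
	/* asymmetric case: sum of the per-CPU original capacities */
	unsigned long caps[] = { 1024, 1024, 512, 512 };
	unsigned long asym = 0;

	for (int i = 0; i < 4; i++)
		asym += caps[i];

	printf("symmetric: %lu, asymmetric: %lu\n", sym, asym); /* 4096, 3072 */
	return 0;
}
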
 122
 123static inline bool dl_bw_visited(int cpu, u64 gen)
 124{
 125	struct root_domain *rd = cpu_rq(cpu)->rd;
 126
 127	if (rd->visit_gen == gen)
 128		return true;
 129
 130	rd->visit_gen = gen;
 131	return false;
 132}
 133#else
 134static inline struct dl_bw *dl_bw_of(int i)
 135{
 136	return &cpu_rq(i)->dl.dl_bw;
 137}
 138
 139static inline int dl_bw_cpus(int i)
 140{
 141	return 1;
 142}
 143
 144static inline unsigned long dl_bw_capacity(int i)
 145{
 146	return SCHED_CAPACITY_SCALE;
 147}
 148
 149static inline bool dl_bw_visited(int cpu, u64 gen)
 150{
 151	return false;
 152}
 153#endif
 154
 155static inline
 156void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
 157{
 158	u64 old = dl_rq->running_bw;
 159
 160	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
 161	dl_rq->running_bw += dl_bw;
 162	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
 163	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
 164	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
 165	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
 166}
 167
 168static inline
 169void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
 170{
 171	u64 old = dl_rq->running_bw;
 172
 173	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
 174	dl_rq->running_bw -= dl_bw;
 175	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
 176	if (dl_rq->running_bw > old)
 177		dl_rq->running_bw = 0;
 178	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
 179	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
 180}
 181
 182static inline
 183void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
 184{
 185	u64 old = dl_rq->this_bw;
 186
 187	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
 188	dl_rq->this_bw += dl_bw;
 189	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
 190}
 191
 192static inline
 193void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
 194{
 195	u64 old = dl_rq->this_bw;
 196
 197	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
 198	dl_rq->this_bw -= dl_bw;
 199	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
 200	if (dl_rq->this_bw > old)
 201		dl_rq->this_bw = 0;
 202	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
 203}
 204
 205static inline
 206void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 207{
 208	if (!dl_entity_is_special(dl_se))
 209		__add_rq_bw(dl_se->dl_bw, dl_rq);
 210}
 211
 212static inline
 213void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 214{
 215	if (!dl_entity_is_special(dl_se))
 216		__sub_rq_bw(dl_se->dl_bw, dl_rq);
 217}
 218
 219static inline
 220void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 221{
 222	if (!dl_entity_is_special(dl_se))
 223		__add_running_bw(dl_se->dl_bw, dl_rq);
 224}
 225
 226static inline
 227void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 228{
 229	if (!dl_entity_is_special(dl_se))
 230		__sub_running_bw(dl_se->dl_bw, dl_rq);
 231}
 232
 233static void dl_change_utilization(struct task_struct *p, u64 new_bw)
 234{
 235	struct rq *rq;
 236
 237	BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
 238
 239	if (task_on_rq_queued(p))
 240		return;
 241
 242	rq = task_rq(p);
 243	if (p->dl.dl_non_contending) {
 244		sub_running_bw(&p->dl, &rq->dl);
 245		p->dl.dl_non_contending = 0;
 246		/*
 247		 * If the timer handler is currently running and the
 248		 * timer cannot be canceled, inactive_task_timer()
 249		 * will see that dl_non_contending is not set, and
 250		 * will not touch the rq's active utilization,
 251		 * so we are still safe.
 252		 */
 253		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
 254			put_task_struct(p);
 255	}
 256	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
 257	__add_rq_bw(new_bw, &rq->dl);
 258}
 259
 260/*
 261 * The utilization of a task cannot be immediately removed from
 262 * the rq active utilization (running_bw) when the task blocks.
 263 * Instead, we have to wait for the so called "0-lag time".
 264 *
 265 * If a task blocks before the "0-lag time", a timer (the inactive
 266 * timer) is armed, and running_bw is decreased when the timer
 267 * fires.
 268 *
 269 * If the task wakes up again before the inactive timer fires,
 270 * the timer is canceled, whereas if the task wakes up after the
 271 * inactive timer fired (and running_bw has been decreased) the
 272 * task's utilization has to be added to running_bw again.
 273 * A flag in the deadline scheduling entity (dl_non_contending)
 274 * is used to avoid race conditions between the inactive timer handler
 275 * and task wakeups.
 276 *
 277 * The following diagram shows how running_bw is updated. A task is
 278 * "ACTIVE" when its utilization contributes to running_bw; an
 279 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 280 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 281 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 282 * time already passed, which does not contribute to running_bw anymore.
 283 *                              +------------------+
 284 *             wakeup           |    ACTIVE        |
 285 *          +------------------>+   contending     |
 286 *          | add_running_bw    |                  |
 287 *          |                   +----+------+------+
 288 *          |                        |      ^
 289 *          |                dequeue |      |
 290 * +--------+-------+                |      |
 291 * |                |   t >= 0-lag   |      | wakeup
 292 * |    INACTIVE    |<---------------+      |
 293 * |                | sub_running_bw |      |
 294 * +--------+-------+                |      |
 295 *          ^                        |      |
 296 *          |              t < 0-lag |      |
 297 *          |                        |      |
 298 *          |                        V      |
 299 *          |                   +----+------+------+
 300 *          | sub_running_bw    |    ACTIVE        |
 301 *          +-------------------+                  |
 302 *            inactive timer    |  non contending  |
 303 *            fired             +------------------+
 304 *
 305 * The task_non_contending() function is invoked when a task
 306 * blocks, and checks if the 0-lag time already passed or
 307 * not (in the first case, it directly updates running_bw;
 308 * in the second case, it arms the inactive timer).
 309 *
 310 * The task_contending() function is invoked when a task wakes
 311 * up, and checks if the task is still in the "ACTIVE non contending"
 312 * state or not (in the second case, it updates running_bw).
 313 */
 314static void task_non_contending(struct task_struct *p)
 315{
 316	struct sched_dl_entity *dl_se = &p->dl;
 317	struct hrtimer *timer = &dl_se->inactive_timer;
 318	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 319	struct rq *rq = rq_of_dl_rq(dl_rq);
 320	s64 zerolag_time;
 321
 322	/*
 323	 * If this is a non-deadline task that has been boosted,
 324	 * do nothing
 325	 */
 326	if (dl_se->dl_runtime == 0)
 327		return;
 328
 329	if (dl_entity_is_special(dl_se))
 330		return;
 331
 332	WARN_ON(dl_se->dl_non_contending);
 333
 334	zerolag_time = dl_se->deadline -
 335		 div64_long((dl_se->runtime * dl_se->dl_period),
 336			dl_se->dl_runtime);
 337
 338	/*
 339	 * Using relative times instead of the absolute "0-lag time"
 340	 * allows us to simplify the code.
 341	 */
 342	zerolag_time -= rq_clock(rq);
 343
 344	/*
 345	 * If the "0-lag time" already passed, decrease the active
 346	 * utilization now, instead of starting a timer
 347	 */
 348	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
 349		if (dl_task(p))
 350			sub_running_bw(dl_se, dl_rq);
 351		if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
 352			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
 353
 354			if (READ_ONCE(p->__state) == TASK_DEAD)
 355				sub_rq_bw(&p->dl, &rq->dl);
 356			raw_spin_lock(&dl_b->lock);
 357			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 358			__dl_clear_params(p);
 359			raw_spin_unlock(&dl_b->lock);
 360		}
 361
 362		return;
 363	}
 364
 365	dl_se->dl_non_contending = 1;
 366	get_task_struct(p);
 367	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
 368}
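
The 0-lag computation above is just the instant at which the remaining budget, consumed at the reserved rate runtime/period, would be exactly used up. A standalone sketch with made-up numbers (nanosecond units, as in the scheduler):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dl_runtime = 10ULL * 1000 * 1000;	/* 10 ms budget */
	uint64_t dl_period  = 100ULL * 1000 * 1000;	/* 100 ms period */
	uint64_t deadline   = 500ULL * 1000 * 1000;	/* absolute deadline */
	int64_t  runtime    = 4ULL * 1000 * 1000;	/* 4 ms left */
	uint64_t now        = 430ULL * 1000 * 1000;	/* "rq_clock" */

	/* same formula as above: deadline - runtime * period / dl_runtime */
	int64_t zerolag = deadline - (runtime * dl_period) / dl_runtime;
	int64_t rel     = zerolag - now;

	/* 0-lag at 460 ms, 30 ms from now: arm the inactive timer */
	printf("0-lag at %lld ns, %lld ns from now\n",
	       (long long)zerolag, (long long)rel);
	return 0;
}

A negative relative value corresponds to the "already passed" branch above, where running_bw is decreased immediately instead of starting the timer.
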
 369
 370static void task_contending(struct sched_dl_entity *dl_se, int flags)
 371{
 372	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 373
 374	/*
 375	 * If this is a non-deadline task that has been boosted,
 376	 * do nothing
 377	 */
 378	if (dl_se->dl_runtime == 0)
 379		return;
 380
 381	if (flags & ENQUEUE_MIGRATED)
 382		add_rq_bw(dl_se, dl_rq);
 383
 384	if (dl_se->dl_non_contending) {
 385		dl_se->dl_non_contending = 0;
 386		/*
 387		 * If the timer handler is currently running and the
 388		 * timer cannot be canceled, inactive_task_timer()
 389		 * will see that dl_non_contending is not set, and
 390		 * will not touch the rq's active utilization,
 391		 * so we are still safe.
 392		 */
 393		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
 394			put_task_struct(dl_task_of(dl_se));
 395	} else {
 396		/*
 397		 * Since "dl_non_contending" is not set, the
 398		 * task's utilization has already been removed from
 399		 * active utilization (either when the task blocked,
 400		 * when the "inactive timer" fired).
 401		 * So, add it back.
 402		 */
 403		add_running_bw(dl_se, dl_rq);
 404	}
 405}
 406
 407static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 408{
 409	struct sched_dl_entity *dl_se = &p->dl;
 410
 411	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
 412}
 413
 414static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
 415
 416void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
 417{
 418	raw_spin_lock_init(&dl_b->dl_runtime_lock);
 419	dl_b->dl_period = period;
 420	dl_b->dl_runtime = runtime;
 421}
 422
 423void init_dl_bw(struct dl_bw *dl_b)
 424{
 425	raw_spin_lock_init(&dl_b->lock);
 426	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
 427	if (global_rt_runtime() == RUNTIME_INF)
 428		dl_b->bw = -1;
 429	else
 430		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
 431	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
 432	dl_b->total_bw = 0;
 433}
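
dl_b->bw above stores the global -deadline bandwidth as a fixed-point ratio. to_ratio() itself is defined elsewhere in the scheduler; the sketch below only mirrors the usual runtime/period << BW_SHIFT encoding (BW_SHIFT assumed to be 20, consistent with the GRUB comment further down), fed with the default 1 s / 0.95 s rt period and runtime.

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT 20	/* assumed value of the kernel's BW_SHIFT */

static uint64_t ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	uint64_t period  = 1000000ULL * 1000;	/* 1 s, in ns */
	uint64_t runtime =  950000ULL * 1000;	/* 0.95 s, in ns */

	/* ~0.95 expressed in 2^-20 units: 996147 */
	printf("bw = %llu (%.3f)\n",
	       (unsigned long long)ratio(period, runtime),
	       (double)ratio(period, runtime) / (1 << BW_SHIFT));
	return 0;
}
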
 434
 435void init_dl_rq(struct dl_rq *dl_rq)
 436{
 437	dl_rq->root = RB_ROOT_CACHED;
 438
 439#ifdef CONFIG_SMP
 440	/* zero means no -deadline tasks */
 441	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
 442
 443	dl_rq->dl_nr_migratory = 0;
 444	dl_rq->overloaded = 0;
 445	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
 446#else
 447	init_dl_bw(&dl_rq->dl_bw);
 448#endif
 449
 450	dl_rq->running_bw = 0;
 451	dl_rq->this_bw = 0;
 452	init_dl_rq_bw_ratio(dl_rq);
 453}
 454
 455#ifdef CONFIG_SMP
 456
 457static inline int dl_overloaded(struct rq *rq)
 458{
 459	return atomic_read(&rq->rd->dlo_count);
 460}
 461
 462static inline void dl_set_overload(struct rq *rq)
 463{
 464	if (!rq->online)
 465		return;
 466
 467	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
 468	/*
 469	 * Must be visible before the overload count is
 470	 * set (as in sched_rt.c).
 471	 *
 472	 * Matched by the barrier in pull_dl_task().
 473	 */
 474	smp_wmb();
 475	atomic_inc(&rq->rd->dlo_count);
 476}
 477
 478static inline void dl_clear_overload(struct rq *rq)
 479{
 480	if (!rq->online)
 481		return;
 482
 483	atomic_dec(&rq->rd->dlo_count);
 484	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
 485}
 486
 487static void update_dl_migration(struct dl_rq *dl_rq)
 488{
 489	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 490		if (!dl_rq->overloaded) {
 491			dl_set_overload(rq_of_dl_rq(dl_rq));
 492			dl_rq->overloaded = 1;
 493		}
 494	} else if (dl_rq->overloaded) {
 495		dl_clear_overload(rq_of_dl_rq(dl_rq));
 496		dl_rq->overloaded = 0;
 497	}
 498}
 499
 500static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 501{
 502	struct task_struct *p = dl_task_of(dl_se);
 503
 504	if (p->nr_cpus_allowed > 1)
 505		dl_rq->dl_nr_migratory++;
 506
 507	update_dl_migration(dl_rq);
 508}
 509
 510static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 511{
 512	struct task_struct *p = dl_task_of(dl_se);
 513
 514	if (p->nr_cpus_allowed > 1)
 515		dl_rq->dl_nr_migratory--;
 516
 517	update_dl_migration(dl_rq);
 518}
 519
 520#define __node_2_pdl(node) \
 521	rb_entry((node), struct task_struct, pushable_dl_tasks)
 522
 523static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
 524{
 525	return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
 526}
 527
 528/*
 529 * The list of pushable -deadline tasks is not a plist, like in
 530 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
 531 */
 532static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 533{
 534	struct rb_node *leftmost;
 535
 536	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
 537
 538	leftmost = rb_add_cached(&p->pushable_dl_tasks,
 539				 &rq->dl.pushable_dl_tasks_root,
 540				 __pushable_less);
 541	if (leftmost)
 542		rq->dl.earliest_dl.next = p->dl.deadline;
 543}
 544
 545static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 546{
 547	struct dl_rq *dl_rq = &rq->dl;
 548	struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
 549	struct rb_node *leftmost;
 550
 551	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
 552		return;
 553
 554	leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
 555	if (leftmost)
 556		dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
 557
 558	RB_CLEAR_NODE(&p->pushable_dl_tasks);
 559}
 560
 561static inline int has_pushable_dl_tasks(struct rq *rq)
 562{
 563	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
 564}
 565
 566static int push_dl_task(struct rq *rq);
 567
 568static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 569{
 570	return rq->online && dl_task(prev);
 571}
 572
 573static DEFINE_PER_CPU(struct callback_head, dl_push_head);
 574static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
 575
 576static void push_dl_tasks(struct rq *);
 577static void pull_dl_task(struct rq *);
 578
 579static inline void deadline_queue_push_tasks(struct rq *rq)
 580{
 581	if (!has_pushable_dl_tasks(rq))
 582		return;
 583
 584	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
 585}
 586
 587static inline void deadline_queue_pull_task(struct rq *rq)
 588{
 589	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
 590}
 591
 592static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 593
 594static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 595{
 596	struct rq *later_rq = NULL;
 597	struct dl_bw *dl_b;
 598
 599	later_rq = find_lock_later_rq(p, rq);
 600	if (!later_rq) {
 601		int cpu;
 602
 603		/*
 604		 * If we cannot preempt any rq, fall back to pick any
 605		 * online CPU:
 606		 */
 607		cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
 608		if (cpu >= nr_cpu_ids) {
 609			/*
 610			 * Failed to find any suitable CPU.
 611			 * The task will never come back!
 612			 */
 613			BUG_ON(dl_bandwidth_enabled());
 614
 615			/*
 616			 * If admission control is disabled we
 617			 * try a little harder to let the task
 618			 * run.
 619			 */
 620			cpu = cpumask_any(cpu_active_mask);
 621		}
 622		later_rq = cpu_rq(cpu);
 623		double_lock_balance(rq, later_rq);
 624	}
 625
 626	if (p->dl.dl_non_contending || p->dl.dl_throttled) {
 627		/*
 628		 * Inactive timer is armed (or callback is running, but
 629		 * waiting for us to release rq locks). In any case, when it
 630		 * fires (or continues), it will see the running_bw of this
 631		 * task migrated to later_rq (and correctly handle it).
 632		 */
 633		sub_running_bw(&p->dl, &rq->dl);
 634		sub_rq_bw(&p->dl, &rq->dl);
 635
 636		add_rq_bw(&p->dl, &later_rq->dl);
 637		add_running_bw(&p->dl, &later_rq->dl);
 638	} else {
 639		sub_rq_bw(&p->dl, &rq->dl);
 640		add_rq_bw(&p->dl, &later_rq->dl);
 641	}
 642
 643	/*
 644	 * And we finally need to fixup root_domain(s) bandwidth accounting,
 645	 * since p is still hanging out in the old (now moved to default) root
 646	 * domain.
 647	 */
 648	dl_b = &rq->rd->dl_bw;
 649	raw_spin_lock(&dl_b->lock);
 650	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
 651	raw_spin_unlock(&dl_b->lock);
 652
 653	dl_b = &later_rq->rd->dl_bw;
 654	raw_spin_lock(&dl_b->lock);
 655	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
 656	raw_spin_unlock(&dl_b->lock);
 657
 658	set_task_cpu(p, later_rq->cpu);
 659	double_unlock_balance(later_rq, rq);
 660
 661	return later_rq;
 662}
 663
 664#else
 665
 666static inline
 667void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 668{
 669}
 670
 671static inline
 672void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 673{
 674}
 675
 676static inline
 677void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 678{
 679}
 680
 681static inline
 682void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 683{
 684}
 685
 686static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 687{
 688	return false;
 689}
 690
 691static inline void pull_dl_task(struct rq *rq)
 692{
 693}
 694
 695static inline void deadline_queue_push_tasks(struct rq *rq)
 696{
 697}
 698
 699static inline void deadline_queue_pull_task(struct rq *rq)
 700{
 701}
 702#endif /* CONFIG_SMP */
 703
 704static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 705static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 706static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
 707
 708/*
 709 * We are being explicitly informed that a new instance is starting,
 710 * and this means that:
 711 *  - the absolute deadline of the entity has to be placed at
 712 *    current time + relative deadline;
 713 *  - the runtime of the entity has to be set to the maximum value.
 714 *
 715 * The capability of specifying such an event is useful whenever a -deadline
 716 * entity wants to (try to!) synchronize its behaviour with that of the
 717 * scheduler, and to (try to!) reconcile itself with its own scheduling
 718 * parameters.
 719 */
 720static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
 721{
 722	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 723	struct rq *rq = rq_of_dl_rq(dl_rq);
 724
 725	WARN_ON(is_dl_boosted(dl_se));
 726	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
 727
 728	/*
 729	 * We are racing with the deadline timer. So, do nothing because
 730	 * the deadline timer handler will take care of properly recharging
 731	 * the runtime and postponing the deadline
 732	 */
 733	if (dl_se->dl_throttled)
 734		return;
 735
 736	/*
 737	 * We use the regular wall clock time to set deadlines in the
 738	 * future; in fact, we must consider execution overheads (time
 739	 * spent on hardirq context, etc.).
 740	 */
 741	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
 742	dl_se->runtime = dl_se->dl_runtime;
 743}
 744
 745/*
 746 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 747 * possibility of an entity lasting more than what it declared, and thus
 748 * exhausting its runtime.
 749 *
 750 * Here we are interested in making runtime overrun possible, but we do
 751 * not want a misbehaving entity to affect the scheduling of all
 752 * other entities.
 753 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 754 * is used, in order to confine each entity within its own bandwidth.
 755 *
 756 * This function deals exactly with that, and ensures that when the runtime
 757 * of an entity is replenished, its deadline is also postponed. That ensures
 758 * the overrunning entity can't interfere with other entities in the system and
 759 * can't make them miss their deadlines. Reasons why this kind of overrun
 760 * could happen are, typically, an entity voluntarily trying to exceed its
 761 * runtime, or having underestimated it during sched_setattr().
 762 */
 763static void replenish_dl_entity(struct sched_dl_entity *dl_se)
 764{
 765	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 766	struct rq *rq = rq_of_dl_rq(dl_rq);
 767
 768	BUG_ON(pi_of(dl_se)->dl_runtime <= 0);
 769
 770	/*
 771	 * This could be the case for a !-dl task that is boosted.
 772	 * Just go with full inherited parameters.
 773	 */
 774	if (dl_se->dl_deadline == 0) {
 775		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
 776		dl_se->runtime = pi_of(dl_se)->dl_runtime;
 777	}
 778
 779	if (dl_se->dl_yielded && dl_se->runtime > 0)
 780		dl_se->runtime = 0;
 781
 782	/*
 783	 * We keep moving the deadline away until we get some
 784	 * available runtime for the entity. This ensures correct
 785	 * handling of situations where the runtime overrun is
 786	 * arbitrary large.
 787	 */
 788	while (dl_se->runtime <= 0) {
 789		dl_se->deadline += pi_of(dl_se)->dl_period;
 790		dl_se->runtime += pi_of(dl_se)->dl_runtime;
 791	}
 792
 793	/*
 794	 * At this point, the deadline really should be "in
 795	 * the future" with respect to rq->clock. If it's
 796	 * not, we are, for some reason, lagging too much!
 797	 * Anyway, after having warned userspace about that,
 798	 * we still try to keep things running by
 799	 * resetting the deadline and the budget of the
 800	 * entity.
 801	 */
 802	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
 803		printk_deferred_once("sched: DL replenish lagged too much\n");
 804		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
 805		dl_se->runtime = pi_of(dl_se)->dl_runtime;
 806	}
 807
 808	if (dl_se->dl_yielded)
 809		dl_se->dl_yielded = 0;
 810	if (dl_se->dl_throttled)
 811		dl_se->dl_throttled = 0;
 812}
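
The replenishment loop above simply keeps pushing the deadline one period ahead until the budget becomes positive again, however large the overrun was. A standalone sketch with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t  runtime    = -25LL * 1000 * 1000;	/* 25 ms overrun */
	uint64_t deadline   = 300ULL * 1000 * 1000;	/* current absolute deadline */
	uint64_t dl_runtime = 10ULL * 1000 * 1000;	/* 10 ms per period */
	uint64_t dl_period  = 100ULL * 1000 * 1000;	/* 100 ms period */

	/* keep postponing until some budget becomes available again */
	while (runtime <= 0) {
		deadline += dl_period;
		runtime  += dl_runtime;
	}

	/* three periods later: deadline 600 ms, runtime 5 ms */
	printf("new deadline %llu ms, runtime %lld ms\n",
	       (unsigned long long)(deadline / 1000000),
	       (long long)(runtime / 1000000));
	return 0;
}
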
 813
 814/*
 815 * Here we check if --at time t-- an entity (which is probably being
 816 * [re]activated or, in general, enqueued) can use its remaining runtime
 817 * and its current deadline _without_ exceeding the bandwidth it is
 818 * assigned (function returns true if it can't). We are in fact applying
 819 * one of the CBS rules: when a task wakes up, if the residual runtime
 820 * over residual deadline fits within the allocated bandwidth, then we
 821 * can keep the current (absolute) deadline and residual budget without
 822 * disrupting the schedulability of the system. Otherwise, we should
 823 * refill the runtime and set the deadline a period in the future,
 824 * because keeping the current (absolute) deadline of the task would
 825 * result in breaking guarantees promised to other tasks (refer to
 826 * Documentation/scheduler/sched-deadline.rst for more information).
 827 *
 828 * This function returns true if:
 829 *
 830 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 831 *
 832 * IOW we can't recycle current parameters.
 833 *
 834 * Notice that the bandwidth check is done against the deadline. For
 835 * a task with deadline equal to period this is the same as using
 836 * dl_period instead of dl_deadline in the equation above.
 837 */
 838static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
 839{
 840	u64 left, right;
 841
 842	/*
 843	 * left and right are the two sides of the equation above,
 844	 * after a bit of shuffling to use multiplications instead
 845	 * of divisions.
 846	 *
 847	 * Note that none of the time values involved in the two
 848	 * multiplications are absolute: dl_deadline and dl_runtime
 849	 * are the relative deadline and the maximum runtime of each
 850	 * instance, runtime is the runtime left for the last instance
 851	 * and (deadline - t), since t is rq->clock, is the time left
 852	 * to the (absolute) deadline. Even if overflowing the u64 type
 853	 * is very unlikely to occur in both cases, here we scale down
 854	 * as we want to avoid that risk at all. Scaling down by 10
 855	 * means that we reduce granularity to 1us. We are fine with it,
 856	 * since this is only a true/false check and, anyway, thinking
 857	 * of anything below microseconds resolution is actually fiction
 858	 * (but still we want to give the user that illusion >;).
 859	 */
 860	left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
 861	right = ((dl_se->deadline - t) >> DL_SCALE) *
 862		(pi_of(dl_se)->dl_runtime >> DL_SCALE);
 863
 864	return dl_time_before(right, left);
 865}
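
A worked, userspace-only instance of the check above; DL_SCALE is taken as 10, matching the "scaling down by 10" remark in the comment, and all numbers are invented:

#include <stdio.h>
#include <stdint.h>

#define DL_SCALE 10	/* assumed: scale ns values down to ~1us granularity */

int main(void)
{
	uint64_t dl_runtime  = 10ULL * 1000 * 1000;	/* 10 ms */
	uint64_t dl_deadline = 100ULL * 1000 * 1000;	/* 100 ms */
	uint64_t runtime     = 6ULL * 1000 * 1000;	/* 6 ms left */
	uint64_t deadline    = 500ULL * 1000 * 1000;	/* absolute deadline */
	uint64_t t           = 470ULL * 1000 * 1000;	/* now */

	/* runtime / (deadline - t) > dl_runtime / dl_deadline ? */
	uint64_t left  = (dl_deadline >> DL_SCALE) * (runtime >> DL_SCALE);
	uint64_t right = ((deadline - t) >> DL_SCALE) * (dl_runtime >> DL_SCALE);

	/* residual density 6/30 = 0.2 vs reserved 10/100 = 0.1: overflow */
	printf("overflow: %s\n", right < left ? "yes (replenish)" : "no (keep)");
	return 0;
}
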
 866
 867/*
 868 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 869 * re-initializing the task's runtime and deadline, the revised wakeup
 870 * rule adjusts the task's runtime to avoid the task overrunning its
 871 * density.
 872 *
 873 * Reasoning: a task may overrun the density if:
 874 *    runtime / (deadline - t) > dl_runtime / dl_deadline
 875 *
 876 * Therefore, runtime can be adjusted to:
 877 *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
 878 *
 879 * In such a way that runtime will be equal to the maximum density
 880 * the task can use without breaking any rule.
 881 *
 882 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 883 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 884 */
 885static void
 886update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
 887{
 888	u64 laxity = dl_se->deadline - rq_clock(rq);
 889
 890	/*
 891	 * If the task has deadline < period, and the deadline is in the past,
 892	 * it should already be throttled before this check.
 893	 *
 894	 * See update_dl_entity() comments for further details.
 895	 */
 896	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
 897
 898	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
 899}
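
A standalone sketch of the adjustment above; dl_density is assumed to be dl_runtime/dl_deadline pre-scaled by 2^BW_SHIFT (BW_SHIFT assumed 20), and the timestamps are invented:

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT 20	/* assumed value of the kernel's BW_SHIFT */

int main(void)
{
	uint64_t dl_runtime  = 10ULL * 1000 * 1000;	/* 10 ms */
	uint64_t dl_deadline = 50ULL * 1000 * 1000;	/* 50 ms (< period) */
	uint64_t density     = (dl_runtime << BW_SHIFT) / dl_deadline;	/* ~0.2 */

	uint64_t deadline = 500ULL * 1000 * 1000;	/* absolute deadline */
	uint64_t now      = 480ULL * 1000 * 1000;	/* woke up late */
	uint64_t laxity   = deadline - now;		/* 20 ms */

	/* clamp the budget so that runtime/laxity == dl_runtime/dl_deadline */
	uint64_t runtime = (density * laxity) >> BW_SHIFT;

	/* roughly 4000 us instead of a full 10 ms replenishment */
	printf("adjusted runtime: %llu us\n",
	       (unsigned long long)(runtime / 1000));
	return 0;
}
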
 900
 901/*
 902 * Regarding the deadline, a task with implicit deadline has a relative
 903 * deadline == relative period. A task with constrained deadline has a
 904 * relative deadline <= relative period.
 905 *
 906 * We support constrained deadline tasks. However, there are some restrictions
 907 * applied only for tasks which do not have an implicit deadline. See
 908 * update_dl_entity() to know more about such restrictions.
 909 *
 910 * The dl_is_implicit() returns true if the task has an implicit deadline.
 911 */
 912static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
 913{
 914	return dl_se->dl_deadline == dl_se->dl_period;
 915}
 916
 917/*
 918 * When a deadline entity is placed in the runqueue, its runtime and deadline
 919 * might need to be updated. This is done by a CBS wake up rule. There are two
 920 * different rules: 1) the original CBS; and 2) the Revisited CBS.
 921 *
 922 * When the task is starting a new period, the Original CBS is used. In this
 923 * case, the runtime is replenished and a new absolute deadline is set.
 924 *
 925 * When a task is queued before the beginning of the next period, using the
 926 * remaining runtime and deadline could make the entity overflow, see
 927 * dl_entity_overflow() to find more about runtime overflow. When such a case
 928 * is detected, the runtime and deadline need to be updated.
 929 *
 930 * If the task has an implicit deadline, i.e., deadline == period, the Original
 931 * CBS is applied: the runtime is replenished and a new absolute deadline is
 932 * set, as in the previous cases.
 933 *
 934 * However, the Original CBS does not work properly for tasks with
 935 * deadline < period, which are said to have a constrained deadline. By
 936 * applying the Original CBS, a constrained deadline task would be able to run
 937 * runtime/deadline in a period. With deadline < period, the task would
 938 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 939 *
 940 * In order to prevent this misbehaviour, the Revisited CBS is used for
 941 * constrained deadline tasks when a runtime overflow is detected. In the
 942 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
 943 * the remaining runtime of the task is reduced to avoid runtime overflow.
 944 * Please refer to the comments in the update_dl_revised_wakeup() function to
 945 * find more about the Revised CBS rule.
 946 */
 947static void update_dl_entity(struct sched_dl_entity *dl_se)
 948{
 949	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 950	struct rq *rq = rq_of_dl_rq(dl_rq);
 951
 952	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 953	    dl_entity_overflow(dl_se, rq_clock(rq))) {
 954
 955		if (unlikely(!dl_is_implicit(dl_se) &&
 956			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
 957			     !is_dl_boosted(dl_se))) {
 958			update_dl_revised_wakeup(dl_se, rq);
 959			return;
 960		}
 961
 962		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
 963		dl_se->runtime = pi_of(dl_se)->dl_runtime;
 964	}
 965}
 966
 967static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
 968{
 969	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
 970}
 971
 972/*
 973 * If the entity depleted all its runtime, and if we want it to sleep
 974 * while waiting for some new execution time to become available, we
 975 * set the bandwidth replenishment timer to the replenishment instant
 976 * and try to activate it.
 977 *
 978 * Notice that it is important for the caller to know if the timer
 979 * actually started or not (i.e., the replenishment instant is in
 980 * the future or in the past).
 981 */
 982static int start_dl_timer(struct task_struct *p)
 983{
 984	struct sched_dl_entity *dl_se = &p->dl;
 985	struct hrtimer *timer = &dl_se->dl_timer;
 986	struct rq *rq = task_rq(p);
 987	ktime_t now, act;
 988	s64 delta;
 989
 990	lockdep_assert_rq_held(rq);
 991
 992	/*
 993	 * We want the timer to fire at the deadline, but considering
 994	 * that it is actually coming from rq->clock and not from
 995	 * hrtimer's time base reading.
 996	 */
 997	act = ns_to_ktime(dl_next_period(dl_se));
 998	now = hrtimer_cb_get_time(timer);
 999	delta = ktime_to_ns(now) - rq_clock(rq);
1000	act = ktime_add_ns(act, delta);
1001
1002	/*
1003	 * If the expiry time already passed, e.g., because the value
1004	 * chosen as the deadline is too small, don't even try to
1005	 * start the timer in the past!
1006	 */
1007	if (ktime_us_delta(act, now) < 0)
1008		return 0;
1009
1010	/*
1011	 * !enqueued will guarantee another callback; even if one is already in
1012	 * progress. This ensures a balanced {get,put}_task_struct().
1013	 *
1014	 * The race against __run_timer() clearing the enqueued state is
1015	 * harmless because we're holding task_rq()->lock, therefore the timer
1016	 * expiring after we've done the check will wait on its task_rq_lock()
1017	 * and observe our state.
1018	 */
1019	if (!hrtimer_is_queued(timer)) {
1020		get_task_struct(p);
1021		hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
1022	}
1023
1024	return 1;
1025}
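
The few lines above only translate an expiry known on the rq clock into the hrtimer time base. A plain-number sketch of that compensation (all values invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* expiry expressed on the rq clock: start of the next period */
	uint64_t next_period_rq = 300ULL * 1000 * 1000;
	/* the two clocks are not the same base; assume they differ by 2 ms */
	uint64_t rq_clock       = 260ULL * 1000 * 1000;
	uint64_t hrtimer_now    = 262ULL * 1000 * 1000;

	/* shift the rq-clock expiry into the hrtimer time base */
	int64_t  delta = (int64_t)hrtimer_now - (int64_t)rq_clock;
	uint64_t act   = next_period_rq + delta;

	/* arm at 302 ms on the hrtimer base, i.e. 40 ms from now */
	printf("arm the timer at %llu ns (hrtimer base), in %llu ns\n",
	       (unsigned long long)act,
	       (unsigned long long)(act - hrtimer_now));
	return 0;
}
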
1026
1027/*
1028 * This is the bandwidth enforcement timer callback. If here, we know
1029 * a task is not on its dl_rq, since the fact that the timer was running
1030 * means the task is throttled and needs a runtime replenishment.
1031 *
1032 * However, what we actually do depends on whether the task is active
1033 * (it is on its rq) or has been removed from there by a call to
1034 * dequeue_task_dl(). In the former case we must issue the runtime
1035 * replenishment and add the task back to the dl_rq; in the latter, we just
1036 * do nothing but clear dl_throttled, so that runtime and deadline
1037 * updating (and the queueing back to dl_rq) will be done by the
1038 * next call to enqueue_task_dl().
1039 */
1040static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1041{
1042	struct sched_dl_entity *dl_se = container_of(timer,
1043						     struct sched_dl_entity,
1044						     dl_timer);
1045	struct task_struct *p = dl_task_of(dl_se);
1046	struct rq_flags rf;
1047	struct rq *rq;
1048
1049	rq = task_rq_lock(p, &rf);
1050
1051	/*
1052	 * The task might have changed its scheduling policy to something
1053	 * different than SCHED_DEADLINE (through switched_from_dl()).
1054	 */
1055	if (!dl_task(p))
1056		goto unlock;
1057
1058	/*
1059	 * The task might have been boosted by someone else and might be in the
1060	 * boosting/deboosting path, it's not throttled.
1061	 */
1062	if (is_dl_boosted(dl_se))
1063		goto unlock;
1064
1065	/*
1066	 * Spurious timer due to start_dl_timer() race; or we already received
1067	 * a replenishment from rt_mutex_setprio().
1068	 */
1069	if (!dl_se->dl_throttled)
1070		goto unlock;
1071
1072	sched_clock_tick();
1073	update_rq_clock(rq);
1074
1075	/*
1076	 * If the throttle happened during sched-out; like:
1077	 *
1078	 *   schedule()
1079	 *     deactivate_task()
1080	 *       dequeue_task_dl()
1081	 *         update_curr_dl()
1082	 *           start_dl_timer()
1083	 *         __dequeue_task_dl()
1084	 *     prev->on_rq = 0;
1085	 *
1086	 * We can be both throttled and !queued. Replenish the counter
1087	 * but do not enqueue -- wait for our wakeup to do that.
1088	 */
1089	if (!task_on_rq_queued(p)) {
1090		replenish_dl_entity(dl_se);
1091		goto unlock;
1092	}
1093
1094#ifdef CONFIG_SMP
1095	if (unlikely(!rq->online)) {
1096		/*
1097		 * If the runqueue is no longer available, migrate the
1098		 * task elsewhere. This necessarily changes rq.
1099		 */
1100		lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
1101		rq = dl_task_offline_migration(rq, p);
1102		rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
1103		update_rq_clock(rq);
1104
1105		/*
1106		 * Now that the task has been migrated to the new RQ and we
1107		 * have that locked, proceed as normal and enqueue the task
1108		 * there.
1109		 */
1110	}
1111#endif
1112
1113	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1114	if (dl_task(rq->curr))
1115		check_preempt_curr_dl(rq, p, 0);
1116	else
1117		resched_curr(rq);
1118
1119#ifdef CONFIG_SMP
1120	/*
1121	 * Queueing this task back might have overloaded rq, check if we need
1122	 * to kick someone away.
1123	 */
1124	if (has_pushable_dl_tasks(rq)) {
1125		/*
1126		 * Nothing relies on rq->lock after this, so it's safe to drop
1127		 * rq->lock.
1128		 */
1129		rq_unpin_lock(rq, &rf);
1130		push_dl_task(rq);
1131		rq_repin_lock(rq, &rf);
1132	}
1133#endif
1134
1135unlock:
1136	task_rq_unlock(rq, p, &rf);
1137
1138	/*
1139	 * This can free the task_struct, including this hrtimer, do not touch
1140	 * anything related to that after this.
1141	 */
1142	put_task_struct(p);
1143
1144	return HRTIMER_NORESTART;
1145}
1146
1147void init_dl_task_timer(struct sched_dl_entity *dl_se)
1148{
1149	struct hrtimer *timer = &dl_se->dl_timer;
1150
1151	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1152	timer->function = dl_task_timer;
1153}
1154
1155/*
1156 * During the activation, CBS checks if it can reuse the current task's
1157 * runtime and period. If the deadline of the task is in the past, CBS
1158 * cannot use the runtime, and so it replenishes the task. This rule
1159 * works fine for implicit deadline tasks (deadline == period), and the
1160 * CBS was designed for implicit deadline tasks. However, a task with
1161 * constrained deadline (deadline < period) might be awakened after the
1162 * deadline, but before the next period. In this case, replenishing the
1163 * task would allow it to run for runtime / deadline. As in this case
1164 * deadline < period, CBS enables a task to run for more than the
1165 * runtime / period. In a very loaded system, this can cause a domino
1166 * effect, making other tasks miss their deadlines.
1167 *
1168 * To avoid this problem, in the activation of a constrained deadline
1169 * task after the deadline but before the next period, throttle the
1170 * task and set the replenishing timer to the beginning of the next period,
1171 * unless it is boosted.
1172 */
1173static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1174{
1175	struct task_struct *p = dl_task_of(dl_se);
1176	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1177
1178	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1179	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1180		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
1181			return;
1182		dl_se->dl_throttled = 1;
1183		if (dl_se->runtime > 0)
1184			dl_se->runtime = 0;
1185	}
1186}
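
A minimal sketch of the throttling window test above, with invented timestamps; it mirrors dl_next_period() and the two dl_time_before() checks as plain comparisons:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dl_deadline = 40ULL * 1000 * 1000;	/* relative, 40 ms */
	uint64_t dl_period   = 100ULL * 1000 * 1000;	/* 100 ms */
	uint64_t deadline    = 240ULL * 1000 * 1000;	/* current absolute deadline */
	uint64_t now         = 260ULL * 1000 * 1000;	/* wakeup time */

	/* start of the next period, as in dl_next_period(): 300 ms */
	uint64_t next_period = deadline - dl_deadline + dl_period;

	if (deadline < now && now < next_period)
		printf("missed the deadline, period not over: throttle\n");
	else
		printf("outside the window: normal CBS wakeup rules apply\n");
	return 0;
}
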
1187
1188static
1189int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1190{
1191	return (dl_se->runtime <= 0);
1192}
1193
1194extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1195
1196/*
1197 * This function implements the GRUB accounting rule:
1198 * according to the GRUB reclaiming algorithm, the runtime is
1199 * not decreased as "dq = -dt", but as
1200 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1201 * where u is the utilization of the task, Umax is the maximum reclaimable
1202 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1203 * as the difference between the "total runqueue utilization" and the
1204 * runqueue active utilization, and Uextra is the (per runqueue) extra
1205 * reclaimable utilization.
1206 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
1207 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
1208 * BW_SHIFT.
1209 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
1210 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1211 * Since delta is a 64 bit variable, to have an overflow its value
1212 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
1213 * So, overflow is not an issue here.
1214 */
1215static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1216{
1217	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1218	u64 u_act;
1219	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1220
1221	/*
1222	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1223	 * we compare u_inact + rq->dl.extra_bw with
1224	 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1225	 * u_inact + rq->dl.extra_bw can be larger than
1226	 * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative,
1227	 * leading to wrong results).
1228	 */
1229	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1230		u_act = u_act_min;
1231	else
1232		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1233
1234	return (delta * u_act) >> BW_SHIFT;
1235}
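
A worked, userspace-only example of the GRUB charging rule above; BW_SHIFT is taken as 20 and RATIO_SHIFT as 8, consistent with the 2^(64 - 20 - 8) remark, and all utilizations are invented:

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT    20
#define BW_UNIT     (1ULL << BW_SHIFT)
#define RATIO_SHIFT 8	/* assumed value of the kernel's RATIO_SHIFT */

int main(void)
{
	uint64_t this_bw    = (BW_UNIT * 60) / 100;	/* Utot   = 0.6  */
	uint64_t running_bw = (BW_UNIT * 40) / 100;	/* Uact   = 0.4  */
	uint64_t extra_bw   = (BW_UNIT * 5)  / 100;	/* Uextra = 0.05 */
	uint64_t dl_bw      = (BW_UNIT * 10) / 100;	/* u      = 0.1  */
	uint64_t bw_ratio   = (1ULL << RATIO_SHIFT) * 100 / 95; /* 1/Umax, Umax = 0.95 */
	uint64_t delta      = 1000 * 1000;		/* 1 ms of execution */

	uint64_t u_inact    = this_bw - running_bw;		/* 0.2   */
	uint64_t u_act_min  = (dl_bw * bw_ratio) >> RATIO_SHIFT; /* ~0.105 */
	uint64_t u_act;

	if (u_inact + extra_bw > BW_UNIT - u_act_min)
		u_act = u_act_min;
	else
		u_act = BW_UNIT - u_inact - extra_bw;		/* 0.75  */

	/* dq = -u_act * dt: 1 ms of wall time costs ~0.75 ms of budget */
	printf("charged %llu ns for %llu ns of execution\n",
	       (unsigned long long)((delta * u_act) >> BW_SHIFT),
	       (unsigned long long)delta);
	return 0;
}
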
1236
1237/*
1238 * Update the current task's runtime statistics (provided it is still
1239 * a -deadline task and has not been removed from the dl_rq).
1240 */
1241static void update_curr_dl(struct rq *rq)
1242{
1243	struct task_struct *curr = rq->curr;
1244	struct sched_dl_entity *dl_se = &curr->dl;
1245	u64 delta_exec, scaled_delta_exec;
1246	int cpu = cpu_of(rq);
1247	u64 now;
1248
1249	if (!dl_task(curr) || !on_dl_rq(dl_se))
1250		return;
1251
1252	/*
1253	 * Consumed budget is computed considering the time as
1254	 * observed by schedulable tasks (excluding time spent
1255	 * in hardirq context, etc.). Deadlines are instead
1256	 * computed using hard walltime. This seems to be the more
1257	 * natural solution, but the full ramifications of this
1258	 * approach need further study.
1259	 */
1260	now = rq_clock_task(rq);
1261	delta_exec = now - curr->se.exec_start;
1262	if (unlikely((s64)delta_exec <= 0)) {
1263		if (unlikely(dl_se->dl_yielded))
1264			goto throttle;
1265		return;
1266	}
1267
1268	schedstat_set(curr->se.statistics.exec_max,
1269		      max(curr->se.statistics.exec_max, delta_exec));
1270
1271	curr->se.sum_exec_runtime += delta_exec;
1272	account_group_exec_runtime(curr, delta_exec);
1273
1274	curr->se.exec_start = now;
1275	cgroup_account_cputime(curr, delta_exec);
1276
1277	if (dl_entity_is_special(dl_se))
1278		return;
1279
1280	/*
1281	 * For tasks that participate in GRUB, we implement GRUB-PA: the
1282	 * spare reclaimed bandwidth is used to clock down frequency.
1283	 *
1284	 * For the others, we still need to scale reservation parameters
1285	 * according to current frequency and CPU maximum capacity.
1286	 */
1287	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1288		scaled_delta_exec = grub_reclaim(delta_exec,
1289						 rq,
1290						 &curr->dl);
1291	} else {
1292		unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1293		unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1294
1295		scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1296		scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1297	}
1298
1299	dl_se->runtime -= scaled_delta_exec;
1300
1301throttle:
1302	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1303		dl_se->dl_throttled = 1;
1304
1305		/* If requested, inform the user about runtime overruns. */
1306		if (dl_runtime_exceeded(dl_se) &&
1307		    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1308			dl_se->dl_overrun = 1;
1309
1310		__dequeue_task_dl(rq, curr, 0);
1311		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
1312			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1313
1314		if (!is_leftmost(curr, &rq->dl))
1315			resched_curr(rq);
1316	}
1317
1318	/*
1319	 * Because -- for now -- we share the rt bandwidth, we need to
1320	 * account our runtime there too, otherwise actual rt tasks
1321	 * would be able to exceed the shared quota.
1322	 *
1323	 * Account to the root rt group for now.
1324	 *
1325	 * The solution we're working towards is having the RT groups scheduled
1326	 * using deadline servers -- however there's a few nasties to figure
1327	 * out before that can happen.
1328	 */
1329	if (rt_bandwidth_enabled()) {
1330		struct rt_rq *rt_rq = &rq->rt;
1331
1332		raw_spin_lock(&rt_rq->rt_runtime_lock);
1333		/*
1334		 * We'll let actual RT tasks worry about the overflow here, we
1335		 * have our own CBS to keep us in line; only account when RT
1336		 * bandwidth is relevant.
1337		 */
1338		if (sched_rt_bandwidth_account(rt_rq))
1339			rt_rq->rt_time += delta_exec;
1340		raw_spin_unlock(&rt_rq->rt_runtime_lock);
1341	}
1342}
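
For the non-GRUB case above, the consumed time is scaled by the current frequency and by the CPU's capacity before being charged against the budget. A standalone sketch of that scaling (SCHED_CAPACITY_SHIFT assumed to be 10, values invented):

#include <stdio.h>
#include <stdint.h>

#define CAP_SHIFT 10	/* assumed SCHED_CAPACITY_SHIFT */
#define cap_scale(v, s) (((v) * (s)) >> CAP_SHIFT)

int main(void)
{
	uint64_t delta_exec = 1000 * 1000;	/* ran for 1 ms of task clock */
	uint64_t scale_freq = 512;		/* CPU at half of its max frequency */
	uint64_t scale_cpu  = 768;		/* little CPU, 75% of max capacity */

	uint64_t scaled = cap_scale(delta_exec, scale_freq);
	scaled = cap_scale(scaled, scale_cpu);

	/* 1 ms at half frequency on a 0.75-capacity CPU ~ 0.375 ms of budget */
	printf("runtime charged: %llu ns\n", (unsigned long long)scaled);
	return 0;
}
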
1343
1344static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1345{
1346	struct sched_dl_entity *dl_se = container_of(timer,
1347						     struct sched_dl_entity,
1348						     inactive_timer);
1349	struct task_struct *p = dl_task_of(dl_se);
1350	struct rq_flags rf;
1351	struct rq *rq;
1352
1353	rq = task_rq_lock(p, &rf);
1354
1355	sched_clock_tick();
1356	update_rq_clock(rq);
1357
1358	if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
1359		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1360
1361		if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
1362			sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1363			sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1364			dl_se->dl_non_contending = 0;
1365		}
1366
1367		raw_spin_lock(&dl_b->lock);
1368		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1369		raw_spin_unlock(&dl_b->lock);
1370		__dl_clear_params(p);
1371
1372		goto unlock;
1373	}
1374	if (dl_se->dl_non_contending == 0)
1375		goto unlock;
1376
1377	sub_running_bw(dl_se, &rq->dl);
1378	dl_se->dl_non_contending = 0;
1379unlock:
1380	task_rq_unlock(rq, p, &rf);
1381	put_task_struct(p);
1382
1383	return HRTIMER_NORESTART;
1384}
1385
1386void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1387{
1388	struct hrtimer *timer = &dl_se->inactive_timer;
1389
1390	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1391	timer->function = inactive_task_timer;
1392}
1393
1394#ifdef CONFIG_SMP
1395
1396static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1397{
1398	struct rq *rq = rq_of_dl_rq(dl_rq);
1399
1400	if (dl_rq->earliest_dl.curr == 0 ||
1401	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1402		if (dl_rq->earliest_dl.curr == 0)
1403			cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
1404		dl_rq->earliest_dl.curr = deadline;
1405		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1406	}
1407}
1408
1409static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1410{
1411	struct rq *rq = rq_of_dl_rq(dl_rq);
1412
1413	/*
1414	 * Since we may have removed our earliest (and/or next earliest)
1415	 * task we must recompute them.
1416	 */
1417	if (!dl_rq->dl_nr_running) {
1418		dl_rq->earliest_dl.curr = 0;
1419		dl_rq->earliest_dl.next = 0;
1420		cpudl_clear(&rq->rd->cpudl, rq->cpu);
1421		cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1422	} else {
1423		struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1424		struct sched_dl_entity *entry;
1425
1426		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1427		dl_rq->earliest_dl.curr = entry->deadline;
1428		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1429	}
1430}
1431
1432#else
1433
1434static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1435static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1436
1437#endif /* CONFIG_SMP */
1438
1439static inline
1440void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1441{
1442	int prio = dl_task_of(dl_se)->prio;
1443	u64 deadline = dl_se->deadline;
1444
1445	WARN_ON(!dl_prio(prio));
1446	dl_rq->dl_nr_running++;
1447	add_nr_running(rq_of_dl_rq(dl_rq), 1);
1448
1449	inc_dl_deadline(dl_rq, deadline);
1450	inc_dl_migration(dl_se, dl_rq);
1451}
1452
1453static inline
1454void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1455{
1456	int prio = dl_task_of(dl_se)->prio;
1457
1458	WARN_ON(!dl_prio(prio));
1459	WARN_ON(!dl_rq->dl_nr_running);
1460	dl_rq->dl_nr_running--;
1461	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1462
1463	dec_dl_deadline(dl_rq, dl_se->deadline);
1464	dec_dl_migration(dl_se, dl_rq);
1465}
1466
1467#define __node_2_dle(node) \
1468	rb_entry((node), struct sched_dl_entity, rb_node)
1469
1470static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
1471{
1472	return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
1473}
1474
1475static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1476{
1477	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1478
1479	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1480
1481	rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
1482
1483	inc_dl_tasks(dl_se, dl_rq);
1484}
1485
1486static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1487{
1488	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1489
1490	if (RB_EMPTY_NODE(&dl_se->rb_node))
1491		return;
1492
1493	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1494
1495	RB_CLEAR_NODE(&dl_se->rb_node);
1496
1497	dec_dl_tasks(dl_se, dl_rq);
1498}
1499
1500static void
1501enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1502{
1503	BUG_ON(on_dl_rq(dl_se));
1504
1505	/*
1506	 * If this is a wakeup or a new instance, the scheduling
1507	 * parameters of the task might need updating. Otherwise,
1508	 * we want a replenishment of its runtime.
1509	 */
1510	if (flags & ENQUEUE_WAKEUP) {
1511		task_contending(dl_se, flags);
1512		update_dl_entity(dl_se);
1513	} else if (flags & ENQUEUE_REPLENISH) {
1514		replenish_dl_entity(dl_se);
1515	} else if ((flags & ENQUEUE_RESTORE) &&
1516		  dl_time_before(dl_se->deadline,
1517				 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1518		setup_new_dl_entity(dl_se);
1519	}
1520
1521	__enqueue_dl_entity(dl_se);
1522}
1523
1524static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1525{
1526	__dequeue_dl_entity(dl_se);
1527}
1528
1529static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1530{
1531	if (is_dl_boosted(&p->dl)) {
1532		/*
1533		 * Because of delays in the detection of the overrun of a
1534		 * thread's runtime, it might be the case that a thread
1535		 * goes to sleep in a rt mutex with negative runtime. As
1536		 * a consequence, the thread will be throttled.
1537		 *
1538		 * While waiting for the mutex, this thread can also be
1539		 * boosted via PI, resulting in a thread that is throttled
1540		 * and boosted at the same time.
1541		 *
1542		 * In this case, the boost overrides the throttle.
1543		 */
1544		if (p->dl.dl_throttled) {
1545			/*
1546			 * The replenish timer needs to be canceled. No
1547			 * problem if it fires concurrently: boosted threads
1548			 * are ignored in dl_task_timer().
1549			 */
1550			hrtimer_try_to_cancel(&p->dl.dl_timer);
1551			p->dl.dl_throttled = 0;
1552		}
1553	} else if (!dl_prio(p->normal_prio)) {
1554		/*
1555		 * Special case in which we have a !SCHED_DEADLINE task that is going
1556		 * to be deboosted, but exceeds its runtime while doing so. No point in
1557		 * replenishing it, as it's going to return to its original
1558		 * scheduling class after this. If it has been throttled, we need to
1559		 * clear the flag, otherwise the task may wake up as throttled after
1560		 * being boosted again with no means to replenish the runtime and clear
1561		 * the throttle.
1562		 */
1563		p->dl.dl_throttled = 0;
1564		BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
1565		return;
1566	}
1567
1568	/*
1569	 * Check if a constrained deadline task was activated
1570	 * after the deadline but before the next period.
1571	 * If that is the case, the task will be throttled and
1572	 * the replenishment timer will be set to the next period.
1573	 */
1574	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1575		dl_check_constrained_dl(&p->dl);
1576
1577	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1578		add_rq_bw(&p->dl, &rq->dl);
1579		add_running_bw(&p->dl, &rq->dl);
1580	}
1581
1582	/*
1583	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
1584	 * its budget it needs a replenishment and, since it now is on
1585	 * its rq, the bandwidth timer callback (which clearly has not
1586	 * run yet) will take care of this.
1587		 * However, the active utilization does not depend on whether
1588		 * the task is on the runqueue or not (but depends on the
1589	 * task's state - in GRUB parlance, "inactive" vs "active contending").
1590	 * In other words, even if a task is throttled its utilization must
1591	 * be counted in the active utilization; hence, we need to call
1592	 * add_running_bw().
1593	 */
1594	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1595		if (flags & ENQUEUE_WAKEUP)
1596			task_contending(&p->dl, flags);
1597
1598		return;
1599	}
1600
1601	enqueue_dl_entity(&p->dl, flags);
1602
1603	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1604		enqueue_pushable_dl_task(rq, p);
1605}
1606
1607static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1608{
1609	dequeue_dl_entity(&p->dl);
1610	dequeue_pushable_dl_task(rq, p);
1611}
1612
1613static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1614{
1615	update_curr_dl(rq);
1616	__dequeue_task_dl(rq, p, flags);
1617
1618	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1619		sub_running_bw(&p->dl, &rq->dl);
1620		sub_rq_bw(&p->dl, &rq->dl);
1621	}
1622
1623	/*
1624	 * This check allows us to start the inactive timer (or to immediately
1625	 * decrease the active utilization, if needed) in two cases:
1626	 * when the task blocks and when it is terminating
1627	 * (p->state == TASK_DEAD). We can handle the two cases in the same
1628	 * way, because from GRUB's point of view the same thing is happening
1629	 * (the task moves from "active contending" to "active non contending"
1630	 * or "inactive")
1631	 */
1632	if (flags & DEQUEUE_SLEEP)
1633		task_non_contending(p);
1634}
1635
1636/*
1637 * Yield task semantic for -deadline tasks is:
1638 *
1639 *   get off from the CPU until our next instance, with
1640 *   a new runtime. This is of little use now, since we
1641 *   don't have a bandwidth reclaiming mechanism. Anyway,
1642 *   bandwidth reclaiming is planned for the future, and
1643 *   yield_task_dl will indicate that some spare budget
1644 *   is available for other task instances to use.
1645 */
1646static void yield_task_dl(struct rq *rq)
1647{
1648	/*
1649	 * We make the task go to sleep until its current deadline by
1650	 * forcing its runtime to zero. This way, update_curr_dl() stops
1651	 * it and the bandwidth timer will wake it up and will give it
1652	 * new scheduling parameters (thanks to dl_yielded=1).
1653	 */
1654	rq->curr->dl.dl_yielded = 1;
1655
1656	update_rq_clock(rq);
1657	update_curr_dl(rq);
1658	/*
1659	 * Tell update_rq_clock() that we've just updated,
1660	 * so we don't do microscopic update in schedule()
1661	 * and double the fastpath cost.
1662	 */
1663	rq_clock_skip_update(rq);
1664}
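
From userspace, the yield semantic above is reached through sched_yield() once the task runs under SCHED_DEADLINE. The following is a hedged sketch: glibc has no sched_setattr() wrapper, so the raw syscall is used and the sched_attr layout is spelled out by hand (very recent libcs may already provide it); the parameters are illustrative and the call requires the appropriate privileges.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sched.h>
#include <sys/syscall.h>

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10 * 1000 * 1000,	/* 10 ms */
		.sched_deadline	= 30 * 1000 * 1000,	/* 30 ms */
		.sched_period	= 30 * 1000 * 1000,	/* 30 ms */
	};

	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");	/* needs CAP_SYS_NICE / root */
		return 1;
	}

	for (int i = 0; i < 5; i++) {
		/* ... do the periodic work for this instance ... */
		sched_yield();	/* done early: give up the rest of the budget */
	}
	return 0;
}

Each sched_yield() sets dl_yielded as described above, so the task is throttled until its current deadline and then gets fresh parameters from the replenishment timer.
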
1665
1666#ifdef CONFIG_SMP
1667
1668static int find_later_rq(struct task_struct *task);
1669
1670static int
1671select_task_rq_dl(struct task_struct *p, int cpu, int flags)
1672{
1673	struct task_struct *curr;
1674	bool select_rq;
1675	struct rq *rq;
1676
1677	if (!(flags & WF_TTWU))
1678		goto out;
1679
1680	rq = cpu_rq(cpu);
1681
1682	rcu_read_lock();
1683	curr = READ_ONCE(rq->curr); /* unlocked access */
1684
1685	/*
1686	 * If we are dealing with a -deadline task, we must
1687	 * decide where to wake it up.
1688	 * If it has a later deadline and the current task
1689	 * on this rq can't move (provided the waking task
1690	 * can!) we prefer to send it somewhere else. On the
1691	 * other hand, if it has a shorter deadline, we
1692	 * try to make it stay here, it might be important.
1693	 */
1694	select_rq = unlikely(dl_task(curr)) &&
1695		    (curr->nr_cpus_allowed < 2 ||
1696		     !dl_entity_preempt(&p->dl, &curr->dl)) &&
1697		    p->nr_cpus_allowed > 1;
1698
1699	/*
1700	 * Take the capacity of the CPU into account to
1701	 * ensure it fits the requirement of the task.
1702	 */
1703	if (static_branch_unlikely(&sched_asym_cpucapacity))
1704		select_rq |= !dl_task_fits_capacity(p, cpu);
1705
1706	if (select_rq) {
1707		int target = find_later_rq(p);
1708
1709		if (target != -1 &&
1710				(dl_time_before(p->dl.deadline,
1711					cpu_rq(target)->dl.earliest_dl.curr) ||
1712				(cpu_rq(target)->dl.dl_nr_running == 0)))
1713			cpu = target;
1714	}
1715	rcu_read_unlock();
1716
1717out:
1718	return cpu;
1719}
1720
1721static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1722{
1723	struct rq *rq;
1724
1725	if (READ_ONCE(p->__state) != TASK_WAKING)
1726		return;
1727
1728	rq = task_rq(p);
1729	/*
1730	 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1731	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1732	 * rq->lock is not... So, lock it
1733	 */
1734	raw_spin_rq_lock(rq);
1735	if (p->dl.dl_non_contending) {
1736		update_rq_clock(rq);
1737		sub_running_bw(&p->dl, &rq->dl);
1738		p->dl.dl_non_contending = 0;
1739		/*
1740		 * If the timer handler is currently running and the
1741		 * timer cannot be canceled, inactive_task_timer()
1742		 * will see that dl_non_contending is not set, and
1743		 * will not touch the rq's active utilization,
1744		 * so we are still safe.
1745		 */
1746		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1747			put_task_struct(p);
1748	}
1749	sub_rq_bw(&p->dl, &rq->dl);
1750	raw_spin_rq_unlock(rq);
1751}
1752
1753static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1754{
1755	/*
1756	 * Current can't be migrated, useless to reschedule,
1757	 * let's hope p can move out.
1758	 */
1759	if (rq->curr->nr_cpus_allowed == 1 ||
1760	    !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1761		return;
1762
1763	/*
1764	 * p is migratable, so let's not schedule it and
1765	 * see if it is pushed or pulled somewhere else.
1766	 */
1767	if (p->nr_cpus_allowed != 1 &&
1768	    cpudl_find(&rq->rd->cpudl, p, NULL))
1769		return;
1770
1771	resched_curr(rq);
1772}
1773
1774static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1775{
1776	if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1777		/*
1778		 * This is OK, because current is on_cpu, which avoids it being
1779		 * picked for load-balance and preemption/IRQs are still
1780		 * disabled avoiding further scheduler activity on it and we've
1781		 * not yet started the picking loop.
1782		 */
1783		rq_unpin_lock(rq, rf);
1784		pull_dl_task(rq);
1785		rq_repin_lock(rq, rf);
1786	}
1787
1788	return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1789}
1790#endif /* CONFIG_SMP */
1791
1792/*
1793 * Only called when both the current and waking task are -deadline
1794 * tasks.
1795 */
1796static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1797				  int flags)
1798{
1799	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1800		resched_curr(rq);
1801		return;
1802	}
1803
1804#ifdef CONFIG_SMP
1805	/*
1806	 * In the unlikely case current and p have the same deadline
1807	 * let us try to decide what's the best thing to do...
1808	 */
1809	if ((p->dl.deadline == rq->curr->dl.deadline) &&
1810	    !test_tsk_need_resched(rq->curr))
1811		check_preempt_equal_dl(rq, p);
1812#endif /* CONFIG_SMP */
1813}
1814
1815#ifdef CONFIG_SCHED_HRTICK
1816static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1817{
1818	hrtick_start(rq, p->dl.runtime);
1819}
1820#else /* !CONFIG_SCHED_HRTICK */
1821static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1822{
1823}
1824#endif
1825
1826static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
1827{
1828	p->se.exec_start = rq_clock_task(rq);
1829
1830	/* You can't push away the running task */
1831	dequeue_pushable_dl_task(rq, p);
1832
1833	if (!first)
1834		return;
1835
1836	if (hrtick_enabled_dl(rq))
1837		start_hrtick_dl(rq, p);
1838
1839	if (rq->curr->sched_class != &dl_sched_class)
1840		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1841
1842	deadline_queue_push_tasks(rq);
1843}
1844
1845static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1846						   struct dl_rq *dl_rq)
1847{
1848	struct rb_node *left = rb_first_cached(&dl_rq->root);
1849
1850	if (!left)
1851		return NULL;
1852
1853	return rb_entry(left, struct sched_dl_entity, rb_node);
1854}
1855
1856static struct task_struct *pick_task_dl(struct rq *rq)
1857{
1858	struct sched_dl_entity *dl_se;
1859	struct dl_rq *dl_rq = &rq->dl;
1860	struct task_struct *p;
1861
1862	if (!sched_dl_runnable(rq))
1863		return NULL;
1864
1865	dl_se = pick_next_dl_entity(rq, dl_rq);
1866	BUG_ON(!dl_se);
1867	p = dl_task_of(dl_se);
1868
1869	return p;
1870}
1871
1872static struct task_struct *pick_next_task_dl(struct rq *rq)
1873{
1874	struct task_struct *p;
1875
1876	p = pick_task_dl(rq);
1877	if (p)
1878		set_next_task_dl(rq, p, true);
1879
1880	return p;
1881}
1882
1883static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1884{
1885	update_curr_dl(rq);
1886
1887	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1888	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1889		enqueue_pushable_dl_task(rq, p);
1890}
1891
1892/*
1893 * scheduler tick hitting a task of our scheduling class.
1894 *
1895 * NOTE: This function can be called remotely by the tick offload that
1896 * goes along with full dynticks. Therefore no local assumption can be made
1897 * and everything must be accessed through the @rq and @curr passed in
1898 * parameters.
1899 */
1900static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1901{
1902	update_curr_dl(rq);
1903
1904	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1905	/*
1906	 * Even when we have runtime, update_curr_dl() might have resulted in us
1907	 * not being the leftmost task anymore. In that case NEED_RESCHED will
1908	 * be set and schedule() will start a new hrtick for the next task.
1909	 */
1910	if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
1911	    is_leftmost(p, &rq->dl))
1912		start_hrtick_dl(rq, p);
1913}
1914
1915static void task_fork_dl(struct task_struct *p)
1916{
1917	/*
1918	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1919	 * sched_fork()
1920	 */
1921}
1922
1923#ifdef CONFIG_SMP
1924
1925/* Only try algorithms three times */
1926#define DL_MAX_TRIES 3
1927
1928static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1929{
1930	if (!task_running(rq, p) &&
1931	    cpumask_test_cpu(cpu, &p->cpus_mask))
1932		return 1;
1933	return 0;
1934}
1935
1936/*
1937 * Return the earliest pushable task on this rq that is suitable to be
1938 * executed on the given CPU; NULL otherwise:
1939 */
1940static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1941{
1942	struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1943	struct task_struct *p = NULL;
1944
1945	if (!has_pushable_dl_tasks(rq))
1946		return NULL;
1947
1948next_node:
1949	if (next_node) {
1950		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1951
1952		if (pick_dl_task(rq, p, cpu))
1953			return p;
1954
1955		next_node = rb_next(next_node);
1956		goto next_node;
1957	}
1958
1959	return NULL;
1960}
1961
1962static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1963
1964static int find_later_rq(struct task_struct *task)
1965{
1966	struct sched_domain *sd;
1967	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1968	int this_cpu = smp_processor_id();
1969	int cpu = task_cpu(task);
1970
1971	/* Make sure the mask is initialized first */
1972	if (unlikely(!later_mask))
1973		return -1;
1974
1975	if (task->nr_cpus_allowed == 1)
1976		return -1;
1977
1978	/*
1979	 * We have to consider system topology and task affinity
1980	 * first, then we can look for a suitable CPU.
1981	 */
1982	if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1983		return -1;
1984
1985	/*
1986	 * If we are here, some targets have been found, including
1987	 * the most suitable which is, among the runqueues where the
1988	 * current tasks have later deadlines than the task's one, the
1989	 * rq with the latest possible one.
1990	 *
1991	 * Now we check how well this matches with the task's
1992	 * affinity and the system topology.
1993	 *
1994	 * The last CPU where the task ran is our first
1995	 * guess, since it is most likely cache-hot there.
1996	 */
1997	if (cpumask_test_cpu(cpu, later_mask))
1998		return cpu;
1999	/*
2000	 * Check if this_cpu is to be skipped (i.e., it is
2001	 * not in the mask) or not.
2002	 */
2003	if (!cpumask_test_cpu(this_cpu, later_mask))
2004		this_cpu = -1;
2005
2006	rcu_read_lock();
2007	for_each_domain(cpu, sd) {
2008		if (sd->flags & SD_WAKE_AFFINE) {
2009			int best_cpu;
2010
2011			/*
2012			 * If possible, preempting this_cpu is
2013			 * cheaper than migrating.
2014			 */
2015			if (this_cpu != -1 &&
2016			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2017				rcu_read_unlock();
2018				return this_cpu;
2019			}
2020
2021			best_cpu = cpumask_any_and_distribute(later_mask,
2022							      sched_domain_span(sd));
2023			/*
2024			 * Last chance: if a CPU that is in both later_mask
2025			 * and the current sd span is valid, that becomes our
2026			 * choice. Of course, the latest possible CPU is
2027			 * already under consideration through later_mask.
2028			 */
2029			if (best_cpu < nr_cpu_ids) {
2030				rcu_read_unlock();
2031				return best_cpu;
2032			}
2033		}
2034	}
2035	rcu_read_unlock();
2036
2037	/*
2038	 * At this point, all our guesses have failed; we just return
2039	 * 'something' and let the caller sort things out.
2040	 */
2041	if (this_cpu != -1)
2042		return this_cpu;
2043
2044	cpu = cpumask_any_distribute(later_mask);
2045	if (cpu < nr_cpu_ids)
2046		return cpu;
2047
2048	return -1;
2049}
2050
2051/* Locks the rq it finds */
2052static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2053{
2054	struct rq *later_rq = NULL;
2055	int tries;
2056	int cpu;
2057
2058	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2059		cpu = find_later_rq(task);
2060
2061		if ((cpu == -1) || (cpu == rq->cpu))
2062			break;
2063
2064		later_rq = cpu_rq(cpu);
2065
2066		if (later_rq->dl.dl_nr_running &&
2067		    !dl_time_before(task->dl.deadline,
2068					later_rq->dl.earliest_dl.curr)) {
2069			/*
2070			 * Target rq has tasks of equal or earlier deadline;
2071			 * retrying does not release any lock and is unlikely
2072			 * to yield a different result.
2073			 */
2074			later_rq = NULL;
2075			break;
2076		}
2077
2078		/* Retry if something changed. */
2079		if (double_lock_balance(rq, later_rq)) {
2080			if (unlikely(task_rq(task) != rq ||
2081				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
2082				     task_running(rq, task) ||
2083				     !dl_task(task) ||
2084				     !task_on_rq_queued(task))) {
2085				double_unlock_balance(rq, later_rq);
2086				later_rq = NULL;
2087				break;
2088			}
2089		}
2090
2091		/*
2092		 * If the rq we found has no -deadline task, or
2093		 * its earliest one has a later deadline than our
2094		 * task, the rq is a good one.
2095		 */
2096		if (!later_rq->dl.dl_nr_running ||
2097		    dl_time_before(task->dl.deadline,
2098				   later_rq->dl.earliest_dl.curr))
2099			break;
2100
2101		/* Otherwise we try again. */
2102		double_unlock_balance(rq, later_rq);
2103		later_rq = NULL;
2104	}
2105
2106	return later_rq;
2107}
2108
2109static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2110{
2111	struct task_struct *p;
2112
2113	if (!has_pushable_dl_tasks(rq))
2114		return NULL;
2115
2116	p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2117		     struct task_struct, pushable_dl_tasks);
2118
2119	BUG_ON(rq->cpu != task_cpu(p));
2120	BUG_ON(task_current(rq, p));
2121	BUG_ON(p->nr_cpus_allowed <= 1);
2122
2123	BUG_ON(!task_on_rq_queued(p));
2124	BUG_ON(!dl_task(p));
2125
2126	return p;
2127}
2128
2129/*
2130 * See if the non-running -deadline tasks on this rq
2131 * can be sent to some other CPU where they can preempt
2132 * and start executing.
2133 */
2134static int push_dl_task(struct rq *rq)
2135{
2136	struct task_struct *next_task;
2137	struct rq *later_rq;
2138	int ret = 0;
2139
2140	if (!rq->dl.overloaded)
2141		return 0;
2142
2143	next_task = pick_next_pushable_dl_task(rq);
2144	if (!next_task)
2145		return 0;
2146
2147retry:
2148	if (is_migration_disabled(next_task))
2149		return 0;
2150
2151	if (WARN_ON(next_task == rq->curr))
2152		return 0;
2153
2154	/*
2155	 * If next_task preempts rq->curr, and rq->curr
2156	 * can move away, it makes sense to just reschedule
2157	 * without going further in pushing next_task.
2158	 */
2159	if (dl_task(rq->curr) &&
2160	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2161	    rq->curr->nr_cpus_allowed > 1) {
2162		resched_curr(rq);
2163		return 0;
2164	}
2165
2166	/* We might release rq lock */
2167	get_task_struct(next_task);
2168
2169	/* Will lock the rq it'll find */
2170	later_rq = find_lock_later_rq(next_task, rq);
2171	if (!later_rq) {
2172		struct task_struct *task;
2173
2174		/*
2175		 * We must check all this again, since
2176		 * find_lock_later_rq releases rq->lock and it is
2177		 * then possible that next_task has migrated.
2178		 */
2179		task = pick_next_pushable_dl_task(rq);
2180		if (task == next_task) {
2181			/*
2182			 * The task is still there. We don't try
2183			 * again; some other CPU will pull it when ready.
2184			 */
2185			goto out;
2186		}
2187
2188		if (!task)
2189			/* No more tasks */
2190			goto out;
2191
2192		put_task_struct(next_task);
2193		next_task = task;
2194		goto retry;
2195	}
2196
2197	deactivate_task(rq, next_task, 0);
2198	set_task_cpu(next_task, later_rq->cpu);
2199
2200	/*
2201	 * Update the later_rq clock here, because the clock is used
2202	 * by the cpufreq_update_util() inside __add_running_bw().
2203	 */
2204	update_rq_clock(later_rq);
2205	activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2206	ret = 1;
2207
2208	resched_curr(later_rq);
2209
2210	double_unlock_balance(rq, later_rq);
2211
2212out:
2213	put_task_struct(next_task);
2214
2215	return ret;
2216}
2217
2218static void push_dl_tasks(struct rq *rq)
2219{
2220	/* push_dl_task() will return true if it moved a -deadline task */
2221	while (push_dl_task(rq))
2222		;
2223}
2224
2225static void pull_dl_task(struct rq *this_rq)
2226{
2227	int this_cpu = this_rq->cpu, cpu;
2228	struct task_struct *p, *push_task;
2229	bool resched = false;
2230	struct rq *src_rq;
2231	u64 dmin = LONG_MAX;
2232
2233	if (likely(!dl_overloaded(this_rq)))
2234		return;
2235
2236	/*
2237	 * Match the barrier from dl_set_overload(); this guarantees that if we
2238	 * see overloaded we must also see the dlo_mask bit.
2239	 */
2240	smp_rmb();
2241
2242	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2243		if (this_cpu == cpu)
2244			continue;
2245
2246		src_rq = cpu_rq(cpu);
2247
2248		/*
2249		 * It looks racy, and it is! However, as in sched_rt.c,
2250		 * we are fine with this.
2251		 */
2252		if (this_rq->dl.dl_nr_running &&
2253		    dl_time_before(this_rq->dl.earliest_dl.curr,
2254				   src_rq->dl.earliest_dl.next))
2255			continue;
2256
2257		/* Might drop this_rq->lock */
2258		push_task = NULL;
2259		double_lock_balance(this_rq, src_rq);
2260
2261		/*
2262		 * If there are no more pullable tasks on the
2263		 * rq, we're done with it.
2264		 */
2265		if (src_rq->dl.dl_nr_running <= 1)
2266			goto skip;
2267
2268		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2269
2270		/*
2271		 * We found a task to be pulled if:
2272		 *  - it preempts our current (if there's one),
2273		 *  - it will preempt the last one we pulled (if any).
2274		 */
2275		if (p && dl_time_before(p->dl.deadline, dmin) &&
2276		    (!this_rq->dl.dl_nr_running ||
2277		     dl_time_before(p->dl.deadline,
2278				    this_rq->dl.earliest_dl.curr))) {
2279			WARN_ON(p == src_rq->curr);
2280			WARN_ON(!task_on_rq_queued(p));
2281
2282			/*
2283			 * Then we pull iff p has actually an earlier
2284			 * deadline than the current task of its runqueue.
2285			 */
2286			if (dl_time_before(p->dl.deadline,
2287					   src_rq->curr->dl.deadline))
2288				goto skip;
2289
2290			if (is_migration_disabled(p)) {
2291				push_task = get_push_task(src_rq);
2292			} else {
2293				deactivate_task(src_rq, p, 0);
2294				set_task_cpu(p, this_cpu);
2295				activate_task(this_rq, p, 0);
2296				dmin = p->dl.deadline;
2297				resched = true;
2298			}
2299
2300			/* Is there any other task even earlier? */
2301		}
2302skip:
2303		double_unlock_balance(this_rq, src_rq);
2304
2305		if (push_task) {
2306			raw_spin_rq_unlock(this_rq);
2307			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2308					    push_task, &src_rq->push_work);
2309			raw_spin_rq_lock(this_rq);
2310		}
2311	}
2312
2313	if (resched)
2314		resched_curr(this_rq);
2315}
2316
2317/*
2318 * Since the task is not running and a reschedule is not going to happen
2319 * anytime soon on its runqueue, we try pushing it away now.
2320 */
2321static void task_woken_dl(struct rq *rq, struct task_struct *p)
2322{
2323	if (!task_running(rq, p) &&
2324	    !test_tsk_need_resched(rq->curr) &&
2325	    p->nr_cpus_allowed > 1 &&
2326	    dl_task(rq->curr) &&
2327	    (rq->curr->nr_cpus_allowed < 2 ||
2328	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2329		push_dl_tasks(rq);
2330	}
2331}
2332
2333static void set_cpus_allowed_dl(struct task_struct *p,
2334				const struct cpumask *new_mask,
2335				u32 flags)
2336{
2337	struct root_domain *src_rd;
2338	struct rq *rq;
2339
2340	BUG_ON(!dl_task(p));
2341
2342	rq = task_rq(p);
2343	src_rd = rq->rd;
2344	/*
2345	 * Migrating a SCHED_DEADLINE task between exclusive
2346	 * cpusets (different root_domains) entails a bandwidth
2347	 * update. We already made space for us in the destination
2348	 * domain (see cpuset_can_attach()).
2349	 */
2350	if (!cpumask_intersects(src_rd->span, new_mask)) {
2351		struct dl_bw *src_dl_b;
2352
2353		src_dl_b = dl_bw_of(cpu_of(rq));
2354		/*
2355		 * We now free resources of the root_domain we are migrating
2356		 * off. In the worst case, sched_setattr() may temporarily fail
2357		 * until we complete the update.
2358		 */
2359		raw_spin_lock(&src_dl_b->lock);
2360		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2361		raw_spin_unlock(&src_dl_b->lock);
2362	}
2363
2364	set_cpus_allowed_common(p, new_mask, flags);
2365}
2366
2367/* Assumes rq->lock is held */
2368static void rq_online_dl(struct rq *rq)
2369{
2370	if (rq->dl.overloaded)
2371		dl_set_overload(rq);
2372
2373	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2374	if (rq->dl.dl_nr_running > 0)
2375		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2376}
2377
2378/* Assumes rq->lock is held */
2379static void rq_offline_dl(struct rq *rq)
2380{
2381	if (rq->dl.overloaded)
2382		dl_clear_overload(rq);
2383
2384	cpudl_clear(&rq->rd->cpudl, rq->cpu);
2385	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2386}
2387
2388void __init init_sched_dl_class(void)
2389{
2390	unsigned int i;
2391
2392	for_each_possible_cpu(i)
2393		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2394					GFP_KERNEL, cpu_to_node(i));
2395}
2396
2397void dl_add_task_root_domain(struct task_struct *p)
2398{
2399	struct rq_flags rf;
2400	struct rq *rq;
2401	struct dl_bw *dl_b;
2402
2403	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2404	if (!dl_task(p)) {
2405		raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2406		return;
2407	}
2408
2409	rq = __task_rq_lock(p, &rf);
2410
2411	dl_b = &rq->rd->dl_bw;
2412	raw_spin_lock(&dl_b->lock);
2413
2414	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2415
2416	raw_spin_unlock(&dl_b->lock);
2417
2418	task_rq_unlock(rq, p, &rf);
2419}
2420
2421void dl_clear_root_domain(struct root_domain *rd)
2422{
2423	unsigned long flags;
2424
2425	raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2426	rd->dl_bw.total_bw = 0;
2427	raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2428}
2429
2430#endif /* CONFIG_SMP */
2431
2432static void switched_from_dl(struct rq *rq, struct task_struct *p)
2433{
2434	/*
2435	 * task_non_contending() can start the "inactive timer" (if the 0-lag
2436	 * time is in the future). If the task switches back to dl before
2437	 * the "inactive timer" fires, it can continue to consume its current
2438	 * runtime using its current deadline. If it stays outside of
2439	 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2440	 * will reset the task parameters.
2441	 */
2442	if (task_on_rq_queued(p) && p->dl.dl_runtime)
2443		task_non_contending(p);
2444
2445	if (!task_on_rq_queued(p)) {
2446		/*
2447		 * Inactive timer is armed. However, p is leaving DEADLINE and
2448		 * might migrate away from this rq while continuing to run on
2449		 * some other class. We need to remove its contribution from
2450		 * this rq running_bw now, or sub_rq_bw (below) will complain.
2451		 */
2452		if (p->dl.dl_non_contending)
2453			sub_running_bw(&p->dl, &rq->dl);
2454		sub_rq_bw(&p->dl, &rq->dl);
2455	}
2456
2457	/*
2458	 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2459	 * at the 0-lag time, because the task could have been migrated
2460	 * in the meantime, while it was running as SCHED_OTHER.
2461	 */
2462	if (p->dl.dl_non_contending)
2463		p->dl.dl_non_contending = 0;
2464
2465	/*
2466	 * Since this might be the only -deadline task on the rq,
2467	 * this is the right place to try to pull some other one
2468	 * from an overloaded CPU, if any.
2469	 */
2470	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2471		return;
2472
2473	deadline_queue_pull_task(rq);
2474}
2475
2476/*
2477 * When switching to -deadline, we may overload the rq, then
2478 * we try to push someone off, if possible.
2479 */
2480static void switched_to_dl(struct rq *rq, struct task_struct *p)
2481{
2482	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2483		put_task_struct(p);
2484
2485	/* If p is not queued we will update its parameters at next wakeup. */
2486	if (!task_on_rq_queued(p)) {
2487		add_rq_bw(&p->dl, &rq->dl);
2488
2489		return;
2490	}
2491
2492	if (rq->curr != p) {
2493#ifdef CONFIG_SMP
2494		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2495			deadline_queue_push_tasks(rq);
2496#endif
2497		if (dl_task(rq->curr))
2498			check_preempt_curr_dl(rq, p, 0);
2499		else
2500			resched_curr(rq);
2501	} else {
2502		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2503	}
2504}
2505
2506/*
2507 * If the scheduling parameters of a -deadline task changed,
2508 * a push or pull operation might be needed.
2509 */
2510static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2511			    int oldprio)
2512{
2513	if (task_on_rq_queued(p) || task_current(rq, p)) {
2514#ifdef CONFIG_SMP
2515		/*
2516		 * This might be too much, but unfortunately
2517		 * we don't have the old deadline value, and
2518		 * we can't tell whether the task is raising
2519		 * or lowering its prio, so...
2520		 */
2521		if (!rq->dl.overloaded)
2522			deadline_queue_pull_task(rq);
2523
2524		/*
2525		 * If we now have an earlier deadline task than p,
2526		 * then reschedule, provided p is still on this
2527		 * runqueue.
2528		 */
2529		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2530			resched_curr(rq);
2531#else
2532		/*
2533		 * Again, we don't know if p has an earlier
2534		 * or later deadline, so let's blindly set a
2535		 * (maybe not needed) rescheduling point.
2536		 */
2537		resched_curr(rq);
2538#endif /* CONFIG_SMP */
2539	}
2540}
2541
2542DEFINE_SCHED_CLASS(dl) = {
2543
2544	.enqueue_task		= enqueue_task_dl,
2545	.dequeue_task		= dequeue_task_dl,
2546	.yield_task		= yield_task_dl,
2547
2548	.check_preempt_curr	= check_preempt_curr_dl,
2549
2550	.pick_next_task		= pick_next_task_dl,
2551	.put_prev_task		= put_prev_task_dl,
2552	.set_next_task		= set_next_task_dl,
2553
2554#ifdef CONFIG_SMP
2555	.balance		= balance_dl,
2556	.pick_task		= pick_task_dl,
2557	.select_task_rq		= select_task_rq_dl,
2558	.migrate_task_rq	= migrate_task_rq_dl,
2559	.set_cpus_allowed       = set_cpus_allowed_dl,
2560	.rq_online              = rq_online_dl,
2561	.rq_offline             = rq_offline_dl,
2562	.task_woken		= task_woken_dl,
2563	.find_lock_rq		= find_lock_later_rq,
2564#endif
2565
2566	.task_tick		= task_tick_dl,
2567	.task_fork              = task_fork_dl,
2568
2569	.prio_changed           = prio_changed_dl,
2570	.switched_from		= switched_from_dl,
2571	.switched_to		= switched_to_dl,
2572
2573	.update_curr		= update_curr_dl,
2574};
2575
2576/* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
2577static u64 dl_generation;
2578
2579int sched_dl_global_validate(void)
2580{
2581	u64 runtime = global_rt_runtime();
2582	u64 period = global_rt_period();
2583	u64 new_bw = to_ratio(period, runtime);
2584	u64 gen = ++dl_generation;
2585	struct dl_bw *dl_b;
2586	int cpu, cpus, ret = 0;
2587	unsigned long flags;
2588
2589	/*
2590	 * Here we want to check that the bandwidth is not being set to a
2591	 * value smaller than the bandwidth currently allocated in
2592	 * any of the root_domains.
2593	 */
2594	for_each_possible_cpu(cpu) {
2595		rcu_read_lock_sched();
2596
2597		if (dl_bw_visited(cpu, gen))
2598			goto next;
2599
2600		dl_b = dl_bw_of(cpu);
2601		cpus = dl_bw_cpus(cpu);
2602
2603		raw_spin_lock_irqsave(&dl_b->lock, flags);
2604		if (new_bw * cpus < dl_b->total_bw)
2605			ret = -EBUSY;
2606		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2607
2608next:
2609		rcu_read_unlock_sched();
2610
2611		if (ret)
2612			break;
2613	}
2614
2615	return ret;
2616}
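
/*
 * Example of the check above, assuming the usual sysctl defaults of
 * sched_rt_runtime_us = 950000 and sched_rt_period_us = 1000000 (these are
 * tunable, so the numbers are only illustrative): new_bw = to_ratio(period,
 * runtime) works out to roughly 0.95 << BW_SHIFT. A root domain spanning
 * 4 CPUs is then rejected with -EBUSY if its already admitted -deadline
 * bandwidth exceeds ~380% of one CPU, i.e. if dl_b->total_bw > new_bw * 4.
 */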
2617
2618static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2619{
2620	if (global_rt_runtime() == RUNTIME_INF) {
2621		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2622		dl_rq->extra_bw = 1 << BW_SHIFT;
2623	} else {
2624		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2625			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2626		dl_rq->extra_bw = to_ratio(global_rt_period(),
2627						    global_rt_runtime());
2628	}
2629}
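
/*
 * Worked example for the non-RUNTIME_INF branch above, assuming the usual
 * 950000/1000000 rt sysctl defaults and the BW_SHIFT = 20 / RATIO_SHIFT = 8
 * fixed-point shifts from sched.h:
 *
 *	extra_bw = to_ratio(period, runtime)
 *		 ~= 0.95 * 2^BW_SHIFT           ~= 996147
 *	bw_ratio = to_ratio(runtime, period) >> (BW_SHIFT - RATIO_SHIFT)
 *		 ~= (1 / 0.95) * 2^RATIO_SHIFT  ~= 269
 *
 * Roughly speaking, extra_bw is the extra reclaimable utilization and
 * bw_ratio the inverse of the maximum reclaimable utilization, both in
 * fixed point; they are consumed by grub_reclaim() when scaling the runtime
 * charged to a task.
 */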
2630
2631void sched_dl_do_global(void)
2632{
2633	u64 new_bw = -1;
2634	u64 gen = ++dl_generation;
2635	struct dl_bw *dl_b;
2636	int cpu;
2637	unsigned long flags;
2638
2639	def_dl_bandwidth.dl_period = global_rt_period();
2640	def_dl_bandwidth.dl_runtime = global_rt_runtime();
2641
2642	if (global_rt_runtime() != RUNTIME_INF)
2643		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2644
2645	for_each_possible_cpu(cpu) {
2646		rcu_read_lock_sched();
2647
2648		if (dl_bw_visited(cpu, gen)) {
2649			rcu_read_unlock_sched();
2650			continue;
2651		}
2652
2653		dl_b = dl_bw_of(cpu);
2654
2655		raw_spin_lock_irqsave(&dl_b->lock, flags);
2656		dl_b->bw = new_bw;
2657		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2658
2659		rcu_read_unlock_sched();
2660		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2661	}
2662}
2663
2664/*
2665 * We must be sure that accepting a new task (or allowing changing the
2666 * parameters of an existing one) is consistent with the bandwidth
2667 * constraints. If so, this function also updates the currently
2668 * allocated bandwidth to reflect the new situation.
2669 *
2670 * This function is called while holding p's rq->lock.
2671 */
2672int sched_dl_overflow(struct task_struct *p, int policy,
2673		      const struct sched_attr *attr)
2674{
2675	u64 period = attr->sched_period ?: attr->sched_deadline;
2676	u64 runtime = attr->sched_runtime;
2677	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2678	int cpus, err = -1, cpu = task_cpu(p);
2679	struct dl_bw *dl_b = dl_bw_of(cpu);
2680	unsigned long cap;
2681
2682	if (attr->sched_flags & SCHED_FLAG_SUGOV)
2683		return 0;
2684
2685	/* !deadline task may carry old deadline bandwidth */
2686	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2687		return 0;
2688
2689	/*
2690	 * Whether a task enters, leaves, or stays -deadline but changes
2691	 * its parameters, we may need to update the total allocated
2692	 * bandwidth of the container accordingly.
2693	 */
2694	raw_spin_lock(&dl_b->lock);
2695	cpus = dl_bw_cpus(cpu);
2696	cap = dl_bw_capacity(cpu);
2697
2698	if (dl_policy(policy) && !task_has_dl_policy(p) &&
2699	    !__dl_overflow(dl_b, cap, 0, new_bw)) {
2700		if (hrtimer_active(&p->dl.inactive_timer))
2701			__dl_sub(dl_b, p->dl.dl_bw, cpus);
2702		__dl_add(dl_b, new_bw, cpus);
2703		err = 0;
2704	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
2705		   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
2706		/*
2707		 * XXX this is slightly incorrect: when the task
2708		 * utilization decreases, we should delay the total
2709		 * utilization change until the task's 0-lag point.
2710		 * But this would require setting the task's "inactive
2711		 * timer" when the task is not inactive.
2712		 */
2713		__dl_sub(dl_b, p->dl.dl_bw, cpus);
2714		__dl_add(dl_b, new_bw, cpus);
2715		dl_change_utilization(p, new_bw);
2716		err = 0;
2717	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2718		/*
2719		 * Do not decrease the total deadline utilization here;
2720		 * switched_from_dl() will take care of doing it at the correct
2721		 * (0-lag) time.
2722		 */
2723		err = 0;
2724	}
2725	raw_spin_unlock(&dl_b->lock);
2726
2727	return err;
2728}
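
/*
 * A rough worked example of the admission arithmetic above (numbers are
 * arbitrary): a task asking for sched_runtime = 10ms with
 * sched_period = sched_deadline = 100ms gets
 *
 *	new_bw = to_ratio(100ms, 10ms) = (10ms << BW_SHIFT) / 100ms
 *	       ~= 0.1 * 2^20 ~= 104858
 *
 * and is admitted only if, for its root domain, total_bw - old_bw + new_bw
 * stays within the limit that __dl_overflow() derives from dl_b->bw and the
 * (capacity-scaled) CPUs of that domain.
 */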
2729
2730/*
2731 * This function initializes the sched_dl_entity of a task that is
2732 * becoming a SCHED_DEADLINE task.
2733 *
2734 * Only the static values are considered here; the actual runtime and the
2735 * absolute deadline will be properly calculated when the task is enqueued
2736 * for the first time with its new policy.
2737 */
2738void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2739{
2740	struct sched_dl_entity *dl_se = &p->dl;
2741
2742	dl_se->dl_runtime = attr->sched_runtime;
2743	dl_se->dl_deadline = attr->sched_deadline;
2744	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2745	dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
2746	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2747	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2748}
2749
2750void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2751{
2752	struct sched_dl_entity *dl_se = &p->dl;
2753
2754	attr->sched_priority = p->rt_priority;
2755	attr->sched_runtime = dl_se->dl_runtime;
2756	attr->sched_deadline = dl_se->dl_deadline;
2757	attr->sched_period = dl_se->dl_period;
2758	attr->sched_flags &= ~SCHED_DL_FLAGS;
2759	attr->sched_flags |= dl_se->flags;
2760}
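
/*
 * For instance (illustrative numbers), a constrained-deadline task created
 * with sched_runtime = 5ms, sched_deadline = 10ms and sched_period = 20ms
 * ends up with
 *
 *	dl_bw      = to_ratio(20ms, 5ms) ~= 0.25 * 2^BW_SHIFT
 *	dl_density = to_ratio(10ms, 5ms) ~= 0.50 * 2^BW_SHIFT
 *
 * dl_bw is what admission control charges against the root domain, while
 * dl_density (runtime/deadline) is used by the wakeup-time CBS rules for
 * constrained-deadline tasks (see update_dl_revised_wakeup()) to bound the
 * runtime granted for the time remaining until the deadline.
 */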
2761
2762/*
2763 * Default limits for DL period; on the top end we guard against small util
2764 * tasks still getting ridiculously long effective runtimes; on the bottom end we
2765 * guard against timer DoS.
2766 */
2767unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
2768unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
2769
2770/*
2771 * This function validates the new parameters of a -deadline task.
2772 * We ask for the deadline to be non-zero and greater than or equal
2773 * to the runtime, and for the period to be either zero or greater
2774 * than or equal to the deadline. Furthermore, we have to be sure that
2775 * user parameters are above the internal resolution of 1us (we
2776 * check sched_runtime only since it is always the smaller one) and
2777 * below 2^63 ns (we have to check both sched_deadline and
2778 * sched_period, as the latter can be zero).
2779 */
2780bool __checkparam_dl(const struct sched_attr *attr)
2781{
2782	u64 period, max, min;
2783
2784	/* special dl tasks don't actually use any parameter */
2785	if (attr->sched_flags & SCHED_FLAG_SUGOV)
2786		return true;
2787
2788	/* deadline != 0 */
2789	if (attr->sched_deadline == 0)
2790		return false;
2791
2792	/*
2793	 * Since we truncate DL_SCALE bits, make sure we're at least
2794	 * that big.
2795	 */
2796	if (attr->sched_runtime < (1ULL << DL_SCALE))
2797		return false;
2798
2799	/*
2800	 * Since we use the MSB for wrap-around and sign issues, make
2801	 * sure it's not set (mind that period can be equal to zero).
2802	 */
2803	if (attr->sched_deadline & (1ULL << 63) ||
2804	    attr->sched_period & (1ULL << 63))
2805		return false;
2806
2807	period = attr->sched_period;
2808	if (!period)
2809		period = attr->sched_deadline;
2810
2811	/* runtime <= deadline <= period (if period != 0) */
2812	if (period < attr->sched_deadline ||
2813	    attr->sched_deadline < attr->sched_runtime)
2814		return false;
2815
2816	max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
2817	min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
2818
2819	if (period < min || period > max)
2820		return false;
2821
2822	return true;
2823}
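
/*
 * Quick examples of the rules above, assuming the default
 * sysctl_sched_dl_period_{min,max} values (all numbers illustrative):
 *
 *	runtime = 10ms,  deadline = 100ms, period = 0     -> accepted
 *		(the period defaults to the deadline)
 *	runtime = 10ms,  deadline =   5ms, period = 100ms -> rejected
 *		(runtime > deadline)
 *	runtime = 500ns, deadline =   1ms, period = 1ms   -> rejected
 *		(runtime below the 1 << DL_SCALE resolution)
 *	runtime = 20us,  deadline =  50us, period = 50us  -> rejected
 *		(period below sysctl_sched_dl_period_min)
 */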
2824
2825/*
2826 * This function clears the sched_dl_entity static params.
2827 */
2828void __dl_clear_params(struct task_struct *p)
2829{
2830	struct sched_dl_entity *dl_se = &p->dl;
2831
2832	dl_se->dl_runtime		= 0;
2833	dl_se->dl_deadline		= 0;
2834	dl_se->dl_period		= 0;
2835	dl_se->flags			= 0;
2836	dl_se->dl_bw			= 0;
2837	dl_se->dl_density		= 0;
2838
2839	dl_se->dl_throttled		= 0;
2840	dl_se->dl_yielded		= 0;
2841	dl_se->dl_non_contending	= 0;
2842	dl_se->dl_overrun		= 0;
2843
2844#ifdef CONFIG_RT_MUTEXES
2845	dl_se->pi_se			= dl_se;
2846#endif
2847}
2848
2849bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2850{
2851	struct sched_dl_entity *dl_se = &p->dl;
2852
2853	if (dl_se->dl_runtime != attr->sched_runtime ||
2854	    dl_se->dl_deadline != attr->sched_deadline ||
2855	    dl_se->dl_period != attr->sched_period ||
2856	    dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
2857		return true;
2858
2859	return false;
2860}
2861
2862#ifdef CONFIG_SMP
2863int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2864{
2865	unsigned long flags, cap;
2866	unsigned int dest_cpu;
2867	struct dl_bw *dl_b;
2868	bool overflow;
2869	int ret;
2870
2871	dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
2872
2873	rcu_read_lock_sched();
2874	dl_b = dl_bw_of(dest_cpu);
2875	raw_spin_lock_irqsave(&dl_b->lock, flags);
2876	cap = dl_bw_capacity(dest_cpu);
2877	overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
2878	if (overflow) {
2879		ret = -EBUSY;
2880	} else {
2881		/*
2882		 * We reserve space for this task in the destination
2883		 * root_domain, as we can't fail after this point.
2884		 * We will free resources in the source root_domain
2885		 * later on (see set_cpus_allowed_dl()).
2886		 */
2887		int cpus = dl_bw_cpus(dest_cpu);
2888
2889		__dl_add(dl_b, p->dl.dl_bw, cpus);
2890		ret = 0;
2891	}
2892	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2893	rcu_read_unlock_sched();
2894
2895	return ret;
2896}
2897
2898int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2899				 const struct cpumask *trial)
2900{
2901	int ret = 1, trial_cpus;
2902	struct dl_bw *cur_dl_b;
2903	unsigned long flags;
2904
2905	rcu_read_lock_sched();
2906	cur_dl_b = dl_bw_of(cpumask_any(cur));
2907	trial_cpus = cpumask_weight(trial);
2908
2909	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2910	if (cur_dl_b->bw != -1 &&
2911	    cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2912		ret = 0;
2913	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2914	rcu_read_unlock_sched();
2915
2916	return ret;
2917}
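
/*
 * E.g. (illustrative numbers): with the default 95% per-CPU limit, a cpuset
 * whose current span holds -deadline tasks totalling ~2.5 CPUs worth of
 * bandwidth can be shrunk to a 3-CPU trial mask (2.5 < 3 * 0.95) but not to
 * a 2-CPU one (2.5 > 2 * 0.95).
 */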
2918
2919bool dl_cpu_busy(unsigned int cpu)
2920{
2921	unsigned long flags, cap;
2922	struct dl_bw *dl_b;
2923	bool overflow;
2924
2925	rcu_read_lock_sched();
2926	dl_b = dl_bw_of(cpu);
2927	raw_spin_lock_irqsave(&dl_b->lock, flags);
2928	cap = dl_bw_capacity(cpu);
2929	overflow = __dl_overflow(dl_b, cap, 0, 0);
2930	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2931	rcu_read_unlock_sched();
2932
2933	return overflow;
2934}
2935#endif
2936
2937#ifdef CONFIG_SCHED_DEBUG
2938void print_dl_stats(struct seq_file *m, int cpu)
2939{
2940	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2941}
2942#endif /* CONFIG_SCHED_DEBUG */