v3.15
   1/*
   2 * Deadline Scheduling Class (SCHED_DEADLINE)
   3 *
   4 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
   5 *
   6 * Tasks that periodically execute their instances for less than their
   7 * runtime won't miss any of their deadlines.
   8 * Tasks that are not periodic or sporadic or that try to execute more
   9 * than their reserved bandwidth will be slowed down (and may potentially
  10 * miss some of their deadlines), and won't affect any other task.
  11 *
  12 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
  13 *                    Juri Lelli <juri.lelli@gmail.com>,
  14 *                    Michael Trimarchi <michael@amarulasolutions.com>,
  15 *                    Fabio Checconi <fchecconi@gmail.com>
  16 */
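/*
 * Editorial aside, not part of the original file: a minimal userspace
 * sketch, in the spirit of Documentation/scheduler/sched-deadline.txt,
 * of how a thread asks for this scheduling class. The 10ms/30ms values
 * are arbitrary illustration numbers (10ms of runtime every 30ms period,
 * with the relative deadline equal to the period); error handling is
 * omitted.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,
 *		.sched_deadline	= 30 * 1000 * 1000,
 *		.sched_period	= 30 * 1000 * 1000,
 *	};
 *
 *	ret = syscall(__NR_sched_setattr, 0, &attr, 0);
 */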
  17#include "sched.h"
  18
  19#include <linux/slab.h>
  20
  21struct dl_bandwidth def_dl_bandwidth;
  22
  23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
  24{
  25	return container_of(dl_se, struct task_struct, dl);
  26}
  27
  28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
  29{
  30	return container_of(dl_rq, struct rq, dl);
  31}
  32
  33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
  34{
  35	struct task_struct *p = dl_task_of(dl_se);
  36	struct rq *rq = task_rq(p);
  37
  38	return &rq->dl;
  39}
  40
  41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
  42{
  43	return !RB_EMPTY_NODE(&dl_se->rb_node);
  44}
  45
  46static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
  47{
  48	struct sched_dl_entity *dl_se = &p->dl;
  49
  50	return dl_rq->rb_leftmost == &dl_se->rb_node;
  51}
  52
  53void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
  54{
  55	raw_spin_lock_init(&dl_b->dl_runtime_lock);
  56	dl_b->dl_period = period;
  57	dl_b->dl_runtime = runtime;
  58}
  59
  60extern unsigned long to_ratio(u64 period, u64 runtime);
  61
  62void init_dl_bw(struct dl_bw *dl_b)
  63{
  64	raw_spin_lock_init(&dl_b->lock);
  65	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
  66	if (global_rt_runtime() == RUNTIME_INF)
  67		dl_b->bw = -1;
  68	else
  69		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
  70	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
  71	dl_b->total_bw = 0;
  72}
  73
  74void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
  75{
  76	dl_rq->rb_root = RB_ROOT;
  77
  78#ifdef CONFIG_SMP
  79	/* zero means no -deadline tasks */
  80	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
  81
  82	dl_rq->dl_nr_migratory = 0;
  83	dl_rq->overloaded = 0;
  84	dl_rq->pushable_dl_tasks_root = RB_ROOT;
  85#else
  86	init_dl_bw(&dl_rq->dl_bw);
  87#endif
  88}
  89
  90#ifdef CONFIG_SMP
  91
  92static inline int dl_overloaded(struct rq *rq)
  93{
  94	return atomic_read(&rq->rd->dlo_count);
  95}
  96
  97static inline void dl_set_overload(struct rq *rq)
  98{
  99	if (!rq->online)
 100		return;
 101
 102	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
 103	/*
 104	 * Must be visible before the overload count is
 105	 * set (as in sched_rt.c).
 106	 *
 107	 * Matched by the barrier in pull_dl_task().
 108	 */
 109	smp_wmb();
 110	atomic_inc(&rq->rd->dlo_count);
 111}
 112
 113static inline void dl_clear_overload(struct rq *rq)
 114{
 115	if (!rq->online)
 116		return;
 117
 118	atomic_dec(&rq->rd->dlo_count);
 119	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
 120}
 121
 122static void update_dl_migration(struct dl_rq *dl_rq)
 123{
 124	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 125		if (!dl_rq->overloaded) {
 126			dl_set_overload(rq_of_dl_rq(dl_rq));
 127			dl_rq->overloaded = 1;
 128		}
 129	} else if (dl_rq->overloaded) {
 130		dl_clear_overload(rq_of_dl_rq(dl_rq));
 131		dl_rq->overloaded = 0;
 132	}
 133}
 134
 135static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 136{
 137	struct task_struct *p = dl_task_of(dl_se);
 138
 139	if (p->nr_cpus_allowed > 1)
 140		dl_rq->dl_nr_migratory++;
 141
 142	update_dl_migration(dl_rq);
 143}
 144
 145static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 146{
 147	struct task_struct *p = dl_task_of(dl_se);
 148
 149	if (p->nr_cpus_allowed > 1)
 150		dl_rq->dl_nr_migratory--;
 151
 152	update_dl_migration(dl_rq);
 153}
 154
 155/*
 156 * The list of pushable -deadline tasks is not a plist, like in
 157 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 158 */
 159static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 160{
 161	struct dl_rq *dl_rq = &rq->dl;
 162	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
 163	struct rb_node *parent = NULL;
 164	struct task_struct *entry;
 165	int leftmost = 1;
 166
 167	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
 168
 169	while (*link) {
 170		parent = *link;
 171		entry = rb_entry(parent, struct task_struct,
 172				 pushable_dl_tasks);
 173		if (dl_entity_preempt(&p->dl, &entry->dl))
 174			link = &parent->rb_left;
 175		else {
 176			link = &parent->rb_right;
 177			leftmost = 0;
 178		}
 179	}
 180
 181	if (leftmost)
 182		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
 183
 184	rb_link_node(&p->pushable_dl_tasks, parent, link);
 185	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 186}
 187
 188static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 189{
 190	struct dl_rq *dl_rq = &rq->dl;
 191
 192	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
 193		return;
 194
 195	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
 196		struct rb_node *next_node;
 197
 198		next_node = rb_next(&p->pushable_dl_tasks);
 199		dl_rq->pushable_dl_tasks_leftmost = next_node;
 200	}
 201
 202	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 203	RB_CLEAR_NODE(&p->pushable_dl_tasks);
 204}
 205
 206static inline int has_pushable_dl_tasks(struct rq *rq)
 207{
 208	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
 209}
 210
 211static int push_dl_task(struct rq *rq);
 212
 213static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 214{
 215	return dl_task(prev);
 216}
 217
 218static inline void set_post_schedule(struct rq *rq)
 219{
 220	rq->post_schedule = has_pushable_dl_tasks(rq);
 221}
 222
 223#else
 224
 225static inline
 226void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 227{
 228}
 229
 230static inline
 231void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 232{
 233}
 234
 235static inline
 236void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 237{
 238}
 239
 240static inline
 241void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 242{
 243}
 244
 245static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 246{
 247	return false;
 248}
 249
 250static inline int pull_dl_task(struct rq *rq)
 251{
 252	return 0;
 253}
 254
 255static inline void set_post_schedule(struct rq *rq)
 256{
 257}
 258#endif /* CONFIG_SMP */
 259
 260static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 261static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 262static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
 263				  int flags);
 264
 265/*
 266 * We are being explicitly informed that a new instance is starting,
 267 * and this means that:
 268 *  - the absolute deadline of the entity has to be placed at
 269 *    current time + relative deadline;
 270 *  - the runtime of the entity has to be set to the maximum value.
 271 *
 272 * The capability of specifying such an event is useful whenever a -deadline
 273 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 274 * one, and to (try to!) reconcile itself with its own scheduling
 275 * parameters.
 276 */
 277static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
 278				       struct sched_dl_entity *pi_se)
 279{
 280	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 281	struct rq *rq = rq_of_dl_rq(dl_rq);
 282
 283	WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
 284
 285	/*
 286	 * We use the regular wall clock time to set deadlines in the
 287	 * future; in fact, we must consider execution overheads (time
 288	 * spent on hardirq context, etc.).
 289	 */
 290	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 291	dl_se->runtime = pi_se->dl_runtime;
 292	dl_se->dl_new = 0;
 293}
 294
 295/*
 296 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 297 * possibility of an entity lasting more than what it declared, and thus
 298 * exhausting its runtime.
 299 *
 300 * Here we are interested in making runtime overrun possible, but we do
 301 * not want an entity which is misbehaving to affect the scheduling of all
 302 * other entities.
 303 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 304 * is used, in order to confine each entity within its own bandwidth.
 305 *
 306 * This function deals exactly with that, and ensures that when the runtime
 307 * of an entity is replenished, its deadline is also postponed. That ensures
 308 * the overrunning entity can't interfere with other entities in the system
 309 * and can't make them miss their deadlines. Reasons why this kind of overrun
 310 * could happen are, typically, an entity voluntarily trying to exceed its
 311 * runtime, or having underestimated it during sched_setscheduler_ex().
 312 */
 313static void replenish_dl_entity(struct sched_dl_entity *dl_se,
 314				struct sched_dl_entity *pi_se)
 315{
 316	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 317	struct rq *rq = rq_of_dl_rq(dl_rq);
 318
 319	BUG_ON(pi_se->dl_runtime <= 0);
 320
 321	/*
 322	 * This could be the case for a !-dl task that is boosted.
 323	 * Just go with full inherited parameters.
 324	 */
 325	if (dl_se->dl_deadline == 0) {
 326		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 327		dl_se->runtime = pi_se->dl_runtime;
 328	}
 329
 330	/*
 331	 * We keep moving the deadline away until we get some
 332	 * available runtime for the entity. This ensures correct
 333	 * handling of situations where the runtime overrun is
 334	 * arbitrarily large.
 335	 */
 336	while (dl_se->runtime <= 0) {
 337		dl_se->deadline += pi_se->dl_period;
 338		dl_se->runtime += pi_se->dl_runtime;
 339	}
 340
 341	/*
 342	 * At this point, the deadline really should be "in
 343	 * the future" with respect to rq->clock. If it's
 344	 * not, we are, for some reason, lagging too much!
 345	 * Anyway, after having warned userspace about that,
 346	 * we still try to keep the things running by
 347	 * resetting the deadline and the budget of the
 348	 * entity.
 349	 */
 350	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
 351		static bool lag_once = false;
 352
 353		if (!lag_once) {
 354			lag_once = true;
 355			printk_sched("sched: DL replenish lagged too much\n");
 356		}
 357		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 358		dl_se->runtime = pi_se->dl_runtime;
 359	}
 360}
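/*
 * Editorial note, purely illustrative numbers: with dl_runtime = 5ms and
 * dl_period = 20ms, an entity that wakes up with runtime = -7ms (it
 * overran by 7ms) needs two iterations of the loop above. After the
 * first, runtime = -2ms and the deadline has moved one period; after the
 * second, runtime = +3ms and the deadline has moved 40ms in total. The
 * overrun is paid back by postponing the deadline, which keeps the
 * entity within its declared bandwidth.
 */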
 361
 362/*
 363 * Here we check if --at time t-- an entity (which is probably being
 364 * [re]activated or, in general, enqueued) can use its remaining runtime
 365 * and its current deadline _without_ exceeding the bandwidth it is
 366 * assigned (function returns true if it can't). We are in fact applying
 367 * one of the CBS rules: when a task wakes up, if the residual runtime
 368 * over residual deadline fits within the allocated bandwidth, then we
 369 * can keep the current (absolute) deadline and residual budget without
 370 * disrupting the schedulability of the system. Otherwise, we should
 371 * refill the runtime and set the deadline a period in the future,
 372 * because keeping the current (absolute) deadline of the task would
 373 * result in breaking guarantees promised to other tasks (refer to
 374 * Documentation/scheduler/sched-deadline.txt for more information).
 375 *
 376 * This function returns true if:
 377 *
 378 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 379 *
 380 * IOW we can't recycle current parameters.
 381 *
 382 * Notice that the bandwidth check is done against the period. For
 383 * tasks with deadline equal to period this is the same as using
 384 * dl_deadline instead of dl_period in the equation above.
 385 */
 386static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 387			       struct sched_dl_entity *pi_se, u64 t)
 388{
 389	u64 left, right;
 390
 391	/*
 392	 * left and right are the two sides of the equation above,
 393	 * after a bit of shuffling to use multiplications instead
 394	 * of divisions.
 395	 *
 396	 * Note that none of the time values involved in the two
 397	 * multiplications are absolute: dl_deadline and dl_runtime
 398	 * are the relative deadline and the maximum runtime of each
 399	 * instance, runtime is the runtime left for the last instance
 400	 * and (deadline - t), since t is rq->clock, is the time left
 401	 * to the (absolute) deadline. Even if overflowing the u64 type
 402	 * is very unlikely to occur in both cases, here we scale down
 403	 * as we want to avoid that risk at all. Scaling down by 10
 404	 * means that we reduce granularity to 1us. We are fine with it,
 405	 * since this is only a true/false check and, anyway, thinking
 406	 * of anything below microsecond resolution is actually fiction
 407	 * (but still we want to give the user that illusion >;).
 408	 */
 409	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
 410	right = ((dl_se->deadline - t) >> DL_SCALE) *
 411		(pi_se->dl_runtime >> DL_SCALE);
 412
 413	return dl_time_before(right, left);
 414}
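/*
 * Editorial note, purely illustrative numbers: a task with
 * dl_runtime = 10ms and dl_period = 100ms has a reserved bandwidth of
 * 0.1. If it wakes up with 6ms of residual runtime and 30ms left to its
 * old absolute deadline, it would need 6/30 = 0.2 of a CPU, which is
 * more than 0.1, so the function returns true and the caller refills the
 * runtime and sets a fresh deadline. With 6ms left over 80ms instead,
 * 6/80 = 0.075 <= 0.1 and the old deadline and runtime can be kept.
 */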
 415
 416/*
 417 * When a -deadline entity is queued back on the runqueue, its runtime and
 418 * deadline might need updating.
 419 *
 420 * The policy here is that we update the deadline of the entity only if:
 421 *  - the current deadline is in the past,
 422 *  - using the remaining runtime with the current deadline would make
 423 *    the entity exceed its bandwidth.
 424 */
 425static void update_dl_entity(struct sched_dl_entity *dl_se,
 426			     struct sched_dl_entity *pi_se)
 427{
 428	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 429	struct rq *rq = rq_of_dl_rq(dl_rq);
 430
 431	/*
 432	 * The arrival of a new instance needs special treatment, i.e.,
 433	 * the actual scheduling parameters have to be "renewed".
 434	 */
 435	if (dl_se->dl_new) {
 436		setup_new_dl_entity(dl_se, pi_se);
 437		return;
 438	}
 439
 440	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 441	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
 442		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 443		dl_se->runtime = pi_se->dl_runtime;
 444	}
 445}
 446
 447/*
 448 * If the entity depleted all its runtime, and if we want it to sleep
 449 * while waiting for some new execution time to become available, we
 450 * set the bandwidth enforcement timer to the replenishment instant
 451 * and try to activate it.
 452 *
 453 * Notice that it is important for the caller to know if the timer
 454 * actually started or not (i.e., the replenishment instant is in
 455 * the future or in the past).
 456 */
 457static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
 458{
 459	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 460	struct rq *rq = rq_of_dl_rq(dl_rq);
 461	ktime_t now, act;
 462	ktime_t soft, hard;
 463	unsigned long range;
 464	s64 delta;
 465
 466	if (boosted)
 467		return 0;
 468	/*
 469	 * We want the timer to fire at the deadline, but considering
 470	 * that it is actually coming from rq->clock and not from
 471	 * hrtimer's time base reading.
 472	 */
 473	act = ns_to_ktime(dl_se->deadline);
 474	now = hrtimer_cb_get_time(&dl_se->dl_timer);
 475	delta = ktime_to_ns(now) - rq_clock(rq);
 476	act = ktime_add_ns(act, delta);
 477
 478	/*
 479	 * If the expiry time already passed, e.g., because the value
 480	 * chosen as the deadline is too small, don't even try to
 481	 * start the timer in the past!
 482	 */
 483	if (ktime_us_delta(act, now) < 0)
 484		return 0;
 485
 486	hrtimer_set_expires(&dl_se->dl_timer, act);
 487
 488	soft = hrtimer_get_softexpires(&dl_se->dl_timer);
 489	hard = hrtimer_get_expires(&dl_se->dl_timer);
 490	range = ktime_to_ns(ktime_sub(hard, soft));
 491	__hrtimer_start_range_ns(&dl_se->dl_timer, soft,
 492				 range, HRTIMER_MODE_ABS, 0);
 493
 494	return hrtimer_active(&dl_se->dl_timer);
 495}
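/*
 * Editorial note on the clock-base adjustment above, with made-up
 * numbers: the deadline is expressed in rq_clock() time while the
 * hrtimer uses its own base. If rq_clock() reads 1000us, the hrtimer
 * base reads 1020us and the deadline is 1500us, then delta = 20us and
 * the timer is armed at act = 1520us in hrtimer time, i.e. 500us from
 * now, exactly the distance to the deadline in rq_clock() terms.
 */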
 496
 497/*
 498 * This is the bandwidth enforcement timer callback. If here, we know
 499 * a task is not on its dl_rq, since the fact that the timer was running
 500 * means the task is throttled and needs a runtime replenishment.
 501 *
 502 * However, what we actually do depends on whether the task is still
 503 * active (i.e., it is on its rq) or has been removed from there by a call
 504 * to dequeue_task_dl(). In the former case we must issue the runtime
 505 * replenishment and add the task back to the dl_rq; in the latter, we just
 506 * do nothing but clearing dl_throttled, so that runtime and deadline
 507 * updating (and the queueing back to dl_rq) will be done by the
 508 * next call to enqueue_task_dl().
 509 */
 510static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 511{
 512	struct sched_dl_entity *dl_se = container_of(timer,
 513						     struct sched_dl_entity,
 514						     dl_timer);
 515	struct task_struct *p = dl_task_of(dl_se);
 516	struct rq *rq;
 517again:
 518	rq = task_rq(p);
 519	raw_spin_lock(&rq->lock);
 520
 521	if (rq != task_rq(p)) {
 522		/* Task was moved, retrying. */
 523		raw_spin_unlock(&rq->lock);
 524		goto again;
 525	}
 526
 527	/*
 528	 * We need to take care of possible races here. In fact, the
 529	 * task might have changed its scheduling policy to something
 530	 * different from SCHED_DEADLINE or changed its reservation
 531	 * parameters (through sched_setscheduler()).
 532	 */
 533	if (!dl_task(p) || dl_se->dl_new)
 534		goto unlock;
 535
 536	sched_clock_tick();
 537	update_rq_clock(rq);
 538	dl_se->dl_throttled = 0;
 539	dl_se->dl_yielded = 0;
 540	if (p->on_rq) {
 541		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
 542		if (task_has_dl_policy(rq->curr))
 543			check_preempt_curr_dl(rq, p, 0);
 544		else
 545			resched_task(rq->curr);
 546#ifdef CONFIG_SMP
 547		/*
 548		 * Queueing this task back might have overloaded rq,
 549		 * check if we need to kick someone away.
 550		 */
 551		if (has_pushable_dl_tasks(rq))
 552			push_dl_task(rq);
 553#endif
 554	}
 555unlock:
 556	raw_spin_unlock(&rq->lock);
 557
 558	return HRTIMER_NORESTART;
 559}
 560
 561void init_dl_task_timer(struct sched_dl_entity *dl_se)
 562{
 563	struct hrtimer *timer = &dl_se->dl_timer;
 564
 565	if (hrtimer_active(timer)) {
 566		hrtimer_try_to_cancel(timer);
 567		return;
 568	}
 569
 570	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 571	timer->function = dl_task_timer;
 572}
 573
 574static
 575int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 576{
 577	int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
 578	int rorun = dl_se->runtime <= 0;
 579
 580	if (!rorun && !dmiss)
 581		return 0;
 582
 583	/*
 584	 * If we are beyond our current deadline and we are still
 585	 * executing, then we have already used some of the runtime of
 586	 * the next instance. Thus, if we do not account that, we are
 587	 * stealing bandwidth from the system at each deadline miss!
 588	 */
 589	if (dmiss) {
 590		dl_se->runtime = rorun ? dl_se->runtime : 0;
 591		dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
 592	}
 593
 594	return 1;
 595}
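/*
 * Editorial note, purely illustrative numbers: suppose the entity still
 * has 1ms of runtime left (rorun is false) but rq_clock() is already 2ms
 * past its deadline (dmiss is true). The code above first discards the
 * leftover 1ms and then charges the 2ms executed beyond the deadline,
 * leaving runtime = -2ms. The next replenishment must pay that debt
 * back, so the bandwidth consumed across the deadline miss is not lost
 * from the accounting.
 */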
 596
 597extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
 598
 599/*
 600 * Update the current task's runtime statistics (provided it is still
 601 * a -deadline task and has not been removed from the dl_rq).
 602 */
 603static void update_curr_dl(struct rq *rq)
 604{
 605	struct task_struct *curr = rq->curr;
 606	struct sched_dl_entity *dl_se = &curr->dl;
 607	u64 delta_exec;
 608
 609	if (!dl_task(curr) || !on_dl_rq(dl_se))
 610		return;
 611
 612	/*
 613	 * Consumed budget is computed considering the time as
 614	 * observed by schedulable tasks (excluding time spent
 615	 * in hardirq context, etc.). Deadlines are instead
 616	 * computed using hard walltime. This seems to be the more
 617	 * natural solution, but the full ramifications of this
 618	 * approach need further study.
 619	 */
 620	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
 621	if (unlikely((s64)delta_exec <= 0))
 622		return;
 623
 624	schedstat_set(curr->se.statistics.exec_max,
 625		      max(curr->se.statistics.exec_max, delta_exec));
 626
 627	curr->se.sum_exec_runtime += delta_exec;
 628	account_group_exec_runtime(curr, delta_exec);
 629
 630	curr->se.exec_start = rq_clock_task(rq);
 631	cpuacct_charge(curr, delta_exec);
 632
 633	sched_rt_avg_update(rq, delta_exec);
 634
 635	dl_se->runtime -= delta_exec;
 636	if (dl_runtime_exceeded(rq, dl_se)) {
 637		__dequeue_task_dl(rq, curr, 0);
 638		if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
 639			dl_se->dl_throttled = 1;
 640		else
 641			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
 642
 643		if (!is_leftmost(curr, &rq->dl))
 644			resched_task(curr);
 645	}
 646
 647	/*
 648	 * Because -- for now -- we share the rt bandwidth, we need to
 649	 * account our runtime there too, otherwise actual rt tasks
 650	 * would be able to exceed the shared quota.
 651	 *
 652	 * Account to the root rt group for now.
 653	 *
 654	 * The solution we're working towards is having the RT groups scheduled
 655	 * using deadline servers -- however there's a few nasties to figure
 656	 * out before that can happen.
 657	 */
 658	if (rt_bandwidth_enabled()) {
 659		struct rt_rq *rt_rq = &rq->rt;
 660
 661		raw_spin_lock(&rt_rq->rt_runtime_lock);
 662		/*
 663		 * We'll let actual RT tasks worry about the overflow here, we
 664		 * have our own CBS to keep us inline; only account when RT
 665		 * bandwidth is relevant.
 666		 */
 667		if (sched_rt_bandwidth_account(rt_rq))
 668			rt_rq->rt_time += delta_exec;
 669		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 670	}
 671}
 672
 673#ifdef CONFIG_SMP
 674
 675static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
 676
 677static inline u64 next_deadline(struct rq *rq)
 678{
 679	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
 680
 681	if (next && dl_prio(next->prio))
 682		return next->dl.deadline;
 683	else
 684		return 0;
 685}
 686
 687static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 688{
 689	struct rq *rq = rq_of_dl_rq(dl_rq);
 690
 691	if (dl_rq->earliest_dl.curr == 0 ||
 692	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
 693		/*
 694		 * If the dl_rq had no -deadline tasks, or if the new task
 695		 * has a shorter deadline than the current one on dl_rq, we
 696		 * know that the previous earliest becomes our next earliest,
 697		 * as the new task becomes the earliest itself.
 698		 */
 699		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
 700		dl_rq->earliest_dl.curr = deadline;
 701		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
 702	} else if (dl_rq->earliest_dl.next == 0 ||
 703		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
 704		/*
 705		 * On the other hand, if the new -deadline task has a
 706		 * later deadline than the earliest one on dl_rq, but
 707		 * it is earlier than the next (if any), we must
 708		 * recompute the next-earliest.
 709		 */
 710		dl_rq->earliest_dl.next = next_deadline(rq);
 711	}
 712}
 713
 714static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 715{
 716	struct rq *rq = rq_of_dl_rq(dl_rq);
 717
 718	/*
 719	 * Since we may have removed our earliest (and/or next earliest)
 720	 * task we must recompute them.
 721	 */
 722	if (!dl_rq->dl_nr_running) {
 723		dl_rq->earliest_dl.curr = 0;
 724		dl_rq->earliest_dl.next = 0;
 725		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
 726	} else {
 727		struct rb_node *leftmost = dl_rq->rb_leftmost;
 728		struct sched_dl_entity *entry;
 729
 730		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
 731		dl_rq->earliest_dl.curr = entry->deadline;
 732		dl_rq->earliest_dl.next = next_deadline(rq);
 733		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
 734	}
 735}
 736
 737#else
 738
 739static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
 740static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
 741
 742#endif /* CONFIG_SMP */
 743
 744static inline
 745void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 746{
 747	int prio = dl_task_of(dl_se)->prio;
 748	u64 deadline = dl_se->deadline;
 749
 750	WARN_ON(!dl_prio(prio));
 751	dl_rq->dl_nr_running++;
 752	inc_nr_running(rq_of_dl_rq(dl_rq));
 753
 754	inc_dl_deadline(dl_rq, deadline);
 755	inc_dl_migration(dl_se, dl_rq);
 756}
 757
 758static inline
 759void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 760{
 761	int prio = dl_task_of(dl_se)->prio;
 762
 763	WARN_ON(!dl_prio(prio));
 764	WARN_ON(!dl_rq->dl_nr_running);
 765	dl_rq->dl_nr_running--;
 766	dec_nr_running(rq_of_dl_rq(dl_rq));
 767
 768	dec_dl_deadline(dl_rq, dl_se->deadline);
 769	dec_dl_migration(dl_se, dl_rq);
 770}
 771
 772static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
 773{
 774	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 775	struct rb_node **link = &dl_rq->rb_root.rb_node;
 776	struct rb_node *parent = NULL;
 777	struct sched_dl_entity *entry;
 778	int leftmost = 1;
 779
 780	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
 781
 782	while (*link) {
 783		parent = *link;
 784		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
 785		if (dl_time_before(dl_se->deadline, entry->deadline))
 786			link = &parent->rb_left;
 787		else {
 788			link = &parent->rb_right;
 789			leftmost = 0;
 790		}
 791	}
 792
 793	if (leftmost)
 794		dl_rq->rb_leftmost = &dl_se->rb_node;
 795
 796	rb_link_node(&dl_se->rb_node, parent, link);
 797	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
 798
 799	inc_dl_tasks(dl_se, dl_rq);
 800}
 801
 802static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
 803{
 804	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 805
 806	if (RB_EMPTY_NODE(&dl_se->rb_node))
 807		return;
 808
 809	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
 810		struct rb_node *next_node;
 811
 812		next_node = rb_next(&dl_se->rb_node);
 813		dl_rq->rb_leftmost = next_node;
 814	}
 815
 816	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
 817	RB_CLEAR_NODE(&dl_se->rb_node);
 818
 819	dec_dl_tasks(dl_se, dl_rq);
 820}
 821
 822static void
 823enqueue_dl_entity(struct sched_dl_entity *dl_se,
 824		  struct sched_dl_entity *pi_se, int flags)
 825{
 826	BUG_ON(on_dl_rq(dl_se));
 827
 828	/*
 829	 * If this is a wakeup or a new instance, the scheduling
 830	 * parameters of the task might need updating. Otherwise,
 831	 * we want a replenishment of its runtime.
 832	 */
 833	if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
 834		replenish_dl_entity(dl_se, pi_se);
 835	else
 836		update_dl_entity(dl_se, pi_se);
 837
 838	__enqueue_dl_entity(dl_se);
 839}
 840
 841static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
 842{
 843	__dequeue_dl_entity(dl_se);
 844}
 845
 846static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 847{
 848	struct task_struct *pi_task = rt_mutex_get_top_task(p);
 849	struct sched_dl_entity *pi_se = &p->dl;
 850
 851	/*
 852	 * Use the scheduling parameters of the top pi-waiter
 853	 * task if we have one and its (relative) deadline is
 854	 * smaller than our one... OTW we keep our runtime and
 855	 * deadline.
 856	 */
 857	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio))
 858		pi_se = &pi_task->dl;
 859
 860	/*
 861	 * If p is throttled, we do nothing. In fact, if it exhausted
 862	 * its budget it needs a replenishment and, since it now is on
 863	 * its rq, the bandwidth timer callback (which clearly has not
 864	 * run yet) will take care of this.
 865	 */
 866	if (p->dl.dl_throttled)
 867		return;
 868
 869	enqueue_dl_entity(&p->dl, pi_se, flags);
 870
 871	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 872		enqueue_pushable_dl_task(rq, p);
 873}
 874
 875static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 876{
 877	dequeue_dl_entity(&p->dl);
 878	dequeue_pushable_dl_task(rq, p);
 879}
 880
 881static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 882{
 883	update_curr_dl(rq);
 884	__dequeue_task_dl(rq, p, flags);
 885}
 886
 887/*
 888 * Yield task semantic for -deadline tasks is:
 889 *
 890 *   get off from the CPU until our next instance, with
 891 *   a new runtime. This is of little use now, since we
 892 *   don't have a bandwidth reclaiming mechanism. Anyway,
 893 *   bandwidth reclaiming is planned for the future, and
 894 *   yield_task_dl will indicate that some spare budget
 895 *   is available for other task instances to use.
 896 */
 897static void yield_task_dl(struct rq *rq)
 898{
 899	struct task_struct *p = rq->curr;
 900
 901	/*
 902	 * We make the task go to sleep until its current deadline by
 903	 * forcing its runtime to zero. This way, update_curr_dl() stops
 904	 * it and the bandwidth timer will wake it up and will give it
 905	 * new scheduling parameters (thanks to dl_yielded=1).
 906	 */
 907	if (p->dl.runtime > 0) {
 908		rq->curr->dl.dl_yielded = 1;
 909		p->dl.runtime = 0;
 910	}
 911	update_curr_dl(rq);
 912}
 913
 914#ifdef CONFIG_SMP
 915
 916static int find_later_rq(struct task_struct *task);
 917
 918static int
 919select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
 920{
 921	struct task_struct *curr;
 922	struct rq *rq;
 923
 924	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
 925		goto out;
 926
 927	rq = cpu_rq(cpu);
 928
 929	rcu_read_lock();
 930	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
 931
 932	/*
 933	 * If we are dealing with a -deadline task, we must
 934	 * decide where to wake it up.
 935	 * If it has a later deadline and the current task
 936	 * on this rq can't move (provided the waking task
 937	 * can!) we prefer to send it somewhere else. On the
 938	 * other hand, if it has a shorter deadline, we
 939	 * try to make it stay here, it might be important.
 940	 */
 941	if (unlikely(dl_task(curr)) &&
 942	    (curr->nr_cpus_allowed < 2 ||
 943	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
 944	    (p->nr_cpus_allowed > 1)) {
 945		int target = find_later_rq(p);
 946
 947		if (target != -1)
 948			cpu = target;
 949	}
 950	rcu_read_unlock();
 951
 952out:
 953	return cpu;
 954}
 955
 956static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
 957{
 958	/*
 959	 * Current can't be migrated, useless to reschedule,
 960	 * let's hope p can move out.
 961	 */
 962	if (rq->curr->nr_cpus_allowed == 1 ||
 963	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
 964		return;
 965
 966	/*
 967	 * p is migratable, so let's not schedule it and
 968	 * see if it is pushed or pulled somewhere else.
 969	 */
 970	if (p->nr_cpus_allowed != 1 &&
 971	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
 972		return;
 973
 974	resched_task(rq->curr);
 975}
 976
 977static int pull_dl_task(struct rq *this_rq);
 978
 979#endif /* CONFIG_SMP */
 980
 981/*
 982 * Only called when both the current and waking task are -deadline
 983 * tasks.
 984 */
 985static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
 986				  int flags)
 987{
 988	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
 989		resched_task(rq->curr);
 990		return;
 991	}
 992
 993#ifdef CONFIG_SMP
 994	/*
 995	 * In the unlikely case current and p have the same deadline
 996	 * let us try to decide what's the best thing to do...
 997	 */
 998	if ((p->dl.deadline == rq->curr->dl.deadline) &&
 999	    !test_tsk_need_resched(rq->curr))
1000		check_preempt_equal_dl(rq, p);
1001#endif /* CONFIG_SMP */
1002}
1003
1004#ifdef CONFIG_SCHED_HRTICK
1005static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1006{
1007	s64 delta = p->dl.dl_runtime - p->dl.runtime;
1008
1009	if (delta > 10000)
1010		hrtick_start(rq, p->dl.runtime);
1011}
1012#endif
1013
1014static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1015						   struct dl_rq *dl_rq)
1016{
1017	struct rb_node *left = dl_rq->rb_leftmost;
1018
1019	if (!left)
1020		return NULL;
1021
1022	return rb_entry(left, struct sched_dl_entity, rb_node);
1023}
1024
1025struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
1026{
1027	struct sched_dl_entity *dl_se;
1028	struct task_struct *p;
1029	struct dl_rq *dl_rq;
1030
1031	dl_rq = &rq->dl;
1032
1033	if (need_pull_dl_task(rq, prev)) {
1034		pull_dl_task(rq);
1035		/*
1036		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1037		 * means a stop task can slip in, in which case we need to
1038		 * re-start task selection.
1039		 */
1040		if (rq->stop && rq->stop->on_rq)
1041			return RETRY_TASK;
1042	}
1043
1044	/*
1045	 * When prev is DL, we may throttle it in put_prev_task().
1046	 * So, we update time before we check for dl_nr_running.
1047	 */
1048	if (prev->sched_class == &dl_sched_class)
1049		update_curr_dl(rq);
1050
1051	if (unlikely(!dl_rq->dl_nr_running))
1052		return NULL;
1053
1054	put_prev_task(rq, prev);
1055
1056	dl_se = pick_next_dl_entity(rq, dl_rq);
1057	BUG_ON(!dl_se);
1058
1059	p = dl_task_of(dl_se);
1060	p->se.exec_start = rq_clock_task(rq);
1061
1062	/* Running task will never be pushed. */
1063	dequeue_pushable_dl_task(rq, p);
1064
1065#ifdef CONFIG_SCHED_HRTICK
1066	if (hrtick_enabled(rq))
1067		start_hrtick_dl(rq, p);
1068#endif
1069
1070	set_post_schedule(rq);
1071
1072	return p;
1073}
1074
1075static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1076{
1077	update_curr_dl(rq);
1078
1079	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1080		enqueue_pushable_dl_task(rq, p);
1081}
1082
1083static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1084{
1085	update_curr_dl(rq);
1086
1087#ifdef CONFIG_SCHED_HRTICK
1088	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
1089		start_hrtick_dl(rq, p);
1090#endif
1091}
1092
1093static void task_fork_dl(struct task_struct *p)
1094{
1095	/*
1096	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1097	 * sched_fork()
1098	 */
1099}
1100
1101static void task_dead_dl(struct task_struct *p)
1102{
1103	struct hrtimer *timer = &p->dl.dl_timer;
1104	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1105
1106	/*
1107	 * Since we are TASK_DEAD we won't slip out of the domain!
1108	 */
1109	raw_spin_lock_irq(&dl_b->lock);
1110	dl_b->total_bw -= p->dl.dl_bw;
1111	raw_spin_unlock_irq(&dl_b->lock);
1112
1113	hrtimer_cancel(timer);
1114}
1115
1116static void set_curr_task_dl(struct rq *rq)
1117{
1118	struct task_struct *p = rq->curr;
1119
1120	p->se.exec_start = rq_clock_task(rq);
1121
1122	/* You can't push away the running task */
1123	dequeue_pushable_dl_task(rq, p);
1124}
1125
1126#ifdef CONFIG_SMP
1127
1128/* Only try algorithms three times */
1129#define DL_MAX_TRIES 3
1130
1131static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1132{
1133	if (!task_running(rq, p) &&
1134	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
1135	    (p->nr_cpus_allowed > 1))
1136		return 1;
1137
1138	return 0;
1139}
1140
1141/* Returns the second earliest -deadline task, NULL otherwise */
1142static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
1143{
1144	struct rb_node *next_node = rq->dl.rb_leftmost;
1145	struct sched_dl_entity *dl_se;
1146	struct task_struct *p = NULL;
1147
1148next_node:
1149	next_node = rb_next(next_node);
1150	if (next_node) {
1151		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
1152		p = dl_task_of(dl_se);
1153
1154		if (pick_dl_task(rq, p, cpu))
1155			return p;
1156
1157		goto next_node;
1158	}
1159
1160	return NULL;
1161}
1162
1163static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1164
1165static int find_later_rq(struct task_struct *task)
1166{
1167	struct sched_domain *sd;
1168	struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl);
1169	int this_cpu = smp_processor_id();
1170	int best_cpu, cpu = task_cpu(task);
1171
1172	/* Make sure the mask is initialized first */
1173	if (unlikely(!later_mask))
1174		return -1;
1175
1176	if (task->nr_cpus_allowed == 1)
1177		return -1;
1178
1179	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1180			task, later_mask);
1181	if (best_cpu == -1)
1182		return -1;
1183
1184	/*
1185	 * If we are here, some target has been found,
1186	 * the most suitable of which is cached in best_cpu.
1187	 * This is, among the runqueues where the current tasks
1188	 * have later deadlines than the task's one, the rq
1189	 * with the latest possible one.
1190	 *
1191	 * Now we check how well this matches with task's
1192	 * affinity and system topology.
1193	 *
1194	 * The last cpu where the task ran is our first
1195	 * guess, since it is most likely cache-hot there.
1196	 */
1197	if (cpumask_test_cpu(cpu, later_mask))
1198		return cpu;
1199	/*
1200	 * Check if this_cpu is to be skipped (i.e., it is
1201	 * not in the mask) or not.
1202	 */
1203	if (!cpumask_test_cpu(this_cpu, later_mask))
1204		this_cpu = -1;
1205
1206	rcu_read_lock();
1207	for_each_domain(cpu, sd) {
1208		if (sd->flags & SD_WAKE_AFFINE) {
1209
1210			/*
1211			 * If possible, preempting this_cpu is
1212			 * cheaper than migrating.
1213			 */
1214			if (this_cpu != -1 &&
1215			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1216				rcu_read_unlock();
1217				return this_cpu;
1218			}
1219
1220			/*
1221			 * Last chance: if best_cpu is valid and is
1222			 * in the mask, that becomes our choice.
1223			 */
1224			if (best_cpu < nr_cpu_ids &&
1225			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1226				rcu_read_unlock();
1227				return best_cpu;
1228			}
1229		}
1230	}
1231	rcu_read_unlock();
1232
1233	/*
1234	 * At this point, all our guesses failed, we just return
1235	 * 'something', and let the caller sort the things out.
1236	 */
1237	if (this_cpu != -1)
1238		return this_cpu;
1239
1240	cpu = cpumask_any(later_mask);
1241	if (cpu < nr_cpu_ids)
1242		return cpu;
1243
1244	return -1;
1245}
1246
1247/* Locks the rq it finds */
1248static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1249{
1250	struct rq *later_rq = NULL;
1251	int tries;
1252	int cpu;
1253
1254	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1255		cpu = find_later_rq(task);
1256
1257		if ((cpu == -1) || (cpu == rq->cpu))
1258			break;
1259
1260		later_rq = cpu_rq(cpu);
1261
1262		/* Retry if something changed. */
1263		if (double_lock_balance(rq, later_rq)) {
1264			if (unlikely(task_rq(task) != rq ||
1265				     !cpumask_test_cpu(later_rq->cpu,
1266				                       &task->cpus_allowed) ||
1267				     task_running(rq, task) || !task->on_rq)) {
1268				double_unlock_balance(rq, later_rq);
1269				later_rq = NULL;
1270				break;
1271			}
1272		}
1273
1274		/*
1275		 * If the rq we found has no -deadline task, or
1276		 * its earliest one has a later deadline than our
1277		 * task, the rq is a good one.
1278		 */
1279		if (!later_rq->dl.dl_nr_running ||
1280		    dl_time_before(task->dl.deadline,
1281				   later_rq->dl.earliest_dl.curr))
1282			break;
1283
1284		/* Otherwise we try again. */
1285		double_unlock_balance(rq, later_rq);
1286		later_rq = NULL;
1287	}
1288
1289	return later_rq;
1290}
1291
1292static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1293{
1294	struct task_struct *p;
1295
1296	if (!has_pushable_dl_tasks(rq))
1297		return NULL;
1298
1299	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1300		     struct task_struct, pushable_dl_tasks);
1301
1302	BUG_ON(rq->cpu != task_cpu(p));
1303	BUG_ON(task_current(rq, p));
1304	BUG_ON(p->nr_cpus_allowed <= 1);
1305
1306	BUG_ON(!p->on_rq);
1307	BUG_ON(!dl_task(p));
1308
1309	return p;
1310}
1311
1312/*
1313 * See if the non-running -deadline tasks on this rq
1314 * can be sent to some other CPU where they can preempt
1315 * and start executing.
1316 */
1317static int push_dl_task(struct rq *rq)
1318{
1319	struct task_struct *next_task;
1320	struct rq *later_rq;
1321
1322	if (!rq->dl.overloaded)
1323		return 0;
1324
1325	next_task = pick_next_pushable_dl_task(rq);
1326	if (!next_task)
1327		return 0;
1328
1329retry:
1330	if (unlikely(next_task == rq->curr)) {
1331		WARN_ON(1);
1332		return 0;
1333	}
1334
1335	/*
1336	 * If next_task preempts rq->curr, and rq->curr
1337	 * can move away, it makes sense to just reschedule
1338	 * without going further in pushing next_task.
1339	 */
1340	if (dl_task(rq->curr) &&
1341	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1342	    rq->curr->nr_cpus_allowed > 1) {
1343		resched_task(rq->curr);
1344		return 0;
1345	}
1346
1347	/* We might release rq lock */
1348	get_task_struct(next_task);
1349
1350	/* Will lock the rq it'll find */
1351	later_rq = find_lock_later_rq(next_task, rq);
1352	if (!later_rq) {
1353		struct task_struct *task;
1354
1355		/*
1356		 * We must check all this again, since
1357		 * find_lock_later_rq releases rq->lock and it is
1358		 * then possible that next_task has migrated.
1359		 */
1360		task = pick_next_pushable_dl_task(rq);
1361		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1362			/*
1363			 * The task is still there. We don't try
1364			 * again, some other cpu will pull it when ready.
1365			 */
1366			dequeue_pushable_dl_task(rq, next_task);
1367			goto out;
1368		}
1369
1370		if (!task)
1371			/* No more tasks */
1372			goto out;
1373
1374		put_task_struct(next_task);
1375		next_task = task;
1376		goto retry;
1377	}
1378
1379	deactivate_task(rq, next_task, 0);
1380	set_task_cpu(next_task, later_rq->cpu);
1381	activate_task(later_rq, next_task, 0);
1382
1383	resched_task(later_rq->curr);
1384
1385	double_unlock_balance(rq, later_rq);
1386
1387out:
1388	put_task_struct(next_task);
1389
1390	return 1;
1391}
1392
1393static void push_dl_tasks(struct rq *rq)
1394{
1395	/* Terminates as it moves a -deadline task */
1396	while (push_dl_task(rq))
1397		;
1398}
1399
1400static int pull_dl_task(struct rq *this_rq)
1401{
1402	int this_cpu = this_rq->cpu, ret = 0, cpu;
1403	struct task_struct *p;
1404	struct rq *src_rq;
1405	u64 dmin = LONG_MAX;
1406
1407	if (likely(!dl_overloaded(this_rq)))
1408		return 0;
1409
1410	/*
1411	 * Match the barrier from dl_set_overloaded; this guarantees that if we
1412	 * see overloaded we must also see the dlo_mask bit.
1413	 */
1414	smp_rmb();
1415
1416	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1417		if (this_cpu == cpu)
1418			continue;
1419
1420		src_rq = cpu_rq(cpu);
1421
1422		/*
1423		 * It looks racy, and it is! However, as in sched_rt.c,
1424		 * we are fine with this.
1425		 */
1426		if (this_rq->dl.dl_nr_running &&
1427		    dl_time_before(this_rq->dl.earliest_dl.curr,
1428				   src_rq->dl.earliest_dl.next))
1429			continue;
1430
1431		/* Might drop this_rq->lock */
1432		double_lock_balance(this_rq, src_rq);
1433
1434		/*
1435		 * If there are no more pullable tasks on the
1436		 * rq, we're done with it.
1437		 */
1438		if (src_rq->dl.dl_nr_running <= 1)
1439			goto skip;
1440
1441		p = pick_next_earliest_dl_task(src_rq, this_cpu);
1442
1443		/*
1444		 * We found a task to be pulled if:
1445		 *  - it preempts our current (if there's one),
1446		 *  - it will preempt the last one we pulled (if any).
1447		 */
1448		if (p && dl_time_before(p->dl.deadline, dmin) &&
1449		    (!this_rq->dl.dl_nr_running ||
1450		     dl_time_before(p->dl.deadline,
1451				    this_rq->dl.earliest_dl.curr))) {
1452			WARN_ON(p == src_rq->curr);
1453			WARN_ON(!p->on_rq);
1454
1455			/*
1456			 * Then we pull iff p has actually an earlier
1457			 * deadline than the current task of its runqueue.
1458			 */
1459			if (dl_time_before(p->dl.deadline,
1460					   src_rq->curr->dl.deadline))
1461				goto skip;
1462
1463			ret = 1;
1464
1465			deactivate_task(src_rq, p, 0);
1466			set_task_cpu(p, this_cpu);
1467			activate_task(this_rq, p, 0);
1468			dmin = p->dl.deadline;
1469
1470			/* Is there any other task even earlier? */
1471		}
1472skip:
1473		double_unlock_balance(this_rq, src_rq);
1474	}
1475
1476	return ret;
1477}
1478
1479static void post_schedule_dl(struct rq *rq)
1480{
1481	push_dl_tasks(rq);
1482}
1483
1484/*
1485 * Since the task is not running and a reschedule is not going to happen
1486 * anytime soon on its runqueue, we try pushing it away now.
1487 */
1488static void task_woken_dl(struct rq *rq, struct task_struct *p)
1489{
1490	if (!task_running(rq, p) &&
1491	    !test_tsk_need_resched(rq->curr) &&
1492	    has_pushable_dl_tasks(rq) &&
1493	    p->nr_cpus_allowed > 1 &&
1494	    dl_task(rq->curr) &&
1495	    (rq->curr->nr_cpus_allowed < 2 ||
1496	     dl_entity_preempt(&rq->curr->dl, &p->dl))) {
1497		push_dl_tasks(rq);
1498	}
1499}
1500
1501static void set_cpus_allowed_dl(struct task_struct *p,
1502				const struct cpumask *new_mask)
1503{
1504	struct rq *rq;
1505	int weight;
1506
1507	BUG_ON(!dl_task(p));
1508
1509	/*
1510	 * Update only if the task is actually running (i.e.,
1511	 * it is on the rq AND it is not throttled).
1512	 */
1513	if (!on_dl_rq(&p->dl))
1514		return;
1515
1516	weight = cpumask_weight(new_mask);
1517
1518	/*
1519	 * Only update if the process changes its state with respect to
1520	 * whether it can migrate or not.
1521	 */
1522	if ((p->nr_cpus_allowed > 1) == (weight > 1))
1523		return;
1524
1525	rq = task_rq(p);
1526
1527	/*
1528	 * The process used to be able to migrate OR it can now migrate
1529	 */
1530	if (weight <= 1) {
1531		if (!task_current(rq, p))
1532			dequeue_pushable_dl_task(rq, p);
1533		BUG_ON(!rq->dl.dl_nr_migratory);
1534		rq->dl.dl_nr_migratory--;
1535	} else {
1536		if (!task_current(rq, p))
1537			enqueue_pushable_dl_task(rq, p);
1538		rq->dl.dl_nr_migratory++;
1539	}
1540
1541	update_dl_migration(&rq->dl);
1542}
1543
1544/* Assumes rq->lock is held */
1545static void rq_online_dl(struct rq *rq)
1546{
1547	if (rq->dl.overloaded)
1548		dl_set_overload(rq);
1549
1550	if (rq->dl.dl_nr_running > 0)
1551		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1552}
1553
1554/* Assumes rq->lock is held */
1555static void rq_offline_dl(struct rq *rq)
1556{
1557	if (rq->dl.overloaded)
1558		dl_clear_overload(rq);
1559
1560	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1561}
1562
1563void init_sched_dl_class(void)
1564{
1565	unsigned int i;
1566
1567	for_each_possible_cpu(i)
1568		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1569					GFP_KERNEL, cpu_to_node(i));
1570}
1571
1572#endif /* CONFIG_SMP */
1573
1574static void switched_from_dl(struct rq *rq, struct task_struct *p)
1575{
1576	if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
1577		hrtimer_try_to_cancel(&p->dl.dl_timer);
1578
1579#ifdef CONFIG_SMP
1580	/*
1581	 * Since this might be the only -deadline task on the rq,
1582	 * this is the right place to try to pull some other one
1583	 * from an overloaded cpu, if any.
1584	 */
1585	if (!rq->dl.dl_nr_running)
1586		pull_dl_task(rq);
1587#endif
1588}
1589
1590/*
1591 * When switching to -deadline, we may overload the rq, then
1592 * we try to push someone off, if possible.
1593 */
1594static void switched_to_dl(struct rq *rq, struct task_struct *p)
1595{
1596	int check_resched = 1;
1597
1598	/*
1599	 * If p is throttled, don't consider the possibility
1600	 * of preempting rq->curr, the check will be done right
1601	 * after its runtime will get replenished.
1602	 */
1603	if (unlikely(p->dl.dl_throttled))
1604		return;
1605
1606	if (p->on_rq && rq->curr != p) {
1607#ifdef CONFIG_SMP
1608		if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
1609			/* Only reschedule if pushing failed */
1610			check_resched = 0;
1611#endif /* CONFIG_SMP */
1612		if (check_resched && task_has_dl_policy(rq->curr))
1613			check_preempt_curr_dl(rq, p, 0);
1614	}
1615}
1616
1617/*
1618 * If the scheduling parameters of a -deadline task changed,
1619 * a push or pull operation might be needed.
1620 */
1621static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1622			    int oldprio)
1623{
1624	if (p->on_rq || rq->curr == p) {
1625#ifdef CONFIG_SMP
1626		/*
1627		 * This might be too much, but unfortunately
1628		 * we don't have the old deadline value, and
1629		 * we can't argue if the task is increasing
1630		 * or lowering its prio, so...
1631		 */
1632		if (!rq->dl.overloaded)
1633			pull_dl_task(rq);
1634
1635		/*
1636		 * If we now have an earlier deadline task than p,
1637		 * then reschedule, provided p is still on this
1638		 * runqueue.
1639		 */
1640		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
1641		    rq->curr == p)
1642			resched_task(p);
1643#else
1644		/*
1645		 * Again, we don't know if p has an earlier
1646		 * or later deadline, so let's blindly set a
1647		 * (maybe not needed) rescheduling point.
1648		 */
1649		resched_task(p);
1650#endif /* CONFIG_SMP */
1651	} else
1652		switched_to_dl(rq, p);
1653}
1654
1655const struct sched_class dl_sched_class = {
1656	.next			= &rt_sched_class,
1657	.enqueue_task		= enqueue_task_dl,
1658	.dequeue_task		= dequeue_task_dl,
1659	.yield_task		= yield_task_dl,
1660
1661	.check_preempt_curr	= check_preempt_curr_dl,
1662
1663	.pick_next_task		= pick_next_task_dl,
1664	.put_prev_task		= put_prev_task_dl,
1665
1666#ifdef CONFIG_SMP
1667	.select_task_rq		= select_task_rq_dl,
1668	.set_cpus_allowed       = set_cpus_allowed_dl,
1669	.rq_online              = rq_online_dl,
1670	.rq_offline             = rq_offline_dl,
1671	.post_schedule		= post_schedule_dl,
1672	.task_woken		= task_woken_dl,
1673#endif
1674
1675	.set_curr_task		= set_curr_task_dl,
1676	.task_tick		= task_tick_dl,
1677	.task_fork              = task_fork_dl,
1678	.task_dead		= task_dead_dl,
1679
1680	.prio_changed           = prio_changed_dl,
1681	.switched_from		= switched_from_dl,
1682	.switched_to		= switched_to_dl,
1683};
v4.6
   1/*
   2 * Deadline Scheduling Class (SCHED_DEADLINE)
   3 *
   4 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
   5 *
   6 * Tasks that periodically execute their instances for less than their
   7 * runtime won't miss any of their deadlines.
   8 * Tasks that are not periodic or sporadic or that try to execute more
   9 * than their reserved bandwidth will be slowed down (and may potentially
  10 * miss some of their deadlines), and won't affect any other task.
  11 *
  12 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
  13 *                    Juri Lelli <juri.lelli@gmail.com>,
  14 *                    Michael Trimarchi <michael@amarulasolutions.com>,
  15 *                    Fabio Checconi <fchecconi@gmail.com>
  16 */
  17#include "sched.h"
  18
  19#include <linux/slab.h>
  20
  21struct dl_bandwidth def_dl_bandwidth;
  22
  23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
  24{
  25	return container_of(dl_se, struct task_struct, dl);
  26}
  27
  28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
  29{
  30	return container_of(dl_rq, struct rq, dl);
  31}
  32
  33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
  34{
  35	struct task_struct *p = dl_task_of(dl_se);
  36	struct rq *rq = task_rq(p);
  37
  38	return &rq->dl;
  39}
  40
  41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
  42{
  43	return !RB_EMPTY_NODE(&dl_se->rb_node);
  44}
  45
  46static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
  47{
  48	struct sched_dl_entity *dl_se = &p->dl;
  49
  50	return dl_rq->rb_leftmost == &dl_se->rb_node;
  51}
  52
  53void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
  54{
  55	raw_spin_lock_init(&dl_b->dl_runtime_lock);
  56	dl_b->dl_period = period;
  57	dl_b->dl_runtime = runtime;
  58}
  59
  60void init_dl_bw(struct dl_bw *dl_b)
  61{
  62	raw_spin_lock_init(&dl_b->lock);
  63	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
  64	if (global_rt_runtime() == RUNTIME_INF)
  65		dl_b->bw = -1;
  66	else
  67		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
  68	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
  69	dl_b->total_bw = 0;
  70}
  71
  72void init_dl_rq(struct dl_rq *dl_rq)
  73{
  74	dl_rq->rb_root = RB_ROOT;
  75
  76#ifdef CONFIG_SMP
  77	/* zero means no -deadline tasks */
  78	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
  79
  80	dl_rq->dl_nr_migratory = 0;
  81	dl_rq->overloaded = 0;
  82	dl_rq->pushable_dl_tasks_root = RB_ROOT;
  83#else
  84	init_dl_bw(&dl_rq->dl_bw);
  85#endif
  86}
  87
  88#ifdef CONFIG_SMP
  89
  90static inline int dl_overloaded(struct rq *rq)
  91{
  92	return atomic_read(&rq->rd->dlo_count);
  93}
  94
  95static inline void dl_set_overload(struct rq *rq)
  96{
  97	if (!rq->online)
  98		return;
  99
 100	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
 101	/*
 102	 * Must be visible before the overload count is
 103	 * set (as in sched_rt.c).
 104	 *
 105	 * Matched by the barrier in pull_dl_task().
 106	 */
 107	smp_wmb();
 108	atomic_inc(&rq->rd->dlo_count);
 109}
 110
 111static inline void dl_clear_overload(struct rq *rq)
 112{
 113	if (!rq->online)
 114		return;
 115
 116	atomic_dec(&rq->rd->dlo_count);
 117	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
 118}
 119
 120static void update_dl_migration(struct dl_rq *dl_rq)
 121{
 122	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 123		if (!dl_rq->overloaded) {
 124			dl_set_overload(rq_of_dl_rq(dl_rq));
 125			dl_rq->overloaded = 1;
 126		}
 127	} else if (dl_rq->overloaded) {
 128		dl_clear_overload(rq_of_dl_rq(dl_rq));
 129		dl_rq->overloaded = 0;
 130	}
 131}
 132
 133static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 134{
 135	struct task_struct *p = dl_task_of(dl_se);
 136
 137	if (p->nr_cpus_allowed > 1)
 138		dl_rq->dl_nr_migratory++;
 139
 140	update_dl_migration(dl_rq);
 141}
 142
 143static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 144{
 145	struct task_struct *p = dl_task_of(dl_se);
 146
 147	if (p->nr_cpus_allowed > 1)
 148		dl_rq->dl_nr_migratory--;
 149
 150	update_dl_migration(dl_rq);
 151}
 152
 153/*
 154 * The list of pushable -deadline tasks is not a plist, like in
 155 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 156 */
 157static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 158{
 159	struct dl_rq *dl_rq = &rq->dl;
 160	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
 161	struct rb_node *parent = NULL;
 162	struct task_struct *entry;
 163	int leftmost = 1;
 164
 165	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
 166
 167	while (*link) {
 168		parent = *link;
 169		entry = rb_entry(parent, struct task_struct,
 170				 pushable_dl_tasks);
 171		if (dl_entity_preempt(&p->dl, &entry->dl))
 172			link = &parent->rb_left;
 173		else {
 174			link = &parent->rb_right;
 175			leftmost = 0;
 176		}
 177	}
 178
 179	if (leftmost) {
 180		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
 181		dl_rq->earliest_dl.next = p->dl.deadline;
 182	}
 183
 184	rb_link_node(&p->pushable_dl_tasks, parent, link);
 185	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 186}
 187
 188static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 189{
 190	struct dl_rq *dl_rq = &rq->dl;
 191
 192	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
 193		return;
 194
 195	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
 196		struct rb_node *next_node;
 197
 198		next_node = rb_next(&p->pushable_dl_tasks);
 199		dl_rq->pushable_dl_tasks_leftmost = next_node;
 200		if (next_node) {
 201			dl_rq->earliest_dl.next = rb_entry(next_node,
 202				struct task_struct, pushable_dl_tasks)->dl.deadline;
 203		}
 204	}
 205
 206	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 207	RB_CLEAR_NODE(&p->pushable_dl_tasks);
 208}
 209
 210static inline int has_pushable_dl_tasks(struct rq *rq)
 211{
 212	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
 213}
 214
 215static int push_dl_task(struct rq *rq);
 216
 217static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 218{
 219	return dl_task(prev);
 220}
 221
 222static DEFINE_PER_CPU(struct callback_head, dl_push_head);
 223static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
 224
 225static void push_dl_tasks(struct rq *);
 226static void pull_dl_task(struct rq *);
 227
 228static inline void queue_push_tasks(struct rq *rq)
 229{
 230	if (!has_pushable_dl_tasks(rq))
 231		return;
 232
 233	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
 234}
 235
 236static inline void queue_pull_task(struct rq *rq)
 237{
 238	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
 239}
 240
 241static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 242
 243static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 244{
 245	struct rq *later_rq = NULL;
 246	bool fallback = false;
 247
 248	later_rq = find_lock_later_rq(p, rq);
 249
 250	if (!later_rq) {
 251		int cpu;
 252
 253		/*
 254		 * If we cannot preempt any rq, fall back to pick any
 255		 * online cpu.
 256		 */
 257		fallback = true;
 258		cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
 259		if (cpu >= nr_cpu_ids) {
 260			/*
 261			 * Failed to find any suitable cpu.
 262			 * The task will never come back!
 263			 */
 264			BUG_ON(dl_bandwidth_enabled());
 265
 266			/*
 267			 * If admission control is disabled we
 268			 * try a little harder to let the task
 269			 * run.
 270			 */
 271			cpu = cpumask_any(cpu_active_mask);
 272		}
 273		later_rq = cpu_rq(cpu);
 274		double_lock_balance(rq, later_rq);
 275	}
 276
 277	/*
 278	 * By now the task is replenished and enqueued; migrate it.
 279	 */
 280	deactivate_task(rq, p, 0);
 281	set_task_cpu(p, later_rq->cpu);
 282	activate_task(later_rq, p, 0);
 283
 284	if (!fallback)
 285		resched_curr(later_rq);
 286
 287	double_unlock_balance(later_rq, rq);
 288
 289	return later_rq;
 290}
 291
 292#else
 293
 294static inline
 295void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 296{
 297}
 298
 299static inline
 300void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 301{
 302}
 303
 304static inline
 305void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 306{
 307}
 308
 309static inline
 310void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 311{
 312}
 313
 314static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 315{
 316	return false;
 317}
 318
 319static inline void pull_dl_task(struct rq *rq)
 320{
 321}
 322
 323static inline void queue_push_tasks(struct rq *rq)
 324{
 325}
 326
 327static inline void queue_pull_task(struct rq *rq)
 328{
 329}
 330#endif /* CONFIG_SMP */
 331
 332static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 333static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 334static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
 335				  int flags);
 336
 337/*
 338 * We are being explicitly informed that a new instance is starting,
 339 * and this means that:
 340 *  - the absolute deadline of the entity has to be placed at
 341 *    current time + relative deadline;
 342 *  - the runtime of the entity has to be set to the maximum value.
 343 *
 344 * The capability of specifying such an event is useful whenever a -deadline
 345 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 346 * one, and to (try to!) reconcile itself with its own scheduling
 347 * parameters.
 348 */
 349static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
 350				       struct sched_dl_entity *pi_se)
 351{
 352	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 353	struct rq *rq = rq_of_dl_rq(dl_rq);
 354
 355	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
 356
 357	/*
 358	 * We are racing with the deadline timer. So, do nothing because
 359	 * the deadline timer handler will take care of properly recharging
 360	 * the runtime and postponing the deadline
 361	 */
 362	if (dl_se->dl_throttled)
 363		return;
 364
 365	/*
 366	 * We use the regular wall clock time to set deadlines in the
 367	 * future; in fact, we must consider execution overheads (time
 368	 * spent on hardirq context, etc.).
 369	 */
 370	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 371	dl_se->runtime = pi_se->dl_runtime;
 372}
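/*
 * Illustration only, with hypothetical parameters (not taken from any
 * particular task): for dl_runtime = 10ms and dl_deadline = 30ms, a new
 * instance starting when rq_clock(rq) = 100ms is set up as
 *
 *	deadline = 100ms + 30ms = 130ms,	runtime = 10ms,
 *
 * i.e. a full budget with the absolute deadline one relative deadline away.
 */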
 373
 374/*
 375 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 376 * possibility of an entity lasting more than what it declared, and thus
 377 * exhausting its runtime.
 378 *
 379 * Here we are interested in making runtime overrun possible, but we do
 380 * not want an entity which is misbehaving to affect the scheduling of all
 381 * other entities.
 382 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 383 * is used, in order to confine each entity within its own bandwidth.
 384 *
 385 * This function deals exactly with that, and ensures that when the runtime
 386 * of an entity is replenished, its deadline is also postponed. That ensures
 387 * the overrunning entity can't interfere with other entities in the system and
 388 * can't make them miss their deadlines. Reasons why this kind of overruns
 389 * could happen are, typically, an entity voluntarily trying to exceed its
 390 * runtime, or having underestimated it during sched_setattr().
 391 */
 392static void replenish_dl_entity(struct sched_dl_entity *dl_se,
 393				struct sched_dl_entity *pi_se)
 394{
 395	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 396	struct rq *rq = rq_of_dl_rq(dl_rq);
 397
 398	BUG_ON(pi_se->dl_runtime <= 0);
 399
 400	/*
 401	 * This could be the case for a !-dl task that is boosted.
 402	 * Just go with full inherited parameters.
 403	 */
 404	if (dl_se->dl_deadline == 0) {
 405		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 406		dl_se->runtime = pi_se->dl_runtime;
 407	}
 408
 409	if (dl_se->dl_yielded && dl_se->runtime > 0)
 410		dl_se->runtime = 0;
 411
 412	/*
 413	 * We keep moving the deadline away until we get some
 414	 * available runtime for the entity. This ensures correct
 415	 * handling of situations where the runtime overrun is
 416	 * arbitrarily large.
 417	 */
 418	while (dl_se->runtime <= 0) {
 419		dl_se->deadline += pi_se->dl_period;
 420		dl_se->runtime += pi_se->dl_runtime;
 421	}
 422
 423	/*
 424	 * At this point, the deadline really should be "in
 425	 * the future" with respect to rq->clock. If it's
 426	 * not, we are, for some reason, lagging too much!
 427	 * Anyway, after having warned userspace about that,
 428	 * we still try to keep the things running by
 429	 * resetting the deadline and the budget of the
 430	 * entity.
 431	 */
 432	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
 433		printk_deferred_once("sched: DL replenish lagged too much\n");
 434		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 435		dl_se->runtime = pi_se->dl_runtime;
 436	}
 437
 438	if (dl_se->dl_yielded)
 439		dl_se->dl_yielded = 0;
 440	if (dl_se->dl_throttled)
 441		dl_se->dl_throttled = 0;
 442}
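/*
 * A minimal worked example of the replenishment loop above, assuming
 * hypothetical parameters dl_runtime = 5ms, dl_period = 20ms and an
 * accumulated overrun that left runtime = -12ms:
 *
 *	-12ms + 5ms = -7ms,  -7ms + 5ms = -2ms,  -2ms + 5ms = 3ms
 *
 * Three iterations restore a positive budget (3ms) while postponing the
 * deadline by 3 * dl_period = 60ms, so the entity stays within its
 * declared bandwidth despite the overrun.
 */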
 443
 444/*
 445 * Here we check if --at time t-- an entity (which is probably being
 446 * [re]activated or, in general, enqueued) can use its remaining runtime
 447 * and its current deadline _without_ exceeding the bandwidth it is
 448 * assigned (function returns true if it can't). We are in fact applying
 449 * one of the CBS rules: when a task wakes up, if the residual runtime
 450 * over residual deadline fits within the allocated bandwidth, then we
 451 * can keep the current (absolute) deadline and residual budget without
 452 * disrupting the schedulability of the system. Otherwise, we should
 453 * refill the runtime and set the deadline a period in the future,
 454 * because keeping the current (absolute) deadline of the task would
 455 * result in breaking guarantees promised to other tasks (refer to
 456 * Documentation/scheduler/sched-deadline.txt for more information).
 457 *
 458 * This function returns true if:
 459 *
 460 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 461 *
 462 * IOW we can't recycle current parameters.
 463 *
 464 * Notice that the bandwidth check is done against the period. For
 465 * tasks with deadline equal to period this is the same as using
 466 * dl_deadline instead of dl_period in the equation above.
 467 */
 468static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 469			       struct sched_dl_entity *pi_se, u64 t)
 470{
 471	u64 left, right;
 472
 473	/*
 474	 * left and right are the two sides of the equation above,
 475	 * after a bit of shuffling to use multiplications instead
 476	 * of divisions.
 477	 *
 478	 * Note that none of the time values involved in the two
 479	 * multiplications are absolute: dl_deadline and dl_runtime
 480	 * are the relative deadline and the maximum runtime of each
 481	 * instance, runtime is the runtime left for the last instance
 482	 * and (deadline - t), since t is rq->clock, is the time left
 483	 * to the (absolute) deadline. Even if overflowing the u64 type
 484	 * is very unlikely to occur in both cases, here we scale down
 485	 * as we want to avoid that risk at all. Scaling down by 10
 486	 * means that we reduce granularity to 1us. We are fine with it,
 487	 * since this is only a true/false check and, anyway, thinking
 488	 * of anything below microseconds resolution is actually fiction
 489	 * (but still we want to give the user that illusion >;).
 490	 */
 491	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
 492	right = ((dl_se->deadline - t) >> DL_SCALE) *
 493		(pi_se->dl_runtime >> DL_SCALE);
 494
 495	return dl_time_before(right, left);
 496}
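/*
 * Hypothetical numbers for the check above: a task with dl_runtime = 10ms
 * and dl_period = 100ms (bandwidth 0.1) wakes up with runtime = 2ms left
 * and 15ms to its current deadline. Ignoring the DL_SCALE shift:
 *
 *	left  = dl_period * runtime          = 100 *  2 = 200
 *	right = (deadline - t) * dl_runtime  =  15 * 10 = 150
 *
 * right < left, i.e. 2/15 > 10/100, so the function returns true and the
 * caller must refresh runtime and deadline instead of recycling them.
 */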
 497
 498/*
 499 * When a -deadline entity is queued back on the runqueue, its runtime and
 500 * deadline might need updating.
 501 *
 502 * The policy here is that we update the deadline of the entity only if:
 503 *  - the current deadline is in the past,
 504 *  - using the remaining runtime with the current deadline would make
 505 *    the entity exceed its bandwidth.
 506 */
 507static void update_dl_entity(struct sched_dl_entity *dl_se,
 508			     struct sched_dl_entity *pi_se)
 509{
 510	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 511	struct rq *rq = rq_of_dl_rq(dl_rq);
 512
 513	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 514	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
 515		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 516		dl_se->runtime = pi_se->dl_runtime;
 517	}
 518}
 519
 520/*
 521 * If the entity depleted all its runtime, and if we want it to sleep
 522 * while waiting for some new execution time to become available, we
 523 * set the bandwidth enforcement timer to the replenishment instant
 524 * and try to activate it.
 525 *
 526 * Notice that it is important for the caller to know if the timer
 527 * actually started or not (i.e., the replenishment instant is in
 528 * the future or in the past).
 529 */
 530static int start_dl_timer(struct task_struct *p)
 531{
 532	struct sched_dl_entity *dl_se = &p->dl;
 533	struct hrtimer *timer = &dl_se->dl_timer;
 534	struct rq *rq = task_rq(p);
 535	ktime_t now, act;
 536	s64 delta;
 537
 538	lockdep_assert_held(&rq->lock);
 539
 540	/*
 541	 * We want the timer to fire at the deadline, but consider that
 542	 * the deadline is expressed in rq->clock time, while the timer
 543	 * is programmed against hrtimer's time base.
 544	 */
 545	act = ns_to_ktime(dl_se->deadline);
 546	now = hrtimer_cb_get_time(timer);
 547	delta = ktime_to_ns(now) - rq_clock(rq);
 548	act = ktime_add_ns(act, delta);
 549
 550	/*
 551	 * If the expiry time already passed, e.g., because the value
 552	 * chosen as the deadline is too small, don't even try to
 553	 * start the timer in the past!
 554	 */
 555	if (ktime_us_delta(act, now) < 0)
 556		return 0;
 557
 558	/*
 559	 * !enqueued will guarantee another callback; even if one is already in
 560	 * progress. This ensures a balanced {get,put}_task_struct().
 561	 *
 562	 * The race against __run_timer() clearing the enqueued state is
 563	 * harmless because we're holding task_rq()->lock, therefore the timer
 564	 * expiring after we've done the check will wait on its task_rq_lock()
 565	 * and observe our state.
 566	 */
 567	if (!hrtimer_is_queued(timer)) {
 568		get_task_struct(p);
 569		hrtimer_start(timer, act, HRTIMER_MODE_ABS);
 570	}
 571
 572	return 1;
 573}
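/*
 * Hypothetical example of the clock-base conversion above: if the deadline
 * is 2,000,000ns on the rq clock, rq_clock(rq) = 1,500,000ns and the
 * hrtimer base currently reads 1,700,000ns, then
 *
 *	delta = 1,700,000 - 1,500,000 = 200,000ns
 *	act   = 2,000,000 + 200,000   = 2,200,000ns
 *
 * so the timer fires 500,000ns from now in both time bases.
 */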
 574
 575/*
 576 * This is the bandwidth enforcement timer callback. If here, we know
 577 * a task is not on its dl_rq, since the fact that the timer was running
 578 * means the task is throttled and needs a runtime replenishment.
 579 *
 580 * However, what we actually do depends on whether the task is still active
 581 * (it is on its rq) or has been removed from there by a call to
 582 * dequeue_task_dl(). In the former case we must issue the runtime
 583 * replenishment and add the task back to the dl_rq; in the latter, we just
 584 * do nothing but clear dl_throttled, so that runtime and deadline
 585 * updating (and the queueing back to dl_rq) will be done by the
 586 * next call to enqueue_task_dl().
 587 */
 588static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 589{
 590	struct sched_dl_entity *dl_se = container_of(timer,
 591						     struct sched_dl_entity,
 592						     dl_timer);
 593	struct task_struct *p = dl_task_of(dl_se);
 594	unsigned long flags;
 595	struct rq *rq;
 596
 597	rq = task_rq_lock(p, &flags);
 598
 599	/*
 600	 * The task might have changed its scheduling policy to something
 601	 * different than SCHED_DEADLINE (through switched_from_dl()).
 602	 */
 603	if (!dl_task(p)) {
 604		__dl_clear_params(p);
 605		goto unlock;
 606	}
 607
 608	/*
 609	 * The task might have been boosted by someone else and might be in the
 610	 * boosting/deboosting path; in that case it's not throttled.
 611	 */
 612	if (dl_se->dl_boosted)
 613		goto unlock;
 614
 615	/*
 616	 * Spurious timer due to start_dl_timer() race; or we already received
 617	 * a replenishment from rt_mutex_setprio().
 618	 */
 619	if (!dl_se->dl_throttled)
 620		goto unlock;
 621
 622	sched_clock_tick();
 623	update_rq_clock(rq);
 624
 625	/*
 626	 * If the throttle happened during sched-out; like:
 627	 *
 628	 *   schedule()
 629	 *     deactivate_task()
 630	 *       dequeue_task_dl()
 631	 *         update_curr_dl()
 632	 *           start_dl_timer()
 633	 *         __dequeue_task_dl()
 634	 *     prev->on_rq = 0;
 635	 *
 636	 * We can be both throttled and !queued. Replenish the counter
 637	 * but do not enqueue -- wait for our wakeup to do that.
 638	 */
 639	if (!task_on_rq_queued(p)) {
 640		replenish_dl_entity(dl_se, dl_se);
 641		goto unlock;
 642	}
 643
 644	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
 645	if (dl_task(rq->curr))
 646		check_preempt_curr_dl(rq, p, 0);
 647	else
 648		resched_curr(rq);
 649
 650#ifdef CONFIG_SMP
 651	/*
 652	 * Perform balancing operations here; after the replenishments.  We
 653	 * cannot drop rq->lock before this, otherwise the assertion in
 654	 * start_dl_timer() about not missing updates is not true.
 655	 *
 656	 * If we find that the rq the task was on is no longer available, we
 657	 * need to select a new rq.
 658	 *
 659	 * XXX figure out if select_task_rq_dl() deals with offline cpus.
 660	 */
 661	if (unlikely(!rq->online))
 662		rq = dl_task_offline_migration(rq, p);
 663
 664	/*
 665	 * Queueing this task back might have overloaded rq, check if we need
 666	 * to kick someone away.
 667	 */
 668	if (has_pushable_dl_tasks(rq)) {
 669		/*
 670		 * Nothing relies on rq->lock after this, so it's safe to drop
 671		 * rq->lock.
 672		 */
 673		lockdep_unpin_lock(&rq->lock);
 674		push_dl_task(rq);
 675		lockdep_pin_lock(&rq->lock);
 676	}
 677#endif
 678
 679unlock:
 680	task_rq_unlock(rq, p, &flags);
 681
 682	/*
 683	 * This can free the task_struct, including this hrtimer, do not touch
 684	 * anything related to that after this.
 685	 */
 686	put_task_struct(p);
 687
 688	return HRTIMER_NORESTART;
 689}
 690
 691void init_dl_task_timer(struct sched_dl_entity *dl_se)
 692{
 693	struct hrtimer *timer = &dl_se->dl_timer;
 694
 695	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 696	timer->function = dl_task_timer;
 697}
 698
 699static
 700int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
 701{
 702	return (dl_se->runtime <= 0);
 703}
 704
 705extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
 706
 707/*
 708 * Update the current task's runtime statistics (provided it is still
 709 * a -deadline task and has not been removed from the dl_rq).
 710 */
 711static void update_curr_dl(struct rq *rq)
 712{
 713	struct task_struct *curr = rq->curr;
 714	struct sched_dl_entity *dl_se = &curr->dl;
 715	u64 delta_exec;
 716
 717	if (!dl_task(curr) || !on_dl_rq(dl_se))
 718		return;
 719
 720	/* Kick cpufreq (see the comment in linux/cpufreq.h). */
 721	if (cpu_of(rq) == smp_processor_id())
 722		cpufreq_trigger_update(rq_clock(rq));
 723
 724	/*
 725	 * Consumed budget is computed considering the time as
 726	 * observed by schedulable tasks (excluding time spent
 727	 * in hardirq context, etc.). Deadlines are instead
 728	 * computed using hard walltime. This seems to be the more
 729	 * natural solution, but the full ramifications of this
 730	 * approach need further study.
 731	 */
 732	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
 733	if (unlikely((s64)delta_exec <= 0)) {
 734		if (unlikely(dl_se->dl_yielded))
 735			goto throttle;
 736		return;
 737	}
 738
 739	schedstat_set(curr->se.statistics.exec_max,
 740		      max(curr->se.statistics.exec_max, delta_exec));
 741
 742	curr->se.sum_exec_runtime += delta_exec;
 743	account_group_exec_runtime(curr, delta_exec);
 744
 745	curr->se.exec_start = rq_clock_task(rq);
 746	cpuacct_charge(curr, delta_exec);
 747
 748	sched_rt_avg_update(rq, delta_exec);
 749
 750	dl_se->runtime -= delta_exec;
 751
 752throttle:
 753	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
 754		dl_se->dl_throttled = 1;
 755		__dequeue_task_dl(rq, curr, 0);
 756		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
 757			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
 758
 759		if (!is_leftmost(curr, &rq->dl))
 760			resched_curr(rq);
 761	}
 762
 763	/*
 764	 * Because -- for now -- we share the rt bandwidth, we need to
 765	 * account our runtime there too, otherwise actual rt tasks
 766	 * would be able to exceed the shared quota.
 767	 *
 768	 * Account to the root rt group for now.
 769	 *
 770	 * The solution we're working towards is having the RT groups scheduled
 771	 * using deadline servers -- however there are a few nasties to figure
 772	 * out before that can happen.
 773	 */
 774	if (rt_bandwidth_enabled()) {
 775		struct rt_rq *rt_rq = &rq->rt;
 776
 777		raw_spin_lock(&rt_rq->rt_runtime_lock);
 778		/*
 779		 * We'll let actual RT tasks worry about the overflow here, we
 780		 * have our own CBS to keep us inline; only account when RT
 781		 * bandwidth is relevant.
 782		 */
 783		if (sched_rt_bandwidth_account(rt_rq))
 784			rt_rq->rt_time += delta_exec;
 785		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 786	}
 787}
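/*
 * Illustrative numbers for the accounting above: a task with runtime = 3ms
 * left that has just executed for delta_exec = 4ms ends up with
 * runtime = -1ms. dl_runtime_exceeded() is then true, so the task is
 * throttled and dequeued; if it is boosted or start_dl_timer() cannot be
 * armed, it is replenished and re-enqueued right away, otherwise the
 * bandwidth timer will do that at the deadline.
 */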
 788
 789#ifdef CONFIG_SMP
 790
 791static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 792{
 793	struct rq *rq = rq_of_dl_rq(dl_rq);
 794
 795	if (dl_rq->earliest_dl.curr == 0 ||
 796	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
 797		dl_rq->earliest_dl.curr = deadline;
 798		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
 799	}
 800}
 801
 802static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 803{
 804	struct rq *rq = rq_of_dl_rq(dl_rq);
 805
 806	/*
 807	 * Since we may have removed our earliest (and/or next earliest)
 808	 * task we must recompute them.
 809	 */
 810	if (!dl_rq->dl_nr_running) {
 811		dl_rq->earliest_dl.curr = 0;
 812		dl_rq->earliest_dl.next = 0;
 813		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
 814	} else {
 815		struct rb_node *leftmost = dl_rq->rb_leftmost;
 816		struct sched_dl_entity *entry;
 817
 818		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
 819		dl_rq->earliest_dl.curr = entry->deadline;
 820		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
 821	}
 822}
 823
 824#else
 825
 826static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
 827static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
 828
 829#endif /* CONFIG_SMP */
 830
 831static inline
 832void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 833{
 834	int prio = dl_task_of(dl_se)->prio;
 835	u64 deadline = dl_se->deadline;
 836
 837	WARN_ON(!dl_prio(prio));
 838	dl_rq->dl_nr_running++;
 839	add_nr_running(rq_of_dl_rq(dl_rq), 1);
 840
 841	inc_dl_deadline(dl_rq, deadline);
 842	inc_dl_migration(dl_se, dl_rq);
 843}
 844
 845static inline
 846void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 847{
 848	int prio = dl_task_of(dl_se)->prio;
 849
 850	WARN_ON(!dl_prio(prio));
 851	WARN_ON(!dl_rq->dl_nr_running);
 852	dl_rq->dl_nr_running--;
 853	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
 854
 855	dec_dl_deadline(dl_rq, dl_se->deadline);
 856	dec_dl_migration(dl_se, dl_rq);
 857}
 858
 859static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
 860{
 861	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 862	struct rb_node **link = &dl_rq->rb_root.rb_node;
 863	struct rb_node *parent = NULL;
 864	struct sched_dl_entity *entry;
 865	int leftmost = 1;
 866
 867	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
 868
 869	while (*link) {
 870		parent = *link;
 871		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
 872		if (dl_time_before(dl_se->deadline, entry->deadline))
 873			link = &parent->rb_left;
 874		else {
 875			link = &parent->rb_right;
 876			leftmost = 0;
 877		}
 878	}
 879
 880	if (leftmost)
 881		dl_rq->rb_leftmost = &dl_se->rb_node;
 882
 883	rb_link_node(&dl_se->rb_node, parent, link);
 884	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
 885
 886	inc_dl_tasks(dl_se, dl_rq);
 887}
 888
 889static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
 890{
 891	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 892
 893	if (RB_EMPTY_NODE(&dl_se->rb_node))
 894		return;
 895
 896	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
 897		struct rb_node *next_node;
 898
 899		next_node = rb_next(&dl_se->rb_node);
 900		dl_rq->rb_leftmost = next_node;
 901	}
 902
 903	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
 904	RB_CLEAR_NODE(&dl_se->rb_node);
 905
 906	dec_dl_tasks(dl_se, dl_rq);
 907}
 908
 909static void
 910enqueue_dl_entity(struct sched_dl_entity *dl_se,
 911		  struct sched_dl_entity *pi_se, int flags)
 912{
 913	BUG_ON(on_dl_rq(dl_se));
 914
 915	/*
 916	 * If this is a wakeup or a new instance, the scheduling
 917	 * parameters of the task might need updating. Otherwise,
 918	 * we want a replenishment of its runtime.
 919	 */
 920	if (flags & ENQUEUE_WAKEUP)
 921		update_dl_entity(dl_se, pi_se);
 922	else if (flags & ENQUEUE_REPLENISH)
 923		replenish_dl_entity(dl_se, pi_se);
 924
 925	__enqueue_dl_entity(dl_se);
 926}
 927
 928static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
 929{
 930	__dequeue_dl_entity(dl_se);
 931}
 932
 933static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 934{
 935	struct task_struct *pi_task = rt_mutex_get_top_task(p);
 936	struct sched_dl_entity *pi_se = &p->dl;
 937
 938	/*
 939	 * Use the scheduling parameters of the top pi-waiter
 940	 * task if we have one and its (absolute) deadline is
 941	 * smaller than our one... OTW we keep our runtime and
 942	 * deadline.
 943	 */
 944	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
 945		pi_se = &pi_task->dl;
 946	} else if (!dl_prio(p->normal_prio)) {
 947		/*
 948		 * Special case in which we have a !SCHED_DEADLINE task
 949		 * that is going to be deboosted, but exceeds its
 950		 * runtime while doing so. No point in replenishing
 951		 * it, as it's going to return to its original
 952		 * scheduling class after this.
 953		 */
 954		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
 955		return;
 956	}
 957
 958	/*
 959	 * If p is throttled, we do nothing. In fact, if it exhausted
 960	 * its budget it needs a replenishment and, since it now is on
 961	 * its rq, the bandwidth timer callback (which clearly has not
 962	 * run yet) will take care of this.
 963	 */
 964	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
 965		return;
 966
 967	enqueue_dl_entity(&p->dl, pi_se, flags);
 968
 969	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 970		enqueue_pushable_dl_task(rq, p);
 971}
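/*
 * Hypothetical illustration of the pi_se selection above: if p is boosted
 * because a -deadline waiter (say, dl_runtime = 5ms, dl_deadline = 50ms)
 * blocks on a mutex p holds, p is enqueued with that waiter's parameters
 * (pi_se = &pi_task->dl) rather than its own, inheriting the waiter's
 * bandwidth for as long as the boost lasts.
 */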
 972
 973static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 974{
 975	dequeue_dl_entity(&p->dl);
 976	dequeue_pushable_dl_task(rq, p);
 977}
 978
 979static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 980{
 981	update_curr_dl(rq);
 982	__dequeue_task_dl(rq, p, flags);
 983}
 984
 985/*
 986 * Yield task semantic for -deadline tasks is:
 987 *
 988 *   get off from the CPU until our next instance, with
 989 *   a new runtime. This is of little use now, since we
 990 *   don't have a bandwidth reclaiming mechanism. Anyway,
 991 *   bandwidth reclaiming is planned for the future, and
 992 *   yield_task_dl will indicate that some spare budget
 993 *   is available for other task instances to use.
 994 */
 995static void yield_task_dl(struct rq *rq)
 996{
 997	/*
 998	 * We make the task go to sleep until its current deadline by
 999	 * forcing its runtime to zero. This way, update_curr_dl() stops
1000	 * it and the bandwidth timer will wake it up and will give it
1001	 * new scheduling parameters (thanks to dl_yielded=1).
1002	 */
1003	rq->curr->dl.dl_yielded = 1;
1004
1005	update_rq_clock(rq);
1006	update_curr_dl(rq);
1007	/*
1008	 * Tell update_rq_clock() that we've just updated,
1009	 * so we don't do microscopic update in schedule()
1010	 * and double the fastpath cost.
1011	 */
1012	rq_clock_skip_update(rq, true);
1013}
1014
1015#ifdef CONFIG_SMP
1016
1017static int find_later_rq(struct task_struct *task);
1018
1019static int
1020select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1021{
1022	struct task_struct *curr;
1023	struct rq *rq;
1024
1025	if (sd_flag != SD_BALANCE_WAKE)
1026		goto out;
1027
1028	rq = cpu_rq(cpu);
1029
1030	rcu_read_lock();
1031	curr = READ_ONCE(rq->curr); /* unlocked access */
1032
1033	/*
1034	 * If we are dealing with a -deadline task, we must
1035	 * decide where to wake it up.
1036	 * If it has a later deadline and the current task
1037	 * on this rq can't move (provided the waking task
1038	 * can!) we prefer to send it somewhere else. On the
1039	 * other hand, if it has a shorter deadline, we
1040	 * try to make it stay here, it might be important.
1041	 */
1042	if (unlikely(dl_task(curr)) &&
1043	    (curr->nr_cpus_allowed < 2 ||
1044	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
1045	    (p->nr_cpus_allowed > 1)) {
1046		int target = find_later_rq(p);
1047
1048		if (target != -1 &&
1049				(dl_time_before(p->dl.deadline,
1050					cpu_rq(target)->dl.earliest_dl.curr) ||
1051				(cpu_rq(target)->dl.dl_nr_running == 0)))
1052			cpu = target;
1053	}
1054	rcu_read_unlock();
1055
1056out:
1057	return cpu;
1058}
1059
1060static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1061{
1062	/*
1063	 * Current can't be migrated, useless to reschedule,
1064	 * let's hope p can move out.
1065	 */
1066	if (rq->curr->nr_cpus_allowed == 1 ||
1067	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1068		return;
1069
1070	/*
1071	 * p is migratable, so let's not schedule it and
1072	 * see if it is pushed or pulled somewhere else.
1073	 */
1074	if (p->nr_cpus_allowed != 1 &&
1075	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1076		return;
1077
1078	resched_curr(rq);
1079}
1080
1081#endif /* CONFIG_SMP */
1082
1083/*
1084 * Only called when both the current and waking task are -deadline
1085 * tasks.
1086 */
1087static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1088				  int flags)
1089{
1090	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1091		resched_curr(rq);
1092		return;
1093	}
1094
1095#ifdef CONFIG_SMP
1096	/*
1097	 * In the unlikely case current and p have the same deadline
1098	 * let us try to decide what's the best thing to do...
1099	 */
1100	if ((p->dl.deadline == rq->curr->dl.deadline) &&
1101	    !test_tsk_need_resched(rq->curr))
1102		check_preempt_equal_dl(rq, p);
1103#endif /* CONFIG_SMP */
1104}
1105
1106#ifdef CONFIG_SCHED_HRTICK
1107static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1108{
1109	hrtick_start(rq, p->dl.runtime);
1110}
1111#else /* !CONFIG_SCHED_HRTICK */
1112static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1113{
1114}
1115#endif
1116
1117static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1118						   struct dl_rq *dl_rq)
1119{
1120	struct rb_node *left = dl_rq->rb_leftmost;
1121
1122	if (!left)
1123		return NULL;
1124
1125	return rb_entry(left, struct sched_dl_entity, rb_node);
1126}
1127
1128struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
1129{
1130	struct sched_dl_entity *dl_se;
1131	struct task_struct *p;
1132	struct dl_rq *dl_rq;
1133
1134	dl_rq = &rq->dl;
1135
1136	if (need_pull_dl_task(rq, prev)) {
1137		/*
1138		 * This is OK, because current is on_cpu, which avoids it being
1139		 * picked for load-balance and preemption/IRQs are still
1140		 * disabled avoiding further scheduler activity on it and we're
1141		 * being very careful to re-start the picking loop.
1142		 */
1143		lockdep_unpin_lock(&rq->lock);
1144		pull_dl_task(rq);
1145		lockdep_pin_lock(&rq->lock);
1146		/*
1147		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1148		 * means a stop task can slip in, in which case we need to
1149		 * re-start task selection.
1150		 */
1151		if (rq->stop && task_on_rq_queued(rq->stop))
1152			return RETRY_TASK;
1153	}
1154
1155	/*
1156	 * When prev is DL, we may throttle it in put_prev_task().
1157	 * So, we update time before we check for dl_nr_running.
1158	 */
1159	if (prev->sched_class == &dl_sched_class)
1160		update_curr_dl(rq);
1161
1162	if (unlikely(!dl_rq->dl_nr_running))
1163		return NULL;
1164
1165	put_prev_task(rq, prev);
1166
1167	dl_se = pick_next_dl_entity(rq, dl_rq);
1168	BUG_ON(!dl_se);
1169
1170	p = dl_task_of(dl_se);
1171	p->se.exec_start = rq_clock_task(rq);
1172
1173	/* Running task will never be pushed. */
1174	dequeue_pushable_dl_task(rq, p);
1175
1176	if (hrtick_enabled(rq))
1177		start_hrtick_dl(rq, p);
1178
1179	queue_push_tasks(rq);
1180
1181	return p;
1182}
1183
1184static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1185{
1186	update_curr_dl(rq);
1187
1188	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1189		enqueue_pushable_dl_task(rq, p);
1190}
1191
1192static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1193{
1194	update_curr_dl(rq);
1195
1196	/*
1197	 * Even when we have runtime, update_curr_dl() might have resulted in us
1198	 * not being the leftmost task anymore. In that case NEED_RESCHED will
1199	 * be set and schedule() will start a new hrtick for the next task.
1200	 */
1201	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1202	    is_leftmost(p, &rq->dl))
1203		start_hrtick_dl(rq, p);
1204}
1205
1206static void task_fork_dl(struct task_struct *p)
1207{
1208	/*
1209	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1210	 * sched_fork()
1211	 */
1212}
1213
1214static void task_dead_dl(struct task_struct *p)
1215{
1216	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1217
1218	/*
1219	 * Since we are TASK_DEAD we won't slip out of the domain!
1220	 */
1221	raw_spin_lock_irq(&dl_b->lock);
1222	/* XXX we should retain the bw until 0-lag */
1223	dl_b->total_bw -= p->dl.dl_bw;
1224	raw_spin_unlock_irq(&dl_b->lock);
1225}
1226
1227static void set_curr_task_dl(struct rq *rq)
1228{
1229	struct task_struct *p = rq->curr;
1230
1231	p->se.exec_start = rq_clock_task(rq);
1232
1233	/* You can't push away the running task */
1234	dequeue_pushable_dl_task(rq, p);
1235}
1236
1237#ifdef CONFIG_SMP
1238
1239/* Only try algorithms three times */
1240#define DL_MAX_TRIES 3
1241
1242static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1243{
1244	if (!task_running(rq, p) &&
1245	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1246		return 1;
1247	return 0;
1248}
1249
1250/*
1251 * Return the earliest pushable rq's task, which is suitable to be executed
1252 * on the CPU, NULL otherwise:
1253 */
1254static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1255{
1256	struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1257	struct task_struct *p = NULL;
1258
1259	if (!has_pushable_dl_tasks(rq))
1260		return NULL;
1261
1262next_node:
1263	if (next_node) {
1264		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1265
1266		if (pick_dl_task(rq, p, cpu))
1267			return p;
1268
1269		next_node = rb_next(next_node);
1270		goto next_node;
1271	}
1272
1273	return NULL;
1274}
1275
1276static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1277
1278static int find_later_rq(struct task_struct *task)
1279{
1280	struct sched_domain *sd;
1281	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1282	int this_cpu = smp_processor_id();
1283	int best_cpu, cpu = task_cpu(task);
1284
1285	/* Make sure the mask is initialized first */
1286	if (unlikely(!later_mask))
1287		return -1;
1288
1289	if (task->nr_cpus_allowed == 1)
1290		return -1;
1291
1292	/*
1293	 * We have to consider system topology and task affinity
1294	 * first, then we can look for a suitable cpu.
1295	 */
1296	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1297			task, later_mask);
1298	if (best_cpu == -1)
1299		return -1;
1300
1301	/*
1302	 * If we are here, some target has been found,
1303	 * the most suitable of which is cached in best_cpu.
1304	 * This is, among the runqueues where the current tasks
1305	 * have later deadlines than the task's one, the rq
1306	 * with the latest possible one.
1307	 *
1308	 * Now we check how well this matches with task's
1309	 * affinity and system topology.
1310	 *
1311	 * The last cpu where the task ran is our first
1312	 * guess, since it is most likely cache-hot there.
1313	 */
1314	if (cpumask_test_cpu(cpu, later_mask))
1315		return cpu;
1316	/*
1317	 * Check if this_cpu is to be skipped (i.e., it is
1318	 * not in the mask) or not.
1319	 */
1320	if (!cpumask_test_cpu(this_cpu, later_mask))
1321		this_cpu = -1;
1322
1323	rcu_read_lock();
1324	for_each_domain(cpu, sd) {
1325		if (sd->flags & SD_WAKE_AFFINE) {
1326
1327			/*
1328			 * If possible, preempting this_cpu is
1329			 * cheaper than migrating.
1330			 */
1331			if (this_cpu != -1 &&
1332			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1333				rcu_read_unlock();
1334				return this_cpu;
1335			}
1336
1337			/*
1338			 * Last chance: if best_cpu is valid and is
1339			 * in the mask, that becomes our choice.
1340			 */
1341			if (best_cpu < nr_cpu_ids &&
1342			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1343				rcu_read_unlock();
1344				return best_cpu;
1345			}
1346		}
1347	}
1348	rcu_read_unlock();
1349
1350	/*
1351	 * At this point, all our guesses failed, we just return
1352	 * 'something', and let the caller sort the things out.
1353	 */
1354	if (this_cpu != -1)
1355		return this_cpu;
1356
1357	cpu = cpumask_any(later_mask);
1358	if (cpu < nr_cpu_ids)
1359		return cpu;
1360
1361	return -1;
1362}
1363
1364/* Locks the rq it finds */
1365static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1366{
1367	struct rq *later_rq = NULL;
1368	int tries;
1369	int cpu;
1370
1371	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1372		cpu = find_later_rq(task);
1373
1374		if ((cpu == -1) || (cpu == rq->cpu))
1375			break;
1376
1377		later_rq = cpu_rq(cpu);
1378
1379		if (later_rq->dl.dl_nr_running &&
1380		    !dl_time_before(task->dl.deadline,
1381					later_rq->dl.earliest_dl.curr)) {
1382			/*
1383			 * Target rq has tasks of equal or earlier deadline,
1384			 * retrying does not release any lock and is unlikely
1385			 * to yield a different result.
1386			 */
1387			later_rq = NULL;
1388			break;
1389		}
1390
1391		/* Retry if something changed. */
1392		if (double_lock_balance(rq, later_rq)) {
1393			if (unlikely(task_rq(task) != rq ||
1394				     !cpumask_test_cpu(later_rq->cpu,
1395				                       &task->cpus_allowed) ||
1396				     task_running(rq, task) ||
1397				     !dl_task(task) ||
1398				     !task_on_rq_queued(task))) {
1399				double_unlock_balance(rq, later_rq);
1400				later_rq = NULL;
1401				break;
1402			}
1403		}
1404
1405		/*
1406		 * If the rq we found has no -deadline task, or
1407		 * its earliest one has a later deadline than our
1408		 * task, the rq is a good one.
1409		 */
1410		if (!later_rq->dl.dl_nr_running ||
1411		    dl_time_before(task->dl.deadline,
1412				   later_rq->dl.earliest_dl.curr))
1413			break;
1414
1415		/* Otherwise we try again. */
1416		double_unlock_balance(rq, later_rq);
1417		later_rq = NULL;
1418	}
1419
1420	return later_rq;
1421}
1422
1423static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1424{
1425	struct task_struct *p;
1426
1427	if (!has_pushable_dl_tasks(rq))
1428		return NULL;
1429
1430	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1431		     struct task_struct, pushable_dl_tasks);
1432
1433	BUG_ON(rq->cpu != task_cpu(p));
1434	BUG_ON(task_current(rq, p));
1435	BUG_ON(p->nr_cpus_allowed <= 1);
1436
1437	BUG_ON(!task_on_rq_queued(p));
1438	BUG_ON(!dl_task(p));
1439
1440	return p;
1441}
1442
1443/*
1444 * See if the non running -deadline tasks on this rq
1445 * can be sent to some other CPU where they can preempt
1446 * and start executing.
1447 */
1448static int push_dl_task(struct rq *rq)
1449{
1450	struct task_struct *next_task;
1451	struct rq *later_rq;
1452	int ret = 0;
1453
1454	if (!rq->dl.overloaded)
1455		return 0;
1456
1457	next_task = pick_next_pushable_dl_task(rq);
1458	if (!next_task)
1459		return 0;
1460
1461retry:
1462	if (unlikely(next_task == rq->curr)) {
1463		WARN_ON(1);
1464		return 0;
1465	}
1466
1467	/*
1468	 * If next_task preempts rq->curr, and rq->curr
1469	 * can move away, it makes sense to just reschedule
1470	 * without going further in pushing next_task.
1471	 */
1472	if (dl_task(rq->curr) &&
1473	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1474	    rq->curr->nr_cpus_allowed > 1) {
1475		resched_curr(rq);
1476		return 0;
1477	}
1478
1479	/* We might release rq lock */
1480	get_task_struct(next_task);
1481
1482	/* Will lock the rq it'll find */
1483	later_rq = find_lock_later_rq(next_task, rq);
1484	if (!later_rq) {
1485		struct task_struct *task;
1486
1487		/*
1488		 * We must check all this again, since
1489		 * find_lock_later_rq releases rq->lock and it is
1490		 * then possible that next_task has migrated.
1491		 */
1492		task = pick_next_pushable_dl_task(rq);
1493		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1494			/*
1495			 * The task is still there. We don't try
1496			 * again, some other cpu will pull it when ready.
1497			 */
1498			goto out;
1499		}
1500
1501		if (!task)
1502			/* No more tasks */
1503			goto out;
1504
1505		put_task_struct(next_task);
1506		next_task = task;
1507		goto retry;
1508	}
1509
1510	deactivate_task(rq, next_task, 0);
1511	set_task_cpu(next_task, later_rq->cpu);
1512	activate_task(later_rq, next_task, 0);
1513	ret = 1;
1514
1515	resched_curr(later_rq);
1516
1517	double_unlock_balance(rq, later_rq);
1518
1519out:
1520	put_task_struct(next_task);
1521
1522	return ret;
1523}
1524
1525static void push_dl_tasks(struct rq *rq)
1526{
1527	/* push_dl_task() will return true if it moved a -deadline task */
1528	while (push_dl_task(rq))
1529		;
1530}
1531
1532static void pull_dl_task(struct rq *this_rq)
1533{
1534	int this_cpu = this_rq->cpu, cpu;
1535	struct task_struct *p;
1536	bool resched = false;
1537	struct rq *src_rq;
1538	u64 dmin = LONG_MAX;
1539
1540	if (likely(!dl_overloaded(this_rq)))
1541		return;
1542
1543	/*
1544	 * Match the barrier from dl_set_overload(); this guarantees that if we
1545	 * see overloaded we must also see the dlo_mask bit.
1546	 */
1547	smp_rmb();
1548
1549	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1550		if (this_cpu == cpu)
1551			continue;
1552
1553		src_rq = cpu_rq(cpu);
1554
1555		/*
1556		 * It looks racy, and it is! However, as in sched_rt.c,
1557		 * we are fine with this.
1558		 */
1559		if (this_rq->dl.dl_nr_running &&
1560		    dl_time_before(this_rq->dl.earliest_dl.curr,
1561				   src_rq->dl.earliest_dl.next))
1562			continue;
1563
1564		/* Might drop this_rq->lock */
1565		double_lock_balance(this_rq, src_rq);
1566
1567		/*
1568		 * If there are no more pullable tasks on the
1569		 * rq, we're done with it.
1570		 */
1571		if (src_rq->dl.dl_nr_running <= 1)
1572			goto skip;
1573
1574		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1575
1576		/*
1577		 * We found a task to be pulled if:
1578		 *  - it preempts our current (if there's one),
1579		 *  - it will preempt the last one we pulled (if any).
1580		 */
1581		if (p && dl_time_before(p->dl.deadline, dmin) &&
1582		    (!this_rq->dl.dl_nr_running ||
1583		     dl_time_before(p->dl.deadline,
1584				    this_rq->dl.earliest_dl.curr))) {
1585			WARN_ON(p == src_rq->curr);
1586			WARN_ON(!task_on_rq_queued(p));
1587
1588			/*
1589			 * Then we pull iff p has actually an earlier
1590			 * deadline than the current task of its runqueue.
1591			 */
1592			if (dl_time_before(p->dl.deadline,
1593					   src_rq->curr->dl.deadline))
1594				goto skip;
1595
1596			resched = true;
1597
1598			deactivate_task(src_rq, p, 0);
1599			set_task_cpu(p, this_cpu);
1600			activate_task(this_rq, p, 0);
1601			dmin = p->dl.deadline;
1602
1603			/* Is there any other task even earlier? */
1604		}
1605skip:
1606		double_unlock_balance(this_rq, src_rq);
1607	}
1608
1609	if (resched)
1610		resched_curr(this_rq);
1611}
1612
1613/*
1614 * Since the task is not running and a reschedule is not going to happen
1615 * anytime soon on its runqueue, we try pushing it away now.
1616 */
1617static void task_woken_dl(struct rq *rq, struct task_struct *p)
1618{
1619	if (!task_running(rq, p) &&
1620	    !test_tsk_need_resched(rq->curr) &&
1621	    p->nr_cpus_allowed > 1 &&
1622	    dl_task(rq->curr) &&
1623	    (rq->curr->nr_cpus_allowed < 2 ||
1624	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1625		push_dl_tasks(rq);
1626	}
1627}
1628
1629static void set_cpus_allowed_dl(struct task_struct *p,
1630				const struct cpumask *new_mask)
1631{
1632	struct root_domain *src_rd;
1633	struct rq *rq;
1634
1635	BUG_ON(!dl_task(p));
1636
1637	rq = task_rq(p);
1638	src_rd = rq->rd;
1639	/*
1640	 * Migrating a SCHED_DEADLINE task between exclusive
1641	 * cpusets (different root_domains) entails a bandwidth
1642	 * update. We already made space for us in the destination
1643	 * domain (see cpuset_can_attach()).
1644	 */
1645	if (!cpumask_intersects(src_rd->span, new_mask)) {
1646		struct dl_bw *src_dl_b;
1647
1648		src_dl_b = dl_bw_of(cpu_of(rq));
1649		/*
1650		 * We now free resources of the root_domain we are migrating
1651		 * off. In the worst case, sched_setattr() may temporarily fail
1652		 * until we complete the update.
1653		 */
1654		raw_spin_lock(&src_dl_b->lock);
1655		__dl_clear(src_dl_b, p->dl.dl_bw);
1656		raw_spin_unlock(&src_dl_b->lock);
1657	}
1658
1659	set_cpus_allowed_common(p, new_mask);
1660}
1661
1662/* Assumes rq->lock is held */
1663static void rq_online_dl(struct rq *rq)
1664{
1665	if (rq->dl.overloaded)
1666		dl_set_overload(rq);
1667
1668	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1669	if (rq->dl.dl_nr_running > 0)
1670		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1671}
1672
1673/* Assumes rq->lock is held */
1674static void rq_offline_dl(struct rq *rq)
1675{
1676	if (rq->dl.overloaded)
1677		dl_clear_overload(rq);
1678
1679	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1680	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1681}
1682
1683void __init init_sched_dl_class(void)
1684{
1685	unsigned int i;
1686
1687	for_each_possible_cpu(i)
1688		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1689					GFP_KERNEL, cpu_to_node(i));
1690}
1691
1692#endif /* CONFIG_SMP */
1693
1694static void switched_from_dl(struct rq *rq, struct task_struct *p)
1695{
1696	/*
1697	 * Start the deadline timer; if we switch back to dl before this we'll
1698	 * continue consuming our current CBS slice. If we stay outside of
1699	 * SCHED_DEADLINE until the deadline passes, the timer will reset the
1700	 * task.
1701	 */
1702	if (!start_dl_timer(p))
1703		__dl_clear_params(p);
1704
1705	/*
1706	 * Since this might be the only -deadline task on the rq,
1707	 * this is the right place to try to pull some other one
1708	 * from an overloaded cpu, if any.
1709	 */
1710	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1711		return;
1712
1713	queue_pull_task(rq);
1714}
1715
1716/*
1717 * When switching to -deadline, we may overload the rq, then
1718 * we try to push someone off, if possible.
1719 */
1720static void switched_to_dl(struct rq *rq, struct task_struct *p)
1721{
1722	if (dl_time_before(p->dl.deadline, rq_clock(rq)))
1723		setup_new_dl_entity(&p->dl, &p->dl);
1724
1725	if (task_on_rq_queued(p) && rq->curr != p) {
1726#ifdef CONFIG_SMP
1727		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
1728			queue_push_tasks(rq);
1729#else
1730		if (dl_task(rq->curr))
1731			check_preempt_curr_dl(rq, p, 0);
1732		else
1733			resched_curr(rq);
1734#endif
1735	}
1736}
1737
1738/*
1739 * If the scheduling parameters of a -deadline task changed,
1740 * a push or pull operation might be needed.
1741 */
1742static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1743			    int oldprio)
1744{
1745	if (task_on_rq_queued(p) || rq->curr == p) {
1746#ifdef CONFIG_SMP
1747		/*
1748		 * This might be too much, but unfortunately
1749		 * we don't have the old deadline value, and
1750		 * we can't argue if the task is increasing
1751		 * or lowering its prio, so...
1752		 */
1753		if (!rq->dl.overloaded)
1754			queue_pull_task(rq);
1755
1756		/*
1757		 * If we now have an earlier deadline task than p,
1758		 * then reschedule, provided p is still on this
1759		 * runqueue.
1760		 */
1761		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
1762			resched_curr(rq);
1763#else
1764		/*
1765		 * Again, we don't know if p has an earlier
1766		 * or later deadline, so let's blindly set a
1767		 * (maybe not needed) rescheduling point.
1768		 */
1769		resched_curr(rq);
1770#endif /* CONFIG_SMP */
1771	}
1772}
1773
1774const struct sched_class dl_sched_class = {
1775	.next			= &rt_sched_class,
1776	.enqueue_task		= enqueue_task_dl,
1777	.dequeue_task		= dequeue_task_dl,
1778	.yield_task		= yield_task_dl,
1779
1780	.check_preempt_curr	= check_preempt_curr_dl,
1781
1782	.pick_next_task		= pick_next_task_dl,
1783	.put_prev_task		= put_prev_task_dl,
1784
1785#ifdef CONFIG_SMP
1786	.select_task_rq		= select_task_rq_dl,
1787	.set_cpus_allowed       = set_cpus_allowed_dl,
1788	.rq_online              = rq_online_dl,
1789	.rq_offline             = rq_offline_dl,
1790	.task_woken		= task_woken_dl,
1791#endif
1792
1793	.set_curr_task		= set_curr_task_dl,
1794	.task_tick		= task_tick_dl,
1795	.task_fork              = task_fork_dl,
1796	.task_dead		= task_dead_dl,
1797
1798	.prio_changed           = prio_changed_dl,
1799	.switched_from		= switched_from_dl,
1800	.switched_to		= switched_to_dl,
1801
1802	.update_curr		= update_curr_dl,
1803};
1804
1805#ifdef CONFIG_SCHED_DEBUG
1806extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1807
1808void print_dl_stats(struct seq_file *m, int cpu)
1809{
1810	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1811}
1812#endif /* CONFIG_SCHED_DEBUG */