// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/core.c
 *
 * Core kernel scheduler code and related syscalls
 *
 * Copyright (C) 1991-2002  Linus Torvalds
 */
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
#include <linux/sched/signal.h>
#include <linux/syscalls_api.h>
#include <linux/debug_locks.h>
#include <linux/prefetch.h>
#include <linux/capability.h>
#include <linux/pgtable_api.h>
#include <linux/wait_bit.h>
#include <linux/jiffies.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/hardirq.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/rt.h>

#include <linux/blkdev.h>
#include <linux/context_tracking.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/interrupt.h>
#include <linux/ioprio.h>
#include <linux/kallsyms.h>
#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/llist_api.h>
#include <linux/mmu_context.h>
#include <linux/mmzone.h>
#include <linux/mutex_api.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/perf_event_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcuwait_api.h>
#include <linux/sched/wake_q.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vtime.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>

#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_GENERIC_ENTRY
#  include <linux/entry-common.h>
# endif
#endif

#include <uapi/linux/sched/types.h>

#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>

#define CREATE_TRACE_POINTS
#include <linux/sched/rseq_api.h>
#include <trace/events/sched.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"
#include "stats.h"
#include "autogroup.h"
#include "pelt.h"
#include "smp.h"

#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#ifdef CONFIG_SCHED_DEBUG
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constant propagation
 * at compile time and compiler optimization based on the features' defaults.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT
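
/*
 * Illustrative sketch (editor's note, not part of the build): with two
 * hypothetical entries SCHED_FEAT(FOO, true) and SCHED_FEAT(BAR, false)
 * in features.h, the construct above expands roughly to:
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_FOO) * 1 |
 *		(1UL << __SCHED_FEAT_BAR) * 0 |
 *		0;
 */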

/*
 * Print a warning if need_resched is set for the given duration (if
 * LATENCY_WARN is enabled).
 *
 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
 * per boot.
 */
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;

__read_mostly int scheduler_running;

#ifdef CONFIG_SCHED_CORE

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(struct task_struct *p)
{
	if (p->sched_class == &stop_sched_class) /* trumps deadline */
		return -2;

	if (rt_prio(p->prio)) /* includes deadline */
		return p->prio; /* [-1, 99] */

	if (p->sched_class == &idle_sched_class)
		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

	return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
}
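
/*
 * Illustrative mapping (editor's sketch, assuming MAX_RT_PRIO == 100,
 * NICE_WIDTH == 40 and MAX_NICE == 19): stop -> -2, deadline -> -1,
 * RT -> [0, 99], fair -> 119, idle -> 140. prio_less() below negates
 * these values for its class comparison, hence "less is more" above.
 */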

/*
 * l(a,b)
 * le(a,b) := !l(b,a)
 * g(a,b)  := l(b,a)
 * ge(a,b) := !l(a,b)
 */

/* real prio, less is less */
static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
{
	int pa = __task_prio(a), pb = __task_prio(b);

	if (-pa < -pb)
		return true;

	if (-pb < -pa)
		return false;

	if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
		return !dl_time_before(a->dl.deadline, b->dl.deadline);

	if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
		return cfs_prio_less(a, b, in_fi);

	return false;
}

static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b)
{
	if (a->core_cookie < b->core_cookie)
		return true;

	if (a->core_cookie > b->core_cookie)
		return false;

	/* flip prio, so high prio is leftmost */
	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
		return true;

	return false;
}

#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)

static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
	const struct task_struct *p = __node_2_sc(node);
	unsigned long cookie = (unsigned long)key;

	if (cookie < p->core_cookie)
		return -1;

	if (cookie > p->core_cookie)
		return 1;

	return 0;
}
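
/*
 * Illustrative ordering (editor's sketch): core_tree is keyed first by
 * core_cookie and then by flipped priority, so e.g. tasks
 * (cookie 1, RT prio 10), (cookie 1, fair), (cookie 2, RT prio 50) sort
 * left to right in that order. rb_find_first() on a cookie therefore lands
 * on the highest-priority match and rb_next() walks its lower-priority
 * peers, which is the pattern used by sched_core_find() and
 * sched_core_next() below.
 */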

void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->core->core_task_seq++;

	if (!p->core_cookie)
		return;

	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}

void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
	rq->core->core_task_seq++;

	if (sched_core_enqueued(p)) {
		rb_erase(&p->core_node, &rq->core_tree);
		RB_CLEAR_NODE(&p->core_node);
	}

	/*
	 * Migrating the last task off the cpu, with the cpu in forced idle
	 * state. Reschedule to create an accounting edge for forced idle,
	 * and re-examine whether the core is still in forced idle state.
	 */
	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
	    rq->core->core_forceidle_count && rq->curr == rq->idle)
		resched_curr(rq);
}

/*
 * Find left-most (aka, highest priority) task matching @cookie.
 */
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
	struct rb_node *node;

	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
	/*
	 * The idle task always matches any cookie!
	 */
	if (!node)
		return idle_sched_class.pick_task(rq);

	return __node_2_sc(node);
}

static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
	struct rb_node *node = &p->core_node;

	node = rb_next(node);
	if (!node)
		return NULL;

	p = container_of(node, struct task_struct, core_node);
	if (p->core_cookie != cookie)
		return NULL;

	return p;
}

/*
 * Magic required such that:
 *
 *	raw_spin_rq_lock(rq);
 *	...
 *	raw_spin_rq_unlock(rq);
 *
 * ends up locking and unlocking the _same_ lock, and all CPUs
 * always agree on what rq has what lock.
 *
 * XXX entirely possible to selectively enable cores, don't bother for now.
 */

static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;

static void sched_core_lock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t, i = 0;

	local_irq_save(*flags);
	for_each_cpu(t, smt_mask)
		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

static void sched_core_unlock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t;

	for_each_cpu(t, smt_mask)
		raw_spin_unlock(&cpu_rq(t)->__lock);
	local_irq_restore(*flags);
}

static void __sched_core_flip(bool enabled)
{
	unsigned long flags;
	int cpu, t;

	cpus_read_lock();

	/*
	 * Toggle the online cores, one by one.
	 */
	cpumask_copy(&sched_core_mask, cpu_online_mask);
	for_each_cpu(cpu, &sched_core_mask) {
		const struct cpumask *smt_mask = cpu_smt_mask(cpu);

		sched_core_lock(cpu, &flags);

		for_each_cpu(t, smt_mask)
			cpu_rq(t)->core_enabled = enabled;

		cpu_rq(cpu)->core->core_forceidle_start = 0;

		sched_core_unlock(cpu, &flags);

		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
	}

	/*
	 * Toggle the offline CPUs.
	 */
	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
		cpu_rq(cpu)->core_enabled = enabled;

	cpus_read_unlock();
}

static void sched_core_assert_empty(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}

static void __sched_core_enable(void)
{
	static_branch_enable(&__sched_core_enabled);
	/*
	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
	 * and future ones will observe !sched_core_disabled().
	 */
	synchronize_rcu();
	__sched_core_flip(true);
	sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
	sched_core_assert_empty();
	__sched_core_flip(false);
	static_branch_disable(&__sched_core_enabled);
}

void sched_core_get(void)
{
	if (atomic_inc_not_zero(&sched_core_count))
		return;

	mutex_lock(&sched_core_mutex);
	if (!atomic_read(&sched_core_count))
		__sched_core_enable();

	smp_mb__before_atomic(); /* before atomic_inc() */
	atomic_inc(&sched_core_count);
	mutex_unlock(&sched_core_mutex);
}

static void __sched_core_put(struct work_struct *work)
{
	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
		__sched_core_disable();
		mutex_unlock(&sched_core_mutex);
	}
}

void sched_core_put(void)
{
	static DECLARE_WORK(_work, __sched_core_put);

	/*
	 * "There can be only one"
	 *
	 * Either this is the last one, or we don't actually need to do any
	 * 'work'. If it is the last *again*, we rely on
	 * WORK_STRUCT_PENDING_BIT.
	 */
	if (!atomic_add_unless(&sched_core_count, -1, 1))
		schedule_work(&_work);
}
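
/*
 * Illustrative usage (editor's sketch): a facility handing out core cookies
 * would bracket the cookie's lifetime with the refcount above:
 *
 *	sched_core_get();	// first user enables the static key
 *	// ... tasks carry a cookie; core selection is active ...
 *	sched_core_put();	// last user disables it, deferred to a
 *				// workqueue since static key updates block
 */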

#else /* !CONFIG_SCHED_CORE */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }

#endif /* CONFIG_SCHED_CORE */

/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ]
 *
 * Task wakeup, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task():	p->sched_task_group
 *  - uclamp_update_active():	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */
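
/*
 * Illustrative sketch (editor's note) of the canonical "special state"
 * pattern described above, as used by the system-call paths:
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);	// p->pi_lock + rq->lock
 *	update_rq_clock(rq);
 *	// ... p->sched_* / p->uclamp* updates are stable here ...
 *	task_rq_unlock(rq, p, &rf);
 */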

void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
	raw_spinlock_t *lock;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		raw_spin_lock_nested(&rq->__lock, subclass);
		/* preempt_count *MUST* be > 1 */
		preempt_enable_no_resched();
		return;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		raw_spin_lock_nested(lock, subclass);
		if (likely(lock == __rq_lockp(rq))) {
			/* preempt_count *MUST* be > 1 */
			preempt_enable_no_resched();
			return;
		}
		raw_spin_unlock(lock);
	}
}

bool raw_spin_rq_trylock(struct rq *rq)
{
	raw_spinlock_t *lock;
	bool ret;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		ret = raw_spin_trylock(&rq->__lock);
		preempt_enable();
		return ret;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		ret = raw_spin_trylock(lock);
		if (!ret || (likely(lock == __rq_lockp(rq)))) {
			preempt_enable();
			return ret;
		}
		raw_spin_unlock(lock);
	}
}

void raw_spin_rq_unlock(struct rq *rq)
{
	raw_spin_unlock(rq_lockp(rq));
}

#ifdef CONFIG_SMP
/*
 * double_rq_lock - safely lock two runqueues
 */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	lockdep_assert_irqs_disabled();

	if (rq_order_less(rq2, rq1))
		swap(rq1, rq2);

	raw_spin_rq_lock(rq1);
	if (__rq_lockp(rq1) != __rq_lockp(rq2))
		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

	double_rq_clock_clear_update(rq1, rq2);
}
#endif

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight miss-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
	psi_account_irqtime(rq->curr, irq_delta);
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}
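
/*
 * Worked example (editor's note, hypothetical numbers): if 3 ms elapsed
 * since the last update, of which 1 ms was {soft,}irq time and 0.5 ms was
 * stolen by the hypervisor, then rq->clock advances by the full 3 ms in
 * update_rq_clock() below while rq->clock_task above only advances by
 * the remaining 1.5 ms.
 */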

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_rq_held(rq);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = rq->hrtick_time;

	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _val = *_ptr;				\
									\
		do {							\
		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
	_val;								\
})
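
/*
 * Illustrative semantics (editor's note): fetch_or() atomically ORs in
 * @mask and evaluates to the *old* value; e.g. with *p == 0x1,
 * fetch_or(p, 0x4) leaves *p == 0x5 and yields 0x1. That lets a caller
 * set its own bit and test another bit in one atomic step, which is
 * exactly how set_nr_and_not_polling() below avoids spurious IPIs.
 */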

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
			break;
	}
	return true;
}

#else
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static inline bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold a reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending on whether or not the @task is
 * already queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}
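
/*
 * Illustrative usage (editor's sketch; 'lock', 'waiters' and struct
 * my_waiter are hypothetical):
 *
 *	DEFINE_WAKE_Q(wake_q);
 *	struct my_waiter *w;
 *
 *	spin_lock(&lock);
 *	list_for_each_entry(w, &waiters, list)
 *		wake_q_add(&wake_q, w->task);
 *	spin_unlock(&lock);
 *
 *	wake_up_q(&wake_q);	// issue the wakeups outside the lock
 */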

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_rq_held(rq);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;
	const struct cpumask *hk_mask;

	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
	cpu = default_cpu;
unlock:
	rcu_read_unlock();
	return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance && !need_resched()) {
		rq->nohz_idle_balance = flags;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to affect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
	 * tick: there is no forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL, RR or FIFO tasks, there must only be CFS tasks
	 * left; if there is more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif
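
/*
 * Illustrative usage (editor's sketch; my_tg_visit and ctx are
 * hypothetical): walk the whole hierarchy with tg_nop() as the no-op
 * up() callback:
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, my_tg_visit, tg_nop, &ctx);
 *	rcu_read_unlock();
 */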

static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (task_has_idle_policy(p)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
	}
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
static unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less than or equal to these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since we then have actual users that make use of uclamp
 * functionality.
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);

/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

#define for_each_clamp_id(clamp_id) \
	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)

static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
}
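
/*
 * Worked example (editor's note, assuming SCHED_CAPACITY_SCALE == 1024 and
 * UCLAMP_BUCKETS == 5): UCLAMP_BUCKET_DELTA == DIV_ROUND_CLOSEST(1024, 5)
 * == 205, so clamp values map as 0..204 -> bucket 0, 205..409 -> bucket 1,
 * and so on; 1024 / 205 == 4 == UCLAMP_BUCKETS - 1, which the min_t()
 * above enforces as the top bucket.
 */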

static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;
	return SCHED_CAPACITY_SCALE;
}

static inline void uclamp_se_set(struct uclamp_se *uc_se,
				 unsigned int value, bool user_defined)
{
	uc_se->value = value;
	uc_se->bucket_id = uclamp_bucket_id(value);
	uc_se->user_defined = user_defined;
}

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	uclamp_rq_set(rq, clamp_id, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * topmost bucket with tasks in it.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
	unsigned int default_util_min;
	struct uclamp_se *uc_se;

	lockdep_assert_held(&p->pi_lock);

	uc_se = &p->uclamp_req[UCLAMP_MIN];

	/* Only sync if user didn't override the default */
	if (uc_se->user_defined)
		return;

	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
	uclamp_se_set(uc_se, default_util_min, false);
}

static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	if (!rt_task(p))
		return;

	/* Protect updates to p->uclamp_* */
	rq = task_rq_lock(p, &rf);
	__uclamp_update_util_min_rt_default(p);
	task_rq_unlock(rq, p, &rf);
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
	/* Copy by value as we could modify it */
	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
	unsigned int tg_min, tg_max, value;

	/*
	 * Tasks in autogroups or root task group will be
	 * restricted by system defaults.
	 */
	if (task_group_is_autogroup(task_group(p)))
		return uc_req;
	if (task_group(p) == &root_task_group)
		return uc_req;

	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
	value = uc_req.value;
	value = clamp(value, tg_min, tg_max);
	uclamp_se_set(&uc_req, value, false);
#endif

	return uc_req;
}

/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from user-space
 * - the task group effective clamp value, for tasks neither in the root
 *   group nor in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
	struct uclamp_se uc_max = uclamp_default[clamp_id];

	/* System default restrictions always apply */
	if (unlikely(uc_req.value > uc_max.value))
		return uc_max;

	return uc_req;
}
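
/*
 * Worked example (editor's note, hypothetical values): a task requests
 * UCLAMP_MIN == 800 while its task group allows at most 600 and the system
 * default caps UCLAMP_MIN at 512. uclamp_tg_restrict() clamps the request
 * to 600, and uclamp_eff_get() then applies the system default, yielding
 * an effective UCLAMP_MIN of 512.
 */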

unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_eff;

	/* Task currently refcounted: use back-annotated (effective) value */
	if (p->uclamp[clamp_id].active)
		return (unsigned long)p->uclamp[clamp_id].value;

	uc_eff = uclamp_eff_get(p, clamp_id);

	return (unsigned long)uc_eff.value;
}

/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space; track
 * within each bucket the maximum value of the tasks refcounted in it.
 * This "local max aggregation" makes it possible to track the exact
 * "requested" value for each bucket when all its RUNNABLE tasks require
 * the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;

	lockdep_assert_rq_held(rq);

	/* Update task effective clamp */
	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	bucket->tasks++;
	uc_se->active = true;

	uclamp_idle_reset(rq, clamp_id, uc_se->value);

	/*
	 * Local max aggregation: rq buckets always track the max
	 * "requested" clamp value of its RUNNABLE tasks.
	 */
	if (bucket->tasks == 1 || uc_se->value > bucket->value)
		bucket->value = uc_se->value;

	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
		uclamp_rq_set(rq, clamp_id, uc_se->value);
}
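
/*
 * Illustrative example (editor's note): two RUNNABLE tasks requesting
 * UCLAMP_MIN values of 300 and 350 land in the same bucket, which then
 * tracks max(300, 350) == 350. If the 350 task is dequeued first, the
 * bucket keeps reporting 350 (possibly overboosting the remaining task)
 * until it empties; that is the trade-off accepted by uclamp_rq_dec_id()
 * below.
 */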

/*
 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
 * is released. If this is the last task reference counting the rq's max
 * active clamp value, then the rq's clamp value is updated.
 *
 * Both refcounted tasks and rq's cached clamp values are expected to be
 * always valid. If it's detected they are not, as defensive programming,
 * enforce the expected state and warn.
 */
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;
	unsigned int bkt_clamp;
	unsigned int rq_clamp;

	lockdep_assert_rq_held(rq);

	/*
	 * If sched_uclamp_used was enabled after task @p was enqueued,
	 * we could end up with an unbalanced call to uclamp_rq_dec_id().
	 *
	 * In this case the uc_se->active flag should be false since no uclamp
	 * accounting was performed at enqueue time and we can just return
	 * here.
	 *
	 * Need to be careful of the following enqueue/dequeue ordering
	 * problem too
	 *
	 *	enqueue(taskA)
	 *	// sched_uclamp_used gets enabled
	 *	enqueue(taskB)
	 *	dequeue(taskA)
	 *	// Must not decrement bucket->tasks here
	 *	dequeue(taskB)
	 *
	 * where we could end up with stale data in uc_se and
	 * bucket[uc_se->bucket_id].
	 *
	 * The following check here eliminates the possibility of such race.
	 */
	if (unlikely(!uc_se->active))
		return;

	bucket = &uc_rq->bucket[uc_se->bucket_id];

	SCHED_WARN_ON(!bucket->tasks);
	if (likely(bucket->tasks))
		bucket->tasks--;

	uc_se->active = false;

	/*
	 * Keep "local max aggregation" simple and accept to (possibly)
	 * overboost some RUNNABLE tasks in the same bucket.
	 * The rq clamp bucket value is reset to its base value whenever
	 * there are no more RUNNABLE tasks refcounting it.
	 */
	if (likely(bucket->tasks))
		return;

	rq_clamp = uclamp_rq_get(rq, clamp_id);
	/*
	 * Defensive programming: this should never happen. If it happens,
	 * e.g. due to future modification, warn and fixup the expected value.
	 */
	SCHED_WARN_ON(bucket->value > rq_clamp);
	if (bucket->value >= rq_clamp) {
		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
		uclamp_rq_set(rq, clamp_id, bkt_clamp);
	}
}

static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_inc_id(rq, p, clamp_id);

	/* Reset clamp idle holding when there is one RUNNABLE task */
	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_dec_id(rq, p, clamp_id);
}

static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
				      enum uclamp_id clamp_id)
{
	if (!p->uclamp[clamp_id].active)
		return;

	uclamp_rq_dec_id(rq, p, clamp_id);
	uclamp_rq_inc_id(rq, p, clamp_id);

	/*
	 * Make sure to clear the idle flag if we've transiently reached 0
	 * active tasks on rq.
	 */
	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void
uclamp_update_active(struct task_struct *p)
{
	enum uclamp_id clamp_id;
	struct rq_flags rf;
	struct rq *rq;

	/*
	 * Lock the task and the rq where the task is (or was) queued.
	 *
	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
	 * price to pay to safely serialize util_{min,max} updates with
	 * enqueues, dequeues and migration operations.
	 * This is the same locking schema used by __set_cpus_allowed_ptr().
	 */
	rq = task_rq_lock(p, &rf);

	/*
	 * Setting the clamp bucket is serialized by task_rq_lock().
	 * If the task is not yet RUNNABLE and its task_struct is not
	 * affecting a valid clamp bucket, the next time it's enqueued,
	 * it will already see the updated clamp bucket value.
	 */
	for_each_clamp_id(clamp_id)
		uclamp_rq_reinc_id(rq, p, clamp_id);

	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *p;

	css_task_iter_start(css, 0, &it);
	while ((p = css_task_iter_next(&it)))
		uclamp_update_active(p);
	css_task_iter_end(&it);
}

static void cpu_util_update_eff(struct cgroup_subsys_state *css);
#endif

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_UCLAMP_TASK
#ifdef CONFIG_UCLAMP_TASK_GROUP
static void uclamp_update_root_tg(void)
{
	struct task_group *tg = &root_task_group;

	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
		      sysctl_sched_uclamp_util_min, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
		      sysctl_sched_uclamp_util_max, false);

	rcu_read_lock();
	cpu_util_update_eff(&root_task_group.css);
	rcu_read_unlock();
}
#else
static void uclamp_update_root_tg(void) { }
#endif

static void uclamp_sync_util_min_rt_default(void)
{
	struct task_struct *g, *p;

	/*
	 *	copy_process()			sysctl_uclamp
	 *					  uclamp_min_rt = X;
	 *	  write_lock(&tasklist_lock)	  read_lock(&tasklist_lock)
	 *	  // link thread		  smp_mb__after_spinlock()
	 *	  write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
	 *	sched_post_fork()		  for_each_process_thread()
	 *	  __uclamp_sync_rt()		    __uclamp_sync_rt()
	 *
	 * Ensures that either sched_post_fork() will observe the new
	 * uclamp_min_rt or for_each_process_thread() will observe the new
	 * task.
	 */
	read_lock(&tasklist_lock);
	smp_mb__after_spinlock();
	read_unlock(&tasklist_lock);

	rcu_read_lock();
	for_each_process_thread(g, p)
		uclamp_update_util_min_rt_default(p);
	rcu_read_unlock();
}

static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	bool update_root_tg = false;
	int old_min, old_max, old_min_rt;
	int result;

	mutex_lock(&uclamp_mutex);
	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;
	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		goto done;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {

		result = -EINVAL;
		goto undo;
	}

	if (old_min != sysctl_sched_uclamp_util_min) {
		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
			      sysctl_sched_uclamp_util_min, false);
		update_root_tg = true;
	}
	if (old_max != sysctl_sched_uclamp_util_max) {
		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
			      sysctl_sched_uclamp_util_max, false);
		update_root_tg = true;
	}

	if (update_root_tg) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_update_root_tg();
	}

	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_sync_util_min_rt_default();
	}

	/*
	 * We update all RUNNABLE tasks only when task groups are in use.
	 * Otherwise, keep it simple and do just a lazy update at each next
	 * task enqueue time.
	 */

	goto done;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
done:
	mutex_unlock(&uclamp_mutex);

	return result;
}
#endif
#endif

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

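	/*
	 * Editor's note: -1 passed via sched_util_{min,max} is the "reset
	 * to default" sentinel (see uclamp_reset() below). The
	 * 'value + 1 > SCHED_CAPACITY_SCALE + 1' form used below lets -1
	 * through (it becomes 0 on the left-hand side) while still
	 * rejecting anything above SCHED_CAPACITY_SCALE.
	 */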
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	static_branch_enable(&sched_uclamp_used);

	return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT tasks by default have a 100% boost value that could be
		 * modified at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);
	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

static void uclamp_fork(struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
	 * as the task is still at its early fork stages.
	 */
	for_each_clamp_id(clamp_id)
		p->uclamp[clamp_id].active = false;

	if (likely(!p->sched_reset_on_fork))
		return;

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&p->uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}
}

static void uclamp_post_fork(struct task_struct *p)
{
	uclamp_update_util_min_rt_default(p);
}

static void __init init_uclamp_rq(struct rq *rq)
{
	enum uclamp_id clamp_id;
	struct uclamp_rq *uc_rq = rq->uclamp;

	for_each_clamp_id(clamp_id) {
		uc_rq[clamp_id] = (struct uclamp_rq) {
			.value = uclamp_none(clamp_id)
		};
	}

	rq->uclamp_flags = UCLAMP_FLAG_IDLE;
}

static void __init init_uclamp(void)
{
	struct uclamp_se uc_max = {};
	enum uclamp_id clamp_id;
	int cpu;

	for_each_possible_cpu(cpu)
		init_uclamp_rq(cpu_rq(cpu));

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&init_task.uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}

	/* System defaults allow max clamp values for both indexes */
	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
	for_each_clamp_id(clamp_id) {
		uclamp_default[clamp_id] = uc_max;
#ifdef CONFIG_UCLAMP_TASK_GROUP
		root_task_group.uclamp_req[clamp_id] = uc_max;
		root_task_group.uclamp[clamp_id] = uc_max;
#endif
	}
}

#else /* CONFIG_UCLAMP_TASK */
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
static inline void uclamp_fork(struct task_struct *p) { }
static inline void uclamp_post_fork(struct task_struct *p) { }
static inline void init_uclamp(void) { }
#endif /* CONFIG_UCLAMP_TASK */

bool sched_task_on_rq(struct task_struct *p)
{
	return task_on_rq_queued(p);
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip = 0;
	unsigned int state;

	if (!p || p == current)
		return 0;

	/* Only get wchan if task is blocked and we can keep it that way. */
	raw_spin_lock_irq(&p->pi_lock);
	state = READ_ONCE(p->__state);
	smp_rmb(); /* see try_to_wake_up() */
	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
		ip = __get_wchan(p);
	raw_spin_unlock_irq(&p->pi_lock);

	return ip;
}

static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & ENQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & ENQUEUE_RESTORE)) {
		sched_info_enqueue(rq, p);
		psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
	}

	uclamp_rq_inc(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);

	if (sched_core_enabled(rq))
		sched_core_enqueue(rq, p);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (sched_core_enabled(rq))
		sched_core_dequeue(rq, p, flags);

	if (!(flags & DEQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & DEQUEUE_SAVE)) {
		sched_info_dequeue(rq, p);
		psi_dequeue(p, flags & DEQUEUE_SLEEP);
	}

	uclamp_rq_dec(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	enqueue_task(rq, p, flags);

	p->on_rq = TASK_ON_RQ_QUEUED;
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;

	dequeue_task(rq, p, flags);
2095}
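
/*
 * Note: callers that change the scheduling properties of a possibly
 * queued/running task follow a common pattern (sketched here for
 * illustration; see __do_set_cpus_allowed() below for a real instance):
 *
 *	queued = task_on_rq_queued(p);
 *	running = task_current(rq, p);
 *
 *	if (queued)
 *		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
 *	if (running)
 *		put_prev_task(rq, p);
 *
 *	... change p's scheduling properties ...
 *
 *	if (queued)
 *		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 *	if (running)
 *		set_next_task(rq, p);
 */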
2096
2097static inline int __normal_prio(int policy, int rt_prio, int nice)
2098{
2099 int prio;
2100
2101 if (dl_policy(policy))
2102 prio = MAX_DL_PRIO - 1;
2103 else if (rt_policy(policy))
2104 prio = MAX_RT_PRIO - 1 - rt_prio;
2105 else
2106 prio = NICE_TO_PRIO(nice);
2107
2108 return prio;
2109}
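
/*
 * Worked examples (using the kernel's priority constants, where
 * MAX_DL_PRIO == 0, MAX_RT_PRIO == 100 and NICE_TO_PRIO(0) == 120):
 *
 *	__normal_prio(SCHED_DEADLINE, 0, 0)	== -1
 *	__normal_prio(SCHED_FIFO, 50, 0)	== 49
 *	__normal_prio(SCHED_NORMAL, 0, 0)	== 120
 *	__normal_prio(SCHED_NORMAL, 0, -20)	== 100
 */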
2110
2111/*
2112 * Calculate the expected normal priority: i.e. priority
2113 * without taking RT-inheritance into account. Might be
2114 * boosted by interactivity modifiers. Changes upon fork,
2115 * setprio syscalls, and whenever the interactivity
2116 * estimator recalculates.
2117 */
2118static inline int normal_prio(struct task_struct *p)
2119{
2120 return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
2121}
2122
2123/*
2124 * Calculate the current priority, i.e. the priority
2125 * taken into account by the scheduler. This value might
2126 * be boosted by RT tasks, or might be boosted by
2127 * interactivity modifiers. Will be RT if the task got
2128 * RT-boosted. If not then it returns p->normal_prio.
2129 */
2130static int effective_prio(struct task_struct *p)
2131{
2132 p->normal_prio = normal_prio(p);
2133 /*
2134 * If we are RT tasks or we were boosted to RT priority,
2135 * keep the priority unchanged. Otherwise, update priority
2136 * to the normal priority:
2137 */
2138 if (!rt_prio(p->prio))
2139 return p->normal_prio;
2140 return p->prio;
2141}
2142
2143/**
2144 * task_curr - is this task currently executing on a CPU?
2145 * @p: the task in question.
2146 *
2147 * Return: 1 if the task is currently executing. 0 otherwise.
2148 */
2149inline int task_curr(const struct task_struct *p)
2150{
2151 return cpu_curr(task_cpu(p)) == p;
2152}
2153
/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock;
 * use the balance_callback list if you want balancing.
 *
 * This means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
2161static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2162 const struct sched_class *prev_class,
2163 int oldprio)
2164{
2165 if (prev_class != p->sched_class) {
2166 if (prev_class->switched_from)
2167 prev_class->switched_from(rq, p);
2168
2169 p->sched_class->switched_to(rq, p);
2170 } else if (oldprio != p->prio || dl_task(p))
2171 p->sched_class->prio_changed(rq, p, oldprio);
2172}
2173
2174void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
2175{
2176 if (p->sched_class == rq->curr->sched_class)
2177 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
2178 else if (sched_class_above(p->sched_class, rq->curr->sched_class))
2179 resched_curr(rq);
2180
2181 /*
2182 * A queue event has occurred, and we're going to schedule. In
2183 * this case, we can save a useless back to back clock update.
2184 */
2185 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
2186 rq_clock_skip_update(rq);
2187}
2188
2189#ifdef CONFIG_SMP
2190
2191static void
2192__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2193
2194static int __set_cpus_allowed_ptr(struct task_struct *p,
2195 struct affinity_context *ctx);
2196
2197static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2198{
2199 struct affinity_context ac = {
2200 .new_mask = cpumask_of(rq->cpu),
2201 .flags = SCA_MIGRATE_DISABLE,
2202 };
2203
2204 if (likely(!p->migration_disabled))
2205 return;
2206
2207 if (p->cpus_ptr != &p->cpus_mask)
2208 return;
2209
2210 /*
2211 * Violates locking rules! see comment in __do_set_cpus_allowed().
2212 */
2213 __do_set_cpus_allowed(p, &ac);
2214}
2215
2216void migrate_disable(void)
2217{
2218 struct task_struct *p = current;
2219
2220 if (p->migration_disabled) {
2221 p->migration_disabled++;
2222 return;
2223 }
2224
2225 preempt_disable();
2226 this_rq()->nr_pinned++;
2227 p->migration_disabled = 1;
2228 preempt_enable();
2229}
2230EXPORT_SYMBOL_GPL(migrate_disable);
2231
2232void migrate_enable(void)
2233{
2234 struct task_struct *p = current;
2235 struct affinity_context ac = {
2236 .new_mask = &p->cpus_mask,
2237 .flags = SCA_MIGRATE_ENABLE,
2238 };
2239
2240 if (p->migration_disabled > 1) {
2241 p->migration_disabled--;
2242 return;
2243 }
2244
2245 if (WARN_ON_ONCE(!p->migration_disabled))
2246 return;
2247
2248 /*
2249 * Ensure stop_task runs either before or after this, and that
2250 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
2251 */
2252 preempt_disable();
2253 if (p->cpus_ptr != &p->cpus_mask)
2254 __set_cpus_allowed_ptr(p, &ac);
2255 /*
2256 * Mustn't clear migration_disabled() until cpus_ptr points back at the
2257 * regular cpus_mask, otherwise things that race (eg.
2258 * select_fallback_rq) get confused.
2259 */
2260 barrier();
2261 p->migration_disabled = 0;
2262 this_rq()->nr_pinned--;
2263 preempt_enable();
2264}
2265EXPORT_SYMBOL_GPL(migrate_enable);
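
/*
 * Illustrative usage of the pair above: unlike preempt_disable(),
 * migrate_disable() keeps the task preemptible; it only pins it to the
 * current CPU. The calls nest:
 *
 *	migrate_disable();
 *	migrate_disable();	(nested: only bumps the count)
 *	... per-CPU data stays on this CPU, but we may be preempted ...
 *	migrate_enable();
 *	migrate_enable();	(outermost: the task may migrate again)
 */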
2266
2267static inline bool rq_has_pinned_tasks(struct rq *rq)
2268{
2269 return rq->nr_pinned;
2270}
2271
2272/*
2273 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2274 * __set_cpus_allowed_ptr() and select_fallback_rq().
2275 */
2276static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2277{
2278 /* When not in the task's cpumask, no point in looking further. */
2279 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
2280 return false;
2281
2282 /* migrate_disabled() must be allowed to finish. */
2283 if (is_migration_disabled(p))
2284 return cpu_online(cpu);
2285
	/*
	 * Non-kernel threads are not allowed during either online or
	 * offline transitions.
	 */
2287 if (!(p->flags & PF_KTHREAD))
2288 return cpu_active(cpu) && task_cpu_possible(cpu, p);
2289
2290 /* KTHREAD_IS_PER_CPU is always allowed. */
2291 if (kthread_is_per_cpu(p))
2292 return cpu_online(cpu);
2293
2294 /* Regular kernel threads don't get to stay during offline. */
2295 if (cpu_dying(cpu))
2296 return false;
2297
2298 /* But are allowed during online. */
2299 return cpu_online(cpu);
2300}
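
/*
 * Summary of is_cpu_allowed(), in evaluation order:
 *
 *	CPU not in p->cpus_ptr		-> false
 *	migration disabled		-> cpu_online(cpu)
 *	not a kernel thread		-> cpu_active(cpu) && task_cpu_possible()
 *	per-CPU kthread			-> cpu_online(cpu)
 *	CPU dying			-> false
 *	other kthreads			-> cpu_online(cpu)
 */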
2301
2302/*
2303 * This is how migration works:
2304 *
2305 * 1) we invoke migration_cpu_stop() on the target CPU using
2306 * stop_one_cpu().
2307 * 2) stopper starts to run (implicitly forcing the migrated thread
2308 * off the CPU)
2309 * 3) it checks whether the migrated task is still in the wrong runqueue.
2310 * 4) if it's in the wrong runqueue then the migration thread removes
2311 * it and puts it into the right queue.
2312 * 5) stopper completes and stop_one_cpu() returns and the migration
2313 * is done.
2314 */
2315
2316/*
2317 * move_queued_task - move a queued task to new rq.
2318 *
2319 * Returns (locked) new rq. Old rq's lock is released.
2320 */
2321static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2322 struct task_struct *p, int new_cpu)
2323{
2324 lockdep_assert_rq_held(rq);
2325
2326 deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2327 set_task_cpu(p, new_cpu);
2328 rq_unlock(rq, rf);
2329
2330 rq = cpu_rq(new_cpu);
2331
2332 rq_lock(rq, rf);
2333 WARN_ON_ONCE(task_cpu(p) != new_cpu);
2334 activate_task(rq, p, 0);
2335 check_preempt_curr(rq, p, 0);
2336
2337 return rq;
2338}
2339
2340struct migration_arg {
2341 struct task_struct *task;
2342 int dest_cpu;
2343 struct set_affinity_pending *pending;
2344};
2345
2346/*
2347 * @refs: number of wait_for_completion()
2348 * @stop_pending: is @stop_work in use
2349 */
2350struct set_affinity_pending {
2351 refcount_t refs;
2352 unsigned int stop_pending;
2353 struct completion done;
2354 struct cpu_stop_work stop_work;
2355 struct migration_arg arg;
2356};
2357
2358/*
2359 * Move (not current) task off this CPU, onto the destination CPU. We're doing
2360 * this because either it can't run here any more (set_cpus_allowed()
2361 * away from this CPU, or CPU going down), or because we're
2362 * attempting to rebalance this task on exec (sched_exec).
2363 *
2364 * So we race with normal scheduler movements, but that's OK, as long
2365 * as the task is no longer on this CPU.
2366 */
2367static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2368 struct task_struct *p, int dest_cpu)
2369{
2370 /* Affinity changed (again). */
2371 if (!is_cpu_allowed(p, dest_cpu))
2372 return rq;
2373
2374 update_rq_clock(rq);
2375 rq = move_queued_task(rq, rf, p, dest_cpu);
2376
2377 return rq;
2378}
2379
2380/*
2381 * migration_cpu_stop - this will be executed by a highprio stopper thread
2382 * and performs thread migration by bumping thread off CPU then
2383 * 'pushing' onto another runqueue.
2384 */
2385static int migration_cpu_stop(void *data)
2386{
2387 struct migration_arg *arg = data;
2388 struct set_affinity_pending *pending = arg->pending;
2389 struct task_struct *p = arg->task;
2390 struct rq *rq = this_rq();
2391 bool complete = false;
2392 struct rq_flags rf;
2393
2394 /*
2395 * The original target CPU might have gone down and we might
2396 * be on another CPU but it doesn't matter.
2397 */
2398 local_irq_save(rf.flags);
2399 /*
2400 * We need to explicitly wake pending tasks before running
2401 * __migrate_task() such that we will not miss enforcing cpus_ptr
2402 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2403 */
2404 flush_smp_call_function_queue();
2405
2406 raw_spin_lock(&p->pi_lock);
2407 rq_lock(rq, &rf);
2408
2409 /*
2410 * If we were passed a pending, then ->stop_pending was set, thus
2411 * p->migration_pending must have remained stable.
2412 */
2413 WARN_ON_ONCE(pending && pending != p->migration_pending);
2414
2415 /*
2416 * If task_rq(p) != rq, it cannot be migrated here, because we're
2417 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2418 * we're holding p->pi_lock.
2419 */
2420 if (task_rq(p) == rq) {
2421 if (is_migration_disabled(p))
2422 goto out;
2423
2424 if (pending) {
2425 p->migration_pending = NULL;
2426 complete = true;
2427
2428 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2429 goto out;
2430 }
2431
2432 if (task_on_rq_queued(p))
2433 rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2434 else
2435 p->wake_cpu = arg->dest_cpu;
2436
2437 /*
2438 * XXX __migrate_task() can fail, at which point we might end
2439 * up running on a dodgy CPU, AFAICT this can only happen
2440 * during CPU hotplug, at which point we'll get pushed out
2441 * anyway, so it's probably not a big deal.
2442 */
2443
2444 } else if (pending) {
2445 /*
2446 * This happens when we get migrated between migrate_enable()'s
2447 * preempt_enable() and scheduling the stopper task. At that
2448 * point we're a regular task again and not current anymore.
2449 *
2450 * A !PREEMPT kernel has a giant hole here, which makes it far
2451 * more likely.
2452 */
2453
2454 /*
2455 * The task moved before the stopper got to run. We're holding
2456 * ->pi_lock, so the allowed mask is stable - if it got
2457 * somewhere allowed, we're done.
2458 */
2459 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2460 p->migration_pending = NULL;
2461 complete = true;
2462 goto out;
2463 }
2464
2465 /*
2466 * When migrate_enable() hits a rq mis-match we can't reliably
2467 * determine is_migration_disabled() and so have to chase after
2468 * it.
2469 */
2470 WARN_ON_ONCE(!pending->stop_pending);
2471 task_rq_unlock(rq, p, &rf);
2472 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2473 &pending->arg, &pending->stop_work);
2474 return 0;
2475 }
2476out:
2477 if (pending)
2478 pending->stop_pending = false;
2479 task_rq_unlock(rq, p, &rf);
2480
2481 if (complete)
2482 complete_all(&pending->done);
2483
2484 return 0;
2485}
2486
2487int push_cpu_stop(void *arg)
2488{
2489 struct rq *lowest_rq = NULL, *rq = this_rq();
2490 struct task_struct *p = arg;
2491
2492 raw_spin_lock_irq(&p->pi_lock);
2493 raw_spin_rq_lock(rq);
2494
2495 if (task_rq(p) != rq)
2496 goto out_unlock;
2497
2498 if (is_migration_disabled(p)) {
2499 p->migration_flags |= MDF_PUSH;
2500 goto out_unlock;
2501 }
2502
2503 p->migration_flags &= ~MDF_PUSH;
2504
2505 if (p->sched_class->find_lock_rq)
2506 lowest_rq = p->sched_class->find_lock_rq(p, rq);
2507
2508 if (!lowest_rq)
2509 goto out_unlock;
2510
2511 // XXX validate p is still the highest prio task
2512 if (task_rq(p) == rq) {
2513 deactivate_task(rq, p, 0);
2514 set_task_cpu(p, lowest_rq->cpu);
2515 activate_task(lowest_rq, p, 0);
2516 resched_curr(lowest_rq);
2517 }
2518
2519 double_unlock_balance(rq, lowest_rq);
2520
2521out_unlock:
2522 rq->push_busy = false;
2523 raw_spin_rq_unlock(rq);
2524 raw_spin_unlock_irq(&p->pi_lock);
2525
2526 put_task_struct(p);
2527 return 0;
2528}
2529
2530/*
2531 * sched_class::set_cpus_allowed must do the below, but is not required to
2532 * actually call this function.
2533 */
2534void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2535{
2536 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2537 p->cpus_ptr = ctx->new_mask;
2538 return;
2539 }
2540
2541 cpumask_copy(&p->cpus_mask, ctx->new_mask);
2542 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2543
2544 /*
2545 * Swap in a new user_cpus_ptr if SCA_USER flag set
2546 */
2547 if (ctx->flags & SCA_USER)
2548 swap(p->user_cpus_ptr, ctx->user_mask);
2549}
2550
2551static void
2552__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2553{
2554 struct rq *rq = task_rq(p);
2555 bool queued, running;
2556
2557 /*
2558 * This here violates the locking rules for affinity, since we're only
2559 * supposed to change these variables while holding both rq->lock and
2560 * p->pi_lock.
2561 *
2562 * HOWEVER, it magically works, because ttwu() is the only code that
2563 * accesses these variables under p->pi_lock and only does so after
2564 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2565 * before finish_task().
2566 *
2567 * XXX do further audits, this smells like something putrid.
2568 */
2569 if (ctx->flags & SCA_MIGRATE_DISABLE)
2570 SCHED_WARN_ON(!p->on_cpu);
2571 else
2572 lockdep_assert_held(&p->pi_lock);
2573
2574 queued = task_on_rq_queued(p);
2575 running = task_current(rq, p);
2576
2577 if (queued) {
2578 /*
2579 * Because __kthread_bind() calls this on blocked tasks without
2580 * holding rq->lock.
2581 */
2582 lockdep_assert_rq_held(rq);
2583 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
2584 }
2585 if (running)
2586 put_prev_task(rq, p);
2587
2588 p->sched_class->set_cpus_allowed(p, ctx);
2589
2590 if (queued)
2591 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
2592 if (running)
2593 set_next_task(rq, p);
2594}
2595
2596/*
2597 * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2598 * affinity (if any) should be destroyed too.
2599 */
2600void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2601{
2602 struct affinity_context ac = {
2603 .new_mask = new_mask,
2604 .user_mask = NULL,
2605 .flags = SCA_USER, /* clear the user requested mask */
2606 };
2607 union cpumask_rcuhead {
2608 cpumask_t cpumask;
2609 struct rcu_head rcu;
2610 };
2611
2612 __do_set_cpus_allowed(p, &ac);
2613
2614 /*
2615 * Because this is called with p->pi_lock held, it is not possible
2616 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2617 * kfree_rcu().
2618 */
2619 kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2620}
2621
2622static cpumask_t *alloc_user_cpus_ptr(int node)
2623{
2624 /*
2625 * See do_set_cpus_allowed() above for the rcu_head usage.
2626 */
2627 int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
2628
2629 return kmalloc_node(size, GFP_KERNEL, node);
2630}
2631
2632int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2633 int node)
2634{
2635 cpumask_t *user_mask;
2636 unsigned long flags;
2637
2638 /*
2639 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
2640 * may differ by now due to racing.
2641 */
2642 dst->user_cpus_ptr = NULL;
2643
2644 /*
2645 * This check is racy and losing the race is a valid situation.
2646 * It is not worth the extra overhead of taking the pi_lock on
2647 * every fork/clone.
2648 */
2649 if (data_race(!src->user_cpus_ptr))
2650 return 0;
2651
2652 user_mask = alloc_user_cpus_ptr(node);
2653 if (!user_mask)
2654 return -ENOMEM;
2655
2656 /*
2657 * Use pi_lock to protect content of user_cpus_ptr
2658 *
2659 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2660 * do_set_cpus_allowed().
2661 */
2662 raw_spin_lock_irqsave(&src->pi_lock, flags);
2663 if (src->user_cpus_ptr) {
2664 swap(dst->user_cpus_ptr, user_mask);
2665 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2666 }
2667 raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2668
2669 if (unlikely(user_mask))
2670 kfree(user_mask);
2671
2672 return 0;
2673}
2674
2675static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2676{
2677 struct cpumask *user_mask = NULL;
2678
2679 swap(p->user_cpus_ptr, user_mask);
2680
2681 return user_mask;
2682}
2683
2684void release_user_cpus_ptr(struct task_struct *p)
2685{
2686 kfree(clear_user_cpus_ptr(p));
2687}
2688
2689/*
2690 * This function is wildly self concurrent; here be dragons.
2691 *
2692 *
2693 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2694 * designated task is enqueued on an allowed CPU. If that task is currently
2695 * running, we have to kick it out using the CPU stopper.
2696 *
2697 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2698 * Consider:
2699 *
2700 * Initial conditions: P0->cpus_mask = [0, 1]
2701 *
2702 * P0@CPU0 P1
2703 *
2704 * migrate_disable();
2705 * <preempted>
2706 * set_cpus_allowed_ptr(P0, [1]);
2707 *
2708 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2709 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2710 * This means we need the following scheme:
2711 *
2712 * P0@CPU0 P1
2713 *
2714 * migrate_disable();
2715 * <preempted>
2716 * set_cpus_allowed_ptr(P0, [1]);
2717 * <blocks>
2718 * <resumes>
2719 * migrate_enable();
2720 * __set_cpus_allowed_ptr();
2721 * <wakes local stopper>
2722 * `--> <woken on migration completion>
2723 *
2724 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2725 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2726 * task p are serialized by p->pi_lock, which we can leverage: the one that
2727 * should come into effect at the end of the Migrate-Disable region is the last
2728 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2729 * but we still need to properly signal those waiting tasks at the appropriate
2730 * moment.
2731 *
2732 * This is implemented using struct set_affinity_pending. The first
2733 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2734 * setup an instance of that struct and install it on the targeted task_struct.
2735 * Any and all further callers will reuse that instance. Those then wait for
2736 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2737 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2738 *
2739 *
2740 * (1) In the cases covered above. There is one more where the completion is
2741 * signaled within affine_move_task() itself: when a subsequent affinity request
2742 * occurs after the stopper bailed out due to the targeted task still being
 * Migrate-Disabled. Consider:
2744 *
2745 * Initial conditions: P0->cpus_mask = [0, 1]
2746 *
2747 * CPU0 P1 P2
2748 * <P0>
2749 * migrate_disable();
2750 * <preempted>
2751 * set_cpus_allowed_ptr(P0, [1]);
2752 * <blocks>
2753 * <migration/0>
2754 * migration_cpu_stop()
2755 * is_migration_disabled()
2756 * <bails>
2757 * set_cpus_allowed_ptr(P0, [0, 1]);
2758 * <signal completion>
2759 * <awakes>
2760 *
2761 * Note that the above is safe vs a concurrent migrate_enable(), as any
2762 * pending affinity completion is preceded by an uninstallation of
2763 * p->migration_pending done with p->pi_lock held.
2764 */
2765static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2766 int dest_cpu, unsigned int flags)
2767 __releases(rq->lock)
2768 __releases(p->pi_lock)
2769{
2770 struct set_affinity_pending my_pending = { }, *pending = NULL;
2771 bool stop_pending, complete = false;
2772
2773 /* Can the task run on the task's current CPU? If so, we're done */
2774 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
2775 struct task_struct *push_task = NULL;
2776
2777 if ((flags & SCA_MIGRATE_ENABLE) &&
2778 (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2779 rq->push_busy = true;
2780 push_task = get_task_struct(p);
2781 }
2782
2783 /*
2784 * If there are pending waiters, but no pending stop_work,
2785 * then complete now.
2786 */
2787 pending = p->migration_pending;
2788 if (pending && !pending->stop_pending) {
2789 p->migration_pending = NULL;
2790 complete = true;
2791 }
2792
2793 task_rq_unlock(rq, p, rf);
2794
2795 if (push_task) {
2796 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2797 p, &rq->push_work);
2798 }
2799
2800 if (complete)
2801 complete_all(&pending->done);
2802
2803 return 0;
2804 }
2805
2806 if (!(flags & SCA_MIGRATE_ENABLE)) {
2807 /* serialized by p->pi_lock */
2808 if (!p->migration_pending) {
2809 /* Install the request */
2810 refcount_set(&my_pending.refs, 1);
2811 init_completion(&my_pending.done);
2812 my_pending.arg = (struct migration_arg) {
2813 .task = p,
2814 .dest_cpu = dest_cpu,
2815 .pending = &my_pending,
2816 };
2817
2818 p->migration_pending = &my_pending;
2819 } else {
2820 pending = p->migration_pending;
2821 refcount_inc(&pending->refs);
2822 /*
2823 * Affinity has changed, but we've already installed a
2824 * pending. migration_cpu_stop() *must* see this, else
2825 * we risk a completion of the pending despite having a
2826 * task on a disallowed CPU.
2827 *
2828 * Serialized by p->pi_lock, so this is safe.
2829 */
2830 pending->arg.dest_cpu = dest_cpu;
2831 }
2832 }
2833 pending = p->migration_pending;
2834 /*
2835 * - !MIGRATE_ENABLE:
2836 * we'll have installed a pending if there wasn't one already.
2837 *
2838 * - MIGRATE_ENABLE:
2839 * we're here because the current CPU isn't matching anymore,
2840 * the only way that can happen is because of a concurrent
2841 * set_cpus_allowed_ptr() call, which should then still be
2842 * pending completion.
2843 *
2844 * Either way, we really should have a @pending here.
2845 */
2846 if (WARN_ON_ONCE(!pending)) {
2847 task_rq_unlock(rq, p, rf);
2848 return -EINVAL;
2849 }
2850
2851 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
2852 /*
2853 * MIGRATE_ENABLE gets here because 'p == current', but for
2854 * anything else we cannot do is_migration_disabled(), punt
2855 * and have the stopper function handle it all race-free.
2856 */
2857 stop_pending = pending->stop_pending;
2858 if (!stop_pending)
2859 pending->stop_pending = true;
2860
2861 if (flags & SCA_MIGRATE_ENABLE)
2862 p->migration_flags &= ~MDF_PUSH;
2863
2864 task_rq_unlock(rq, p, rf);
2865
2866 if (!stop_pending) {
2867 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
2868 &pending->arg, &pending->stop_work);
2869 }
2870
2871 if (flags & SCA_MIGRATE_ENABLE)
2872 return 0;
2873 } else {
2874
2875 if (!is_migration_disabled(p)) {
2876 if (task_on_rq_queued(p))
2877 rq = move_queued_task(rq, rf, p, dest_cpu);
2878
2879 if (!pending->stop_pending) {
2880 p->migration_pending = NULL;
2881 complete = true;
2882 }
2883 }
2884 task_rq_unlock(rq, p, rf);
2885
2886 if (complete)
2887 complete_all(&pending->done);
2888 }
2889
2890 wait_for_completion(&pending->done);
2891
2892 if (refcount_dec_and_test(&pending->refs))
2893 wake_up_var(&pending->refs); /* No UaF, just an address */
2894
2895 /*
2896 * Block the original owner of &pending until all subsequent callers
	 * have seen the completion and decremented the refcount.
2898 */
2899 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
2900
2901 /* ARGH */
2902 WARN_ON_ONCE(my_pending.stop_pending);
2903
2904 return 0;
2905}
2906
2907/*
2908 * Called with both p->pi_lock and rq->lock held; drops both before returning.
2909 */
2910static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
2911 struct affinity_context *ctx,
2912 struct rq *rq,
2913 struct rq_flags *rf)
2914 __releases(rq->lock)
2915 __releases(p->pi_lock)
2916{
2917 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
2918 const struct cpumask *cpu_valid_mask = cpu_active_mask;
2919 bool kthread = p->flags & PF_KTHREAD;
2920 unsigned int dest_cpu;
2921 int ret = 0;
2922
2923 update_rq_clock(rq);
2924
2925 if (kthread || is_migration_disabled(p)) {
2926 /*
2927 * Kernel threads are allowed on online && !active CPUs,
2928 * however, during cpu-hot-unplug, even these might get pushed
2929 * away if not KTHREAD_IS_PER_CPU.
2930 *
2931 * Specifically, migration_disabled() tasks must not fail the
2932 * cpumask_any_and_distribute() pick below, esp. so on
2933 * SCA_MIGRATE_ENABLE, otherwise we'll not call
2934 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
2935 */
2936 cpu_valid_mask = cpu_online_mask;
2937 }
2938
2939 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
2940 ret = -EINVAL;
2941 goto out;
2942 }
2943
2944 /*
2945 * Must re-check here, to close a race against __kthread_bind(),
2946 * sched_setaffinity() is not guaranteed to observe the flag.
2947 */
2948 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
2949 ret = -EINVAL;
2950 goto out;
2951 }
2952
2953 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
2954 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
2955 if (ctx->flags & SCA_USER)
2956 swap(p->user_cpus_ptr, ctx->user_mask);
2957 goto out;
2958 }
2959
2960 if (WARN_ON_ONCE(p == current &&
2961 is_migration_disabled(p) &&
2962 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
2963 ret = -EBUSY;
2964 goto out;
2965 }
2966 }
2967
2968 /*
2969 * Picking a ~random cpu helps in cases where we are changing affinity
2970 * for groups of tasks (ie. cpuset), so that load balancing is not
2971 * immediately required to distribute the tasks within their new mask.
2972 */
2973 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
2974 if (dest_cpu >= nr_cpu_ids) {
2975 ret = -EINVAL;
2976 goto out;
2977 }
2978
2979 __do_set_cpus_allowed(p, ctx);
2980
2981 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
2982
2983out:
2984 task_rq_unlock(rq, p, rf);
2985
2986 return ret;
2987}
2988
2989/*
2990 * Change a given task's CPU affinity. Migrate the thread to a
2991 * proper CPU and schedule it away if the CPU it's executing on
2992 * is removed from the allowed bitmask.
2993 *
2994 * NOTE: the caller must have a valid reference to the task, the
2995 * task must not exit() & deallocate itself prematurely. The
2996 * call is not atomic; no spinlocks may be held.
2997 */
2998static int __set_cpus_allowed_ptr(struct task_struct *p,
2999 struct affinity_context *ctx)
3000{
3001 struct rq_flags rf;
3002 struct rq *rq;
3003
3004 rq = task_rq_lock(p, &rf);
3005 /*
3006 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3007 * flags are set.
3008 */
3009 if (p->user_cpus_ptr &&
3010 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3011 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3012 ctx->new_mask = rq->scratch_mask;
3013
3014 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3015}
3016
3017int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3018{
3019 struct affinity_context ac = {
3020 .new_mask = new_mask,
3021 .flags = 0,
3022 };
3023
3024 return __set_cpus_allowed_ptr(p, &ac);
3025}
3026EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
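
/*
 * Illustrative call site (hypothetical driver code; kthread_bind() is the
 * usual choice for a not-yet-running kthread):
 *
 *	struct task_struct *tsk = kthread_create(fn, NULL, "mydrv_worker");
 *
 *	if (!IS_ERR(tsk)) {
 *		set_cpus_allowed_ptr(tsk, cpumask_of(2));
 *		wake_up_process(tsk);
 *	}
 */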
3027
3028/*
3029 * Change a given task's CPU affinity to the intersection of its current
3030 * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3031 * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3032 * affinity or use cpu_online_mask instead.
3033 *
3034 * If the resulting mask is empty, leave the affinity unchanged and return
3035 * -EINVAL.
3036 */
3037static int restrict_cpus_allowed_ptr(struct task_struct *p,
3038 struct cpumask *new_mask,
3039 const struct cpumask *subset_mask)
3040{
3041 struct affinity_context ac = {
3042 .new_mask = new_mask,
3043 .flags = 0,
3044 };
3045 struct rq_flags rf;
3046 struct rq *rq;
3047 int err;
3048
3049 rq = task_rq_lock(p, &rf);
3050
3051 /*
3052 * Forcefully restricting the affinity of a deadline task is
3053 * likely to cause problems, so fail and noisily override the
3054 * mask entirely.
3055 */
3056 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3057 err = -EPERM;
3058 goto err_unlock;
3059 }
3060
3061 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3062 err = -EINVAL;
3063 goto err_unlock;
3064 }
3065
3066 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3067
3068err_unlock:
3069 task_rq_unlock(rq, p, &rf);
3070 return err;
3071}
3072
3073/*
3074 * Restrict the CPU affinity of task @p so that it is a subset of
3075 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3076 * old affinity mask. If the resulting mask is empty, we warn and walk
3077 * up the cpuset hierarchy until we find a suitable mask.
3078 */
3079void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3080{
3081 cpumask_var_t new_mask;
3082 const struct cpumask *override_mask = task_cpu_possible_mask(p);
3083
3084 alloc_cpumask_var(&new_mask, GFP_KERNEL);
3085
3086 /*
3087 * __migrate_task() can fail silently in the face of concurrent
3088 * offlining of the chosen destination CPU, so take the hotplug
3089 * lock to ensure that the migration succeeds.
3090 */
3091 cpus_read_lock();
3092 if (!cpumask_available(new_mask))
3093 goto out_set_mask;
3094
3095 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3096 goto out_free_mask;
3097
3098 /*
3099 * We failed to find a valid subset of the affinity mask for the
3100 * task, so override it based on its cpuset hierarchy.
3101 */
3102 cpuset_cpus_allowed(p, new_mask);
3103 override_mask = new_mask;
3104
3105out_set_mask:
3106 if (printk_ratelimit()) {
3107 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3108 task_pid_nr(p), p->comm,
3109 cpumask_pr_args(override_mask));
3110 }
3111
3112 WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3113out_free_mask:
3114 cpus_read_unlock();
3115 free_cpumask_var(new_mask);
3116}
3117
3118static int
3119__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
3120
3121/*
3122 * Restore the affinity of a task @p which was previously restricted by a
3123 * call to force_compatible_cpus_allowed_ptr().
3124 *
3125 * It is the caller's responsibility to serialise this with any calls to
3126 * force_compatible_cpus_allowed_ptr(@p).
3127 */
3128void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3129{
3130 struct affinity_context ac = {
3131 .new_mask = task_user_cpus(p),
3132 .flags = 0,
3133 };
3134 int ret;
3135
3136 /*
3137 * Try to restore the old affinity mask with __sched_setaffinity().
3138 * Cpuset masking will be done there too.
3139 */
3140 ret = __sched_setaffinity(p, &ac);
3141 WARN_ON_ONCE(ret);
3142}
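
/*
 * The two functions above are meant to be used as a pair (illustrative
 * sequence; arm64 does this around execution of a 32-bit task on an
 * asymmetric system):
 *
 *	force_compatible_cpus_allowed_ptr(p);
 *		(affinity shrunk to a compatible subset, the old mask is
 *		 preserved in p->user_cpus_ptr)
 *	...
 *	relax_compatible_cpus_allowed_ptr(p);
 *		(user affinity restored, subject to cpuset masking)
 */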
3143
3144void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3145{
3146#ifdef CONFIG_SCHED_DEBUG
3147 unsigned int state = READ_ONCE(p->__state);
3148
3149 /*
3150 * We should never call set_task_cpu() on a blocked task,
3151 * ttwu() will sort out the placement.
3152 */
3153 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3154
3155 /*
3156 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3157 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3158 * time relying on p->on_rq.
3159 */
3160 WARN_ON_ONCE(state == TASK_RUNNING &&
3161 p->sched_class == &fair_sched_class &&
3162 (p->on_rq && !task_on_rq_migrating(p)));
3163
3164#ifdef CONFIG_LOCKDEP
3165 /*
3166 * The caller should hold either p->pi_lock or rq->lock, when changing
3167 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3168 *
3169 * sched_move_task() holds both and thus holding either pins the cgroup,
3170 * see task_group().
3171 *
3172 * Furthermore, all task_rq users should acquire both locks, see
3173 * task_rq_lock().
3174 */
3175 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3176 lockdep_is_held(__rq_lockp(task_rq(p)))));
3177#endif
3178 /*
3179 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3180 */
3181 WARN_ON_ONCE(!cpu_online(new_cpu));
3182
3183 WARN_ON_ONCE(is_migration_disabled(p));
3184#endif
3185
3186 trace_sched_migrate_task(p, new_cpu);
3187
3188 if (task_cpu(p) != new_cpu) {
3189 if (p->sched_class->migrate_task_rq)
3190 p->sched_class->migrate_task_rq(p, new_cpu);
3191 p->se.nr_migrations++;
3192 rseq_migrate(p);
3193 perf_event_task_migrate(p);
3194 }
3195
3196 __set_task_cpu(p, new_cpu);
3197}
3198
3199#ifdef CONFIG_NUMA_BALANCING
3200static void __migrate_swap_task(struct task_struct *p, int cpu)
3201{
3202 if (task_on_rq_queued(p)) {
3203 struct rq *src_rq, *dst_rq;
3204 struct rq_flags srf, drf;
3205
3206 src_rq = task_rq(p);
3207 dst_rq = cpu_rq(cpu);
3208
3209 rq_pin_lock(src_rq, &srf);
3210 rq_pin_lock(dst_rq, &drf);
3211
3212 deactivate_task(src_rq, p, 0);
3213 set_task_cpu(p, cpu);
3214 activate_task(dst_rq, p, 0);
3215 check_preempt_curr(dst_rq, p, 0);
3216
3217 rq_unpin_lock(dst_rq, &drf);
3218 rq_unpin_lock(src_rq, &srf);
3219
3220 } else {
3221 /*
3222 * Task isn't running anymore; make it appear like we migrated
3223 * it before it went to sleep. This means on wakeup we make the
3224 * previous CPU our target instead of where it really is.
3225 */
3226 p->wake_cpu = cpu;
3227 }
3228}
3229
3230struct migration_swap_arg {
3231 struct task_struct *src_task, *dst_task;
3232 int src_cpu, dst_cpu;
3233};
3234
3235static int migrate_swap_stop(void *data)
3236{
3237 struct migration_swap_arg *arg = data;
3238 struct rq *src_rq, *dst_rq;
3239 int ret = -EAGAIN;
3240
3241 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3242 return -EAGAIN;
3243
3244 src_rq = cpu_rq(arg->src_cpu);
3245 dst_rq = cpu_rq(arg->dst_cpu);
3246
3247 double_raw_lock(&arg->src_task->pi_lock,
3248 &arg->dst_task->pi_lock);
3249 double_rq_lock(src_rq, dst_rq);
3250
3251 if (task_cpu(arg->dst_task) != arg->dst_cpu)
3252 goto unlock;
3253
3254 if (task_cpu(arg->src_task) != arg->src_cpu)
3255 goto unlock;
3256
3257 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3258 goto unlock;
3259
3260 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3261 goto unlock;
3262
3263 __migrate_swap_task(arg->src_task, arg->dst_cpu);
3264 __migrate_swap_task(arg->dst_task, arg->src_cpu);
3265
3266 ret = 0;
3267
3268unlock:
3269 double_rq_unlock(src_rq, dst_rq);
3270 raw_spin_unlock(&arg->dst_task->pi_lock);
3271 raw_spin_unlock(&arg->src_task->pi_lock);
3272
3273 return ret;
3274}
3275
3276/*
3277 * Cross migrate two tasks
3278 */
3279int migrate_swap(struct task_struct *cur, struct task_struct *p,
3280 int target_cpu, int curr_cpu)
3281{
3282 struct migration_swap_arg arg;
3283 int ret = -EINVAL;
3284
3285 arg = (struct migration_swap_arg){
3286 .src_task = cur,
3287 .src_cpu = curr_cpu,
3288 .dst_task = p,
3289 .dst_cpu = target_cpu,
3290 };
3291
3292 if (arg.src_cpu == arg.dst_cpu)
3293 goto out;
3294
3295 /*
3296 * These three tests are all lockless; this is OK since all of them
3297 * will be re-checked with proper locks held further down the line.
3298 */
3299 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3300 goto out;
3301
3302 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3303 goto out;
3304
3305 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3306 goto out;
3307
3308 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3309 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3310
3311out:
3312 return ret;
3313}
3314#endif /* CONFIG_NUMA_BALANCING */
3315
3316/*
3317 * wait_task_inactive - wait for a thread to unschedule.
3318 *
3319 * Wait for the thread to block in any of the states set in @match_state.
3320 * If it changes, i.e. @p might have woken up, then return zero. When we
3321 * succeed in waiting for @p to be off its CPU, we return a positive number
3322 * (its total switch count). If a second call a short while later returns the
3323 * same number, the caller can be sure that @p has remained unscheduled the
3324 * whole time.
3325 *
3326 * The caller must ensure that the task *will* unschedule sometime soon,
3327 * else this function might spin for a *long* time. This function can't
3328 * be called with interrupts off, or it may introduce deadlock with
3329 * smp_call_function() if an IPI is sent by the same process we are
3330 * waiting to become inactive.
3331 */
3332unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
3333{
3334 int running, queued;
3335 struct rq_flags rf;
3336 unsigned long ncsw;
3337 struct rq *rq;
3338
3339 for (;;) {
3340 /*
3341 * We do the initial early heuristics without holding
3342 * any task-queue locks at all. We'll only try to get
3343 * the runqueue lock when things look like they will
3344 * work out!
3345 */
3346 rq = task_rq(p);
3347
3348 /*
3349 * If the task is actively running on another CPU
3350 * still, just relax and busy-wait without holding
3351 * any locks.
3352 *
3353 * NOTE! Since we don't hold any locks, it's not
3354 * even sure that "rq" stays as the right runqueue!
3355 * But we don't care, since "task_on_cpu()" will
3356 * return false if the runqueue has changed and p
3357 * is actually now running somewhere else!
3358 */
3359 while (task_on_cpu(rq, p)) {
3360 if (!(READ_ONCE(p->__state) & match_state))
3361 return 0;
3362 cpu_relax();
3363 }
3364
3365 /*
3366 * Ok, time to look more closely! We need the rq
3367 * lock now, to be *sure*. If we're wrong, we'll
3368 * just go back and repeat.
3369 */
3370 rq = task_rq_lock(p, &rf);
3371 trace_sched_wait_task(p);
3372 running = task_on_cpu(rq, p);
3373 queued = task_on_rq_queued(p);
3374 ncsw = 0;
3375 if (READ_ONCE(p->__state) & match_state)
3376 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
3377 task_rq_unlock(rq, p, &rf);
3378
3379 /*
3380 * If it changed from the expected state, bail out now.
3381 */
3382 if (unlikely(!ncsw))
3383 break;
3384
3385 /*
3386 * Was it really running after all now that we
3387 * checked with the proper locks actually held?
3388 *
3389 * Oops. Go back and try again..
3390 */
3391 if (unlikely(running)) {
3392 cpu_relax();
3393 continue;
3394 }
3395
3396 /*
3397 * It's not enough that it's not actively running,
3398 * it must be off the runqueue _entirely_, and not
3399 * preempted!
3400 *
3401 * So if it was still runnable (but just not actively
3402 * running right now), it's preempted, and we should
3403 * yield - it could be a while.
3404 */
3405 if (unlikely(queued)) {
3406 ktime_t to = NSEC_PER_SEC / HZ;
3407
3408 set_current_state(TASK_UNINTERRUPTIBLE);
3409 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
3410 continue;
3411 }
3412
3413 /*
3414 * Ahh, all good. It wasn't running, and it wasn't
3415 * runnable, which means that it will never become
3416 * running in the future either. We're all done!
3417 */
3418 break;
3419 }
3420
3421 return ncsw;
3422}
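
/*
 * Illustrative use of the switch-count return value described above:
 *
 *	unsigned long ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);
 *	...
 *	if (ncsw && wait_task_inactive(p, TASK_UNINTERRUPTIBLE) == ncsw)
 *		... p has remained unscheduled the whole time ...
 */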
3423
3424/***
3425 * kick_process - kick a running thread to enter/exit the kernel
3426 * @p: the to-be-kicked thread
3427 *
3428 * Cause a process which is running on another CPU to enter
3429 * kernel-mode, without any delay. (to get signals handled.)
3430 *
3431 * NOTE: this function doesn't have to take the runqueue lock,
3432 * because all it wants to ensure is that the remote task enters
3433 * the kernel. If the IPI races and the task has been migrated
3434 * to another CPU then no harm is done and the purpose has been
3435 * achieved as well.
3436 */
3437void kick_process(struct task_struct *p)
3438{
3439 int cpu;
3440
3441 preempt_disable();
3442 cpu = task_cpu(p);
3443 if ((cpu != smp_processor_id()) && task_curr(p))
3444 smp_send_reschedule(cpu);
3445 preempt_enable();
3446}
3447EXPORT_SYMBOL_GPL(kick_process);
3448
3449/*
3450 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3451 *
3452 * A few notes on cpu_active vs cpu_online:
3453 *
3454 * - cpu_active must be a subset of cpu_online
3455 *
3456 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3457 * see __set_cpus_allowed_ptr(). At this point the newly online
3458 * CPU isn't yet part of the sched domains, and balancing will not
3459 * see it.
3460 *
 * - on CPU-down we clear cpu_active() to mask the sched domains and
 *   prevent the load balancer from placing new tasks on the CPU being
 *   removed. Existing tasks will remain running there and will be taken
 *   off.
3465 *
3466 * This means that fallback selection must not select !active CPUs.
3467 * And can assume that any active CPU must be online. Conversely
3468 * select_task_rq() below may allow selection of !active CPUs in order
3469 * to satisfy the above rules.
3470 */
3471static int select_fallback_rq(int cpu, struct task_struct *p)
3472{
3473 int nid = cpu_to_node(cpu);
3474 const struct cpumask *nodemask = NULL;
3475 enum { cpuset, possible, fail } state = cpuset;
3476 int dest_cpu;
3477
3478 /*
3479 * If the node that the CPU is on has been offlined, cpu_to_node()
3480 * will return -1. There is no CPU on the node, and we should
3481 * select the CPU on the other node.
3482 */
3483 if (nid != -1) {
3484 nodemask = cpumask_of_node(nid);
3485
3486 /* Look for allowed, online CPU in same node. */
3487 for_each_cpu(dest_cpu, nodemask) {
3488 if (is_cpu_allowed(p, dest_cpu))
3489 return dest_cpu;
3490 }
3491 }
3492
3493 for (;;) {
3494 /* Any allowed, online CPU? */
3495 for_each_cpu(dest_cpu, p->cpus_ptr) {
3496 if (!is_cpu_allowed(p, dest_cpu))
3497 continue;
3498
3499 goto out;
3500 }
3501
3502 /* No more Mr. Nice Guy. */
3503 switch (state) {
3504 case cpuset:
3505 if (cpuset_cpus_allowed_fallback(p)) {
3506 state = possible;
3507 break;
3508 }
3509 fallthrough;
3510 case possible:
3511 /*
3512 * XXX When called from select_task_rq() we only
3513 * hold p->pi_lock and again violate locking order.
3514 *
3515 * More yuck to audit.
3516 */
3517 do_set_cpus_allowed(p, task_cpu_possible_mask(p));
3518 state = fail;
3519 break;
3520 case fail:
3521 BUG();
3522 break;
3523 }
3524 }
3525
3526out:
3527 if (state != cpuset) {
3528 /*
3529 * Don't tell them about moving exiting tasks or
3530 * kernel threads (both mm NULL), since they never
		 * leave the kernel.
3532 */
3533 if (p->mm && printk_ratelimit()) {
3534 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3535 task_pid_nr(p), p->comm, cpu);
3536 }
3537 }
3538
3539 return dest_cpu;
3540}
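
/*
 * Fallback order implemented above: an allowed, online CPU on the same
 * node; any allowed, online CPU; the cpuset fallback mask; the full
 * task_cpu_possible_mask(); and finally BUG() if even that yields nothing.
 */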
3541
3542/*
3543 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3544 */
3545static inline
3546int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
3547{
3548 lockdep_assert_held(&p->pi_lock);
3549
3550 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
3551 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
3552 else
3553 cpu = cpumask_any(p->cpus_ptr);
3554
3555 /*
3556 * In order not to call set_task_cpu() on a blocking task we need
3557 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3558 * CPU.
3559 *
3560 * Since this is common to all placement strategies, this lives here.
3561 *
3562 * [ this allows ->select_task() to simply return task_cpu(p) and
3563 * not worry about this generic constraint ]
3564 */
3565 if (unlikely(!is_cpu_allowed(p, cpu)))
3566 cpu = select_fallback_rq(task_cpu(p), p);
3567
3568 return cpu;
3569}
3570
3571void sched_set_stop_task(int cpu, struct task_struct *stop)
3572{
3573 static struct lock_class_key stop_pi_lock;
3574 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3575 struct task_struct *old_stop = cpu_rq(cpu)->stop;
3576
3577 if (stop) {
3578 /*
		 * Make it appear like a SCHED_FIFO task; it's something
		 * userspace knows about and won't get confused about.
3581 *
3582 * Also, it will make PI more or less work without too
3583 * much confusion -- but then, stop work should not
3584 * rely on PI working anyway.
3585 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3587
3588 stop->sched_class = &stop_sched_class;
3589
3590 /*
3591 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3592 * adjust the effective priority of a task. As a result,
3593 * rt_mutex_setprio() can trigger (RT) balancing operations,
3594 * which can then trigger wakeups of the stop thread to push
3595 * around the current task.
3596 *
3597 * The stop task itself will never be part of the PI-chain, it
3598 * never blocks, therefore that ->pi_lock recursion is safe.
3599 * Tell lockdep about this by placing the stop->pi_lock in its
3600 * own class.
3601 */
3602 lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3603 }
3604
3605 cpu_rq(cpu)->stop = stop;
3606
3607 if (old_stop) {
3608 /*
3609 * Reset it back to a normal scheduling class so that
3610 * it can die in pieces.
3611 */
3612 old_stop->sched_class = &rt_sched_class;
3613 }
3614}
3615
3616#else /* CONFIG_SMP */
3617
3618static inline int __set_cpus_allowed_ptr(struct task_struct *p,
3619 struct affinity_context *ctx)
3620{
3621 return set_cpus_allowed_ptr(p, ctx->new_mask);
3622}
3623
3624static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
3625
3626static inline bool rq_has_pinned_tasks(struct rq *rq)
3627{
3628 return false;
3629}
3630
3631static inline cpumask_t *alloc_user_cpus_ptr(int node)
3632{
3633 return NULL;
3634}
3635
3636#endif /* !CONFIG_SMP */
3637
3638static void
3639ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3640{
3641 struct rq *rq;
3642
3643 if (!schedstat_enabled())
3644 return;
3645
3646 rq = this_rq();
3647
3648#ifdef CONFIG_SMP
3649 if (cpu == rq->cpu) {
3650 __schedstat_inc(rq->ttwu_local);
3651 __schedstat_inc(p->stats.nr_wakeups_local);
3652 } else {
3653 struct sched_domain *sd;
3654
3655 __schedstat_inc(p->stats.nr_wakeups_remote);
3656 rcu_read_lock();
3657 for_each_domain(rq->cpu, sd) {
3658 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3659 __schedstat_inc(sd->ttwu_wake_remote);
3660 break;
3661 }
3662 }
3663 rcu_read_unlock();
3664 }
3665
3666 if (wake_flags & WF_MIGRATED)
3667 __schedstat_inc(p->stats.nr_wakeups_migrate);
3668#endif /* CONFIG_SMP */
3669
3670 __schedstat_inc(rq->ttwu_count);
3671 __schedstat_inc(p->stats.nr_wakeups);
3672
3673 if (wake_flags & WF_SYNC)
3674 __schedstat_inc(p->stats.nr_wakeups_sync);
3675}
3676
3677/*
3678 * Mark the task runnable and perform wakeup-preemption.
3679 */
3680static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
3681 struct rq_flags *rf)
3682{
3683 check_preempt_curr(rq, p, wake_flags);
3684 WRITE_ONCE(p->__state, TASK_RUNNING);
3685 trace_sched_wakeup(p);
3686
3687#ifdef CONFIG_SMP
3688 if (p->sched_class->task_woken) {
3689 /*
3690 * Our task @p is fully woken up and running; so it's safe to
3691 * drop the rq->lock, hereafter rq is only used for statistics.
3692 */
3693 rq_unpin_lock(rq, rf);
3694 p->sched_class->task_woken(rq, p);
3695 rq_repin_lock(rq, rf);
3696 }
3697
3698 if (rq->idle_stamp) {
3699 u64 delta = rq_clock(rq) - rq->idle_stamp;
3700 u64 max = 2*rq->max_idle_balance_cost;
3701
3702 update_avg(&rq->avg_idle, delta);
3703
3704 if (rq->avg_idle > max)
3705 rq->avg_idle = max;
3706
3707 rq->wake_stamp = jiffies;
3708 rq->wake_avg_idle = rq->avg_idle / 2;
3709
3710 rq->idle_stamp = 0;
3711 }
3712#endif
3713}
3714
3715static void
3716ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3717 struct rq_flags *rf)
3718{
3719 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3720
3721 lockdep_assert_rq_held(rq);
3722
3723 if (p->sched_contributes_to_load)
3724 rq->nr_uninterruptible--;
3725
3726#ifdef CONFIG_SMP
3727 if (wake_flags & WF_MIGRATED)
3728 en_flags |= ENQUEUE_MIGRATED;
3729 else
3730#endif
3731 if (p->in_iowait) {
3732 delayacct_blkio_end(p);
3733 atomic_dec(&task_rq(p)->nr_iowait);
3734 }
3735
3736 activate_task(rq, p, en_flags);
3737 ttwu_do_wakeup(rq, p, wake_flags, rf);
3738}
3739
3740/*
3741 * Consider @p being inside a wait loop:
3742 *
3743 * for (;;) {
3744 * set_current_state(TASK_UNINTERRUPTIBLE);
3745 *
3746 * if (CONDITION)
3747 * break;
3748 *
3749 * schedule();
3750 * }
3751 * __set_current_state(TASK_RUNNING);
3752 *
3753 * between set_current_state() and schedule(). In this case @p is still
3754 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3755 * an atomic manner.
3756 *
3757 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3758 * then schedule() must still happen and p->state can be changed to
3759 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3760 * need to do a full wakeup with enqueue.
3761 *
3762 * Returns: %true when the wakeup is done,
3763 * %false otherwise.
3764 */
3765static int ttwu_runnable(struct task_struct *p, int wake_flags)
3766{
3767 struct rq_flags rf;
3768 struct rq *rq;
3769 int ret = 0;
3770
3771 rq = __task_rq_lock(p, &rf);
3772 if (task_on_rq_queued(p)) {
3773 /* check_preempt_curr() may use rq clock */
3774 update_rq_clock(rq);
3775 ttwu_do_wakeup(rq, p, wake_flags, &rf);
3776 ret = 1;
3777 }
3778 __task_rq_unlock(rq, &rf);
3779
3780 return ret;
3781}
3782
3783#ifdef CONFIG_SMP
3784void sched_ttwu_pending(void *arg)
3785{
3786 struct llist_node *llist = arg;
3787 struct rq *rq = this_rq();
3788 struct task_struct *p, *t;
3789 struct rq_flags rf;
3790
3791 if (!llist)
3792 return;
3793
3794 rq_lock_irqsave(rq, &rf);
3795 update_rq_clock(rq);
3796
3797 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3798 if (WARN_ON_ONCE(p->on_cpu))
3799 smp_cond_load_acquire(&p->on_cpu, !VAL);
3800
3801 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3802 set_task_cpu(p, cpu_of(rq));
3803
3804 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3805 }
3806
3807 /*
3808 * Must be after enqueueing at least once task such that
3809 * idle_cpu() does not observe a false-negative -- if it does,
3810 * it is possible for select_idle_siblings() to stack a number
3811 * of tasks on this CPU during that window.
3812 *
	 * It is OK to clear ttwu_pending even when another task is pending;
	 * we will receive the IPI once local IRQs are enabled and enqueue it
	 * then. Since nr_running > 0 by now, idle_cpu() will always get the
	 * correct result.
3816 */
3817 WRITE_ONCE(rq->ttwu_pending, 0);
3818 rq_unlock_irqrestore(rq, &rf);
3819}
3820
3821void send_call_function_single_ipi(int cpu)
3822{
3823 struct rq *rq = cpu_rq(cpu);
3824
3825 if (!set_nr_if_polling(rq->idle))
3826 arch_send_call_function_single_ipi(cpu);
3827 else
3828 trace_sched_wake_idle_without_ipi(cpu);
3829}
3830
3831/*
 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
 * necessary. The wakee CPU, on receipt of the IPI, will queue the task
 * via sched_ttwu_pending() for activation so the wakee incurs the cost
3835 * of the wakeup instead of the waker.
3836 */
3837static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3838{
3839 struct rq *rq = cpu_rq(cpu);
3840
3841 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3842
3843 WRITE_ONCE(rq->ttwu_pending, 1);
3844 __smp_call_single_queue(cpu, &p->wake_entry.llist);
3845}
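
/*
 * The remote wakeup path above, end to end:
 *
 *	waker CPU				wakee CPU
 *
 *	WRITE_ONCE(rq->ttwu_pending, 1);
 *	__smp_call_single_queue();  --IPI-->	sched_ttwu_pending();
 *						  ttwu_do_activate() per task;
 *						  WRITE_ONCE(rq->ttwu_pending, 0);
 */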
3846
3847void wake_up_if_idle(int cpu)
3848{
3849 struct rq *rq = cpu_rq(cpu);
3850 struct rq_flags rf;
3851
3852 rcu_read_lock();
3853
3854 if (!is_idle_task(rcu_dereference(rq->curr)))
3855 goto out;
3856
3857 rq_lock_irqsave(rq, &rf);
3858 if (is_idle_task(rq->curr))
3859 resched_curr(rq);
3860 /* Else CPU is not idle, do nothing here: */
3861 rq_unlock_irqrestore(rq, &rf);
3862
3863out:
3864 rcu_read_unlock();
3865}
3866
3867bool cpus_share_cache(int this_cpu, int that_cpu)
3868{
3869 if (this_cpu == that_cpu)
3870 return true;
3871
3872 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3873}
3874
3875static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3876{
3877 /*
3878 * Do not complicate things with the async wake_list while the CPU is
3879 * in hotplug state.
3880 */
3881 if (!cpu_active(cpu))
3882 return false;
3883
3884 /* Ensure the task will still be allowed to run on the CPU. */
3885 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3886 return false;
3887
3888 /*
3889 * If the CPU does not share cache, then queue the task on the
3890 * remote rqs wakelist to avoid accessing remote data.
3891 */
3892 if (!cpus_share_cache(smp_processor_id(), cpu))
3893 return true;
3894
3895 if (cpu == smp_processor_id())
3896 return false;
3897
3898 /*
	 * If the wakee CPU is idle, or the task is descheduling and the
3900 * only running task on the CPU, then use the wakelist to offload
3901 * the task activation to the idle (or soon-to-be-idle) CPU as
3902 * the current CPU is likely busy. nr_running is checked to
3903 * avoid unnecessary task stacking.
3904 *
3905 * Note that we can only get here with (wakee) p->on_rq=0,
3906 * p->on_cpu can be whatever, we've done the dequeue, so
3907 * the wakee has been accounted out of ->nr_running.
3908 */
3909 if (!cpu_rq(cpu)->nr_running)
3910 return true;
3911
3912 return false;
3913}
3914
3915static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3916{
3917 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
3918 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3919 __ttwu_queue_wakelist(p, cpu, wake_flags);
3920 return true;
3921 }
3922
3923 return false;
3924}
3925
3926#else /* !CONFIG_SMP */
3927
3928static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3929{
3930 return false;
3931}
3932
3933#endif /* CONFIG_SMP */
3934
3935static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3936{
3937 struct rq *rq = cpu_rq(cpu);
3938 struct rq_flags rf;
3939
3940 if (ttwu_queue_wakelist(p, cpu, wake_flags))
3941 return;
3942
3943 rq_lock(rq, &rf);
3944 update_rq_clock(rq);
3945 ttwu_do_activate(rq, p, wake_flags, &rf);
3946 rq_unlock(rq, &rf);
3947}
3948
3949/*
3950 * Invoked from try_to_wake_up() to check whether the task can be woken up.
3951 *
3952 * The caller holds p::pi_lock if p != current or has preemption
3953 * disabled when p == current.
3954 *
3955 * The rules of PREEMPT_RT saved_state:
3956 *
3957 * The related locking code always holds p::pi_lock when updating
3958 * p::saved_state, which means the code is fully serialized in both cases.
3959 *
3960 * The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
3961 * bits set. This allows to distinguish all wakeup scenarios.
3962 */
3963static __always_inline
3964bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
3965{
3966 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
3967 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
3968 state != TASK_RTLOCK_WAIT);
3969 }
3970
3971 if (READ_ONCE(p->__state) & state) {
3972 *success = 1;
3973 return true;
3974 }
3975
3976#ifdef CONFIG_PREEMPT_RT
3977 /*
3978 * Saved state preserves the task state across blocking on
3979 * an RT lock. If the state matches, set p::saved_state to
3980 * TASK_RUNNING, but do not wake the task because it waits
3981 * for a lock wakeup. Also indicate success because from
3982 * the regular waker's point of view this has succeeded.
3983 *
3984 * After acquiring the lock the task will restore p::__state
3985 * from p::saved_state which ensures that the regular
3986 * wakeup is not lost. The restore will also set
3987 * p::saved_state to TASK_RUNNING so any further tests will
3988 * not result in false positives vs. @success.
3989 */
3990 if (p->saved_state & state) {
3991 p->saved_state = TASK_RUNNING;
3992 *success = 1;
3993 }
3994#endif
3995 return false;
3996}
3997
3998/*
3999 * Notes on Program-Order guarantees on SMP systems.
4000 *
4001 * MIGRATION
4002 *
4003 * The basic program-order guarantee on SMP systems is that when a task [t]
4004 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4005 * execution on its new CPU [c1].
4006 *
4007 * For migration (of runnable tasks) this is provided by the following means:
4008 *
4009 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4010 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4011 * rq(c1)->lock (if not at the same time, then in that order).
4012 * C) LOCK of the rq(c1)->lock scheduling in task
4013 *
4014 * Release/acquire chaining guarantees that B happens after A and C after B.
4015 * Note: the CPU doing B need not be c0 or c1
4016 *
4017 * Example:
4018 *
4019 * CPU0 CPU1 CPU2
4020 *
4021 * LOCK rq(0)->lock
4022 * sched-out X
4023 * sched-in Y
4024 * UNLOCK rq(0)->lock
4025 *
4026 * LOCK rq(0)->lock // orders against CPU0
4027 * dequeue X
4028 * UNLOCK rq(0)->lock
4029 *
4030 * LOCK rq(1)->lock
4031 * enqueue X
4032 * UNLOCK rq(1)->lock
4033 *
4034 * LOCK rq(1)->lock // orders against CPU2
4035 * sched-out Z
4036 * sched-in X
4037 * UNLOCK rq(1)->lock
4038 *
4039 *
4040 * BLOCKING -- aka. SLEEP + WAKEUP
4041 *
4042 * For blocking we (obviously) need to provide the same guarantee as for
4043 * migration. However the means are completely different as there is no lock
4044 * chain to provide order. Instead we do:
4045 *
4046 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4047 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4048 *
4049 * Example:
4050 *
4051 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
4052 *
4053 * LOCK rq(0)->lock LOCK X->pi_lock
4054 * dequeue X
4055 * sched-out X
4056 * smp_store_release(X->on_cpu, 0);
4057 *
4058 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4059 * X->state = WAKING
4060 * set_task_cpu(X,2)
4061 *
4062 * LOCK rq(2)->lock
4063 * enqueue X
4064 * X->state = RUNNING
4065 * UNLOCK rq(2)->lock
4066 *
4067 * LOCK rq(2)->lock // orders against CPU1
4068 * sched-out Z
4069 * sched-in X
4070 * UNLOCK rq(2)->lock
4071 *
4072 * UNLOCK X->pi_lock
4073 * UNLOCK rq(0)->lock
4074 *
4075 *
4076 * However, for wakeups there is a second guarantee we must provide, namely we
4077 * must ensure that CONDITION=1 done by the caller can not be reordered with
4078 * accesses to the task state; see try_to_wake_up() and set_current_state().
4079 */
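/*
 * For illustration, the canonical sleep/wakeup pattern that the CONDITION
 * guarantee above supports (a sketch; real users typically go through
 * wait_event()/wake_up(), which implement exactly this):
 *
 *	// waiter				// waker
 *	for (;;) {				CONDITION = 1;
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *						wake_up_process(p);
 *		if (CONDITION)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */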
4080
4081/**
4082 * try_to_wake_up - wake up a thread
4083 * @p: the thread to be awakened
4084 * @state: the mask of task states that can be woken
4085 * @wake_flags: wake modifier flags (WF_*)
4086 *
4087 * Conceptually does:
4088 *
4089 * If (@state & @p->state) @p->state = TASK_RUNNING.
4090 *
4091 * If the task was not queued/runnable, also place it back on a runqueue.
4092 *
4093 * This function is atomic against schedule() which would dequeue the task.
4094 *
4095 * It issues a full memory barrier before accessing @p->state, see the comment
4096 * with set_current_state().
4097 *
4098 * Uses p->pi_lock to serialize against concurrent wake-ups.
4099 *
4100 * Relies on p->pi_lock stabilizing:
4101 * - p->sched_class
4102 * - p->cpus_ptr
4103 * - p->sched_task_group
4104 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4105 *
4106 * Tries really hard to only take one task_rq(p)->lock for performance.
4107 * Takes rq->lock in:
4108 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4109 * - ttwu_queue() -- new rq, for enqueue of the task;
4110 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4111 *
4112 * As a consequence we race really badly with just about everything. See the
4113 * many memory barriers and their comments for details.
4114 *
4115 * Return: %true if @p->state changes (an actual wakeup was done),
4116 * %false otherwise.
4117 */
4118static int
4119try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4120{
4121 unsigned long flags;
4122 int cpu, success = 0;
4123
4124 preempt_disable();
4125 if (p == current) {
4126 /*
4127 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4128 * == smp_processor_id()'. Together this means we can special
4129 * case the whole 'p->on_rq && ttwu_runnable()' case below
4130 * without taking any locks.
4131 *
4132 * In particular:
4133 * - we rely on Program-Order guarantees for all the ordering,
4134 * - we're serialized against set_special_state() by virtue of
4135 * it disabling IRQs (this allows not taking ->pi_lock).
4136 */
4137 if (!ttwu_state_match(p, state, &success))
4138 goto out;
4139
4140 trace_sched_waking(p);
4141 WRITE_ONCE(p->__state, TASK_RUNNING);
4142 trace_sched_wakeup(p);
4143 goto out;
4144 }
4145
4146 /*
4147 * If we are going to wake up a thread waiting for CONDITION we
4148 * need to ensure that CONDITION=1 done by the caller can not be
4149 * reordered with p->state check below. This pairs with smp_store_mb()
4150 * in set_current_state() that the waiting thread does.
4151 */
4152 raw_spin_lock_irqsave(&p->pi_lock, flags);
4153 smp_mb__after_spinlock();
4154 if (!ttwu_state_match(p, state, &success))
4155 goto unlock;
4156
4157 trace_sched_waking(p);
4158
4159 /*
4160 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4161 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4162 * in smp_cond_load_acquire() below.
4163 *
4164 * sched_ttwu_pending() try_to_wake_up()
4165 * STORE p->on_rq = 1 LOAD p->state
4166 * UNLOCK rq->lock
4167 *
4168 * __schedule() (switch to task 'p')
4169 * LOCK rq->lock smp_rmb();
4170 * smp_mb__after_spinlock();
4171 * UNLOCK rq->lock
4172 *
4173 * [task p]
4174 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4175 *
4176 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4177 * __schedule(). See the comment for smp_mb__after_spinlock().
4178 *
4179 * A similar smp_rmb() lives in __task_needs_rq_lock().
4180 */
4181 smp_rmb();
4182 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4183 goto unlock;
4184
4185#ifdef CONFIG_SMP
4186 /*
4187 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4188 * possible to, falsely, observe p->on_cpu == 0.
4189 *
4190 * One must be running (->on_cpu == 1) in order to remove oneself
4191 * from the runqueue.
4192 *
4193 * __schedule() (switch to task 'p') try_to_wake_up()
4194 * STORE p->on_cpu = 1 LOAD p->on_rq
4195 * UNLOCK rq->lock
4196 *
4197 * __schedule() (put 'p' to sleep)
4198 * LOCK rq->lock smp_rmb();
4199 * smp_mb__after_spinlock();
4200 * STORE p->on_rq = 0 LOAD p->on_cpu
4201 *
4202 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4203 * __schedule(). See the comment for smp_mb__after_spinlock().
4204 *
4205 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4206 * schedule()'s deactivate_task() has 'happened' and p will no longer
4207 * care about its own p->state. See the comment in __schedule().
4208 */
4209 smp_acquire__after_ctrl_dep();
4210
4211 /*
4212 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4213 * == 0), which means we need to do an enqueue, change p->state to
4214 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4215 * enqueue, such as ttwu_queue_wakelist().
4216 */
4217 WRITE_ONCE(p->__state, TASK_WAKING);
4218
4219 /*
4220 * If the owning (remote) CPU is still in the middle of schedule() with
4221 * this task as prev, consider queueing p on the remote CPU's wake_list,
4222 * which potentially sends an IPI instead of spinning on p->on_cpu to
4223 * let the waker make forward progress. This is safe because IRQs are
4224 * disabled and the IPI will deliver after on_cpu is cleared.
4225 *
4226 * Ensure we load task_cpu(p) after p->on_cpu:
4227 *
4228 * set_task_cpu(p, cpu);
4229 * STORE p->cpu = @cpu
4230 * __schedule() (switch to task 'p')
4231 * LOCK rq->lock
4232 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
4233 * STORE p->on_cpu = 1 LOAD p->cpu
4234 *
4235 * to ensure we observe the correct CPU on which the task is currently
4236 * scheduling.
4237 */
4238 if (smp_load_acquire(&p->on_cpu) &&
4239 ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4240 goto unlock;
4241
4242 /*
4243 * If the owning (remote) CPU is still in the middle of schedule() with
4244 * this task as prev, wait until it's done referencing the task.
4245 *
4246 * Pairs with the smp_store_release() in finish_task().
4247 *
4248 * This ensures that tasks getting woken will be fully ordered against
4249 * their previous state and preserve Program Order.
4250 */
4251 smp_cond_load_acquire(&p->on_cpu, !VAL);
4252
4253 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
4254 if (task_cpu(p) != cpu) {
4255 if (p->in_iowait) {
4256 delayacct_blkio_end(p);
4257 atomic_dec(&task_rq(p)->nr_iowait);
4258 }
4259
4260 wake_flags |= WF_MIGRATED;
4261 psi_ttwu_dequeue(p);
4262 set_task_cpu(p, cpu);
4263 }
4264#else
4265 cpu = task_cpu(p);
4266#endif /* CONFIG_SMP */
4267
4268 ttwu_queue(p, cpu, wake_flags);
4269unlock:
4270 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4271out:
4272 if (success)
4273 ttwu_stat(p, task_cpu(p), wake_flags);
4274 preempt_enable();
4275
4276 return success;
4277}
4278
4279static bool __task_needs_rq_lock(struct task_struct *p)
4280{
4281 unsigned int state = READ_ONCE(p->__state);
4282
4283 /*
4284 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4285 * the task is blocked. Make sure to check @state since ttwu() can drop
4286 * locks at the end, see ttwu_queue_wakelist().
4287 */
4288 if (state == TASK_RUNNING || state == TASK_WAKING)
4289 return true;
4290
4291 /*
4292 * Ensure we load p->on_rq after p->__state, otherwise it would be
4293 * possible to, falsely, observe p->on_rq == 0.
4294 *
4295 * See try_to_wake_up() for a longer comment.
4296 */
4297 smp_rmb();
4298 if (p->on_rq)
4299 return true;
4300
4301#ifdef CONFIG_SMP
4302 /*
4303 * Ensure the task has finished __schedule() and will not be referenced
4304 * anymore. Again, see try_to_wake_up() for a longer comment.
4305 */
4306 smp_rmb();
4307 smp_cond_load_acquire(&p->on_cpu, !VAL);
4308#endif
4309
4310 return false;
4311}
4312
4313/**
4314 * task_call_func - Invoke a function on task in fixed state
4315 * @p: Process for which the function is to be invoked, can be @current.
4316 * @func: Function to invoke.
4317 * @arg: Argument to function.
4318 *
4319 * Fix the task in its current state by holding off wakeups and/or rq operations
4320 * and call @func(@arg) on it. This function can use ->on_rq and task_curr()
4321 * to work out what the state is, if required. Given that @func can be invoked
4322 * with a runqueue lock held, it had better be quite lightweight.
4323 *
4324 * Returns:
4325 * Whatever @func returns
4326 */
4327int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4328{
4329 struct rq *rq = NULL;
4330 struct rq_flags rf;
4331 int ret;
4332
4333 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4334
4335 if (__task_needs_rq_lock(p))
4336 rq = __task_rq_lock(p, &rf);
4337
4338 /*
4339 * At this point the task is pinned; either:
4340 * - blocked and we're holding off wakeups (pi->lock)
4341 * - woken, and we're holding off enqueue (rq->lock)
4342 * - queued, and we're holding off schedule (rq->lock)
4343 * - running, and we're holding off de-schedule (rq->lock)
4344 *
4345 * The called function (@func) can use: task_curr(), p->on_rq and
4346 * p->__state to differentiate between these states.
4347 */
4348 ret = func(p, arg);
4349
4350 if (rq)
4351 rq_unlock(rq, &rf);
4352
4353 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4354 return ret;
4355}
4356
4357/**
4358 * cpu_curr_snapshot - Return a snapshot of the currently running task
4359 * @cpu: The CPU on which to snapshot the task.
4360 *
4361 * Returns the task_struct pointer of the task "currently" running on
4362 * the specified CPU. If the same task is running on that CPU throughout,
4363 * the return value will be a pointer to that task's task_struct structure.
4364 * If the CPU did any context switches even vaguely concurrently with the
4365 * execution of this function, the return value will be a pointer to the
4366 * task_struct structure of a randomly chosen task that was running on
4367 * that CPU somewhere around the time that this function was executing.
4368 *
4369 * If the specified CPU was offline, the return value is whatever it
4370 * is, perhaps a pointer to the task_struct structure of that CPU's idle
4371 * task, but there is no guarantee. Callers wishing a useful return
4372 * value must take some action to ensure that the specified CPU remains
4373 * online throughout.
4374 *
4375 * This function executes full memory barriers before and after fetching
4376 * the pointer, which permits the caller to confine this function's fetch
4377 * with respect to the caller's accesses to other shared variables.
4378 */
4379struct task_struct *cpu_curr_snapshot(int cpu)
4380{
4381 struct task_struct *t;
4382
4383 smp_mb(); /* Pairing determined by caller's synchronization design. */
4384 t = rcu_dereference(cpu_curr(cpu));
4385 smp_mb(); /* Pairing determined by caller's synchronization design. */
4386 return t;
4387}
4388
4389/**
4390 * wake_up_process - Wake up a specific process
4391 * @p: The process to be woken up.
4392 *
4393 * Attempt to wake up the nominated process and move it to the set of runnable
4394 * processes.
4395 *
4396 * Return: 1 if the process was woken up, 0 if it was already running.
4397 *
4398 * This function executes a full memory barrier before accessing the task state.
4399 */
4400int wake_up_process(struct task_struct *p)
4401{
4402 return try_to_wake_up(p, TASK_NORMAL, 0);
4403}
4404EXPORT_SYMBOL(wake_up_process);
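/*
 * Typical usage (illustrative; worker_fn is a caller-provided thread
 * function): a freshly created kthread starts out sleeping and must be
 * woken explicitly:
 *
 *	struct task_struct *t = kthread_create(worker_fn, NULL, "worker");
 *	if (!IS_ERR(t))
 *		wake_up_process(t);
 */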
4405
4406int wake_up_state(struct task_struct *p, unsigned int state)
4407{
4408 return try_to_wake_up(p, state, 0);
4409}
4410
4411/*
4412 * Perform scheduler related setup for a newly forked process p.
4413 * p is forked by current.
4414 *
4415 * __sched_fork() is basic setup used by init_idle() too:
4416 */
4417static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
4418{
4419 p->on_rq = 0;
4420
4421 p->se.on_rq = 0;
4422 p->se.exec_start = 0;
4423 p->se.sum_exec_runtime = 0;
4424 p->se.prev_sum_exec_runtime = 0;
4425 p->se.nr_migrations = 0;
4426 p->se.vruntime = 0;
4427 INIT_LIST_HEAD(&p->se.group_node);
4428
4429#ifdef CONFIG_FAIR_GROUP_SCHED
4430 p->se.cfs_rq = NULL;
4431#endif
4432
4433#ifdef CONFIG_SCHEDSTATS
4434 /* Even if schedstat is disabled, there should not be garbage */
4435 memset(&p->stats, 0, sizeof(p->stats));
4436#endif
4437
4438 RB_CLEAR_NODE(&p->dl.rb_node);
4439 init_dl_task_timer(&p->dl);
4440 init_dl_inactive_task_timer(&p->dl);
4441 __dl_clear_params(p);
4442
4443 INIT_LIST_HEAD(&p->rt.run_list);
4444 p->rt.timeout = 0;
4445 p->rt.time_slice = sched_rr_timeslice;
4446 p->rt.on_rq = 0;
4447 p->rt.on_list = 0;
4448
4449#ifdef CONFIG_PREEMPT_NOTIFIERS
4450 INIT_HLIST_HEAD(&p->preempt_notifiers);
4451#endif
4452
4453#ifdef CONFIG_COMPACTION
4454 p->capture_control = NULL;
4455#endif
4456 init_numa_balancing(clone_flags, p);
4457#ifdef CONFIG_SMP
4458 p->wake_entry.u_flags = CSD_TYPE_TTWU;
4459 p->migration_pending = NULL;
4460#endif
4461}
4462
4463DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4464
4465#ifdef CONFIG_NUMA_BALANCING
4466
4467int sysctl_numa_balancing_mode;
4468
4469static void __set_numabalancing_state(bool enabled)
4470{
4471 if (enabled)
4472 static_branch_enable(&sched_numa_balancing);
4473 else
4474 static_branch_disable(&sched_numa_balancing);
4475}
4476
4477void set_numabalancing_state(bool enabled)
4478{
4479 if (enabled)
4480 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4481 else
4482 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4483 __set_numabalancing_state(enabled);
4484}
4485
4486#ifdef CONFIG_PROC_SYSCTL
4487static void reset_memory_tiering(void)
4488{
4489 struct pglist_data *pgdat;
4490
4491 for_each_online_pgdat(pgdat) {
4492 pgdat->nbp_threshold = 0;
4493 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4494 pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4495 }
4496}
4497
4498static int sysctl_numa_balancing(struct ctl_table *table, int write,
4499 void *buffer, size_t *lenp, loff_t *ppos)
4500{
4501 struct ctl_table t;
4502 int err;
4503 int state = sysctl_numa_balancing_mode;
4504
4505 if (write && !capable(CAP_SYS_ADMIN))
4506 return -EPERM;
4507
4508 t = *table;
4509 t.data = &state;
4510 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4511 if (err < 0)
4512 return err;
4513 if (write) {
4514 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4515 (state & NUMA_BALANCING_MEMORY_TIERING))
4516 reset_memory_tiering();
4517 sysctl_numa_balancing_mode = state;
4518 __set_numabalancing_state(state);
4519 }
4520 return err;
4521}
4522#endif
4523#endif
4524
4525#ifdef CONFIG_SCHEDSTATS
4526
4527DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4528
4529static void set_schedstats(bool enabled)
4530{
4531 if (enabled)
4532 static_branch_enable(&sched_schedstats);
4533 else
4534 static_branch_disable(&sched_schedstats);
4535}
4536
4537void force_schedstat_enabled(void)
4538{
4539 if (!schedstat_enabled()) {
4540 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4541 static_branch_enable(&sched_schedstats);
4542 }
4543}
4544
4545static int __init setup_schedstats(char *str)
4546{
4547 int ret = 0;
4548 if (!str)
4549 goto out;
4550
4551 if (!strcmp(str, "enable")) {
4552 set_schedstats(true);
4553 ret = 1;
4554 } else if (!strcmp(str, "disable")) {
4555 set_schedstats(false);
4556 ret = 1;
4557 }
4558out:
4559 if (!ret)
4560 pr_warn("Unable to parse schedstats=\n");
4561
4562 return ret;
4563}
4564__setup("schedstats=", setup_schedstats);
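/*
 * Example (illustrative): enable schedstats from the kernel command line:
 *
 *	schedstats=enable
 *
 * The same knob is available at runtime via /proc/sys/kernel/sched_schedstats,
 * see sysctl_schedstats() below.
 */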
4565
4566#ifdef CONFIG_PROC_SYSCTL
4567static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
4568 size_t *lenp, loff_t *ppos)
4569{
4570 struct ctl_table t;
4571 int err;
4572 int state = static_branch_likely(&sched_schedstats);
4573
4574 if (write && !capable(CAP_SYS_ADMIN))
4575 return -EPERM;
4576
4577 t = *table;
4578 t.data = &state;
4579 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4580 if (err < 0)
4581 return err;
4582 if (write)
4583 set_schedstats(state);
4584 return err;
4585}
4586#endif /* CONFIG_PROC_SYSCTL */
4587#endif /* CONFIG_SCHEDSTATS */
4588
4589#ifdef CONFIG_SYSCTL
4590static struct ctl_table sched_core_sysctls[] = {
4591#ifdef CONFIG_SCHEDSTATS
4592 {
4593 .procname = "sched_schedstats",
4594 .data = NULL,
4595 .maxlen = sizeof(unsigned int),
4596 .mode = 0644,
4597 .proc_handler = sysctl_schedstats,
4598 .extra1 = SYSCTL_ZERO,
4599 .extra2 = SYSCTL_ONE,
4600 },
4601#endif /* CONFIG_SCHEDSTATS */
4602#ifdef CONFIG_UCLAMP_TASK
4603 {
4604 .procname = "sched_util_clamp_min",
4605 .data = &sysctl_sched_uclamp_util_min,
4606 .maxlen = sizeof(unsigned int),
4607 .mode = 0644,
4608 .proc_handler = sysctl_sched_uclamp_handler,
4609 },
4610 {
4611 .procname = "sched_util_clamp_max",
4612 .data = &sysctl_sched_uclamp_util_max,
4613 .maxlen = sizeof(unsigned int),
4614 .mode = 0644,
4615 .proc_handler = sysctl_sched_uclamp_handler,
4616 },
4617 {
4618 .procname = "sched_util_clamp_min_rt_default",
4619 .data = &sysctl_sched_uclamp_util_min_rt_default,
4620 .maxlen = sizeof(unsigned int),
4621 .mode = 0644,
4622 .proc_handler = sysctl_sched_uclamp_handler,
4623 },
4624#endif /* CONFIG_UCLAMP_TASK */
4625#ifdef CONFIG_NUMA_BALANCING
4626 {
4627 .procname = "numa_balancing",
4628 .data = NULL, /* filled in by handler */
4629 .maxlen = sizeof(unsigned int),
4630 .mode = 0644,
4631 .proc_handler = sysctl_numa_balancing,
4632 .extra1 = SYSCTL_ZERO,
4633 .extra2 = SYSCTL_FOUR,
4634 },
4635#endif /* CONFIG_NUMA_BALANCING */
4636 {}
4637};
4638static int __init sched_core_sysctl_init(void)
4639{
4640 register_sysctl_init("kernel", sched_core_sysctls);
4641 return 0;
4642}
4643late_initcall(sched_core_sysctl_init);
4644#endif /* CONFIG_SYSCTL */
4645
4646/*
4647 * fork()/clone()-time setup:
4648 */
4649int sched_fork(unsigned long clone_flags, struct task_struct *p)
4650{
4651 __sched_fork(clone_flags, p);
4652 /*
4653 * We mark the process as NEW here. This guarantees that
4654 * nobody will actually run it, and a signal or other external
4655 * event cannot wake it up and insert it on the runqueue either.
4656 */
4657 p->__state = TASK_NEW;
4658
4659 /*
4660 * Make sure we do not leak PI boosting priority to the child.
4661 */
4662 p->prio = current->normal_prio;
4663
4664 uclamp_fork(p);
4665
4666 /*
4667 * Revert to default priority/policy on fork if requested.
4668 */
4669 if (unlikely(p->sched_reset_on_fork)) {
4670 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4671 p->policy = SCHED_NORMAL;
4672 p->static_prio = NICE_TO_PRIO(0);
4673 p->rt_priority = 0;
4674 } else if (PRIO_TO_NICE(p->static_prio) < 0)
4675 p->static_prio = NICE_TO_PRIO(0);
4676
4677 p->prio = p->normal_prio = p->static_prio;
4678 set_load_weight(p, false);
4679
4680 /*
4681 * We don't need the reset flag anymore after the fork. It has
4682 * fulfilled its duty:
4683 */
4684 p->sched_reset_on_fork = 0;
4685 }
4686
4687 if (dl_prio(p->prio))
4688 return -EAGAIN;
4689 else if (rt_prio(p->prio))
4690 p->sched_class = &rt_sched_class;
4691 else
4692 p->sched_class = &fair_sched_class;
4693
4694 init_entity_runnable_average(&p->se);
4695
4696
4697#ifdef CONFIG_SCHED_INFO
4698 if (likely(sched_info_on()))
4699 memset(&p->sched_info, 0, sizeof(p->sched_info));
4700#endif
4701#if defined(CONFIG_SMP)
4702 p->on_cpu = 0;
4703#endif
4704 init_task_preempt_count(p);
4705#ifdef CONFIG_SMP
4706 plist_node_init(&p->pushable_tasks, MAX_PRIO);
4707 RB_CLEAR_NODE(&p->pushable_dl_tasks);
4708#endif
4709 return 0;
4710}
4711
4712void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4713{
4714 unsigned long flags;
4715
4716 /*
4717 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4718 * required yet, but lockdep gets upset if rules are violated.
4719 */
4720 raw_spin_lock_irqsave(&p->pi_lock, flags);
4721#ifdef CONFIG_CGROUP_SCHED
4722 if (1) {
4723 struct task_group *tg;
4724 tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4725 struct task_group, css);
4726 tg = autogroup_task_group(p, tg);
4727 p->sched_task_group = tg;
4728 }
4729#endif
4730 rseq_migrate(p);
4731 /*
4732 * We're setting the CPU for the first time, we don't migrate,
4733 * so use __set_task_cpu().
4734 */
4735 __set_task_cpu(p, smp_processor_id());
4736 if (p->sched_class->task_fork)
4737 p->sched_class->task_fork(p);
4738 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4739}
4740
4741void sched_post_fork(struct task_struct *p)
4742{
4743 uclamp_post_fork(p);
4744}
4745
4746unsigned long to_ratio(u64 period, u64 runtime)
4747{
4748 if (runtime == RUNTIME_INF)
4749 return BW_UNIT;
4750
4751 /*
4752 * Doing this here saves a lot of checks in all
4753 * the calling paths, and returning zero seems
4754 * safe for them anyway.
4755 */
4756 if (period == 0)
4757 return 0;
4758
4759 return div64_u64(runtime << BW_SHIFT, period);
4760}
4761
4762/*
4763 * wake_up_new_task - wake up a newly created task for the first time.
4764 *
4765 * This function will do some initial scheduler statistics housekeeping
4766 * that must be done for every newly created context, then puts the task
4767 * on the runqueue and wakes it.
4768 */
4769void wake_up_new_task(struct task_struct *p)
4770{
4771 struct rq_flags rf;
4772 struct rq *rq;
4773
4774 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4775 WRITE_ONCE(p->__state, TASK_RUNNING);
4776#ifdef CONFIG_SMP
4777 /*
4778 * Fork balancing, do it here and not earlier because:
4779 * - cpus_ptr can change in the fork path
4780 * - any previously selected CPU might disappear through hotplug
4781 *
4782 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4783 * as we're not fully set-up yet.
4784 */
4785 p->recent_used_cpu = task_cpu(p);
4786 rseq_migrate(p);
4787 __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
4788#endif
4789 rq = __task_rq_lock(p, &rf);
4790 update_rq_clock(rq);
4791 post_init_entity_util_avg(p);
4792
4793 activate_task(rq, p, ENQUEUE_NOCLOCK);
4794 trace_sched_wakeup_new(p);
4795 check_preempt_curr(rq, p, WF_FORK);
4796#ifdef CONFIG_SMP
4797 if (p->sched_class->task_woken) {
4798 /*
4799 * Nothing relies on rq->lock after this, so it's fine to
4800 * drop it.
4801 */
4802 rq_unpin_lock(rq, &rf);
4803 p->sched_class->task_woken(rq, p);
4804 rq_repin_lock(rq, &rf);
4805 }
4806#endif
4807 task_rq_unlock(rq, p, &rf);
4808}
4809
4810#ifdef CONFIG_PREEMPT_NOTIFIERS
4811
4812static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4813
4814void preempt_notifier_inc(void)
4815{
4816 static_branch_inc(&preempt_notifier_key);
4817}
4818EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4819
4820void preempt_notifier_dec(void)
4821{
4822 static_branch_dec(&preempt_notifier_key);
4823}
4824EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4825
4826/**
4827 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4828 * @notifier: notifier struct to register
4829 */
4830void preempt_notifier_register(struct preempt_notifier *notifier)
4831{
4832 if (!static_branch_unlikely(&preempt_notifier_key))
4833 WARN(1, "registering preempt_notifier while notifiers disabled\n");
4834
4835 hlist_add_head(&notifier->link, &current->preempt_notifiers);
4836}
4837EXPORT_SYMBOL_GPL(preempt_notifier_register);
4838
4839/**
4840 * preempt_notifier_unregister - no longer interested in preemption notifications
4841 * @notifier: notifier struct to unregister
4842 *
4843 * This is *not* safe to call from within a preemption notifier.
4844 */
4845void preempt_notifier_unregister(struct preempt_notifier *notifier)
4846{
4847 hlist_del(&notifier->link);
4848}
4849EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
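/*
 * Example (illustrative sketch; my_sched_in()/my_sched_out() are
 * hypothetical, KVM is the main in-tree user of this interface):
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu) { }
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next) { }
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	struct preempt_notifier pn;
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&pn, &my_ops);
 *	preempt_notifier_register(&pn);		// registers for current only
 */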
4850
4851static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4852{
4853 struct preempt_notifier *notifier;
4854
4855 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4856 notifier->ops->sched_in(notifier, raw_smp_processor_id());
4857}
4858
4859static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4860{
4861 if (static_branch_unlikely(&preempt_notifier_key))
4862 __fire_sched_in_preempt_notifiers(curr);
4863}
4864
4865static void
4866__fire_sched_out_preempt_notifiers(struct task_struct *curr,
4867 struct task_struct *next)
4868{
4869 struct preempt_notifier *notifier;
4870
4871 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4872 notifier->ops->sched_out(notifier, next);
4873}
4874
4875static __always_inline void
4876fire_sched_out_preempt_notifiers(struct task_struct *curr,
4877 struct task_struct *next)
4878{
4879 if (static_branch_unlikely(&preempt_notifier_key))
4880 __fire_sched_out_preempt_notifiers(curr, next);
4881}
4882
4883#else /* !CONFIG_PREEMPT_NOTIFIERS */
4884
4885static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4886{
4887}
4888
4889static inline void
4890fire_sched_out_preempt_notifiers(struct task_struct *curr,
4891 struct task_struct *next)
4892{
4893}
4894
4895#endif /* CONFIG_PREEMPT_NOTIFIERS */
4896
4897static inline void prepare_task(struct task_struct *next)
4898{
4899#ifdef CONFIG_SMP
4900 /*
4901 * Claim the task as running, we do this before switching to it
4902 * such that any running task will have this set.
4903 *
4904 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4905 * its ordering comment.
4906 */
4907 WRITE_ONCE(next->on_cpu, 1);
4908#endif
4909}
4910
4911static inline void finish_task(struct task_struct *prev)
4912{
4913#ifdef CONFIG_SMP
4914 /*
4915 * This must be the very last reference to @prev from this CPU. After
4916 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4917 * must ensure this doesn't happen until the switch is completely
4918 * finished.
4919 *
4920 * In particular, the load of prev->state in finish_task_switch() must
4921 * happen before this.
4922 *
4923 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
4924 */
4925 smp_store_release(&prev->on_cpu, 0);
4926#endif
4927}
4928
4929#ifdef CONFIG_SMP
4930
4931static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
4932{
4933 void (*func)(struct rq *rq);
4934 struct balance_callback *next;
4935
4936 lockdep_assert_rq_held(rq);
4937
4938 while (head) {
4939 func = (void (*)(struct rq *))head->func;
4940 next = head->next;
4941 head->next = NULL;
4942 head = next;
4943
4944 func(rq);
4945 }
4946}
4947
4948static void balance_push(struct rq *rq);
4949
4950/*
4951 * balance_push_callback is a right abuse of the callback interface and plays
4952 * by significantly different rules.
4953 *
4954 * Where the normal balance_callback's purpose is to be run in the same context
4955 * that queued it (only later, when it's safe to drop rq->lock again),
4956 * balance_push_callback is specifically targeted at __schedule().
4957 *
4958 * This abuse is tolerated because it places all the unlikely/odd cases behind
4959 * a single test, namely: rq->balance_callback == NULL.
4960 */
4961struct balance_callback balance_push_callback = {
4962 .next = NULL,
4963 .func = balance_push,
4964};
4965
4966static inline struct balance_callback *
4967__splice_balance_callbacks(struct rq *rq, bool split)
4968{
4969 struct balance_callback *head = rq->balance_callback;
4970
4971 if (likely(!head))
4972 return NULL;
4973
4974 lockdep_assert_rq_held(rq);
4975 /*
4976 * Must not take balance_push_callback off the list when
4977 * splice_balance_callbacks() and balance_callbacks() are not
4978 * in the same rq->lock section.
4979 *
4980 * In that case it would be possible for __schedule() to interleave
4981 * and observe the list empty.
4982 */
4983 if (split && head == &balance_push_callback)
4984 head = NULL;
4985 else
4986 rq->balance_callback = NULL;
4987
4988 return head;
4989}
4990
4991static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
4992{
4993 return __splice_balance_callbacks(rq, true);
4994}
4995
4996static void __balance_callbacks(struct rq *rq)
4997{
4998 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
4999}
5000
5001static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
5002{
5003 unsigned long flags;
5004
5005 if (unlikely(head)) {
5006 raw_spin_rq_lock_irqsave(rq, flags);
5007 do_balance_callbacks(rq, head);
5008 raw_spin_rq_unlock_irqrestore(rq, flags);
5009 }
5010}
5011
5012#else
5013
5014static inline void __balance_callbacks(struct rq *rq)
5015{
5016}
5017
5018static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
5019{
5020 return NULL;
5021}
5022
5023static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
5024{
5025}
5026
5027#endif
5028
5029static inline void
5030prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5031{
5032 /*
5033 * The runqueue lock will be released by the next
5034 * task (which is an invalid locking op but in the case
5035 * of the scheduler it's an obvious special-case), so we
5036 * do an early lockdep release here:
5037 */
5038 rq_unpin_lock(rq, rf);
5039 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5040#ifdef CONFIG_DEBUG_SPINLOCK
5041 /* this is a valid case when another task releases the spinlock */
5042 rq_lockp(rq)->owner = next;
5043#endif
5044}
5045
5046static inline void finish_lock_switch(struct rq *rq)
5047{
5048 /*
5049 * If we are tracking spinlock dependencies then we have to
5050 * fix up the runqueue lock - which gets 'carried over' from
5051 * prev into current:
5052 */
5053 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5054 __balance_callbacks(rq);
5055 raw_spin_rq_unlock_irq(rq);
5056}
5057
5058/*
5059 * NOP if the arch has not defined these:
5060 */
5061
5062#ifndef prepare_arch_switch
5063# define prepare_arch_switch(next) do { } while (0)
5064#endif
5065
5066#ifndef finish_arch_post_lock_switch
5067# define finish_arch_post_lock_switch() do { } while (0)
5068#endif
5069
5070static inline void kmap_local_sched_out(void)
5071{
5072#ifdef CONFIG_KMAP_LOCAL
5073 if (unlikely(current->kmap_ctrl.idx))
5074 __kmap_local_sched_out();
5075#endif
5076}
5077
5078static inline void kmap_local_sched_in(void)
5079{
5080#ifdef CONFIG_KMAP_LOCAL
5081 if (unlikely(current->kmap_ctrl.idx))
5082 __kmap_local_sched_in();
5083#endif
5084}
5085
5086/**
5087 * prepare_task_switch - prepare to switch tasks
5088 * @rq: the runqueue preparing to switch
5089 * @prev: the current task that is being switched out
5090 * @next: the task we are going to switch to.
5091 *
5092 * This is called with the rq lock held and interrupts off. It must
5093 * be paired with a subsequent finish_task_switch after the context
5094 * switch.
5095 *
5096 * prepare_task_switch sets up locking and calls architecture specific
5097 * hooks.
5098 */
5099static inline void
5100prepare_task_switch(struct rq *rq, struct task_struct *prev,
5101 struct task_struct *next)
5102{
5103 kcov_prepare_switch(prev);
5104 sched_info_switch(rq, prev, next);
5105 perf_event_task_sched_out(prev, next);
5106 rseq_preempt(prev);
5107 fire_sched_out_preempt_notifiers(prev, next);
5108 kmap_local_sched_out();
5109 prepare_task(next);
5110 prepare_arch_switch(next);
5111}
5112
5113/**
5114 * finish_task_switch - clean up after a task-switch
5115 * @prev: the thread we just switched away from.
5116 *
5117 * finish_task_switch must be called after the context switch, paired
5118 * with a prepare_task_switch call before the context switch.
5119 * finish_task_switch will reconcile locking set up by prepare_task_switch,
5120 * and do any other architecture-specific cleanup actions.
5121 *
5122 * Note that we may have delayed dropping an mm in context_switch(). If
5123 * so, we finish that here outside of the runqueue lock. (Doing it
5124 * with the lock held can cause deadlocks; see schedule() for
5125 * details.)
5126 *
5127 * The context switch has flipped the stack from under us and restored the
5128 * local variables which were saved when this task called schedule() in the
5129 * past. prev == current is still correct but we need to recalculate this_rq
5130 * because prev may have moved to another CPU.
5131 */
5132static struct rq *finish_task_switch(struct task_struct *prev)
5133 __releases(rq->lock)
5134{
5135 struct rq *rq = this_rq();
5136 struct mm_struct *mm = rq->prev_mm;
5137 unsigned int prev_state;
5138
5139 /*
5140 * The previous task will have left us with a preempt_count of 2
5141 * because it left us after:
5142 *
5143 * schedule()
5144 * preempt_disable(); // 1
5145 * __schedule()
5146 * raw_spin_lock_irq(&rq->lock) // 2
5147 *
5148 * Also, see FORK_PREEMPT_COUNT.
5149 */
5150 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5151 "corrupted preempt_count: %s/%d/0x%x\n",
5152 current->comm, current->pid, preempt_count()))
5153 preempt_count_set(FORK_PREEMPT_COUNT);
5154
5155 rq->prev_mm = NULL;
5156
5157 /*
5158 * A task struct has one reference for the use as "current".
5159 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5160 * schedule one last time. The schedule call will never return, and
5161 * the scheduled task must drop that reference.
5162 *
5163 * We must observe prev->state before clearing prev->on_cpu (in
5164 * finish_task), otherwise a concurrent wakeup can get prev
5165 * running on another CPU and we could race with its RUNNING -> DEAD
5166 * transition, resulting in a double drop.
5167 */
5168 prev_state = READ_ONCE(prev->__state);
5169 vtime_task_switch(prev);
5170 perf_event_task_sched_in(prev, current);
5171 finish_task(prev);
5172 tick_nohz_task_switch();
5173 finish_lock_switch(rq);
5174 finish_arch_post_lock_switch();
5175 kcov_finish_switch(current);
5176 /*
5177 * kmap_local_sched_out() is invoked with rq::lock held and
5178 * interrupts disabled. There is no requirement for that, but the
5179 * sched out code does not have an interrupt enabled section.
5180 * Restoring the maps on sched in does not require interrupts being
5181 * disabled either.
5182 */
5183 kmap_local_sched_in();
5184
5185 fire_sched_in_preempt_notifiers(current);
5186 /*
5187 * When switching through a kernel thread, the loop in
5188 * membarrier_{private,global}_expedited() may have observed that
5189 * kernel thread and not issued an IPI. It is therefore possible to
5190 * schedule between user->kernel->user threads without passing through
5191 * switch_mm(). Membarrier requires a barrier after storing to
5192 * rq->curr, before returning to userspace, so provide them here:
5193 *
5194 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5195 * provided by mmdrop(),
5196 * - a sync_core for SYNC_CORE.
5197 */
5198 if (mm) {
5199 membarrier_mm_sync_core_before_usermode(mm);
5200 mmdrop_sched(mm);
5201 }
5202 if (unlikely(prev_state == TASK_DEAD)) {
5203 if (prev->sched_class->task_dead)
5204 prev->sched_class->task_dead(prev);
5205
5206 /* Task is done with its stack. */
5207 put_task_stack(prev);
5208
5209 put_task_struct_rcu_user(prev);
5210 }
5211
5212 return rq;
5213}
5214
5215/**
5216 * schedule_tail - first thing a freshly forked thread must call.
5217 * @prev: the thread we just switched away from.
5218 */
5219asmlinkage __visible void schedule_tail(struct task_struct *prev)
5220 __releases(rq->lock)
5221{
5222 /*
5223 * New tasks start with FORK_PREEMPT_COUNT, see there and
5224 * finish_task_switch() for details.
5225 *
5226 * finish_task_switch() will drop rq->lock() and lower preempt_count
5227 * and the preempt_enable() will end up enabling preemption (on
5228 * PREEMPT_COUNT kernels).
5229 */
5230
5231 finish_task_switch(prev);
5232 preempt_enable();
5233
5234 if (current->set_child_tid)
5235 put_user(task_pid_vnr(current), current->set_child_tid);
5236
5237 calculate_sigpending();
5238}
5239
5240/*
5241 * context_switch - switch to the new MM and the new thread's register state.
5242 */
5243static __always_inline struct rq *
5244context_switch(struct rq *rq, struct task_struct *prev,
5245 struct task_struct *next, struct rq_flags *rf)
5246{
5247 prepare_task_switch(rq, prev, next);
5248
5249 /*
5250 * For paravirt, this is coupled with an exit in switch_to to
5251 * combine the page table reload and the switch backend into
5252 * one hypercall.
5253 */
5254 arch_start_context_switch(prev);
5255
5256 /*
5257 * kernel -> kernel lazy + transfer active
5258 * user -> kernel lazy + mmgrab() active
5259 *
5260 * kernel -> user switch + mmdrop() active
5261 * user -> user switch
5262 */
5263 if (!next->mm) { // to kernel
5264 enter_lazy_tlb(prev->active_mm, next);
5265
5266 next->active_mm = prev->active_mm;
5267 if (prev->mm) // from user
5268 mmgrab(prev->active_mm);
5269 else
5270 prev->active_mm = NULL;
5271 } else { // to user
5272 membarrier_switch_mm(rq, prev->active_mm, next->mm);
5273 /*
5274 * sys_membarrier() requires an smp_mb() between setting
5275 * rq->curr / membarrier_switch_mm() and returning to userspace.
5276 *
5277 * The below provides this either through switch_mm(), or in
5278 * case 'prev->active_mm == next->mm' through
5279 * finish_task_switch()'s mmdrop().
5280 */
5281 switch_mm_irqs_off(prev->active_mm, next->mm, next);
5282 lru_gen_use_mm(next->mm);
5283
5284 if (!prev->mm) { // from kernel
5285 /* will mmdrop() in finish_task_switch(). */
5286 rq->prev_mm = prev->active_mm;
5287 prev->active_mm = NULL;
5288 }
5289 }
5290
5291 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
5292
5293 prepare_lock_switch(rq, next, rf);
5294
5295 /* Here we just switch the register state and the stack. */
5296 switch_to(prev, next, prev);
5297 barrier();
5298
5299 return finish_task_switch(prev);
5300}
5301
5302/*
5303 * nr_running and nr_context_switches:
5304 *
5305 * externally visible scheduler statistics: current number of runnable
5306 * threads, total number of context switches performed since bootup.
5307 */
5308unsigned int nr_running(void)
5309{
5310 unsigned int i, sum = 0;
5311
5312 for_each_online_cpu(i)
5313 sum += cpu_rq(i)->nr_running;
5314
5315 return sum;
5316}
5317
5318/*
5319 * Check if only the current task is running on the CPU.
5320 *
5321 * Caution: this function does not check that the caller has disabled
5322 * preemption, thus the result might have a time-of-check-to-time-of-use
5323 * race. The caller is responsible for using it correctly, for example:
5324 *
5325 * - from a non-preemptible section (of course)
5326 *
5327 * - from a thread that is bound to a single CPU
5328 *
5329 * - in a loop with very short iterations (e.g. a polling loop)
5330 */
5331bool single_task_running(void)
5332{
5333 return raw_rq()->nr_running == 1;
5334}
5335EXPORT_SYMBOL(single_task_running);
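/*
 * Example (illustrative): a halt-polling style loop, bound to one CPU, can
 * use this to keep polling only while nothing else wants to run:
 *
 *	while (!done && single_task_running())
 *		cpu_relax();	// the CPU is otherwise free, keep polling
 */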
5336
5337unsigned long long nr_context_switches(void)
5338{
5339 int i;
5340 unsigned long long sum = 0;
5341
5342 for_each_possible_cpu(i)
5343 sum += cpu_rq(i)->nr_switches;
5344
5345 return sum;
5346}
5347
5348/*
5349 * Consumers of these two interfaces, like for example the cpuidle menu
5350 * governor, are using nonsensical data: they prefer shallow idle states
5351 * for a CPU with pending IO-wait even though that CPU might not end up
5352 * running the task once it does become runnable.
5353 */
5354
5355unsigned int nr_iowait_cpu(int cpu)
5356{
5357 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5358}
5359
5360/*
5361 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5362 *
5363 * The idea behind IO-wait accounting is to account the idle time that we could
5364 * have spent running if it were not for IO. That is, if we were to improve the
5365 * storage performance, we'd have a proportional reduction in IO-wait time.
5366 *
5367 * This all works nicely on UP, where, when a task blocks on IO, we account
5368 * idle time as IO-wait, because if the storage were faster, it could've been
5369 * running and we'd not be idle.
5370 *
5371 * This has been extended to SMP, by doing the same for each CPU. This however
5372 * is broken.
5373 *
5374 * Imagine, for instance, the case where two tasks block on one CPU; only that
5375 * CPU will have IO-wait accounted, while the other has regular idle. Yet,
5376 * if the storage were faster, both could have run at the same time,
5377 * utilising both CPUs.
5378 *
5379 * This means that, looking globally, the current IO-wait accounting on
5380 * SMP is a lower bound, by reason of under-accounting.
5381 *
5382 * Worse, since the numbers are provided per CPU, they are sometimes
5383 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5384 * associated with any one particular CPU; it can wake up on a different CPU
5385 * than the one it blocked on. This means the per-CPU IO-wait number is meaningless.
5386 *
5387 * Task CPU affinities can make all that even more 'interesting'.
5388 */
5389
5390unsigned int nr_iowait(void)
5391{
5392 unsigned int i, sum = 0;
5393
5394 for_each_possible_cpu(i)
5395 sum += nr_iowait_cpu(i);
5396
5397 return sum;
5398}
5399
5400#ifdef CONFIG_SMP
5401
5402/*
5403 * sched_exec - execve() is a valuable balancing opportunity, because at
5404 * this point the task has the smallest effective memory and cache footprint.
5405 */
5406void sched_exec(void)
5407{
5408 struct task_struct *p = current;
5409 unsigned long flags;
5410 int dest_cpu;
5411
5412 raw_spin_lock_irqsave(&p->pi_lock, flags);
5413 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5414 if (dest_cpu == smp_processor_id())
5415 goto unlock;
5416
5417 if (likely(cpu_active(dest_cpu))) {
5418 struct migration_arg arg = { p, dest_cpu };
5419
5420 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5421 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5422 return;
5423 }
5424unlock:
5425 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5426}
5427
5428#endif
5429
5430DEFINE_PER_CPU(struct kernel_stat, kstat);
5431DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5432
5433EXPORT_PER_CPU_SYMBOL(kstat);
5434EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5435
5436/*
5437 * The function fair_sched_class.update_curr accesses the struct curr
5438 * and its field curr->exec_start; when called from task_sched_runtime(),
5439 * we observe a high rate of cache misses in practice.
5440 * Prefetching this data results in improved performance.
5441 */
5442static inline void prefetch_curr_exec_start(struct task_struct *p)
5443{
5444#ifdef CONFIG_FAIR_GROUP_SCHED
5445 struct sched_entity *curr = (&p->se)->cfs_rq->curr;
5446#else
5447 struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
5448#endif
5449 prefetch(curr);
5450 prefetch(&curr->exec_start);
5451}
5452
5453/*
5454 * Return accounted runtime for the task.
5455 * In case the task is currently running, return the runtime plus current's
5456 * pending runtime that has not been accounted yet.
5457 */
5458unsigned long long task_sched_runtime(struct task_struct *p)
5459{
5460 struct rq_flags rf;
5461 struct rq *rq;
5462 u64 ns;
5463
5464#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5465 /*
5466 * 64-bit doesn't need locks to atomically read a 64-bit value.
5467 * So we have an optimization chance when the task's delta_exec is 0.
5468 * Reading ->on_cpu is racy, but this is ok.
5469 *
5470 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5471 * If we race with it entering CPU, unaccounted time is 0. This is
5472 * indistinguishable from the read occurring a few cycles earlier.
5473 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5474 * been accounted, so we're correct here as well.
5475 */
5476 if (!p->on_cpu || !task_on_rq_queued(p))
5477 return p->se.sum_exec_runtime;
5478#endif
5479
5480 rq = task_rq_lock(p, &rf);
5481 /*
5482 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5483 * project cycles that may never be accounted to this
5484 * thread, breaking clock_gettime().
5485 */
5486 if (task_current(rq, p) && task_on_rq_queued(p)) {
5487 prefetch_curr_exec_start(p);
5488 update_rq_clock(rq);
5489 p->sched_class->update_curr(rq);
5490 }
5491 ns = p->se.sum_exec_runtime;
5492 task_rq_unlock(rq, p, &rf);
5493
5494 return ns;
5495}
5496
5497#ifdef CONFIG_SCHED_DEBUG
5498static u64 cpu_resched_latency(struct rq *rq)
5499{
5500 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5501 u64 resched_latency, now = rq_clock(rq);
5502 static bool warned_once;
5503
5504 if (sysctl_resched_latency_warn_once && warned_once)
5505 return 0;
5506
5507 if (!need_resched() || !latency_warn_ms)
5508 return 0;
5509
5510 if (system_state == SYSTEM_BOOTING)
5511 return 0;
5512
5513 if (!rq->last_seen_need_resched_ns) {
5514 rq->last_seen_need_resched_ns = now;
5515 rq->ticks_without_resched = 0;
5516 return 0;
5517 }
5518
5519 rq->ticks_without_resched++;
5520 resched_latency = now - rq->last_seen_need_resched_ns;
5521 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5522 return 0;
5523
5524 warned_once = true;
5525
5526 return resched_latency;
5527}
5528
5529static int __init setup_resched_latency_warn_ms(char *str)
5530{
5531 long val;
5532
5533 if ((kstrtol(str, 0, &val))) {
5534 pr_warn("Unable to set resched_latency_warn_ms\n");
5535 return 1;
5536 }
5537
5538 sysctl_resched_latency_warn_ms = val;
5539 return 1;
5540}
5541__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
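/*
 * Example (illustrative): warn when need_resched stays set for longer than
 * 150ms, configured at boot:
 *
 *	resched_latency_warn_ms=150
 *
 * This works together with the LATENCY_WARN scheduler feature, see
 * scheduler_tick() below.
 */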
5542#else
5543static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
5544#endif /* CONFIG_SCHED_DEBUG */
5545
5546/*
5547 * This function gets called by the timer code, with HZ frequency.
5548 * We call it with interrupts disabled.
5549 */
5550void scheduler_tick(void)
5551{
5552 int cpu = smp_processor_id();
5553 struct rq *rq = cpu_rq(cpu);
5554 struct task_struct *curr = rq->curr;
5555 struct rq_flags rf;
5556 unsigned long thermal_pressure;
5557 u64 resched_latency;
5558
5559 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5560 arch_scale_freq_tick();
5561
5562 sched_clock_tick();
5563
5564 rq_lock(rq, &rf);
5565
5566 update_rq_clock(rq);
5567 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
5568 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
5569 curr->sched_class->task_tick(rq, curr, 0);
5570 if (sched_feat(LATENCY_WARN))
5571 resched_latency = cpu_resched_latency(rq);
5572 calc_global_load_tick(rq);
5573 sched_core_tick(rq);
5574
5575 rq_unlock(rq, &rf);
5576
5577 if (sched_feat(LATENCY_WARN) && resched_latency)
5578 resched_latency_warn(cpu, resched_latency);
5579
5580 perf_event_task_tick();
5581
5582#ifdef CONFIG_SMP
5583 rq->idle_balance = idle_cpu(cpu);
5584 trigger_load_balance(rq);
5585#endif
5586}
5587
5588#ifdef CONFIG_NO_HZ_FULL
5589
5590struct tick_work {
5591 int cpu;
5592 atomic_t state;
5593 struct delayed_work work;
5594};
5595/* Values for ->state, see diagram below. */
5596#define TICK_SCHED_REMOTE_OFFLINE 0
5597#define TICK_SCHED_REMOTE_OFFLINING 1
5598#define TICK_SCHED_REMOTE_RUNNING 2
5599
5600/*
5601 * State diagram for ->state:
5602 *
5603 *
5604 * TICK_SCHED_REMOTE_OFFLINE
5605 * | ^
5606 * | |
5607 * | | sched_tick_remote()
5608 * | |
5609 * | |
5610 * +--TICK_SCHED_REMOTE_OFFLINING
5611 * | ^
5612 * | |
5613 * sched_tick_start() | | sched_tick_stop()
5614 * | |
5615 * V |
5616 * TICK_SCHED_REMOTE_RUNNING
5617 *
5618 *
5619 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5620 * and sched_tick_start() are happy to leave the state in RUNNING.
5621 */
5622
5623static struct tick_work __percpu *tick_work_cpu;
5624
5625static void sched_tick_remote(struct work_struct *work)
5626{
5627 struct delayed_work *dwork = to_delayed_work(work);
5628 struct tick_work *twork = container_of(dwork, struct tick_work, work);
5629 int cpu = twork->cpu;
5630 struct rq *rq = cpu_rq(cpu);
5631 struct task_struct *curr;
5632 struct rq_flags rf;
5633 u64 delta;
5634 int os;
5635
5636 /*
5637 * Handle the tick only if it appears the remote CPU is running in full
5638 * dynticks mode. The check is racy by nature, but missing a tick or
5639 * having one too many is no big deal because the scheduler tick updates
5640 * statistics and checks timeslices in a time-independent way, regardless
5641 * of when exactly it is running.
5642 */
5643 if (!tick_nohz_tick_stopped_cpu(cpu))
5644 goto out_requeue;
5645
5646 rq_lock_irq(rq, &rf);
5647 curr = rq->curr;
5648 if (cpu_is_offline(cpu))
5649 goto out_unlock;
5650
5651 update_rq_clock(rq);
5652
5653 if (!is_idle_task(curr)) {
5654 /*
5655 * Make sure the next tick runs within a reasonable
5656 * amount of time.
5657 */
5658 delta = rq_clock_task(rq) - curr->se.exec_start;
5659 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5660 }
5661 curr->sched_class->task_tick(rq, curr, 0);
5662
5663 calc_load_nohz_remote(rq);
5664out_unlock:
5665 rq_unlock_irq(rq, &rf);
5666out_requeue:
5667
5668 /*
5669 * Run the remote tick once per second (1Hz). This arbitrary
5670 * frequency is low enough to avoid overload but high enough
5671 * to keep scheduler internal stats reasonably up to date. But
5672 * first update state to reflect hotplug activity if required.
5673 */
5674 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5675 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5676 if (os == TICK_SCHED_REMOTE_RUNNING)
5677 queue_delayed_work(system_unbound_wq, dwork, HZ);
5678}
5679
5680static void sched_tick_start(int cpu)
5681{
5682 int os;
5683 struct tick_work *twork;
5684
5685 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5686 return;
5687
5688 WARN_ON_ONCE(!tick_work_cpu);
5689
5690 twork = per_cpu_ptr(tick_work_cpu, cpu);
5691 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5692 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5693 if (os == TICK_SCHED_REMOTE_OFFLINE) {
5694 twork->cpu = cpu;
5695 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5696 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5697 }
5698}
5699
5700#ifdef CONFIG_HOTPLUG_CPU
5701static void sched_tick_stop(int cpu)
5702{
5703 struct tick_work *twork;
5704 int os;
5705
5706 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5707 return;
5708
5709 WARN_ON_ONCE(!tick_work_cpu);
5710
5711 twork = per_cpu_ptr(tick_work_cpu, cpu);
5712 /* There cannot be competing actions, but don't rely on stop-machine. */
5713 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5714 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5715 /* Don't cancel, as this would mess up the state machine. */
5716}
5717#endif /* CONFIG_HOTPLUG_CPU */
5718
5719int __init sched_tick_offload_init(void)
5720{
5721 tick_work_cpu = alloc_percpu(struct tick_work);
5722 BUG_ON(!tick_work_cpu);
5723 return 0;
5724}
5725
5726#else /* !CONFIG_NO_HZ_FULL */
5727static inline void sched_tick_start(int cpu) { }
5728static inline void sched_tick_stop(int cpu) { }
5729#endif
5730
5731#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5732 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5733/*
5734 * If the value passed in is equal to the current preempt count
5735 * then we just disabled preemption. Start timing the latency.
5736 */
5737static inline void preempt_latency_start(int val)
5738{
5739 if (preempt_count() == val) {
5740 unsigned long ip = get_lock_parent_ip();
5741#ifdef CONFIG_DEBUG_PREEMPT
5742 current->preempt_disable_ip = ip;
5743#endif
5744 trace_preempt_off(CALLER_ADDR0, ip);
5745 }
5746}
5747
5748void preempt_count_add(int val)
5749{
5750#ifdef CONFIG_DEBUG_PREEMPT
5751 /*
5752 * Underflow?
5753 */
5754 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5755 return;
5756#endif
5757 __preempt_count_add(val);
5758#ifdef CONFIG_DEBUG_PREEMPT
5759 /*
5760 * Spinlock count overflowing soon?
5761 */
5762 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5763 PREEMPT_MASK - 10);
5764#endif
5765 preempt_latency_start(val);
5766}
5767EXPORT_SYMBOL(preempt_count_add);
5768NOKPROBE_SYMBOL(preempt_count_add);
5769
5770/*
5771 * If the value passed in is equal to the current preempt count
5772 * then we just enabled preemption. Stop timing the latency.
5773 */
5774static inline void preempt_latency_stop(int val)
5775{
5776 if (preempt_count() == val)
5777 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5778}
5779
5780void preempt_count_sub(int val)
5781{
5782#ifdef CONFIG_DEBUG_PREEMPT
5783 /*
5784 * Underflow?
5785 */
5786 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5787 return;
5788 /*
5789 * Is the spinlock portion underflowing?
5790 */
5791 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5792 !(preempt_count() & PREEMPT_MASK)))
5793 return;
5794#endif
5795
5796 preempt_latency_stop(val);
5797 __preempt_count_sub(val);
5798}
5799EXPORT_SYMBOL(preempt_count_sub);
5800NOKPROBE_SYMBOL(preempt_count_sub);
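/*
 * Illustrative example: with nested preempt_disable() sections only the
 * outermost pair starts/stops the latency timing, because the helpers
 * above compare @val against the preempt_count() around the update:
 *
 *	preempt_disable();	// count 0 -> 1 == val: preempt_latency_start()
 *	preempt_disable();	// count 1 -> 2 != val: no-op
 *	...
 *	preempt_enable();	// count 2 != val: no-op, then 2 -> 1
 *	preempt_enable();	// count 1 == val: preempt_latency_stop(), 1 -> 0
 */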
5801
5802#else
5803static inline void preempt_latency_start(int val) { }
5804static inline void preempt_latency_stop(int val) { }
5805#endif
5806
5807static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5808{
5809#ifdef CONFIG_DEBUG_PREEMPT
5810 return p->preempt_disable_ip;
5811#else
5812 return 0;
5813#endif
5814}
5815
5816/*
5817 * Print scheduling while atomic bug:
5818 */
5819static noinline void __schedule_bug(struct task_struct *prev)
5820{
5821 /* Save this before calling printk(), since that will clobber it */
5822 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5823
5824 if (oops_in_progress)
5825 return;
5826
5827 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5828 prev->comm, prev->pid, preempt_count());
5829
5830 debug_show_held_locks(prev);
5831 print_modules();
5832 if (irqs_disabled())
5833 print_irqtrace_events(prev);
5834 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
5835 && in_atomic_preempt_off()) {
5836 pr_err("Preemption disabled at:");
5837 print_ip_sym(KERN_ERR, preempt_disable_ip);
5838 }
5839 check_panic_on_warn("scheduling while atomic");
5840
5841 dump_stack();
5842 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5843}
5844
5845/*
5846 * Various schedule()-time debugging checks and statistics:
5847 */
5848static inline void schedule_debug(struct task_struct *prev, bool preempt)
5849{
5850#ifdef CONFIG_SCHED_STACK_END_CHECK
5851 if (task_stack_end_corrupted(prev))
5852 panic("corrupted stack end detected inside scheduler\n");
5853
5854 if (task_scs_end_corrupted(prev))
5855 panic("corrupted shadow stack detected inside scheduler\n");
5856#endif
5857
5858#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5859 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5860 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5861 prev->comm, prev->pid, prev->non_block_count);
5862 dump_stack();
5863 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5864 }
5865#endif
5866
5867 if (unlikely(in_atomic_preempt_off())) {
5868 __schedule_bug(prev);
5869 preempt_count_set(PREEMPT_DISABLED);
5870 }
5871 rcu_sleep_check();
5872 SCHED_WARN_ON(ct_state() == CONTEXT_USER);
5873
5874 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5875
5876 schedstat_inc(this_rq()->sched_count);
5877}
5878
5879static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
5880 struct rq_flags *rf)
5881{
5882#ifdef CONFIG_SMP
5883 const struct sched_class *class;
5884 /*
5885 * We must do the balancing pass before put_prev_task(), such
5886 * that when we release the rq->lock the task is in the same
5887 * state as before we took rq->lock.
5888 *
5889 * We can terminate the balance pass as soon as we know there is
5890 * a runnable task of @class priority or higher.
5891 */
5892 for_class_range(class, prev->sched_class, &idle_sched_class) {
5893 if (class->balance(rq, prev, rf))
5894 break;
5895 }
5896#endif
5897
5898 put_prev_task(rq, prev);
5899}
5900
5901/*
5902 * Pick up the highest-prio task:
5903 */
5904static inline struct task_struct *
5905__pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5906{
5907 const struct sched_class *class;
5908 struct task_struct *p;
5909
5910 /*
5911 * Optimization: we know that if all tasks are in the fair class we can
5912 * call that function directly, but only if the @prev task wasn't of a
5913	 * higher scheduling class, because otherwise those classes lose the
5914	 * opportunity to pull in more work from other CPUs.
5915 */
5916 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
5917 rq->nr_running == rq->cfs.h_nr_running)) {
5918
5919 p = pick_next_task_fair(rq, prev, rf);
5920 if (unlikely(p == RETRY_TASK))
5921 goto restart;
5922
5923 /* Assume the next prioritized class is idle_sched_class */
5924 if (!p) {
5925 put_prev_task(rq, prev);
5926 p = pick_next_task_idle(rq);
5927 }
5928
5929 return p;
5930 }
5931
5932restart:
5933 put_prev_task_balance(rq, prev, rf);
5934
5935 for_each_class(class) {
5936 p = class->pick_next_task(rq);
5937 if (p)
5938 return p;
5939 }
5940
5941 BUG(); /* The idle class should always have a runnable task. */
5942}
5943
5944#ifdef CONFIG_SCHED_CORE
5945static inline bool is_task_rq_idle(struct task_struct *t)
5946{
5947 return (task_rq(t)->idle == t);
5948}
5949
5950static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
5951{
5952 return is_task_rq_idle(a) || (a->core_cookie == cookie);
5953}
5954
5955static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
5956{
5957 if (is_task_rq_idle(a) || is_task_rq_idle(b))
5958 return true;
5959
5960 return a->core_cookie == b->core_cookie;
5961}
5962
5963static inline struct task_struct *pick_task(struct rq *rq)
5964{
5965 const struct sched_class *class;
5966 struct task_struct *p;
5967
5968 for_each_class(class) {
5969 p = class->pick_task(rq);
5970 if (p)
5971 return p;
5972 }
5973
5974 BUG(); /* The idle class should always have a runnable task. */
5975}
5976
5977extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
5978
5979static void queue_core_balance(struct rq *rq);
5980
5981static struct task_struct *
5982pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5983{
5984 struct task_struct *next, *p, *max = NULL;
5985 const struct cpumask *smt_mask;
5986 bool fi_before = false;
5987 bool core_clock_updated = (rq == rq->core);
5988 unsigned long cookie;
5989 int i, cpu, occ = 0;
5990 struct rq *rq_i;
5991 bool need_sync;
5992
5993 if (!sched_core_enabled(rq))
5994 return __pick_next_task(rq, prev, rf);
5995
5996 cpu = cpu_of(rq);
5997
5998	/* Stopper task is switching into idle, no need for core-wide selection. */
5999 if (cpu_is_offline(cpu)) {
6000 /*
6001 * Reset core_pick so that we don't enter the fastpath when
6002		 * coming online. The picked task would already have been
6003		 * migrated to another CPU while this one was offline.
6004 */
6005 rq->core_pick = NULL;
6006 return __pick_next_task(rq, prev, rf);
6007 }
6008
6009 /*
6010 * If there were no {en,de}queues since we picked (IOW, the task
6011 * pointers are all still valid), and we haven't scheduled the last
6012 * pick yet, do so now.
6013 *
6014 * rq->core_pick can be NULL if no selection was made for a CPU because
6015 * it was either offline or went offline during a sibling's core-wide
6016 * selection. In this case, do a core-wide selection.
6017 */
6018 if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6019 rq->core->core_pick_seq != rq->core_sched_seq &&
6020 rq->core_pick) {
6021 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6022
6023 next = rq->core_pick;
6024 if (next != prev) {
6025 put_prev_task(rq, prev);
6026 set_next_task(rq, next);
6027 }
6028
6029 rq->core_pick = NULL;
6030 goto out;
6031 }
6032
6033 put_prev_task_balance(rq, prev, rf);
6034
6035 smt_mask = cpu_smt_mask(cpu);
6036 need_sync = !!rq->core->core_cookie;
6037
6038 /* reset state */
6039 rq->core->core_cookie = 0UL;
6040 if (rq->core->core_forceidle_count) {
6041 if (!core_clock_updated) {
6042 update_rq_clock(rq->core);
6043 core_clock_updated = true;
6044 }
6045 sched_core_account_forceidle(rq);
6046 /* reset after accounting force idle */
6047 rq->core->core_forceidle_start = 0;
6048 rq->core->core_forceidle_count = 0;
6049 rq->core->core_forceidle_occupation = 0;
6050 need_sync = true;
6051 fi_before = true;
6052 }
6053
6054 /*
6055 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6056 *
6057 * @task_seq guards the task state ({en,de}queues)
6058 * @pick_seq is the @task_seq we did a selection on
6059 * @sched_seq is the @pick_seq we scheduled
6060 *
6061 * However, preemptions can cause multiple picks on the same task set.
6062 * 'Fix' this by also increasing @task_seq for every pick.
6063 */
6064 rq->core->core_task_seq++;
6065
6066 /*
6067 * Optimize for common case where this CPU has no cookies
6068 * and there are no cookied tasks running on siblings.
6069 */
6070 if (!need_sync) {
6071 next = pick_task(rq);
6072 if (!next->core_cookie) {
6073 rq->core_pick = NULL;
6074 /*
6075 * For robustness, update the min_vruntime_fi for
6076 * unconstrained picks as well.
6077 */
6078 WARN_ON_ONCE(fi_before);
6079 task_vruntime_update(rq, next, false);
6080 goto out_set_next;
6081 }
6082 }
6083
6084 /*
6085 * For each thread: do the regular task pick and find the max prio task
6086 * amongst them.
6087 *
6088 * Tie-break prio towards the current CPU
6089 */
6090 for_each_cpu_wrap(i, smt_mask, cpu) {
6091 rq_i = cpu_rq(i);
6092
6093 /*
6094 * Current cpu always has its clock updated on entrance to
6095 * pick_next_task(). If the current cpu is not the core,
6096 * the core may also have been updated above.
6097 */
6098 if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6099 update_rq_clock(rq_i);
6100
6101 p = rq_i->core_pick = pick_task(rq_i);
6102 if (!max || prio_less(max, p, fi_before))
6103 max = p;
6104 }
6105
6106 cookie = rq->core->core_cookie = max->core_cookie;
6107
6108 /*
6109 * For each thread: try and find a runnable task that matches @max or
6110 * force idle.
6111 */
6112 for_each_cpu(i, smt_mask) {
6113 rq_i = cpu_rq(i);
6114 p = rq_i->core_pick;
6115
6116 if (!cookie_equals(p, cookie)) {
6117 p = NULL;
6118 if (cookie)
6119 p = sched_core_find(rq_i, cookie);
6120 if (!p)
6121 p = idle_sched_class.pick_task(rq_i);
6122 }
6123
6124 rq_i->core_pick = p;
6125
6126 if (p == rq_i->idle) {
6127 if (rq_i->nr_running) {
6128 rq->core->core_forceidle_count++;
6129 if (!fi_before)
6130 rq->core->core_forceidle_seq++;
6131 }
6132 } else {
6133 occ++;
6134 }
6135 }
6136
6137 if (schedstat_enabled() && rq->core->core_forceidle_count) {
6138 rq->core->core_forceidle_start = rq_clock(rq->core);
6139 rq->core->core_forceidle_occupation = occ;
6140 }
6141
6142 rq->core->core_pick_seq = rq->core->core_task_seq;
6143 next = rq->core_pick;
6144 rq->core_sched_seq = rq->core->core_pick_seq;
6145
6146 /* Something should have been selected for current CPU */
6147 WARN_ON_ONCE(!next);
6148
6149 /*
6150 * Reschedule siblings
6151 *
6152 * NOTE: L1TF -- at this point we're no longer running the old task and
6153 * sending an IPI (below) ensures the sibling will no longer be running
6154 * their task. This ensures there is no inter-sibling overlap between
6155 * non-matching user state.
6156 */
6157 for_each_cpu(i, smt_mask) {
6158 rq_i = cpu_rq(i);
6159
6160 /*
6161 * An online sibling might have gone offline before a task
6162 * could be picked for it, or it might be offline but later
6163		 * happen to come online, but it's too late and nothing was
6164		 * picked for it. That's OK - it will pick tasks for itself,
6165 * so ignore it.
6166 */
6167 if (!rq_i->core_pick)
6168 continue;
6169
6170 /*
6171 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6172 * fi_before fi update?
6173 * 0 0 1
6174 * 0 1 1
6175 * 1 0 1
6176 * 1 1 0
6177 */
6178 if (!(fi_before && rq->core->core_forceidle_count))
6179 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6180
6181 rq_i->core_pick->core_occupation = occ;
6182
6183 if (i == cpu) {
6184 rq_i->core_pick = NULL;
6185 continue;
6186 }
6187
6188 /* Did we break L1TF mitigation requirements? */
6189 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6190
6191 if (rq_i->curr == rq_i->core_pick) {
6192 rq_i->core_pick = NULL;
6193 continue;
6194 }
6195
6196 resched_curr(rq_i);
6197 }
6198
6199out_set_next:
6200 set_next_task(rq, next);
6201out:
6202 if (rq->core->core_forceidle_count && next == rq->idle)
6203 queue_core_balance(rq);
6204
6205 return next;
6206}
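/*
 * Userspace sketch (assumes a kernel with CONFIG_SCHED_CORE and prctl(2)
 * PR_SCHED_CORE support): the cookies consumed by the core-wide selection
 * above are typically created like this; error handling elided:
 *
 *	#include <unistd.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	// Give the calling thread a fresh, unique core-scheduling cookie.
 *	static int tag_current_thread(void)
 *	{
 *		return prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE,
 *			     0, PR_SCHED_CORE_SCOPE_THREAD, 0);
 *	}
 *
 * Afterwards, SMT siblings only run tasks whose cookies match, with
 * non-matching siblings forced idle as implemented above.
 */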
6207
6208static bool try_steal_cookie(int this, int that)
6209{
6210 struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6211 struct task_struct *p;
6212 unsigned long cookie;
6213 bool success = false;
6214
6215 local_irq_disable();
6216 double_rq_lock(dst, src);
6217
6218 cookie = dst->core->core_cookie;
6219 if (!cookie)
6220 goto unlock;
6221
6222 if (dst->curr != dst->idle)
6223 goto unlock;
6224
6225 p = sched_core_find(src, cookie);
6226 if (p == src->idle)
6227 goto unlock;
6228
6229 do {
6230 if (p == src->core_pick || p == src->curr)
6231 goto next;
6232
6233 if (!is_cpu_allowed(p, this))
6234 goto next;
6235
6236 if (p->core_occupation > dst->idle->core_occupation)
6237 goto next;
6238
6239 deactivate_task(src, p, 0);
6240 set_task_cpu(p, this);
6241 activate_task(dst, p, 0);
6242
6243 resched_curr(dst);
6244
6245 success = true;
6246 break;
6247
6248next:
6249 p = sched_core_next(p, cookie);
6250 } while (p);
6251
6252unlock:
6253 double_rq_unlock(dst, src);
6254 local_irq_enable();
6255
6256 return success;
6257}
6258
6259static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6260{
6261 int i;
6262
6263 for_each_cpu_wrap(i, sched_domain_span(sd), cpu) {
6264 if (i == cpu)
6265 continue;
6266
6267 if (need_resched())
6268 break;
6269
6270 if (try_steal_cookie(cpu, i))
6271 return true;
6272 }
6273
6274 return false;
6275}
6276
6277static void sched_core_balance(struct rq *rq)
6278{
6279 struct sched_domain *sd;
6280 int cpu = cpu_of(rq);
6281
6282 preempt_disable();
6283 rcu_read_lock();
6284 raw_spin_rq_unlock_irq(rq);
6285 for_each_domain(cpu, sd) {
6286 if (need_resched())
6287 break;
6288
6289 if (steal_cookie_task(cpu, sd))
6290 break;
6291 }
6292 raw_spin_rq_lock_irq(rq);
6293 rcu_read_unlock();
6294 preempt_enable();
6295}
6296
6297static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6298
6299static void queue_core_balance(struct rq *rq)
6300{
6301 if (!sched_core_enabled(rq))
6302 return;
6303
6304 if (!rq->core->core_cookie)
6305 return;
6306
6307 if (!rq->nr_running) /* not forced idle */
6308 return;
6309
6310 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6311}
6312
6313static void sched_core_cpu_starting(unsigned int cpu)
6314{
6315 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6316 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6317 unsigned long flags;
6318 int t;
6319
6320 sched_core_lock(cpu, &flags);
6321
6322 WARN_ON_ONCE(rq->core != rq);
6323
6324 /* if we're the first, we'll be our own leader */
6325 if (cpumask_weight(smt_mask) == 1)
6326 goto unlock;
6327
6328 /* find the leader */
6329 for_each_cpu(t, smt_mask) {
6330 if (t == cpu)
6331 continue;
6332 rq = cpu_rq(t);
6333 if (rq->core == rq) {
6334 core_rq = rq;
6335 break;
6336 }
6337 }
6338
6339 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6340 goto unlock;
6341
6342 /* install and validate core_rq */
6343 for_each_cpu(t, smt_mask) {
6344 rq = cpu_rq(t);
6345
6346 if (t == cpu)
6347 rq->core = core_rq;
6348
6349 WARN_ON_ONCE(rq->core != core_rq);
6350 }
6351
6352unlock:
6353 sched_core_unlock(cpu, &flags);
6354}
6355
6356static void sched_core_cpu_deactivate(unsigned int cpu)
6357{
6358 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6359 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6360 unsigned long flags;
6361 int t;
6362
6363 sched_core_lock(cpu, &flags);
6364
6365 /* if we're the last man standing, nothing to do */
6366 if (cpumask_weight(smt_mask) == 1) {
6367 WARN_ON_ONCE(rq->core != rq);
6368 goto unlock;
6369 }
6370
6371 /* if we're not the leader, nothing to do */
6372 if (rq->core != rq)
6373 goto unlock;
6374
6375 /* find a new leader */
6376 for_each_cpu(t, smt_mask) {
6377 if (t == cpu)
6378 continue;
6379 core_rq = cpu_rq(t);
6380 break;
6381 }
6382
6383 if (WARN_ON_ONCE(!core_rq)) /* impossible */
6384 goto unlock;
6385
6386 /* copy the shared state to the new leader */
6387 core_rq->core_task_seq = rq->core_task_seq;
6388 core_rq->core_pick_seq = rq->core_pick_seq;
6389 core_rq->core_cookie = rq->core_cookie;
6390 core_rq->core_forceidle_count = rq->core_forceidle_count;
6391 core_rq->core_forceidle_seq = rq->core_forceidle_seq;
6392 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6393
6394 /*
6395 * Accounting edge for forced idle is handled in pick_next_task().
6396 * Don't need another one here, since the hotplug thread shouldn't
6397 * have a cookie.
6398 */
6399 core_rq->core_forceidle_start = 0;
6400
6401 /* install new leader */
6402 for_each_cpu(t, smt_mask) {
6403 rq = cpu_rq(t);
6404 rq->core = core_rq;
6405 }
6406
6407unlock:
6408 sched_core_unlock(cpu, &flags);
6409}
6410
6411static inline void sched_core_cpu_dying(unsigned int cpu)
6412{
6413 struct rq *rq = cpu_rq(cpu);
6414
6415 if (rq->core != rq)
6416 rq->core = rq;
6417}
6418
6419#else /* !CONFIG_SCHED_CORE */
6420
6421static inline void sched_core_cpu_starting(unsigned int cpu) {}
6422static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6423static inline void sched_core_cpu_dying(unsigned int cpu) {}
6424
6425static struct task_struct *
6426pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6427{
6428 return __pick_next_task(rq, prev, rf);
6429}
6430
6431#endif /* CONFIG_SCHED_CORE */
6432
6433/*
6434 * Constants for the sched_mode argument of __schedule().
6435 *
6436 * The mode argument allows RT enabled kernels to differentiate a
6437 * preemption from blocking on an 'sleeping' spin/rwlock. Note that
6438 * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
6439 * optimize the AND operation out and just check for zero.
6440 */
6441#define SM_NONE 0x0
6442#define SM_PREEMPT 0x1
6443#define SM_RTLOCK_WAIT 0x2
6444
6445#ifndef CONFIG_PREEMPT_RT
6446# define SM_MASK_PREEMPT (~0U)
6447#else
6448# define SM_MASK_PREEMPT SM_PREEMPT
6449#endif
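/*
 * Illustrative consequence: with SM_MASK_PREEMPT == ~0U on !PREEMPT_RT,
 * the test in __schedule() below
 *
 *	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state)
 *
 * degenerates to "if (!sched_mode && prev_state)", so non-RT builds pay
 * nothing for the RT-only SM_RTLOCK_WAIT distinction.
 */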
6450
6451/*
6452 * __schedule() is the main scheduler function.
6453 *
6454 * The main means of driving the scheduler and thus entering this function are:
6455 *
6456 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6457 *
6458 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6459 *         paths. For example, see arch/x86/entry/entry_64.S.
6460 *
6461 *      To drive preemption between tasks, the scheduler sets the flag in the
6462 *      timer interrupt handler, scheduler_tick().
6463 *
6464 * 3. Wakeups don't really cause entry into schedule(). They add a
6465 * task to the run-queue and that's it.
6466 *
6467 * Now, if the new task added to the run-queue preempts the current
6468 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6469 * called on the nearest possible occasion:
6470 *
6471 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6472 *
6473 *       - in syscall or exception context, at the next outermost
6474 * preempt_enable(). (this might be as soon as the wake_up()'s
6475 * spin_unlock()!)
6476 *
6477 * - in IRQ context, return from interrupt-handler to
6478 * preemptible context
6479 *
6480 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6481 * then at the next:
6482 *
6483 * - cond_resched() call
6484 * - explicit schedule() call
6485 * - return from syscall or exception to user-space
6486 * - return from interrupt-handler to user-space
6487 *
6488 * WARNING: must be called with preemption disabled!
6489 */
6490static void __sched notrace __schedule(unsigned int sched_mode)
6491{
6492 struct task_struct *prev, *next;
6493 unsigned long *switch_count;
6494 unsigned long prev_state;
6495 struct rq_flags rf;
6496 struct rq *rq;
6497 int cpu;
6498
6499 cpu = smp_processor_id();
6500 rq = cpu_rq(cpu);
6501 prev = rq->curr;
6502
6503 schedule_debug(prev, !!sched_mode);
6504
6505 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6506 hrtick_clear(rq);
6507
6508 local_irq_disable();
6509 rcu_note_context_switch(!!sched_mode);
6510
6511 /*
6512 * Make sure that signal_pending_state()->signal_pending() below
6513 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6514 * done by the caller to avoid the race with signal_wake_up():
6515 *
6516 * __set_current_state(@state) signal_wake_up()
6517 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
6518 * wake_up_state(p, state)
6519	 *   LOCK rq->lock				    LOCK p->pi_lock
6520 * smp_mb__after_spinlock() smp_mb__after_spinlock()
6521 * if (signal_pending_state()) if (p->state & @state)
6522 *
6523 * Also, the membarrier system call requires a full memory barrier
6524 * after coming from user-space, before storing to rq->curr.
6525 */
6526 rq_lock(rq, &rf);
6527 smp_mb__after_spinlock();
6528
6529 /* Promote REQ to ACT */
6530 rq->clock_update_flags <<= 1;
6531 update_rq_clock(rq);
6532
6533 switch_count = &prev->nivcsw;
6534
6535 /*
6536 * We must load prev->state once (task_struct::state is volatile), such
6537 * that we form a control dependency vs deactivate_task() below.
6538 */
6539 prev_state = READ_ONCE(prev->__state);
6540 if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
6541 if (signal_pending_state(prev_state, prev)) {
6542 WRITE_ONCE(prev->__state, TASK_RUNNING);
6543 } else {
6544 prev->sched_contributes_to_load =
6545 (prev_state & TASK_UNINTERRUPTIBLE) &&
6546 !(prev_state & TASK_NOLOAD) &&
6547 !(prev_state & TASK_FROZEN);
6548
6549 if (prev->sched_contributes_to_load)
6550 rq->nr_uninterruptible++;
6551
6552 /*
6553 * __schedule() ttwu()
6554 * prev_state = prev->state; if (p->on_rq && ...)
6555 * if (prev_state) goto out;
6556 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
6557 * p->state = TASK_WAKING
6558 *
6559 * Where __schedule() and ttwu() have matching control dependencies.
6560 *
6561 * After this, schedule() must not care about p->state any more.
6562 */
6563 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
6564
6565 if (prev->in_iowait) {
6566 atomic_inc(&rq->nr_iowait);
6567 delayacct_blkio_start();
6568 }
6569 }
6570 switch_count = &prev->nvcsw;
6571 }
6572
6573 next = pick_next_task(rq, prev, &rf);
6574 clear_tsk_need_resched(prev);
6575 clear_preempt_need_resched();
6576#ifdef CONFIG_SCHED_DEBUG
6577 rq->last_seen_need_resched_ns = 0;
6578#endif
6579
6580 if (likely(prev != next)) {
6581 rq->nr_switches++;
6582 /*
6583 * RCU users of rcu_dereference(rq->curr) may not see
6584 * changes to task_struct made by pick_next_task().
6585 */
6586 RCU_INIT_POINTER(rq->curr, next);
6587 /*
6588 * The membarrier system call requires each architecture
6589 * to have a full memory barrier after updating
6590 * rq->curr, before returning to user-space.
6591 *
6592 * Here are the schemes providing that barrier on the
6593 * various architectures:
6594 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
6595 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
6596 * - finish_lock_switch() for weakly-ordered
6597 * architectures where spin_unlock is a full barrier,
6598 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6599 * is a RELEASE barrier),
6600 */
6601 ++*switch_count;
6602
6603 migrate_disable_switch(rq, prev);
6604 psi_sched_switch(prev, next, !task_on_rq_queued(prev));
6605
6606 trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
6607
6608 /* Also unlocks the rq: */
6609 rq = context_switch(rq, prev, next, &rf);
6610 } else {
6611 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
6612
6613 rq_unpin_lock(rq, &rf);
6614 __balance_callbacks(rq);
6615 raw_spin_rq_unlock_irq(rq);
6616 }
6617}
6618
6619void __noreturn do_task_dead(void)
6620{
6621 /* Causes final put_task_struct in finish_task_switch(): */
6622 set_special_state(TASK_DEAD);
6623
6624 /* Tell freezer to ignore us: */
6625 current->flags |= PF_NOFREEZE;
6626
6627 __schedule(SM_NONE);
6628 BUG();
6629
6630 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6631 for (;;)
6632 cpu_relax();
6633}
6634
6635static inline void sched_submit_work(struct task_struct *tsk)
6636{
6637 unsigned int task_flags;
6638
6639 if (task_is_running(tsk))
6640 return;
6641
6642 task_flags = tsk->flags;
6643 /*
6644 * If a worker goes to sleep, notify and ask workqueue whether it
6645 * wants to wake up a task to maintain concurrency.
6646 */
6647 if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
6648 if (task_flags & PF_WQ_WORKER)
6649 wq_worker_sleeping(tsk);
6650 else
6651 io_wq_worker_sleeping(tsk);
6652 }
6653
6654 /*
6655 * spinlock and rwlock must not flush block requests. This will
6656 * deadlock if the callback attempts to acquire a lock which is
6657 * already acquired.
6658 */
6659 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
6660
6661 /*
6662 * If we are going to sleep and we have plugged IO queued,
6663 * make sure to submit it to avoid deadlocks.
6664 */
6665 blk_flush_plug(tsk->plug, true);
6666}
6667
6668static void sched_update_worker(struct task_struct *tsk)
6669{
6670 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
6671 if (tsk->flags & PF_WQ_WORKER)
6672 wq_worker_running(tsk);
6673 else
6674 io_wq_worker_running(tsk);
6675 }
6676}
6677
6678asmlinkage __visible void __sched schedule(void)
6679{
6680 struct task_struct *tsk = current;
6681
6682 sched_submit_work(tsk);
6683 do {
6684 preempt_disable();
6685 __schedule(SM_NONE);
6686 sched_preempt_enable_no_resched();
6687 } while (need_resched());
6688 sched_update_worker(tsk);
6689}
6690EXPORT_SYMBOL(schedule);
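/*
 * A minimal sketch of the blocking pattern the state/wakeup ordering in
 * __schedule() is designed for ('my_cond' is a hypothetical condition,
 * the waker uses wake_up_process()):
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (my_cond || signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The task state must be set before testing the condition: a concurrent
 * wake_up_process() then either observes the sleeping state or the
 * sleeper observes the updated condition, never neither.
 */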
6691
6692/*
6693 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6694 * state (i.e. has scheduled out non-voluntarily) by making sure that all
6695 * tasks have either left the run queue or have gone into user space.
6696 * As idle tasks do not do either, they must not ever be preempted
6697 * (schedule out non-voluntarily).
6698 *
6699 * schedule_idle() is similar to schedule_preempt_disabled() except that it
6700 * never enables preemption because it does not call sched_submit_work().
6701 */
6702void __sched schedule_idle(void)
6703{
6704 /*
6705 * As this skips calling sched_submit_work(), which the idle task does
6706 * regardless because that function is a nop when the task is in a
6707 * TASK_RUNNING state, make sure this isn't used someplace that the
6708	 * TASK_RUNNING state, make sure this isn't used anywhere the
6709	 * current task could be in any other state. Note, idle is always in the
6710 */
6711 WARN_ON_ONCE(current->__state);
6712 do {
6713 __schedule(SM_NONE);
6714 } while (need_resched());
6715}
6716
6717#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
6718asmlinkage __visible void __sched schedule_user(void)
6719{
6720 /*
6721 * If we come here after a random call to set_need_resched(),
6722 * or we have been woken up remotely but the IPI has not yet arrived,
6723 * we haven't yet exited the RCU idle mode. Do it here manually until
6724 * we find a better solution.
6725 *
6726 * NB: There are buggy callers of this function. Ideally we
6727 * should warn if prev_state != CONTEXT_USER, but that will trigger
6728 * too frequently to make sense yet.
6729 */
6730 enum ctx_state prev_state = exception_enter();
6731 schedule();
6732 exception_exit(prev_state);
6733}
6734#endif
6735
6736/**
6737 * schedule_preempt_disabled - called with preemption disabled
6738 *
6739 * Returns with preemption disabled. Note: preempt_count must be 1
6740 */
6741void __sched schedule_preempt_disabled(void)
6742{
6743 sched_preempt_enable_no_resched();
6744 schedule();
6745 preempt_disable();
6746}
6747
6748#ifdef CONFIG_PREEMPT_RT
6749void __sched notrace schedule_rtlock(void)
6750{
6751 do {
6752 preempt_disable();
6753 __schedule(SM_RTLOCK_WAIT);
6754 sched_preempt_enable_no_resched();
6755 } while (need_resched());
6756}
6757NOKPROBE_SYMBOL(schedule_rtlock);
6758#endif
6759
6760static void __sched notrace preempt_schedule_common(void)
6761{
6762 do {
6763 /*
6764 * Because the function tracer can trace preempt_count_sub()
6765 * and it also uses preempt_enable/disable_notrace(), if
6766 * NEED_RESCHED is set, the preempt_enable_notrace() called
6767 * by the function tracer will call this function again and
6768 * cause infinite recursion.
6769 *
6770 * Preemption must be disabled here before the function
6771 * tracer can trace. Break up preempt_disable() into two
6772 * calls. One to disable preemption without fear of being
6773 * traced. The other to still record the preemption latency,
6774 * which can also be traced by the function tracer.
6775 */
6776 preempt_disable_notrace();
6777 preempt_latency_start(1);
6778 __schedule(SM_PREEMPT);
6779 preempt_latency_stop(1);
6780 preempt_enable_no_resched_notrace();
6781
6782 /*
6783 * Check again in case we missed a preemption opportunity
6784 * between schedule and now.
6785 */
6786 } while (need_resched());
6787}
6788
6789#ifdef CONFIG_PREEMPTION
6790/*
6791 * This is the entry point to schedule() from in-kernel preemption
6792 * off of preempt_enable.
6793 */
6794asmlinkage __visible void __sched notrace preempt_schedule(void)
6795{
6796 /*
6797 * If there is a non-zero preempt_count or interrupts are disabled,
6798 * we do not want to preempt the current task. Just return..
6799 */
6800 if (likely(!preemptible()))
6801 return;
6802 preempt_schedule_common();
6803}
6804NOKPROBE_SYMBOL(preempt_schedule);
6805EXPORT_SYMBOL(preempt_schedule);
6806
6807#ifdef CONFIG_PREEMPT_DYNAMIC
6808#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6809#ifndef preempt_schedule_dynamic_enabled
6810#define preempt_schedule_dynamic_enabled preempt_schedule
6811#define preempt_schedule_dynamic_disabled NULL
6812#endif
6813DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
6814EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
6815#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6816static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
6817void __sched notrace dynamic_preempt_schedule(void)
6818{
6819 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
6820 return;
6821 preempt_schedule();
6822}
6823NOKPROBE_SYMBOL(dynamic_preempt_schedule);
6824EXPORT_SYMBOL(dynamic_preempt_schedule);
6825#endif
6826#endif
6827
6828/**
6829 * preempt_schedule_notrace - preempt_schedule called by tracing
6830 *
6831 * The tracing infrastructure uses preempt_enable_notrace to prevent
6832 * recursion and tracing preempt enabling caused by the tracing
6833 * infrastructure itself. But as tracing can happen in areas coming
6834 * from userspace or just about to enter userspace, a preempt enable
6835 * can occur before user_exit() is called. This will cause the scheduler
6836 * to be called when the system is still in usermode.
6837 *
6838 * To prevent this, the preempt_enable_notrace will use this function
6839 * instead of preempt_schedule() to exit user context if needed before
6840 * calling the scheduler.
6841 */
6842asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
6843{
6844 enum ctx_state prev_ctx;
6845
6846 if (likely(!preemptible()))
6847 return;
6848
6849 do {
6850 /*
6851 * Because the function tracer can trace preempt_count_sub()
6852 * and it also uses preempt_enable/disable_notrace(), if
6853 * NEED_RESCHED is set, the preempt_enable_notrace() called
6854 * by the function tracer will call this function again and
6855 * cause infinite recursion.
6856 *
6857 * Preemption must be disabled here before the function
6858 * tracer can trace. Break up preempt_disable() into two
6859 * calls. One to disable preemption without fear of being
6860 * traced. The other to still record the preemption latency,
6861 * which can also be traced by the function tracer.
6862 */
6863 preempt_disable_notrace();
6864 preempt_latency_start(1);
6865 /*
6866 * Needs preempt disabled in case user_exit() is traced
6867 * and the tracer calls preempt_enable_notrace() causing
6868 * an infinite recursion.
6869 */
6870 prev_ctx = exception_enter();
6871 __schedule(SM_PREEMPT);
6872 exception_exit(prev_ctx);
6873
6874 preempt_latency_stop(1);
6875 preempt_enable_no_resched_notrace();
6876 } while (need_resched());
6877}
6878EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
6879
6880#ifdef CONFIG_PREEMPT_DYNAMIC
6881#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6882#ifndef preempt_schedule_notrace_dynamic_enabled
6883#define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
6884#define preempt_schedule_notrace_dynamic_disabled NULL
6885#endif
6886DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
6887EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
6888#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6889static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
6890void __sched notrace dynamic_preempt_schedule_notrace(void)
6891{
6892 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
6893 return;
6894 preempt_schedule_notrace();
6895}
6896NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
6897EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
6898#endif
6899#endif
6900
6901#endif /* CONFIG_PREEMPTION */
6902
6903/*
6904 * This is the entry point to schedule() from kernel preemption
6905 * off of irq context.
6906 * Note that this is called and returns with IRQs disabled. This will
6907 * protect us against recursive calls from IRQ context.
6908 */
6909asmlinkage __visible void __sched preempt_schedule_irq(void)
6910{
6911 enum ctx_state prev_state;
6912
6913 /* Catch callers which need to be fixed */
6914 BUG_ON(preempt_count() || !irqs_disabled());
6915
6916 prev_state = exception_enter();
6917
6918 do {
6919 preempt_disable();
6920 local_irq_enable();
6921 __schedule(SM_PREEMPT);
6922 local_irq_disable();
6923 sched_preempt_enable_no_resched();
6924 } while (need_resched());
6925
6926 exception_exit(prev_state);
6927}
6928
6929int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
6930 void *key)
6931{
6932 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
6933 return try_to_wake_up(curr->private, mode, wake_flags);
6934}
6935EXPORT_SYMBOL(default_wake_function);
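/*
 * Sketch: default_wake_function() is the callback DECLARE_WAITQUEUE()
 * installs, so the classic open-coded waitqueue sleep looks like
 * ('wq_head' and 'done' are hypothetical):
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (done)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&wq_head, &wait);
 *
 * wake_up(&wq_head) then reaches try_to_wake_up() through this callback.
 */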
6936
6937static void __setscheduler_prio(struct task_struct *p, int prio)
6938{
6939 if (dl_prio(prio))
6940 p->sched_class = &dl_sched_class;
6941 else if (rt_prio(prio))
6942 p->sched_class = &rt_sched_class;
6943 else
6944 p->sched_class = &fair_sched_class;
6945
6946 p->prio = prio;
6947}
6948
6949#ifdef CONFIG_RT_MUTEXES
6950
6951static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
6952{
6953 if (pi_task)
6954 prio = min(prio, pi_task->prio);
6955
6956 return prio;
6957}
6958
6959static inline int rt_effective_prio(struct task_struct *p, int prio)
6960{
6961 struct task_struct *pi_task = rt_mutex_get_top_task(p);
6962
6963 return __rt_effective_prio(pi_task, prio);
6964}
6965
6966/*
6967 * rt_mutex_setprio - set the current priority of a task
6968 * @p: task to boost
6969 * @pi_task: donor task
6970 *
6971 * This function changes the 'effective' priority of a task. It does
6972 * not touch ->normal_prio like __setscheduler().
6973 *
6974 * Used by the rt_mutex code to implement priority inheritance
6975 * logic. Call site only calls if the priority of the task changed.
6976 */
6977void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
6978{
6979 int prio, oldprio, queued, running, queue_flag =
6980 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
6981 const struct sched_class *prev_class;
6982 struct rq_flags rf;
6983 struct rq *rq;
6984
6985 /* XXX used to be waiter->prio, not waiter->task->prio */
6986 prio = __rt_effective_prio(pi_task, p->normal_prio);
6987
6988 /*
6989 * If nothing changed; bail early.
6990 */
6991 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
6992 return;
6993
6994 rq = __task_rq_lock(p, &rf);
6995 update_rq_clock(rq);
6996 /*
6997 * Set under pi_lock && rq->lock, such that the value can be used under
6998 * either lock.
6999 *
7000	 * Note that there is a load of trickiness in making this pointer cache work
7001 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7002 * ensure a task is de-boosted (pi_task is set to NULL) before the
7003 * task is allowed to run again (and can exit). This ensures the pointer
7004 * points to a blocked task -- which guarantees the task is present.
7005 */
7006 p->pi_top_task = pi_task;
7007
7008 /*
7009 * For FIFO/RR we only need to set prio, if that matches we're done.
7010 */
7011 if (prio == p->prio && !dl_prio(prio))
7012 goto out_unlock;
7013
7014 /*
7015	 * Idle task boosting is a no-no in general. There is one
7016	 * exception, when PREEMPT_RT and NOHZ are active:
7017 *
7018 * The idle task calls get_next_timer_interrupt() and holds
7019 * the timer wheel base->lock on the CPU and another CPU wants
7020 * to access the timer (probably to cancel it). We can safely
7021 * ignore the boosting request, as the idle CPU runs this code
7022 * with interrupts disabled and will complete the lock
7023 * protected section without being interrupted. So there is no
7024 * real need to boost.
7025 */
7026 if (unlikely(p == rq->idle)) {
7027 WARN_ON(p != rq->curr);
7028 WARN_ON(p->pi_blocked_on);
7029 goto out_unlock;
7030 }
7031
7032 trace_sched_pi_setprio(p, pi_task);
7033 oldprio = p->prio;
7034
7035 if (oldprio == prio)
7036 queue_flag &= ~DEQUEUE_MOVE;
7037
7038 prev_class = p->sched_class;
7039 queued = task_on_rq_queued(p);
7040 running = task_current(rq, p);
7041 if (queued)
7042 dequeue_task(rq, p, queue_flag);
7043 if (running)
7044 put_prev_task(rq, p);
7045
7046 /*
7047	 * Boosting conditions are:
7048 * 1. -rt task is running and holds mutex A
7049 * --> -dl task blocks on mutex A
7050 *
7051 * 2. -dl task is running and holds mutex A
7052 * --> -dl task blocks on mutex A and could preempt the
7053 * running task
7054 */
7055 if (dl_prio(prio)) {
7056 if (!dl_prio(p->normal_prio) ||
7057 (pi_task && dl_prio(pi_task->prio) &&
7058 dl_entity_preempt(&pi_task->dl, &p->dl))) {
7059 p->dl.pi_se = pi_task->dl.pi_se;
7060 queue_flag |= ENQUEUE_REPLENISH;
7061 } else {
7062 p->dl.pi_se = &p->dl;
7063 }
7064 } else if (rt_prio(prio)) {
7065 if (dl_prio(oldprio))
7066 p->dl.pi_se = &p->dl;
7067 if (oldprio < prio)
7068 queue_flag |= ENQUEUE_HEAD;
7069 } else {
7070 if (dl_prio(oldprio))
7071 p->dl.pi_se = &p->dl;
7072 if (rt_prio(oldprio))
7073 p->rt.timeout = 0;
7074 }
7075
7076 __setscheduler_prio(p, prio);
7077
7078 if (queued)
7079 enqueue_task(rq, p, queue_flag);
7080 if (running)
7081 set_next_task(rq, p);
7082
7083 check_class_changed(rq, p, prev_class, oldprio);
7084out_unlock:
7085 /* Avoid rq from going away on us: */
7086 preempt_disable();
7087
7088 rq_unpin_lock(rq, &rf);
7089 __balance_callbacks(rq);
7090 raw_spin_rq_unlock(rq);
7091
7092 preempt_enable();
7093}
7094#else
7095static inline int rt_effective_prio(struct task_struct *p, int prio)
7096{
7097 return prio;
7098}
7099#endif
7100
7101void set_user_nice(struct task_struct *p, long nice)
7102{
7103 bool queued, running;
7104 int old_prio;
7105 struct rq_flags rf;
7106 struct rq *rq;
7107
7108 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
7109 return;
7110 /*
7111 * We have to be careful, if called from sys_setpriority(),
7112 * the task might be in the middle of scheduling on another CPU.
7113 */
7114 rq = task_rq_lock(p, &rf);
7115 update_rq_clock(rq);
7116
7117 /*
7118 * The RT priorities are set via sched_setscheduler(), but we still
7119 * allow the 'normal' nice value to be set - but as expected
7120	 * it won't have any effect on scheduling while the task has a
7121	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR policy:
7122 */
7123 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
7124 p->static_prio = NICE_TO_PRIO(nice);
7125 goto out_unlock;
7126 }
7127 queued = task_on_rq_queued(p);
7128 running = task_current(rq, p);
7129 if (queued)
7130 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
7131 if (running)
7132 put_prev_task(rq, p);
7133
7134 p->static_prio = NICE_TO_PRIO(nice);
7135 set_load_weight(p, true);
7136 old_prio = p->prio;
7137 p->prio = effective_prio(p);
7138
7139 if (queued)
7140 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7141 if (running)
7142 set_next_task(rq, p);
7143
7144 /*
7145 * If the task increased its priority or is running and
7146 * lowered its priority, then reschedule its CPU:
7147 */
7148 p->sched_class->prio_changed(rq, p, old_prio);
7149
7150out_unlock:
7151 task_rq_unlock(rq, p, &rf);
7152}
7153EXPORT_SYMBOL(set_user_nice);
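/*
 * In-kernel usage sketch (illustrative; 'my_fn' and the thread name are
 * hypothetical): background kthreads commonly demote themselves so they
 * only soak up otherwise-idle CPU time:
 *
 *	struct task_struct *tsk = kthread_create(my_fn, NULL, "my-worker");
 *
 *	if (!IS_ERR(tsk)) {
 *		set_user_nice(tsk, 19);	// weakest fair-class weight
 *		wake_up_process(tsk);
 *	}
 */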
7154
7155/*
7156 * is_nice_reduction - check if nice value is an actual reduction
7157 *
7158 * Similar to can_nice() but does not perform a capability check.
7159 *
7160 * @p: task
7161 * @nice: nice value
7162 */
7163static bool is_nice_reduction(const struct task_struct *p, const int nice)
7164{
7165 /* Convert nice value [19,-20] to rlimit style value [1,40]: */
7166 int nice_rlim = nice_to_rlimit(nice);
7167
7168 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
7169}
7170
7171/*
7172 * can_nice - check if a task can reduce its nice value
7173 * @p: task
7174 * @nice: nice value
7175 */
7176int can_nice(const struct task_struct *p, const int nice)
7177{
7178 return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
7179}
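/*
 * Worked example: nice_to_rlimit() computes MAX_NICE - nice + 1, i.e.
 * nice 19 -> 1, 0 -> 20, -20 -> 40. With RLIMIT_NICE == 25 a task may
 * therefore drop to nice -5 unprivileged, while nice -6 (rlimit style 26)
 * requires CAP_SYS_NICE.
 */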
7180
7181#ifdef __ARCH_WANT_SYS_NICE
7182
7183/*
7184 * sys_nice - change the priority of the current process.
7185 * @increment: priority increment
7186 *
7187 * sys_setpriority is a more generic, but much slower function that
7188 * does similar things.
7189 */
7190SYSCALL_DEFINE1(nice, int, increment)
7191{
7192 long nice, retval;
7193
7194 /*
7195 * Setpriority might change our priority at the same moment.
7196 * We don't have to worry. Conceptually one call occurs first
7197 * and we have a single winner.
7198 */
7199 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
7200 nice = task_nice(current) + increment;
7201
7202 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
7203 if (increment < 0 && !can_nice(current, nice))
7204 return -EPERM;
7205
7206 retval = security_task_setnice(current, nice);
7207 if (retval)
7208 return retval;
7209
7210 set_user_nice(current, nice);
7211 return 0;
7212}
7213
7214#endif
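/*
 * Userspace sketch for the syscall above: glibc's nice(3) wraps it (or
 * falls back to setpriority(2)). Since -1 is a valid return value, errno
 * must be cleared first to detect errors:
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int prio;
 *
 *		errno = 0;
 *		prio = nice(5);		// request +5; kernel clamps the result
 *		if (prio == -1 && errno != 0)
 *			perror("nice");
 *		else
 *			printf("new nice value: %d\n", prio);
 *		return 0;
 *	}
 */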
7215
7216/**
7217 * task_prio - return the priority value of a given task.
7218 * @p: the task in question.
7219 *
7220 * Return: The priority value as seen by users in /proc.
7221 *
7222 * sched policy return value kernel prio user prio/nice
7223 *
7224 * normal, batch, idle [0 ... 39] [100 ... 139] 0/[-20 ... 19]
7225 * fifo, rr [-2 ... -100] [98 ... 0] [1 ... 99]
7226 * deadline -101 -1 0
7227 */
7228int task_prio(const struct task_struct *p)
7229{
7230 return p->prio - MAX_RT_PRIO;
7231}
7232
7233/**
7234 * idle_cpu - is a given CPU idle currently?
7235 * @cpu: the processor in question.
7236 *
7237 * Return: 1 if the CPU is currently idle. 0 otherwise.
7238 */
7239int idle_cpu(int cpu)
7240{
7241 struct rq *rq = cpu_rq(cpu);
7242
7243 if (rq->curr != rq->idle)
7244 return 0;
7245
7246 if (rq->nr_running)
7247 return 0;
7248
7249#ifdef CONFIG_SMP
7250 if (rq->ttwu_pending)
7251 return 0;
7252#endif
7253
7254 return 1;
7255}
7256
7257/**
7258 * available_idle_cpu - is a given CPU idle for enqueuing work.
7259 * @cpu: the CPU in question.
7260 *
7261 * Return: 1 if the CPU is currently idle. 0 otherwise.
7262 */
7263int available_idle_cpu(int cpu)
7264{
7265 if (!idle_cpu(cpu))
7266 return 0;
7267
7268 if (vcpu_is_preempted(cpu))
7269 return 0;
7270
7271 return 1;
7272}
7273
7274/**
7275 * idle_task - return the idle task for a given CPU.
7276 * @cpu: the processor in question.
7277 *
7278 * Return: The idle task for the CPU @cpu.
7279 */
7280struct task_struct *idle_task(int cpu)
7281{
7282 return cpu_rq(cpu)->idle;
7283}
7284
7285#ifdef CONFIG_SMP
7286/*
7287 * This function computes an effective utilization for the given CPU, to be
7288 * used for frequency selection given the linear relation: f = u * f_max.
7289 *
7290 * The scheduler tracks the following metrics:
7291 *
7292 * cpu_util_{cfs,rt,dl,irq}()
7293 * cpu_bw_dl()
7294 *
7295 * Where the cfs,rt and dl util numbers are tracked with the same metric and
7296 * synchronized windows and are thus directly comparable.
7297 *
7298 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
7299 * which excludes things like IRQ and steal-time. These latter are then accrued
7300 * in the irq utilization.
7301 *
7302 * The DL bandwidth number, on the other hand, is not a measured metric but a value computed
7303 * based on the task model parameters and gives the minimal utilization
7304 * required to meet deadlines.
7305 */
7306unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
7307 enum cpu_util_type type,
7308 struct task_struct *p)
7309{
7310 unsigned long dl_util, util, irq, max;
7311 struct rq *rq = cpu_rq(cpu);
7312
7313 max = arch_scale_cpu_capacity(cpu);
7314
7315 if (!uclamp_is_used() &&
7316 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
7317 return max;
7318 }
7319
7320 /*
7321 * Early check to see if IRQ/steal time saturates the CPU, can be
7322 * because of inaccuracies in how we track these -- see
7323 * update_irq_load_avg().
7324 */
7325 irq = cpu_util_irq(rq);
7326 if (unlikely(irq >= max))
7327 return max;
7328
7329 /*
7330	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
7331 * CFS tasks and we use the same metric to track the effective
7332 * utilization (PELT windows are synchronized) we can directly add them
7333 * to obtain the CPU's actual utilization.
7334 *
7335 * CFS and RT utilization can be boosted or capped, depending on
7336 * utilization clamp constraints requested by currently RUNNABLE
7337 * tasks.
7338 * When there are no CFS RUNNABLE tasks, clamps are released and
7339 * frequency will be gracefully reduced with the utilization decay.
7340 */
7341 util = util_cfs + cpu_util_rt(rq);
7342 if (type == FREQUENCY_UTIL)
7343 util = uclamp_rq_util_with(rq, util, p);
7344
7345 dl_util = cpu_util_dl(rq);
7346
7347 /*
7348 * For frequency selection we do not make cpu_util_dl() a permanent part
7349 * of this sum because we want to use cpu_bw_dl() later on, but we need
7350	 * to check if the CFS+RT+DL sum is saturated (i.e. no idle time) such
7351 * that we select f_max when there is no idle time.
7352 *
7353 * NOTE: numerical errors or stop class might cause us to not quite hit
7354 * saturation when we should -- something for later.
7355 */
7356 if (util + dl_util >= max)
7357 return max;
7358
7359 /*
7360 * OTOH, for energy computation we need the estimated running time, so
7361 * include util_dl and ignore dl_bw.
7362 */
7363 if (type == ENERGY_UTIL)
7364 util += dl_util;
7365
7366 /*
7367 * There is still idle time; further improve the number by using the
7368 * irq metric. Because IRQ/steal time is hidden from the task clock we
7369 * need to scale the task numbers:
7370 *
7371	 *              max - irq
7372	 *   U' = irq + --------- * U
7373	 *                 max
7374 */
7375 util = scale_irq_capacity(util, irq, max);
7376 util += irq;
7377
7378 /*
7379 * Bandwidth required by DEADLINE must always be granted while, for
7380 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
7381 * to gracefully reduce the frequency when no tasks show up for longer
7382 * periods of time.
7383 *
7384 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
7385 * bw_dl as requested freq. However, cpufreq is not yet ready for such
7386 * an interface. So, we only do the latter for now.
7387 */
7388 if (type == FREQUENCY_UTIL)
7389 util += cpu_bw_dl(rq);
7390
7391 return min(max, util);
7392}
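/*
 * Worked example for the IRQ scaling above (illustrative numbers): with
 * max = 1024, irq = 256 and CFS+RT utilization U = 512:
 *
 *	U' = irq + U * (max - irq) / max
 *	   = 256 + 512 * 768 / 1024 = 256 + 384 = 640
 *
 * i.e. the task utilization is compressed into the 75% of capacity left
 * after IRQ/steal time, and the IRQ time itself is added back on top.
 */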
7393
7394unsigned long sched_cpu_util(int cpu)
7395{
7396 return effective_cpu_util(cpu, cpu_util_cfs(cpu), ENERGY_UTIL, NULL);
7397}
7398#endif /* CONFIG_SMP */
7399
7400/**
7401 * find_process_by_pid - find a process with a matching PID value.
7402 * @pid: the pid in question.
7403 *
7404 * The task of @pid, if found. %NULL otherwise.
7405 */
7406static struct task_struct *find_process_by_pid(pid_t pid)
7407{
7408 return pid ? find_task_by_vpid(pid) : current;
7409}
7410
7411/*
7412 * sched_setparam() passes in -1 for its policy, to let the functions
7413 * it calls know not to change it.
7414 */
7415#define SETPARAM_POLICY -1
7416
7417static void __setscheduler_params(struct task_struct *p,
7418 const struct sched_attr *attr)
7419{
7420 int policy = attr->sched_policy;
7421
7422 if (policy == SETPARAM_POLICY)
7423 policy = p->policy;
7424
7425 p->policy = policy;
7426
7427 if (dl_policy(policy))
7428 __setparam_dl(p, attr);
7429 else if (fair_policy(policy))
7430 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
7431
7432 /*
7433 * __sched_setscheduler() ensures attr->sched_priority == 0 when
7434 * !rt_policy. Always setting this ensures that things like
7435 * getparam()/getattr() don't report silly values for !rt tasks.
7436 */
7437 p->rt_priority = attr->sched_priority;
7438 p->normal_prio = normal_prio(p);
7439 set_load_weight(p, true);
7440}
7441
7442/*
7443 * Check the target process has a UID that matches the current process's:
7444 */
7445static bool check_same_owner(struct task_struct *p)
7446{
7447 const struct cred *cred = current_cred(), *pcred;
7448 bool match;
7449
7450 rcu_read_lock();
7451 pcred = __task_cred(p);
7452 match = (uid_eq(cred->euid, pcred->euid) ||
7453 uid_eq(cred->euid, pcred->uid));
7454 rcu_read_unlock();
7455 return match;
7456}
7457
7458/*
7459 * Allow unprivileged RT tasks to decrease priority.
7460 * Only issue a capable test if needed and only once to avoid an audit
7461 * event on permitted non-privileged operations:
7462 */
7463static int user_check_sched_setscheduler(struct task_struct *p,
7464 const struct sched_attr *attr,
7465 int policy, int reset_on_fork)
7466{
7467 if (fair_policy(policy)) {
7468 if (attr->sched_nice < task_nice(p) &&
7469 !is_nice_reduction(p, attr->sched_nice))
7470 goto req_priv;
7471 }
7472
7473 if (rt_policy(policy)) {
7474 unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
7475
7476 /* Can't set/change the rt policy: */
7477 if (policy != p->policy && !rlim_rtprio)
7478 goto req_priv;
7479
7480 /* Can't increase priority: */
7481 if (attr->sched_priority > p->rt_priority &&
7482 attr->sched_priority > rlim_rtprio)
7483 goto req_priv;
7484 }
7485
7486 /*
7487 * Can't set/change SCHED_DEADLINE policy at all for now
7488 * (safest behavior); in the future we would like to allow
7489 * unprivileged DL tasks to increase their relative deadline
7490 * or reduce their runtime (both ways reducing utilization)
7491 */
7492 if (dl_policy(policy))
7493 goto req_priv;
7494
7495 /*
7496 * Treat SCHED_IDLE as nice 20. Only allow a switch to
7497 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
7498 */
7499 if (task_has_idle_policy(p) && !idle_policy(policy)) {
7500 if (!is_nice_reduction(p, task_nice(p)))
7501 goto req_priv;
7502 }
7503
7504 /* Can't change other user's priorities: */
7505 if (!check_same_owner(p))
7506 goto req_priv;
7507
7508 /* Normal users shall not reset the sched_reset_on_fork flag: */
7509 if (p->sched_reset_on_fork && !reset_on_fork)
7510 goto req_priv;
7511
7512 return 0;
7513
7514req_priv:
7515 if (!capable(CAP_SYS_NICE))
7516 return -EPERM;
7517
7518 return 0;
7519}
7520
7521static int __sched_setscheduler(struct task_struct *p,
7522 const struct sched_attr *attr,
7523 bool user, bool pi)
7524{
7525 int oldpolicy = -1, policy = attr->sched_policy;
7526 int retval, oldprio, newprio, queued, running;
7527 const struct sched_class *prev_class;
7528 struct balance_callback *head;
7529 struct rq_flags rf;
7530 int reset_on_fork;
7531 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7532 struct rq *rq;
7533
7534 /* The pi code expects interrupts enabled */
7535 BUG_ON(pi && in_interrupt());
7536recheck:
7537 /* Double check policy once rq lock held: */
7538 if (policy < 0) {
7539 reset_on_fork = p->sched_reset_on_fork;
7540 policy = oldpolicy = p->policy;
7541 } else {
7542 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
7543
7544 if (!valid_policy(policy))
7545 return -EINVAL;
7546 }
7547
7548 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
7549 return -EINVAL;
7550
7551 /*
7552 * Valid priorities for SCHED_FIFO and SCHED_RR are
7553 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
7554 * SCHED_BATCH and SCHED_IDLE is 0.
7555 */
7556 if (attr->sched_priority > MAX_RT_PRIO-1)
7557 return -EINVAL;
7558 if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
7559 (rt_policy(policy) != (attr->sched_priority != 0)))
7560 return -EINVAL;
7561
7562 if (user) {
7563 retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
7564 if (retval)
7565 return retval;
7566
7567 if (attr->sched_flags & SCHED_FLAG_SUGOV)
7568 return -EINVAL;
7569
7570 retval = security_task_setscheduler(p);
7571 if (retval)
7572 return retval;
7573 }
7574
7575 /* Update task specific "requested" clamps */
7576 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
7577 retval = uclamp_validate(p, attr);
7578 if (retval)
7579 return retval;
7580 }
7581
7582 if (pi)
7583 cpuset_read_lock();
7584
7585 /*
7586 * Make sure no PI-waiters arrive (or leave) while we are
7587 * changing the priority of the task:
7588 *
7589 * To be able to change p->policy safely, the appropriate
7590 * runqueue lock must be held.
7591 */
7592 rq = task_rq_lock(p, &rf);
7593 update_rq_clock(rq);
7594
7595 /*
7596	 * Changing the policy of the stop threads is a very bad idea:
7597 */
7598 if (p == rq->stop) {
7599 retval = -EINVAL;
7600 goto unlock;
7601 }
7602
7603 /*
7604 * If not changing anything there's no need to proceed further,
7605 * but store a possible modification of reset_on_fork.
7606 */
7607 if (unlikely(policy == p->policy)) {
7608 if (fair_policy(policy) && attr->sched_nice != task_nice(p))
7609 goto change;
7610 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
7611 goto change;
7612 if (dl_policy(policy) && dl_param_changed(p, attr))
7613 goto change;
7614 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
7615 goto change;
7616
7617 p->sched_reset_on_fork = reset_on_fork;
7618 retval = 0;
7619 goto unlock;
7620 }
7621change:
7622
7623 if (user) {
7624#ifdef CONFIG_RT_GROUP_SCHED
7625 /*
7626 * Do not allow realtime tasks into groups that have no runtime
7627 * assigned.
7628 */
7629 if (rt_bandwidth_enabled() && rt_policy(policy) &&
7630 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
7631 !task_group_is_autogroup(task_group(p))) {
7632 retval = -EPERM;
7633 goto unlock;
7634 }
7635#endif
7636#ifdef CONFIG_SMP
7637 if (dl_bandwidth_enabled() && dl_policy(policy) &&
7638 !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
7639 cpumask_t *span = rq->rd->span;
7640
7641 /*
7642 * Don't allow tasks with an affinity mask smaller than
7643 * the entire root_domain to become SCHED_DEADLINE. We
7644 * will also fail if there's no bandwidth available.
7645 */
7646 if (!cpumask_subset(span, p->cpus_ptr) ||
7647 rq->rd->dl_bw.bw == 0) {
7648 retval = -EPERM;
7649 goto unlock;
7650 }
7651 }
7652#endif
7653 }
7654
7655 /* Re-check policy now with rq lock held: */
7656 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
7657 policy = oldpolicy = -1;
7658 task_rq_unlock(rq, p, &rf);
7659 if (pi)
7660 cpuset_read_unlock();
7661 goto recheck;
7662 }
7663
7664 /*
7665 * If setscheduling to SCHED_DEADLINE (or changing the parameters
7666 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
7667 * is available.
7668 */
7669 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
7670 retval = -EBUSY;
7671 goto unlock;
7672 }
7673
7674 p->sched_reset_on_fork = reset_on_fork;
7675 oldprio = p->prio;
7676
7677 newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
7678 if (pi) {
7679 /*
7680 * Take priority boosted tasks into account. If the new
7681 * effective priority is unchanged, we just store the new
7682 * normal parameters and do not touch the scheduler class and
7683		 * the runqueue. This will be done when the task deboosts
7684 * itself.
7685 */
7686 newprio = rt_effective_prio(p, newprio);
7687 if (newprio == oldprio)
7688 queue_flags &= ~DEQUEUE_MOVE;
7689 }
7690
7691 queued = task_on_rq_queued(p);
7692 running = task_current(rq, p);
7693 if (queued)
7694 dequeue_task(rq, p, queue_flags);
7695 if (running)
7696 put_prev_task(rq, p);
7697
7698 prev_class = p->sched_class;
7699
7700 if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
7701 __setscheduler_params(p, attr);
7702 __setscheduler_prio(p, newprio);
7703 }
7704 __setscheduler_uclamp(p, attr);
7705
7706 if (queued) {
7707 /*
7708 * We enqueue to tail when the priority of a task is
7709 * increased (user space view).
7710 */
7711 if (oldprio < p->prio)
7712 queue_flags |= ENQUEUE_HEAD;
7713
7714 enqueue_task(rq, p, queue_flags);
7715 }
7716 if (running)
7717 set_next_task(rq, p);
7718
7719 check_class_changed(rq, p, prev_class, oldprio);
7720
7721 /* Avoid rq from going away on us: */
7722 preempt_disable();
7723 head = splice_balance_callbacks(rq);
7724 task_rq_unlock(rq, p, &rf);
7725
7726 if (pi) {
7727 cpuset_read_unlock();
7728 rt_mutex_adjust_pi(p);
7729 }
7730
7731 /* Run balance callbacks after we've adjusted the PI chain: */
7732 balance_callbacks(rq, head);
7733 preempt_enable();
7734
7735 return 0;
7736
7737unlock:
7738 task_rq_unlock(rq, p, &rf);
7739 if (pi)
7740 cpuset_read_unlock();
7741 return retval;
7742}
7743
7744static int _sched_setscheduler(struct task_struct *p, int policy,
7745 const struct sched_param *param, bool check)
7746{
7747 struct sched_attr attr = {
7748 .sched_policy = policy,
7749 .sched_priority = param->sched_priority,
7750 .sched_nice = PRIO_TO_NICE(p->static_prio),
7751 };
7752
7753 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
7754 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
7755 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
7756 policy &= ~SCHED_RESET_ON_FORK;
7757 attr.sched_policy = policy;
7758 }
7759
7760 return __sched_setscheduler(p, &attr, check, true);
7761}
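
/*
 * Example (hypothetical user-space caller, not part of this file): the
 * legacy hack above lets sched_setscheduler(2) request reset-on-fork by
 * OR-ing the flag into the policy word:
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp);
 *
 * Children created afterwards start with SCHED_NORMAL rather than
 * inheriting the privileged policy.
 */
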
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Use sched_set_fifo(), read its comment.
 *
 * Return: 0 on success. An error code otherwise.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, true);
}

int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, true, true);
}

int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, false, true);
}
EXPORT_SYMBOL_GPL(sched_setattr_nocheck);

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 *
 * Return: 0 on success. An error code otherwise.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, false);
}

/*
 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
 * incapable of resource management, which is the one thing an OS really should
 * be doing.
 *
 * This is of course the reason it is limited to privileged users only.
 *
 * Worse still; it is fundamentally impossible to compose static priority
 * workloads. You cannot take two correctly working static prio workloads
 * and smash them together and still expect them to work.
 *
 * For this reason 'all' FIFO tasks the kernel creates are basically at:
 *
 *   MAX_RT_PRIO / 2
 *
 * The administrator _MUST_ configure the system, the kernel simply doesn't
 * know enough information to make a sensible choice.
 */
void sched_set_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo);
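
/*
 * Example (a minimal sketch; my_irq_thread() and its wakeup are assumptions,
 * not part of this file): kernel code that needs an RT worker should use the
 * helpers here rather than inventing its own priority:
 *
 *	struct task_struct *tsk = kthread_create(my_irq_thread, NULL, "myirq");
 *
 *	if (!IS_ERR(tsk)) {
 *		sched_set_fifo(tsk);
 *		wake_up_process(tsk);
 *	}
 */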

/*
 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
 */
void sched_set_fifo_low(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = 1 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo_low);

void sched_set_normal(struct task_struct *p, int nice)
{
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
		.sched_nice = nice,
	};
	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_normal);

static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (likely(p))
		get_task_struct(p);
	rcu_read_unlock();

	if (likely(p)) {
		retval = sched_setscheduler(p, policy, &lparam);
		put_task_struct(p);
	}

	return retval;
}

/*
 * Mimics kernel/events/core.c perf_copy_attr().
 */
static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
{
	u32 size;
	int ret;

	/* Zero the full structure, so that a short copy will be nice: */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	/* ABI compatibility quirk: */
	if (!size)
		size = SCHED_ATTR_SIZE_VER0;
	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
		goto err_size;

	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
	if (ret) {
		if (ret == -E2BIG)
			goto err_size;
		return ret;
	}

	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
	    size < SCHED_ATTR_SIZE_VER1)
		return -EINVAL;

	/*
	 * XXX: Do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);

	return 0;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	return -E2BIG;
}
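
/*
 * Example (hypothetical user-space caller, not part of this file): the size
 * handshake above is what makes sched_attr extensible. A caller fills in the
 * size it was compiled against and only the fields it knows:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_NORMAL,
 *		.sched_nice	= 5,
 *	};
 *
 *	syscall(SYS_sched_setattr, 0, &attr, 0);
 *
 * On -E2BIG the kernel writes its own sizeof(struct sched_attr) back into
 * attr.size, so the caller can tell it set fields this kernel predates.
 */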

static void get_params(struct task_struct *p, struct sched_attr *attr)
{
	if (task_has_dl_policy(p))
		__getparam_dl(p, attr);
	else if (task_has_rt_policy(p))
		attr->sched_priority = p->rt_priority;
	else
		attr->sched_nice = task_nice(p);
}

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
{
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
}

/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @flags: for future extension.
 */
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
			       unsigned int, flags)
{
	struct sched_attr attr;
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || flags)
		return -EINVAL;

	retval = sched_copy_attr(uattr, &attr);
	if (retval)
		return retval;

	if ((int)attr.sched_policy < 0)
		return -EINVAL;
	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
		attr.sched_policy = SETPARAM_POLICY;

	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (likely(p))
		get_task_struct(p);
	rcu_read_unlock();

	if (likely(p)) {
		if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
			get_params(p, &attr);
		retval = sched_setattr(p, &attr);
		put_task_struct(p);
	}

	return retval;
}

/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (p) {
		retval = security_task_getscheduler(p);
		if (!retval)
			retval = p->policy
				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
	}
	rcu_read_unlock();
	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp = { .sched_priority = 0 };
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	if (task_has_rt_policy(p))
		lp.sched_priority = p->rt_priority;
	rcu_read_unlock();

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}

/*
 * Copy the kernel-sized attribute structure (which might be larger
 * than what user-space knows about) to user-space.
 *
 * Note that all cases are valid: user-space buffer can be larger or
 * smaller than the kernel-space buffer. The usual case is that both
 * have the same size.
 */
static int
sched_attr_copy_to_user(struct sched_attr __user *uattr,
			struct sched_attr *kattr,
			unsigned int usize)
{
	unsigned int ksize = sizeof(*kattr);

	if (!access_ok(uattr, usize))
		return -EFAULT;

	/*
	 * sched_getattr() ABI forwards and backwards compatibility:
	 *
	 * If usize == ksize then we just copy everything to user-space and all is good.
	 *
	 * If usize < ksize then we only copy as much as user-space has space for,
	 * this keeps ABI compatibility as well. We skip the rest.
	 *
	 * If usize > ksize then user-space is using a newer version of the ABI,
	 * which part the kernel doesn't know about. Just ignore it - tooling can
	 * detect the kernel's knowledge of attributes from the attr->size value
	 * which is set to ksize in this case.
	 */
	kattr->size = min(usize, ksize);

	if (copy_to_user(uattr, kattr, kattr->size))
		return -EFAULT;

	return 0;
}
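
/*
 * Worked example of the rules above (sizes are illustrative): an old binary
 * built against SCHED_ATTR_SIZE_VER0 passes usize == 48 to a newer kernel;
 * only the first 48 bytes are copied out and attr->size is set to 48. A new
 * binary on an old kernel passes a larger usize; the kernel copies its ksize
 * bytes and reports that smaller value in attr->size, so tooling can tell
 * which attributes the kernel actually filled in.
 */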

/**
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @usize: sizeof(attr) for fwd/bwd comp.
 * @flags: for future extension.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
		unsigned int, usize, unsigned int, flags)
{
	struct sched_attr kattr = { };
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
	    usize < SCHED_ATTR_SIZE_VER0 || flags)
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	kattr.sched_policy = p->policy;
	if (p->sched_reset_on_fork)
		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
	get_params(p, &kattr);
	kattr.sched_flags &= SCHED_FLAG_ALL;

#ifdef CONFIG_UCLAMP_TASK
	/*
	 * This could race with another potential updater, but this is fine
	 * because it'll correctly read the old or the new value. We don't need
	 * to guarantee who wins the race as long as it doesn't return garbage.
	 */
	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif

	rcu_read_unlock();

	return sched_attr_copy_to_user(uattr, &kattr, usize);

out_unlock:
	rcu_read_unlock();
	return retval;
}

#ifdef CONFIG_SMP
int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
	int ret = 0;

	/*
	 * If the task isn't a deadline task or admission control is
	 * disabled then we don't care about affinity changes.
	 */
	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
		return 0;

	/*
	 * Since bandwidth control happens on root_domain basis,
	 * if admission test is enabled, we only admit -deadline
	 * tasks allowed to run on all the CPUs in the task's
	 * root_domain.
	 */
	rcu_read_lock();
	if (!cpumask_subset(task_rq(p)->rd->span, mask))
		ret = -EBUSY;
	rcu_read_unlock();
	return ret;
}
#endif

static int
__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
{
	int retval;
	cpumask_var_t cpus_allowed, new_mask;

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
		return -ENOMEM;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);

	ctx->new_mask = new_mask;
	ctx->flags |= SCA_CHECK;

	retval = dl_task_check_affinity(p, new_mask);
	if (retval)
		goto out_free_new_mask;

	retval = __set_cpus_allowed_ptr(p, ctx);
	if (retval)
		goto out_free_new_mask;

	cpuset_cpus_allowed(p, cpus_allowed);
	if (!cpumask_subset(new_mask, cpus_allowed)) {
		/*
		 * We must have raced with a concurrent cpuset update.
		 * Just reset the cpumask to the cpuset's cpus_allowed.
		 */
		cpumask_copy(new_mask, cpus_allowed);

		/*
		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
		 * will restore the previous user_cpus_ptr value.
		 *
		 * In the unlikely event a previous user_cpus_ptr exists,
		 * we need to further restrict the mask to what is allowed
		 * by that old user_cpus_ptr.
		 */
		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
			bool empty = !cpumask_and(new_mask, new_mask,
						  ctx->user_mask);

			if (WARN_ON_ONCE(empty))
				cpumask_copy(new_mask, cpus_allowed);
		}
		__set_cpus_allowed_ptr(p, ctx);
		retval = -EINVAL;
	}

out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
	return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	struct affinity_context ac;
	struct cpumask *user_mask;
	struct task_struct *p;
	int retval;

	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (p->flags & PF_NO_SETAFFINITY) {
		retval = -EINVAL;
		goto out_put_task;
	}

	if (!check_same_owner(p)) {
		rcu_read_lock();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
			rcu_read_unlock();
			retval = -EPERM;
			goto out_put_task;
		}
		rcu_read_unlock();
	}

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_put_task;

	/*
	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
	 * alloc_user_cpus_ptr() returns NULL.
	 */
	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
	if (user_mask) {
		cpumask_copy(user_mask, in_mask);
	} else if (IS_ENABLED(CONFIG_SMP)) {
		retval = -ENOMEM;
		goto out_put_task;
	}

	ac = (struct affinity_context){
		.new_mask  = in_mask,
		.user_mask = user_mask,
		.flags     = SCA_USER,
	};

	retval = __sched_setaffinity(p, &ac);
	kfree(ac.user_mask);

out_put_task:
	put_task_struct(p);
	return retval;
}

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new CPU mask
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}
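
/*
 * Example (hypothetical user-space caller, not part of this file): pinning
 * the calling thread to CPU 2 through the syscall serviced above:
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	sched_setaffinity(0, sizeof(set), &set);
 */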

long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	unsigned long flags;
	int retval;

	rcu_read_lock();

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

out_unlock:
	rcu_read_unlock();

	return retval;
}

/**
 * sys_sched_getaffinity - get the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current CPU mask
 *
 * Return: size of CPU mask copied to user_mask_ptr on success. An
 * error code otherwise.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		unsigned int retlen = min(len, cpumask_size());

		if (copy_to_user(user_mask_ptr, mask, retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}
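
/*
 * Example (hypothetical user-space caller, not part of this file): note the
 * unusual return convention of the raw syscall above - it returns the number
 * of bytes copied, not 0:
 *
 *	cpu_set_t set;
 *	long n = syscall(SYS_sched_getaffinity, 0, sizeof(set), &set);
 *
 * A return of, say, 8 means 8 bytes (64 CPU bits) were filled in; the glibc
 * wrapper hides this and returns 0 on success.
 */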

static void do_sched_yield(void)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = this_rq_lock_irq(&rf);

	schedstat_inc(rq->yld_count);
	current->sched_class->yield_task(rq);

	preempt_disable();
	rq_unlock_irq(rq, &rf);
	sched_preempt_enable_no_resched();

	schedule();
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
	do_sched_yield();
	return 0;
}

#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
int __sched __cond_resched(void)
{
	if (should_resched(0)) {
		preempt_schedule_common();
		return 1;
	}
	/*
	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
	 * whether the current CPU is in an RCU read-side critical section,
	 * so the tick can report quiescent states even for CPUs looping
	 * in kernel context. In contrast, in non-preemptible kernels,
	 * RCU readers leave no in-memory hints, which means that CPU-bound
	 * processes executing in kernel context might never report an
	 * RCU quiescent state. Therefore, the following code causes
	 * cond_resched() to report a quiescent state, but only when RCU
	 * is in urgent need of one.
	 */
#ifndef CONFIG_PREEMPT_RCU
	rcu_all_qs();
#endif
	return 0;
}
EXPORT_SYMBOL(__cond_resched);
#endif

#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define cond_resched_dynamic_enabled	__cond_resched
#define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(cond_resched);

#define might_resched_dynamic_enabled	__cond_resched
#define might_resched_dynamic_disabled	((void *)&__static_call_return0)
DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(might_resched);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
int __sched dynamic_cond_resched(void)
{
	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
		return 0;
	return __cond_resched();
}
EXPORT_SYMBOL(dynamic_cond_resched);

static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
int __sched dynamic_might_resched(void)
{
	if (!static_branch_unlikely(&sk_dynamic_might_resched))
		return 0;
	return __cond_resched();
}
EXPORT_SYMBOL(dynamic_might_resched);
#endif
#endif

/*
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
int __cond_resched_lock(spinlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held(lock);

	if (spin_needbreak(lock) || resched) {
		spin_unlock(lock);
		if (!_cond_resched())
			cpu_relax();
		ret = 1;
		spin_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);
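
/*
 * Example (a minimal sketch; my_lock, more_work() and do_one_chunk() are
 * assumptions, not part of this file): a long scan under a spinlock can stay
 * latency-friendly by calling cond_resched_lock(), which lands in the helper
 * above. The lock may be dropped and retaken, so any cached state must be
 * revalidated afterwards:
 *
 *	spin_lock(&my_lock);
 *	while (more_work()) {
 *		do_one_chunk();
 *		cond_resched_lock(&my_lock);
 *	}
 *	spin_unlock(&my_lock);
 */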

int __cond_resched_rwlock_read(rwlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held_read(lock);

	if (rwlock_needbreak(lock) || resched) {
		read_unlock(lock);
		if (!_cond_resched())
			cpu_relax();
		ret = 1;
		read_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_rwlock_read);

int __cond_resched_rwlock_write(rwlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held_write(lock);

	if (rwlock_needbreak(lock) || resched) {
		write_unlock(lock);
		if (!_cond_resched())
			cpu_relax();
		ret = 1;
		write_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_rwlock_write);

#ifdef CONFIG_PREEMPT_DYNAMIC

#ifdef CONFIG_GENERIC_ENTRY
#include <linux/entry-common.h>
#endif

/*
 * SC:cond_resched
 * SC:might_resched
 * SC:preempt_schedule
 * SC:preempt_schedule_notrace
 * SC:irqentry_exit_cond_resched
 *
 *
 * NONE:
 *   cond_resched               <- __cond_resched
 *   might_resched              <- RET0
 *   preempt_schedule           <- NOP
 *   preempt_schedule_notrace   <- NOP
 *   irqentry_exit_cond_resched <- NOP
 *
 * VOLUNTARY:
 *   cond_resched               <- __cond_resched
 *   might_resched              <- __cond_resched
 *   preempt_schedule           <- NOP
 *   preempt_schedule_notrace   <- NOP
 *   irqentry_exit_cond_resched <- NOP
 *
 * FULL:
 *   cond_resched               <- RET0
 *   might_resched              <- RET0
 *   preempt_schedule           <- preempt_schedule
 *   preempt_schedule_notrace   <- preempt_schedule_notrace
 *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
 */

enum {
	preempt_dynamic_undefined = -1,
	preempt_dynamic_none,
	preempt_dynamic_voluntary,
	preempt_dynamic_full,
};

int preempt_dynamic_mode = preempt_dynamic_undefined;

int sched_dynamic_mode(const char *str)
{
	if (!strcmp(str, "none"))
		return preempt_dynamic_none;

	if (!strcmp(str, "voluntary"))
		return preempt_dynamic_voluntary;

	if (!strcmp(str, "full"))
		return preempt_dynamic_full;

	return -EINVAL;
}

#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
#else
#error "Unsupported PREEMPT_DYNAMIC mechanism"
#endif

void sched_dynamic_update(int mode)
{
	/*
	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
	 * the ZERO state, which is invalid.
	 */
	preempt_dynamic_enable(cond_resched);
	preempt_dynamic_enable(might_resched);
	preempt_dynamic_enable(preempt_schedule);
	preempt_dynamic_enable(preempt_schedule_notrace);
	preempt_dynamic_enable(irqentry_exit_cond_resched);

	switch (mode) {
	case preempt_dynamic_none:
		preempt_dynamic_enable(cond_resched);
		preempt_dynamic_disable(might_resched);
		preempt_dynamic_disable(preempt_schedule);
		preempt_dynamic_disable(preempt_schedule_notrace);
		preempt_dynamic_disable(irqentry_exit_cond_resched);
		pr_info("Dynamic Preempt: none\n");
		break;

	case preempt_dynamic_voluntary:
		preempt_dynamic_enable(cond_resched);
		preempt_dynamic_enable(might_resched);
		preempt_dynamic_disable(preempt_schedule);
		preempt_dynamic_disable(preempt_schedule_notrace);
		preempt_dynamic_disable(irqentry_exit_cond_resched);
		pr_info("Dynamic Preempt: voluntary\n");
		break;

	case preempt_dynamic_full:
		preempt_dynamic_disable(cond_resched);
		preempt_dynamic_disable(might_resched);
		preempt_dynamic_enable(preempt_schedule);
		preempt_dynamic_enable(preempt_schedule_notrace);
		preempt_dynamic_enable(irqentry_exit_cond_resched);
		pr_info("Dynamic Preempt: full\n");
		break;
	}

	preempt_dynamic_mode = mode;
}

static int __init setup_preempt_mode(char *str)
{
	int mode = sched_dynamic_mode(str);
	if (mode < 0) {
		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
		return 0;
	}

	sched_dynamic_update(mode);
	return 1;
}
__setup("preempt=", setup_preempt_mode);
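
/*
 * Example of exercising the knob above: the preemption model can be chosen
 * on the kernel command line, e.g.:
 *
 *	preempt=none
 *	preempt=voluntary
 *	preempt=full
 *
 * and (with CONFIG_SCHED_DEBUG) switched at runtime via debugfs:
 *
 *	echo full > /sys/kernel/debug/sched/preempt
 */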

static void __init preempt_dynamic_init(void)
{
	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
			sched_dynamic_update(preempt_dynamic_none);
		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
			sched_dynamic_update(preempt_dynamic_voluntary);
		} else {
			/* Default static call setting, nothing to do */
			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
			preempt_dynamic_mode = preempt_dynamic_full;
			pr_info("Dynamic Preempt: full\n");
		}
	}
}

#define PREEMPT_MODEL_ACCESSOR(mode) \
	bool preempt_model_##mode(void)						 \
	{									 \
		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
	}									 \
	EXPORT_SYMBOL_GPL(preempt_model_##mode)

PREEMPT_MODEL_ACCESSOR(none);
PREEMPT_MODEL_ACCESSOR(voluntary);
PREEMPT_MODEL_ACCESSOR(full);

#else /* !CONFIG_PREEMPT_DYNAMIC */

static inline void preempt_dynamic_init(void) { }

#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */

/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *	yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	do_sched_yield();
}
EXPORT_SYMBOL(yield);
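
/*
 * Example (a minimal sketch; "event" and my_wq are assumptions, not part of
 * this file): the wait_event() pattern recommended above, instead of a
 * yield() loop:
 *
 *	Waiter:
 *		wait_event(my_wq, event);
 *
 *	Waker:
 *		event = true;
 *		wake_up(&my_wq);
 */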

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
int __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	unsigned long flags;
	int yielded = 0;

	local_irq_save(flags);
	rq = this_rq();

again:
	p_rq = task_rq(p);
	/*
	 * If we're the only runnable task on the rq and target rq also
	 * has only one task, there's absolutely no point in yielding.
	 */
	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
		yielded = -ESRCH;
		goto out_irq;
	}

	double_rq_lock(rq, p_rq);
	if (task_rq(p) != p_rq) {
		double_rq_unlock(rq, p_rq);
		goto again;
	}

	if (!curr->sched_class->yield_to_task)
		goto out_unlock;

	if (curr->sched_class != p->sched_class)
		goto out_unlock;

	if (task_on_cpu(p_rq, p) || !task_is_running(p))
		goto out_unlock;

	yielded = curr->sched_class->yield_to_task(rq, p);
	if (yielded) {
		schedstat_inc(rq->yld_count);
		/*
		 * Make p's CPU reschedule; pick_next_entity takes care of
		 * fairness.
		 */
		if (preempt && rq != p_rq)
			resched_curr(p_rq);
	}

out_unlock:
	double_rq_unlock(rq, p_rq);
out_irq:
	local_irq_restore(flags);

	if (yielded > 0)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);

int io_schedule_prepare(void)
{
	int old_iowait = current->in_iowait;

	current->in_iowait = 1;
	blk_flush_plug(current->plug, true);
	return old_iowait;
}

void io_schedule_finish(int token)
{
	current->in_iowait = token;
}
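
/*
 * Example (a minimal sketch; my_io_mutex is an assumption, not part of this
 * file): code that sleeps on I/O indirectly can still be accounted as iowait
 * by bracketing the sleep with the helpers above - this is essentially what
 * mutex_lock_io() does:
 *
 *	int token = io_schedule_prepare();
 *
 *	mutex_lock(&my_io_mutex);
 *	io_schedule_finish(token);
 */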

/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 */
long __sched io_schedule_timeout(long timeout)
{
	int token;
	long ret;

	token = io_schedule_prepare();
	ret = schedule_timeout(timeout);
	io_schedule_finish(token);

	return ret;
}
EXPORT_SYMBOL(io_schedule_timeout);

void __sched io_schedule(void)
{
	int token;

	token = io_schedule_prepare();
	schedule();
	io_schedule_finish(token);
}
EXPORT_SYMBOL(io_schedule);

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_RT_PRIO-1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
	}
	return ret;
}

static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
{
	struct task_struct *p;
	unsigned int time_slice;
	struct rq_flags rf;
	struct rq *rq;
	int retval;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	rq = task_rq_lock(p, &rf);
	time_slice = 0;
	if (p->sched_class->get_rr_interval)
		time_slice = p->sched_class->get_rr_interval(rq, p);
	task_rq_unlock(rq, p, &rf);

	rcu_read_unlock();
	jiffies_to_timespec64(time_slice, t);
	return 0;

out_unlock:
	rcu_read_unlock();
	return retval;
}

/**
 * sys_sched_rr_get_interval - return the default timeslice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the timeslice value.
 *
 * this syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 *
 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
 * an error code.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct __kernel_timespec __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_timespec64(&t, interval);

	return retval;
}
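
/*
 * Example (hypothetical user-space caller, not part of this file): querying
 * the calling thread's timeslice via the syscall above:
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
 *
 * A reported value of 0 means the task has no fixed timeslice (infinity).
 */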

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
		struct old_timespec32 __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_old_timespec32(&t, interval);
	return retval;
}
#endif

void sched_show_task(struct task_struct *p)
{
	unsigned long free = 0;
	int ppid;

	if (!try_get_task_stack(p))
		return;

	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));

	if (task_is_running(p))
		pr_cont(" running task ");
#ifdef CONFIG_DEBUG_STACK_USAGE
	free = stack_not_used(p);
#endif
	ppid = 0;
	rcu_read_lock();
	if (pid_alive(p))
		ppid = task_pid_nr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n",
		free, task_pid_nr(p), ppid,
		read_task_thread_flags(p));

	print_worker_info(KERN_INFO, p);
	print_stop_info(KERN_INFO, p);
	show_stack(p, NULL, KERN_INFO);
	put_task_stack(p);
}
EXPORT_SYMBOL_GPL(sched_show_task);

static inline bool
state_filter_match(unsigned long state_filter, struct task_struct *p)
{
	unsigned int state = READ_ONCE(p->__state);

	/* no filter, everything matches */
	if (!state_filter)
		return true;

	/* filter, but doesn't match */
	if (!(state & state_filter))
		return false;

	/*
	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
	 * TASK_KILLABLE).
	 */
	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
		return false;

	return true;
}


void show_state_filter(unsigned int state_filter)
{
	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take a lot of time:
		 * Also, reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		if (state_filter_match(state_filter, p))
			sched_show_task(p);
	}

#ifdef CONFIG_SCHED_DEBUG
	if (!state_filter)
		sysrq_sched_debug_show();
#endif
	rcu_read_unlock();
	/*
	 * Only show locks if all tasks are dumped:
	 */
	if (!state_filter)
		debug_show_all_locks();
}

/**
 * init_idle - set up an idle thread for a given CPU
 * @idle: task in question
 * @cpu: CPU the idle task belongs to
 *
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
void __init init_idle(struct task_struct *idle, int cpu)
{
#ifdef CONFIG_SMP
	struct affinity_context ac = (struct affinity_context) {
		.new_mask  = cpumask_of(cpu),
		.flags     = 0,
	};
#endif
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	__sched_fork(0, idle);

	raw_spin_lock_irqsave(&idle->pi_lock, flags);
	raw_spin_rq_lock(rq);

	idle->__state = TASK_RUNNING;
	idle->se.exec_start = sched_clock();
	/*
	 * PF_KTHREAD should already be set at this point; regardless, make it
	 * look like a proper per-CPU kthread.
	 */
	idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
	kthread_set_per_cpu(idle, cpu);

#ifdef CONFIG_SMP
	/*
	 * It's possible that init_idle() gets called multiple times on a task,
	 * in that case do_set_cpus_allowed() will not do the right thing.
	 *
	 * And since this is boot we can forgo the serialization.
	 */
	set_cpus_allowed_common(idle, &ac);
#endif
	/*
	 * We're having a chicken and egg problem, even though we are
	 * holding rq->lock, the CPU isn't yet set to this CPU so the
	 * lockdep check in task_group() will fail.
	 *
	 * Similar case to sched_fork(). / Alternatively we could
	 * use task_rq_lock() here and obtain the other rq->lock.
	 *
	 * Silence PROVE_RCU
	 */
	rcu_read_lock();
	__set_task_cpu(idle, cpu);
	rcu_read_unlock();

	rq->idle = idle;
	rcu_assign_pointer(rq->curr, idle);
	idle->on_rq = TASK_ON_RQ_QUEUED;
#ifdef CONFIG_SMP
	idle->on_cpu = 1;
#endif
	raw_spin_rq_unlock(rq);
	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);

	/* Set the preempt count _outside_ the spinlocks! */
	init_idle_preempt_count(idle, cpu);

	/*
	 * The idle tasks have their own, simple scheduling class:
	 */
	idle->sched_class = &idle_sched_class;
	ftrace_graph_init_idle_task(idle, cpu);
	vtime_init_idle(idle, cpu);
#ifdef CONFIG_SMP
	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
#endif
}

#ifdef CONFIG_SMP

int cpuset_cpumask_can_shrink(const struct cpumask *cur,
			      const struct cpumask *trial)
{
	int ret = 1;

	if (cpumask_empty(cur))
		return ret;

	ret = dl_cpuset_cpumask_can_shrink(cur, trial);

	return ret;
}

int task_can_attach(struct task_struct *p,
		    const struct cpumask *cs_effective_cpus)
{
	int ret = 0;

	/*
	 * Kthreads which disallow setaffinity shouldn't be moved
	 * to a new cpuset; we don't want to change their CPU
	 * affinity and isolating such threads by their set of
	 * allowed nodes is unnecessary. Thus, cpusets are not
	 * applicable for such threads. This prevents checking for
	 * success of set_cpus_allowed_ptr() on all attached tasks
	 * before cpus_mask may be changed.
	 */
	if (p->flags & PF_NO_SETAFFINITY) {
		ret = -EINVAL;
		goto out;
	}

	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
					      cs_effective_cpus)) {
		int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus);

		if (unlikely(cpu >= nr_cpu_ids))
			return -EINVAL;
		ret = dl_cpu_busy(cpu, p);
	}

out:
	return ret;
}

bool sched_smp_initialized __read_mostly;

#ifdef CONFIG_NUMA_BALANCING
/* Migrate current task p to target_cpu */
int migrate_task_to(struct task_struct *p, int target_cpu)
{
	struct migration_arg arg = { p, target_cpu };
	int curr_cpu = task_cpu(p);

	if (curr_cpu == target_cpu)
		return 0;

	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
		return -EINVAL;

	/* TODO: This is not properly updating schedstats */

	trace_sched_move_numa(p, curr_cpu, target_cpu);
	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
}

/*
 * Requeue a task on a given node and accurately track the number of NUMA
 * tasks on the runqueues
 */
void sched_setnuma(struct task_struct *p, int nid)
{
	bool queued, running;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE);
	if (running)
		put_prev_task(rq, p);

	p->numa_preferred_nid = nid;

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);
	task_rq_unlock(rq, p, &rf);
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Ensure that the idle task is using init_mm right before its CPU goes
 * offline.
 */
void idle_task_exit(void)
{
	struct mm_struct *mm = current->active_mm;

	BUG_ON(cpu_online(smp_processor_id()));
	BUG_ON(current != this_rq()->idle);

	if (mm != &init_mm) {
		switch_mm(mm, &init_mm, current);
		finish_arch_post_lock_switch();
	}

	/* finish_cpu(), as run on the BP, will clean up the active_mm state */
}

static int __balance_push_cpu_stop(void *arg)
{
	struct task_struct *p = arg;
	struct rq *rq = this_rq();
	struct rq_flags rf;
	int cpu;

	raw_spin_lock_irq(&p->pi_lock);
	rq_lock(rq, &rf);

	update_rq_clock(rq);

	if (task_rq(p) == rq && task_on_rq_queued(p)) {
		cpu = select_fallback_rq(rq->cpu, p);
		rq = __migrate_task(rq, &rf, p, cpu);
	}

	rq_unlock(rq, &rf);
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	return 0;
}

static DEFINE_PER_CPU(struct cpu_stop_work, push_work);

/*
 * Ensure we only run per-cpu kthreads once the CPU goes !active.
 *
 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
 * effective when the hotplug motion is down.
 */
static void balance_push(struct rq *rq)
{
	struct task_struct *push_task = rq->curr;

	lockdep_assert_rq_held(rq);

	/*
	 * Ensure the thing is persistent until balance_push_set(.on = false);
	 */
	rq->balance_callback = &balance_push_callback;

	/*
	 * Only active while going offline and when invoked on the outgoing
	 * CPU.
	 */
	if (!cpu_dying(rq->cpu) || rq != this_rq())
		return;

	/*
	 * Both the cpu-hotplug and stop task fall into this category and are
	 * required to complete the hotplug process.
	 */
	if (kthread_is_per_cpu(push_task) ||
	    is_migration_disabled(push_task)) {

		/*
		 * If this is the idle task on the outgoing CPU try to wake
		 * up the hotplug control thread which might wait for the
		 * last task to vanish. The rcuwait_active() check is
		 * accurate here because the waiter is pinned on this CPU
		 * and can't obviously be running in parallel.
		 *
		 * On RT kernels this also has to check whether there are
		 * pinned and scheduled out tasks on the runqueue. They
		 * need to leave the migrate disabled section first.
		 */
		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
		    rcuwait_active(&rq->hotplug_wait)) {
			raw_spin_rq_unlock(rq);
			rcuwait_wake_up(&rq->hotplug_wait);
			raw_spin_rq_lock(rq);
		}
		return;
	}

	get_task_struct(push_task);
	/*
	 * Temporarily drop rq->lock such that we can wake-up the stop task.
	 * Both preemption and IRQs are still disabled.
	 */
	raw_spin_rq_unlock(rq);
	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
			    this_cpu_ptr(&push_work));
	/*
	 * At this point need_resched() is true and we'll take the loop in
	 * schedule(). The next pick is obviously going to be the stop task
	 * which kthread_is_per_cpu() and will push this task away.
	 */
	raw_spin_rq_lock(rq);
}

static void balance_push_set(int cpu, bool on)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);
	if (on) {
		WARN_ON_ONCE(rq->balance_callback);
		rq->balance_callback = &balance_push_callback;
	} else if (rq->balance_callback == &balance_push_callback) {
		rq->balance_callback = NULL;
	}
	rq_unlock_irqrestore(rq, &rf);
}

/*
 * Invoked from a CPU's hotplug control thread after the CPU has been marked
 * inactive. All tasks which are not per CPU kernel threads are either
 * pushed off this CPU now via balance_push() or placed on a different CPU
 * during wakeup. Wait until the CPU is quiescent.
 */
static void balance_hotplug_wait(void)
{
	struct rq *rq = this_rq();

	rcuwait_wait_event(&rq->hotplug_wait,
			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
			   TASK_UNINTERRUPTIBLE);
}

#else

static inline void balance_push(struct rq *rq)
{
}

static inline void balance_push_set(int cpu, bool on)
{
}

static inline void balance_hotplug_wait(void)
{
}

#endif /* CONFIG_HOTPLUG_CPU */

void set_rq_online(struct rq *rq)
{
	if (!rq->online) {
		const struct sched_class *class;

		cpumask_set_cpu(rq->cpu, rq->rd->online);
		rq->online = 1;

		for_each_class(class) {
			if (class->rq_online)
				class->rq_online(rq);
		}
	}
}

void set_rq_offline(struct rq *rq)
{
	if (rq->online) {
		const struct sched_class *class;

		for_each_class(class) {
			if (class->rq_offline)
				class->rq_offline(rq);
		}

		cpumask_clear_cpu(rq->cpu, rq->rd->online);
		rq->online = 0;
	}
}

/*
 * used to mark begin/end of suspend/resume:
 */
static int num_cpus_frozen;

/*
 * Update cpusets according to cpu_active mask. If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 *
 * If we come here as part of a suspend/resume, don't touch cpusets because we
 * want to restore it back to its original state upon resume anyway.
 */
static void cpuset_cpu_active(void)
{
	if (cpuhp_tasks_frozen) {
		/*
		 * num_cpus_frozen tracks how many CPUs are involved in suspend
		 * resume sequence. As long as this is not the last online
		 * operation in the resume sequence, just build a single sched
		 * domain, ignoring cpusets.
		 */
		partition_sched_domains(1, NULL, NULL);
		if (--num_cpus_frozen)
			return;
		/*
		 * This is the last CPU online operation. So fall through and
		 * restore the original sched domains by considering the
		 * cpuset configurations.
		 */
		cpuset_force_rebuild();
	}
	cpuset_update_active_cpus();
}

static int cpuset_cpu_inactive(unsigned int cpu)
{
	if (!cpuhp_tasks_frozen) {
		int ret = dl_cpu_busy(cpu, NULL);

		if (ret)
			return ret;
		cpuset_update_active_cpus();
	} else {
		num_cpus_frozen++;
		partition_sched_domains(1, NULL, NULL);
	}
	return 0;
}

int sched_cpu_activate(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	/*
	 * Clear the balance_push callback and prepare to schedule
	 * regular tasks.
	 */
	balance_push_set(cpu, false);

#ifdef CONFIG_SCHED_SMT
	/*
	 * When going up, increment the number of cores with SMT present.
	 */
	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
		static_branch_inc_cpuslocked(&sched_smt_present);
#endif
	set_cpu_active(cpu, true);

	if (sched_smp_initialized) {
		sched_update_numa(cpu, true);
		sched_domains_numa_masks_set(cpu);
		cpuset_cpu_active();
	}

	/*
	 * Put the rq online, if not already. This happens:
	 *
	 * 1) In the early boot process, because we build the real domains
	 *    after all CPUs have been brought up.
	 *
	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
	 *    domains.
	 */
	rq_lock_irqsave(rq, &rf);
	if (rq->rd) {
		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
		set_rq_online(rq);
	}
	rq_unlock_irqrestore(rq, &rf);

	return 0;
}

int sched_cpu_deactivate(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;
	int ret;

	/*
	 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
	 * load balancing when not active.
	 */
	nohz_balance_exit_idle(rq);

	set_cpu_active(cpu, false);

	/*
	 * From this point forward, this CPU will refuse to run any task that
	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
	 * push those tasks away until this gets cleared, see
	 * sched_cpu_dying().
	 */
	balance_push_set(cpu, true);

	/*
	 * We've cleared cpu_active_mask / set balance_push, wait for all
	 * preempt-disabled and RCU users of this state to go away such that
	 * all new such users will observe it.
	 *
	 * Specifically, we rely on ttwu to no longer target this CPU, see
	 * ttwu_queue_cond() and is_cpu_allowed().
	 *
	 * Do the sync before parking smpboot threads to take care of the
	 * RCU boost case.
	 */
	synchronize_rcu();

	rq_lock_irqsave(rq, &rf);
	if (rq->rd) {
		update_rq_clock(rq);
		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
		set_rq_offline(rq);
	}
	rq_unlock_irqrestore(rq, &rf);

#ifdef CONFIG_SCHED_SMT
	/*
	 * When going down, decrement the number of cores with SMT present.
	 */
	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
		static_branch_dec_cpuslocked(&sched_smt_present);

	sched_core_cpu_deactivate(cpu);
#endif

	if (!sched_smp_initialized)
		return 0;

	sched_update_numa(cpu, false);
	ret = cpuset_cpu_inactive(cpu);
	if (ret) {
		balance_push_set(cpu, false);
		set_cpu_active(cpu, true);
		sched_update_numa(cpu, true);
		return ret;
	}
	sched_domains_numa_masks_clear(cpu);
	return 0;
}

static void sched_rq_cpu_starting(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	rq->calc_load_update = calc_load_update;
	update_max_interval();
}

int sched_cpu_starting(unsigned int cpu)
{
	sched_core_cpu_starting(cpu);
	sched_rq_cpu_starting(cpu);
	sched_tick_start(cpu);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Invoked immediately before the stopper thread is invoked to bring the
 * CPU down completely. At this point all per CPU kthreads except the
 * hotplug thread (current) and the stopper thread (inactive) have been
 * either parked or have been unbound from the outgoing CPU. Ensure that
 * any of those which might be on the way out are gone.
 *
 * If after this point a bound task is being woken on this CPU then the
 * responsible hotplug callback has failed to do its job.
 * sched_cpu_dying() will catch it with the appropriate fireworks.
 */
int sched_cpu_wait_empty(unsigned int cpu)
{
	balance_hotplug_wait();
	return 0;
}

/*
 * Since this CPU is going 'away' for a while, fold any nr_active delta we
 * might have. Called from the CPU stopper task after ensuring that the
 * stopper is the last running task on the CPU, so nr_active count is
 * stable. We need to take the teardown thread which is calling this into
 * account, so we hand in adjust = 1 to the load calculation.
 *
 * Also see the comment "Global load-average calculations".
 */
static void calc_load_migrate(struct rq *rq)
{
	long delta = calc_load_fold_active(rq, 1);

	if (delta)
		atomic_long_add(delta, &calc_load_tasks);
}

static void dump_rq_tasks(struct rq *rq, const char *loglvl)
{
	struct task_struct *g, *p;
	int cpu = cpu_of(rq);

	lockdep_assert_rq_held(rq);

	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
	for_each_process_thread(g, p) {
		if (task_cpu(p) != cpu)
			continue;

		if (!task_on_rq_queued(p))
			continue;

		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
	}
}

int sched_cpu_dying(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	/* Handle pending wakeups and then migrate everything off */
	sched_tick_stop(cpu);

	rq_lock_irqsave(rq, &rf);
	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
		WARN(true, "Dying CPU not properly vacated!");
		dump_rq_tasks(rq, KERN_WARNING);
	}
	rq_unlock_irqrestore(rq, &rf);

	calc_load_migrate(rq);
	update_max_interval();
	hrtick_clear(rq);
	sched_core_cpu_dying(cpu);
	return 0;
}
#endif

9709void __init sched_init_smp(void)
9710{
9711 sched_init_numa(NUMA_NO_NODE);
9712
9713 /*
9714 * There's no userspace yet to cause hotplug operations; hence all the
9715 * CPU masks are stable and all blatant races in the below code cannot
9716 * happen.
9717 */
9718 mutex_lock(&sched_domains_mutex);
9719 sched_init_domains(cpu_active_mask);
9720 mutex_unlock(&sched_domains_mutex);
9721
9722 /* Move init over to a non-isolated CPU */
9723 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
9724 BUG();
9725 current->flags &= ~PF_NO_SETAFFINITY;
9726 sched_init_granularity();
9727
9728 init_sched_rt_class();
9729 init_sched_dl_class();
9730
9731 sched_smp_initialized = true;
9732}
9733
9734static int __init migration_init(void)
9735{
9736 sched_cpu_starting(smp_processor_id());
9737 return 0;
9738}
9739early_initcall(migration_init);
9740
9741#else
9742void __init sched_init_smp(void)
9743{
9744 sched_init_granularity();
9745}
9746#endif /* CONFIG_SMP */
9747
9748int in_sched_functions(unsigned long addr)
9749{
9750 return in_lock_functions(addr) ||
9751 (addr >= (unsigned long)__sched_text_start
9752 && addr < (unsigned long)__sched_text_end);
9753}
9754
9755#ifdef CONFIG_CGROUP_SCHED
/*
 * Default task group.
 * Every task in the system belongs to this group at bootup.
 */
struct task_group root_task_group;
LIST_HEAD(task_groups);

/* Cacheline aligned slab cache for task_group */
static struct kmem_cache *task_group_cache __read_mostly;
#endif

void __init sched_init(void)
{
	unsigned long ptr = 0;
	int i;

	/* Make sure the linker didn't screw up */
	BUG_ON(&idle_sched_class != &fair_sched_class + 1 ||
	       &fair_sched_class != &rt_sched_class + 1 ||
	       &rt_sched_class != &dl_sched_class + 1);
#ifdef CONFIG_SMP
	BUG_ON(&dl_sched_class != &stop_sched_class + 1);
#endif

	wait_bit_init();

#ifdef CONFIG_FAIR_GROUP_SCHED
	ptr += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	ptr += 2 * nr_cpu_ids * sizeof(void **);
#endif
	if (ptr) {
		ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);

#ifdef CONFIG_FAIR_GROUP_SCHED
		root_task_group.se = (struct sched_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
		init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.rt_rq = (struct rt_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#endif /* CONFIG_RT_GROUP_SCHED */
	}

	init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());

#ifdef CONFIG_SMP
	init_defrootdomain();
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	init_rt_bandwidth(&root_task_group.rt_bandwidth,
			global_rt_period(), global_rt_runtime());
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_CGROUP_SCHED
	task_group_cache = KMEM_CACHE(task_group, 0);

	list_add(&root_task_group.list, &task_groups);
	INIT_LIST_HEAD(&root_task_group.children);
	INIT_LIST_HEAD(&root_task_group.siblings);
	autogroup_init(&init_task);
#endif /* CONFIG_CGROUP_SCHED */

	for_each_possible_cpu(i) {
		struct rq *rq;

		rq = cpu_rq(i);
		raw_spin_lock_init(&rq->__lock);
		rq->nr_running = 0;
		rq->calc_load_active = 0;
		rq->calc_load_update = jiffies + LOAD_FREQ;
		init_cfs_rq(&rq->cfs);
		init_rt_rq(&rq->rt);
		init_dl_rq(&rq->dl);
#ifdef CONFIG_FAIR_GROUP_SCHED
		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		/*
		 * How much CPU bandwidth does root_task_group get?
		 *
		 * In case of task-groups formed through the cgroup filesystem,
		 * it gets 100% of the CPU resources in the system. This overall
		 * system CPU resource is divided among the tasks of
		 * root_task_group and its child task-groups in a fair manner,
		 * based on each entity's (task or task-group's) weight
		 * (se->load.weight).
		 *
		 * In other words, if root_task_group has 10 tasks of weight
		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
		 * then A0's share of the CPU resource is:
		 *
		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
		 *
		 * We achieve this by letting root_task_group's tasks sit
		 * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
		 */
		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */

		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
#ifdef CONFIG_SMP
		rq->sd = NULL;
		rq->rd = NULL;
		rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
		rq->balance_callback = &balance_push_callback;
		rq->active_balance = 0;
		rq->next_balance = jiffies;
		rq->push_cpu = 0;
		rq->cpu = i;
		rq->online = 0;
		rq->idle_stamp = 0;
		rq->avg_idle = 2*sysctl_sched_migration_cost;
		rq->wake_stamp = jiffies;
		rq->wake_avg_idle = rq->avg_idle;
		rq->max_idle_balance_cost = sysctl_sched_migration_cost;

		INIT_LIST_HEAD(&rq->cfs_tasks);

		rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ_COMMON
		rq->last_blocked_load_update_tick = jiffies;
		atomic_set(&rq->nohz_flags, 0);

		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
#endif
#ifdef CONFIG_HOTPLUG_CPU
		rcuwait_init(&rq->hotplug_wait);
#endif
#endif /* CONFIG_SMP */
		hrtick_rq_init(rq);
		atomic_set(&rq->nr_iowait, 0);

#ifdef CONFIG_SCHED_CORE
		rq->core = rq;
		rq->core_pick = NULL;
		rq->core_enabled = 0;
		rq->core_tree = RB_ROOT;
		rq->core_forceidle_count = 0;
		rq->core_forceidle_occupation = 0;
		rq->core_forceidle_start = 0;

		rq->core_cookie = 0UL;
#endif
		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
	}

	set_load_weight(&init_task, false);

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	mmgrab(&init_mm);
	enter_lazy_tlb(&init_mm, current);

	/*
	 * The idle task doesn't need the kthread struct to function, but it
	 * is dressed up as a per-CPU kthread and thus needs to play the part
	 * if we want to avoid special-casing it in code that deals with per-CPU
	 * kthreads.
	 */
	WARN_ON(!set_kthread_struct(current));

	/*
	 * Make us the idle thread. Technically, schedule() should not be
	 * called from this thread, however somewhere below it might be,
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
	init_idle(current, smp_processor_id());

	calc_load_update = jiffies + LOAD_FREQ;

#ifdef CONFIG_SMP
	idle_thread_set_boot_cpu();
	balance_push_set(smp_processor_id(), false);
#endif
	init_sched_fair_class();

	psi_init();

	init_uclamp();

	preempt_dynamic_init();

	scheduler_running = 1;
}

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

void __might_sleep(const char *file, int line)
{
	unsigned int state = get_current_state();
	/*
	 * Blocking primitives will set (and therefore destroy) current->state;
	 * since we will exit with TASK_RUNNING, make sure we enter with it,
	 * otherwise we will destroy state.
	 */
	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
			"do not call blocking ops when !TASK_RUNNING; "
			"state=%x set at [<%p>] %pS\n", state,
			(void *)current->task_state_change,
			(void *)current->task_state_change);

	__might_resched(file, line, 0);
}
EXPORT_SYMBOL(__might_sleep);

static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
{
	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		return;

	if (preempt_count() == preempt_offset)
		return;

	pr_err("Preemption disabled at:");
	print_ip_sym(KERN_ERR, ip);
}

static inline bool resched_offsets_ok(unsigned int offsets)
{
	unsigned int nested = preempt_count();

	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;

	return nested == offsets;
}
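
/*
 * Illustrative encoding example (assuming MIGHT_RESCHED_RCU_SHIFT is 8, as
 * defined earlier in this file): a caller that legitimately holds one
 * preempt_disable() and one rcu_read_lock() would pass
 *
 *	offsets = 1 | (1 << MIGHT_RESCHED_RCU_SHIFT)
 *
 * and resched_offsets_ok() succeeds only when the actual preempt count and
 * RCU nest depth match exactly; any extra nesting triggers the splat in
 * __might_resched() below.
 */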

void __might_resched(const char *file, int line, unsigned int offsets)
{
	/* Ratelimiting timestamp: */
	static unsigned long prev_jiffy;

	unsigned long preempt_disable_ip;

	/* WARN_ON_ONCE() by default, no rate limit required: */
	rcu_sleep_check();

	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
	     !is_idle_task(current) && !current->non_block_count) ||
	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
	    oops_in_progress)
		return;

	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	/* Save this before calling printk(), since that will clobber it: */
	preempt_disable_ip = get_preempt_disable_ip(current);

	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
	       file, line);
	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
	       in_atomic(), irqs_disabled(), current->non_block_count,
	       current->pid, current->comm);
	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
	       offsets & MIGHT_RESCHED_PREEMPT_MASK);

	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
		pr_err("RCU nest depth: %d, expected: %u\n",
		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
	}

	if (task_stack_end_corrupted(current))
		pr_emerg("Thread overran stack, or stack corrupted\n");

	debug_show_held_locks(current);
	if (irqs_disabled())
		print_irqtrace_events(current);

	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
				 preempt_disable_ip);

	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL(__might_resched);
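
/*
 * Illustrative output, assembled from the format strings above (file, line,
 * pid and name are made up): a might_sleep() under a spinlock would print
 * something along the lines of
 *
 *	BUG: sleeping function called from invalid context at kernel/foo.c:42
 *	in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 1234, name: foo
 *	preempt_count: 1, expected: 0
 *
 * followed by the held locks and a stack dump.
 */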

void __cant_sleep(const char *file, int line, int preempt_offset)
{
	static unsigned long prev_jiffy;

	if (irqs_disabled())
		return;

	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return;

	if (preempt_count() > preempt_offset)
		return;

	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
			in_atomic(), irqs_disabled(),
			current->pid, current->comm);

	debug_show_held_locks(current);
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_sleep);

#ifdef CONFIG_SMP
void __cant_migrate(const char *file, int line)
{
	static unsigned long prev_jiffy;

	if (irqs_disabled())
		return;

	if (is_migration_disabled(current))
		return;

	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return;

	if (preempt_count() > 0)
		return;

	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
	       current->pid, current->comm);

	debug_show_held_locks(current);
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_migrate);
#endif
#endif

#ifdef CONFIG_MAGIC_SYSRQ
void normalize_rt_tasks(void)
{
	struct task_struct *g, *p;
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
	};

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/*
		 * Only normalize user tasks:
		 */
		if (p->flags & PF_KTHREAD)
			continue;

		p->se.exec_start = 0;
		schedstat_set(p->stats.wait_start, 0);
		schedstat_set(p->stats.sleep_start, 0);
		schedstat_set(p->stats.block_start, 0);

		if (!dl_task(p) && !rt_task(p)) {
			/*
			 * Renice negative nice level userspace
			 * tasks back to 0:
			 */
			if (task_nice(p) < 0)
				set_user_nice(p, 0);
			continue;
		}

		__sched_setscheduler(p, &attr, false, false);
	}
	read_unlock(&tasklist_lock);
}
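
/*
 * Usage note: this is reachable through the SysRq 'n' key, so with magic
 * SysRq enabled all user RT/deadline tasks can be demoted to SCHED_NORMAL
 * from userspace, e.g.:
 *
 *	echo n > /proc/sysrq-trigger
 */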

#endif /* CONFIG_MAGIC_SYSRQ */

#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
 * These functions are only useful for the IA64 MCA handling, or kdb.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given CPU.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 *
 * Return: The current task for @cpu.
 */
struct task_struct *curr_task(int cpu)
{
	return cpu_curr(cpu);
}

#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */

#ifdef CONFIG_IA64
/**
 * ia64_set_curr_task - set the current task for a given CPU.
 * @cpu: the processor in question.
 * @p: the task pointer to set.
 *
 * Description: This function must only be used when non-maskable interrupts
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a CPU in a non-blocking manner. This function
 * must be called with all CPUs synchronized and interrupts disabled; the
 * caller must save the original value of the current task (see
 * curr_task() above) and restore that value before reenabling interrupts and
 * restarting the system.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
void ia64_set_curr_task(int cpu, struct task_struct *p)
{
	cpu_curr(cpu) = p;
}

#endif

#ifdef CONFIG_CGROUP_SCHED
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

static inline void alloc_uclamp_sched_group(struct task_group *tg,
					    struct task_group *parent)
{
#ifdef CONFIG_UCLAMP_TASK_GROUP
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&tg->uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
		tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
	}
#endif
}

static void sched_free_group(struct task_group *tg)
{
	free_fair_sched_group(tg);
	free_rt_sched_group(tg);
	autogroup_free(tg);
	kmem_cache_free(task_group_cache, tg);
}

static void sched_free_group_rcu(struct rcu_head *rcu)
{
	sched_free_group(container_of(rcu, struct task_group, rcu));
}

static void sched_unregister_group(struct task_group *tg)
{
	unregister_fair_sched_group(tg);
	unregister_rt_sched_group(tg);
	/*
	 * We have to wait for yet another RCU grace period to expire, as
	 * print_cfs_stats() might run concurrently.
	 */
	call_rcu(&tg->rcu, sched_free_group_rcu);
}

/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
	struct task_group *tg;

	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
	if (!tg)
		return ERR_PTR(-ENOMEM);

	if (!alloc_fair_sched_group(tg, parent))
		goto err;

	if (!alloc_rt_sched_group(tg, parent))
		goto err;

	alloc_uclamp_sched_group(tg, parent);

	return tg;

err:
	sched_free_group(tg);
	return ERR_PTR(-ENOMEM);
}

void sched_online_group(struct task_group *tg, struct task_group *parent)
{
	unsigned long flags;

	spin_lock_irqsave(&task_group_lock, flags);
	list_add_rcu(&tg->list, &task_groups);

	/* Root should already exist: */
	WARN_ON(!parent);

	tg->parent = parent;
	INIT_LIST_HEAD(&tg->children);
	list_add_rcu(&tg->siblings, &parent->children);
	spin_unlock_irqrestore(&task_group_lock, flags);

	online_fair_sched_group(tg);
}
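
/*
 * Life-cycle sketch (mirrors the cgroup callbacks further down in this
 * file, not a new API):
 *
 *	tg = sched_create_group(parent);	- cpu_cgroup_css_alloc()
 *	sched_online_group(tg, parent);		- cpu_cgroup_css_online()
 *	...
 *	sched_release_group(tg);		- cpu_cgroup_css_released()
 *	sched_unregister_group(tg);		- cpu_cgroup_css_free()
 *
 * with the final freeing deferred by another RCU grace period via
 * sched_free_group_rcu() above.
 */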

/* rcu callback to free various structures associated with a task group */
static void sched_unregister_group_rcu(struct rcu_head *rhp)
{
	/* Now it should be safe to free those cfs_rqs: */
	sched_unregister_group(container_of(rhp, struct task_group, rcu));
}

void sched_destroy_group(struct task_group *tg)
{
	/* Wait for possible concurrent references to the cfs_rqs to complete: */
	call_rcu(&tg->rcu, sched_unregister_group_rcu);
}

void sched_release_group(struct task_group *tg)
{
	unsigned long flags;

	/*
	 * Unlink first, to prevent walk_tg_tree_from() from finding us (via
	 * sched_cfs_period_timer()).
	 *
	 * For this to be effective, we have to wait for all pending users of
	 * this task group to leave their RCU critical section to ensure no new
	 * user will see our dying task group any more. Specifically ensure
	 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
	 *
	 * We therefore defer calling unregister_fair_sched_group() to
	 * sched_unregister_group(), which is guaranteed to get called only
	 * after the current RCU grace period has expired.
	 */
	spin_lock_irqsave(&task_group_lock, flags);
	list_del_rcu(&tg->list);
	list_del_rcu(&tg->siblings);
	spin_unlock_irqrestore(&task_group_lock, flags);
}

static void sched_change_group(struct task_struct *tsk)
{
	struct task_group *tg;

	/*
	 * All callers are synchronized by task_rq_lock(); we do not use RCU,
	 * which would be pointless here. Thus, we pass "true" to
	 * task_css_check() to prevent lockdep warnings.
	 */
	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
			  struct task_group, css);
	tg = autogroup_task_group(tsk, tg);
	tsk->sched_task_group = tg;

#ifdef CONFIG_FAIR_GROUP_SCHED
	if (tsk->sched_class->task_change_group)
		tsk->sched_class->task_change_group(tsk);
	else
#endif
		set_task_rq(tsk, task_cpu(tsk));
}

/*
 * Change task's runqueue when it moves between groups.
 *
 * The caller of this function should have put the task in its new group by
 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
 * its new group.
 */
void sched_move_task(struct task_struct *tsk)
{
	int queued, running, queue_flags =
		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(tsk, &rf);
	update_rq_clock(rq);

	running = task_current(rq, tsk);
	queued = task_on_rq_queued(tsk);

	if (queued)
		dequeue_task(rq, tsk, queue_flags);
	if (running)
		put_prev_task(rq, tsk);

	sched_change_group(tsk);

	if (queued)
		enqueue_task(rq, tsk, queue_flags);
	if (running) {
		set_next_task(rq, tsk);
		/*
		 * After changing group, the running task may have joined a
		 * throttled one but it's still the running task. Trigger a
		 * resched to make sure that task can still run.
		 */
		resched_curr(rq);
	}

	task_rq_unlock(rq, tsk, &rf);
}

static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct task_group, css) : NULL;
}

static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct task_group *parent = css_tg(parent_css);
	struct task_group *tg;

	if (!parent) {
		/* This is early initialization for the top cgroup */
		return &root_task_group.css;
	}

	tg = sched_create_group(parent);
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);

	return &tg->css;
}

/* Expose task group only after completing cgroup initialization */
static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);
	struct task_group *parent = css_tg(css->parent);

	if (parent)
		sched_online_group(tg, parent);

#ifdef CONFIG_UCLAMP_TASK_GROUP
	/* Propagate the effective uclamp value for the new group */
	mutex_lock(&uclamp_mutex);
	rcu_read_lock();
	cpu_util_update_eff(css);
	rcu_read_unlock();
	mutex_unlock(&uclamp_mutex);
#endif

	return 0;
}

static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);

	sched_release_group(tg);
}

static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);

	/*
	 * Relies on the RCU grace period between css_released() and this.
	 */
	sched_unregister_group(tg);
}

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	cgroup_taskset_for_each(task, css, tset) {
		if (!sched_rt_can_attach(css_tg(css), task))
			return -EINVAL;
	}
	return 0;
}
#endif

static void cpu_cgroup_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	cgroup_taskset_for_each(task, css, tset)
		sched_move_task(task);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static void cpu_util_update_eff(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys_state *top_css = css;
	struct uclamp_se *uc_parent = NULL;
	struct uclamp_se *uc_se = NULL;
	unsigned int eff[UCLAMP_CNT];
	enum uclamp_id clamp_id;
	unsigned int clamps;

	lockdep_assert_held(&uclamp_mutex);
	SCHED_WARN_ON(!rcu_read_lock_held());

	css_for_each_descendant_pre(css, top_css) {
		uc_parent = css_tg(css)->parent
			? css_tg(css)->parent->uclamp : NULL;

		for_each_clamp_id(clamp_id) {
			/* Assume effective clamps match requested clamps */
			eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
			/* Cap effective clamps with parent's effective clamps */
			if (uc_parent &&
			    eff[clamp_id] > uc_parent[clamp_id].value) {
				eff[clamp_id] = uc_parent[clamp_id].value;
			}
		}
		/* Ensure protection is always capped by limit */
		eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);

		/* Propagate most restrictive effective clamps */
		clamps = 0x0;
		uc_se = css_tg(css)->uclamp;
		for_each_clamp_id(clamp_id) {
			if (eff[clamp_id] == uc_se[clamp_id].value)
				continue;
			uc_se[clamp_id].value = eff[clamp_id];
			uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
			clamps |= (0x1 << clamp_id);
		}
		if (!clamps) {
			css = css_rightmost_descendant(css);
			continue;
		}

		/* Immediately update descendants' RUNNABLE tasks */
		uclamp_update_active_tasks(css);
	}
}
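
/*
 * Worked example (values illustrative): a child group requests
 * uclamp.min = 75% (768) and uclamp.max = 50% (512) while its parent's
 * effective clamps are min = 819 and max = 410. The loop above first caps
 * each request by the parent's effective value, then caps the protection
 * by the limit:
 *
 *	eff[UCLAMP_MAX] = min(512, 410) = 410
 *	eff[UCLAMP_MIN] = min(min(768, 819), 410) = 410
 *
 * so a child can never be boosted beyond what its ancestors allow.
 */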

/*
 * Compute the integer 10^N for a given exponent N by casting to integer the
 * literal "1eN" C floating-point expression. Since there is no way to paste
 * a macro argument (N) directly into the literal, use two levels of macros
 * so the argument is expanded before token pasting.
 */
#define _POW10(exp) ((unsigned int)1e##exp)
#define POW10(exp) _POW10(exp)
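
/*
 * Expansion example: with UCLAMP_PERCENT_SHIFT == 2,
 *
 *	POW10(UCLAMP_PERCENT_SHIFT) -> _POW10(2) -> ((unsigned int)1e2) == 100
 *
 * whereas a single level of macro would paste "1eUCLAMP_PERCENT_SHIFT"
 * and fail to compile.
 */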

struct uclamp_request {
#define UCLAMP_PERCENT_SHIFT	2
#define UCLAMP_PERCENT_SCALE	(100 * POW10(UCLAMP_PERCENT_SHIFT))
	s64 percent;
	u64 util;
	int ret;
};

static inline struct uclamp_request
capacity_from_percent(char *buf)
{
	struct uclamp_request req = {
		.percent = UCLAMP_PERCENT_SCALE,
		.util = SCHED_CAPACITY_SCALE,
		.ret = 0,
	};

	buf = strim(buf);
	if (strcmp(buf, "max")) {
		req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
					     &req.percent);
		if (req.ret)
			return req;
		if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
			req.ret = -ERANGE;
			return req;
		}

		req.util = req.percent << SCHED_CAPACITY_SHIFT;
		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
	}

	return req;
}
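
/*
 * Worked example: writing "50" parses to req.percent = 5000 (two fixed
 * decimal places), and with SCHED_CAPACITY_SHIFT == 10:
 *
 *	req.util = DIV_ROUND_CLOSEST_ULL(5000 << 10, 10000)
 *		 = DIV_ROUND_CLOSEST_ULL(5120000, 10000) = 512
 *
 * i.e. 50% maps to half of SCHED_CAPACITY_SCALE (1024).
 */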

static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off,
				enum uclamp_id clamp_id)
{
	struct uclamp_request req;
	struct task_group *tg;

	req = capacity_from_percent(buf);
	if (req.ret)
		return req.ret;

	static_branch_enable(&sched_uclamp_used);

	mutex_lock(&uclamp_mutex);
	rcu_read_lock();

	tg = css_tg(of_css(of));
	if (tg->uclamp_req[clamp_id].value != req.util)
		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);

	/*
	 * Because the conversion rounding is not recoverable, we keep track
	 * of the exact requested value.
	 */
	tg->uclamp_pct[clamp_id] = req.percent;

	/* Update effective clamps to track the most restrictive value */
	cpu_util_update_eff(of_css(of));

	rcu_read_unlock();
	mutex_unlock(&uclamp_mutex);

	return nbytes;
}

static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes,
				    loff_t off)
{
	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
}

static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes,
				    loff_t off)
{
	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
}
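
/*
 * Usage note: on cgroup v2 these back the cpu.uclamp.min and cpu.uclamp.max
 * files declared in the cftype tables below, which accept a percentage or
 * "max" (the group name "foo" is made up):
 *
 *	echo "75.5" > /sys/fs/cgroup/foo/cpu.uclamp.min
 *	echo "max"  > /sys/fs/cgroup/foo/cpu.uclamp.max
 */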

static inline void cpu_uclamp_print(struct seq_file *sf,
				    enum uclamp_id clamp_id)
{
	struct task_group *tg;
	u64 util_clamp;
	u64 percent;
	u32 rem;

	rcu_read_lock();
	tg = css_tg(seq_css(sf));
	util_clamp = tg->uclamp_req[clamp_id].value;
	rcu_read_unlock();

	if (util_clamp == SCHED_CAPACITY_SCALE) {
		seq_puts(sf, "max\n");
		return;
	}

	percent = tg->uclamp_pct[clamp_id];
	percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
	seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
}

static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
{
	cpu_uclamp_print(sf, UCLAMP_MIN);
	return 0;
}

static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
{
	cpu_uclamp_print(sf, UCLAMP_MAX);
	return 0;
}
#endif /* CONFIG_UCLAMP_TASK_GROUP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
				struct cftype *cftype, u64 shareval)
{
	if (shareval > scale_load_down(ULONG_MAX))
		shareval = MAX_SHARES;
	return sched_group_set_shares(css_tg(css), scale_load(shareval));
}

static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct task_group *tg = css_tg(css);

	return (u64) scale_load_down(tg->shares);
}
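
/*
 * Usage note: these back the legacy cgroup v1 cpu.shares file. Shares are
 * relative, with 1024 as the default, so e.g. (group name made up):
 *
 *	echo 2048 > /sys/fs/cgroup/cpu/foo/cpu.shares
 *
 * gives "foo" twice the default weight when CPUs are contended.
 */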

#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);

const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
/* More than 203 days if BW_SHIFT equals 20. */
static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);

static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
				u64 burst)
{
	int i, ret = 0, runtime_enabled, runtime_was_enabled;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	if (tg == &root_task_group)
		return -EINVAL;

	/*
	 * Ensure we have at least some amount of bandwidth every period. This
	 * is to prevent reaching a state of large arrears when throttled via
	 * entity_tick() resulting in prolonged exit starvation.
	 */
	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
		return -EINVAL;

	/*
	 * Likewise, bound things on the other side by preventing insane quota
	 * periods. This also allows us to normalize in computing quota
	 * feasibility.
	 */
	if (period > max_cfs_quota_period)
		return -EINVAL;

	/*
	 * Bound quota to defend against overflow during the bandwidth shift.
	 */
	if (quota != RUNTIME_INF && quota > max_cfs_runtime)
		return -EINVAL;

	if (quota != RUNTIME_INF && (burst > quota ||
				     burst + quota > max_cfs_runtime))
		return -EINVAL;

	/*
	 * Prevent race between setting of cfs_rq->runtime_enabled and
	 * unthrottle_offline_cfs_rqs().
	 */
	cpus_read_lock();
	mutex_lock(&cfs_constraints_mutex);
	ret = __cfs_schedulable(tg, period, quota);
	if (ret)
		goto out_unlock;

	runtime_enabled = quota != RUNTIME_INF;
	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
	/*
	 * If we need to toggle cfs_bandwidth_used, off->on must occur
	 * before making related changes, and on->off must occur afterwards.
	 */
	if (runtime_enabled && !runtime_was_enabled)
		cfs_bandwidth_usage_inc();
	raw_spin_lock_irq(&cfs_b->lock);
	cfs_b->period = ns_to_ktime(period);
	cfs_b->quota = quota;
	cfs_b->burst = burst;

	__refill_cfs_bandwidth_runtime(cfs_b);

	/* Restart the period timer (if active) to handle new period expiry: */
	if (runtime_enabled)
		start_cfs_bandwidth(cfs_b);

	raw_spin_unlock_irq(&cfs_b->lock);

	for_each_online_cpu(i) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
		struct rq *rq = cfs_rq->rq;
		struct rq_flags rf;

		rq_lock_irq(rq, &rf);
		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;

		if (cfs_rq->throttled)
			unthrottle_cfs_rq(cfs_rq);
		rq_unlock_irq(rq, &rf);
	}
	if (runtime_was_enabled && !runtime_enabled)
		cfs_bandwidth_usage_dec();
out_unlock:
	mutex_unlock(&cfs_constraints_mutex);
	cpus_read_unlock();

	return ret;
}

static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
	u64 quota, period, burst;

	period = ktime_to_ns(tg->cfs_bandwidth.period);
	burst = tg->cfs_bandwidth.burst;
	if (cfs_quota_us < 0)
		quota = RUNTIME_INF;
	else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
	else
		return -EINVAL;

	return tg_set_cfs_bandwidth(tg, period, quota, burst);
}

static long tg_get_cfs_quota(struct task_group *tg)
{
	u64 quota_us;

	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
		return -1;

	quota_us = tg->cfs_bandwidth.quota;
	do_div(quota_us, NSEC_PER_USEC);

	return quota_us;
}

static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
	u64 quota, period, burst;

	if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;

	period = (u64)cfs_period_us * NSEC_PER_USEC;
	quota = tg->cfs_bandwidth.quota;
	burst = tg->cfs_bandwidth.burst;

	return tg_set_cfs_bandwidth(tg, period, quota, burst);
}

static long tg_get_cfs_period(struct task_group *tg)
{
	u64 cfs_period_us;

	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
	do_div(cfs_period_us, NSEC_PER_USEC);

	return cfs_period_us;
}

static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
{
	u64 quota, period, burst;

	if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;

	burst = (u64)cfs_burst_us * NSEC_PER_USEC;
	period = ktime_to_ns(tg->cfs_bandwidth.period);
	quota = tg->cfs_bandwidth.quota;

	return tg_set_cfs_bandwidth(tg, period, quota, burst);
}

static long tg_get_cfs_burst(struct task_group *tg)
{
	u64 burst_us;

	burst_us = tg->cfs_bandwidth.burst;
	do_div(burst_us, NSEC_PER_USEC);

	return burst_us;
}
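
/*
 * Usage note: on cgroup v1 the setters above implement cpu.cfs_quota_us,
 * cpu.cfs_period_us and cpu.cfs_burst_us. For example, to cap a group to
 * half a CPU on average:
 *
 *	echo 100000 > cpu.cfs_period_us		(100ms period)
 *	echo  50000 > cpu.cfs_quota_us		(50ms runtime per period)
 *
 * Writing -1 to cpu.cfs_quota_us (RUNTIME_INF) removes the limit.
 */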

static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
				  struct cftype *cft)
{
	return tg_get_cfs_quota(css_tg(css));
}

static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
				   struct cftype *cftype, s64 cfs_quota_us)
{
	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
}

static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	return tg_get_cfs_period(css_tg(css));
}

static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
				    struct cftype *cftype, u64 cfs_period_us)
{
	return tg_set_cfs_period(css_tg(css), cfs_period_us);
}

static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
				  struct cftype *cft)
{
	return tg_get_cfs_burst(css_tg(css));
}

static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
				   struct cftype *cftype, u64 cfs_burst_us)
{
	return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
}

struct cfs_schedulable_data {
	struct task_group *tg;
	u64 period, quota;
};

/*
 * normalize group quota/period to be quota/max_period
 * note: units are usecs
 */
static u64 normalize_cfs_quota(struct task_group *tg,
			       struct cfs_schedulable_data *d)
{
	u64 quota, period;

	if (tg == d->tg) {
		period = d->period;
		quota = d->quota;
	} else {
		period = tg_get_cfs_period(tg);
		quota = tg_get_cfs_quota(tg);
	}

	/* note: these should typically be equivalent */
	if (quota == RUNTIME_INF || quota == -1)
		return RUNTIME_INF;

	return to_ratio(period, quota);
}
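
/*
 * Worked example, assuming to_ratio() returns (quota << BW_SHIFT) / period
 * with BW_SHIFT == 20 (see the max_cfs_runtime comment above): a group with
 * period = 100000us and quota = 50000us normalizes to
 *
 *	(50000 << 20) / 100000 = 524288, i.e. 0.5 in 20-bit fixed point,
 *
 * which can be compared directly against other groups regardless of their
 * periods.
 */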

static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
	struct cfs_schedulable_data *d = data;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	s64 quota = 0, parent_quota = -1;

	if (!tg->parent) {
		quota = RUNTIME_INF;
	} else {
		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;

		quota = normalize_cfs_quota(tg, d);
		parent_quota = parent_b->hierarchical_quota;

		/*
		 * Ensure max(child_quota) <= parent_quota. On cgroup2,
		 * always take the min. On cgroup1, only inherit when no
		 * limit is set:
		 */
		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
			quota = min(quota, parent_quota);
		} else {
			if (quota == RUNTIME_INF)
				quota = parent_quota;
			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
				return -EINVAL;
		}
	}
	cfs_b->hierarchical_quota = quota;

	return 0;
}
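
/*
 * Worked example: with a parent whose hierarchical quota normalizes to
 * 0.5 CPU and a child asking for 0.8 CPU, cgroup2 silently clamps the
 * child to min(0.8, 0.5) = 0.5, whereas cgroup1 rejects the configuration
 * with -EINVAL because the child's limit exceeds the parent's.
 */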

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
	int ret;
	struct cfs_schedulable_data data = {
		.tg = tg,
		.period = period,
		.quota = quota,
	};

	if (quota != RUNTIME_INF) {
		do_div(data.period, NSEC_PER_USEC);
		do_div(data.quota, NSEC_PER_USEC);
	}

	rcu_read_lock();
	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}

static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);

	if (schedstat_enabled() && tg != &root_task_group) {
		struct sched_statistics *stats;
		u64 ws = 0;
		int i;

		for_each_possible_cpu(i) {
			stats = __schedstats_from_se(tg->se[i]);
			ws += schedstat_val(stats->wait_sum);
		}

		seq_printf(sf, "wait_sum %llu\n", ws);
	}

	seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
	seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);

	return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
				struct cftype *cft, s64 val)
{
	return sched_group_set_rt_runtime(css_tg(css), val);
}

static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	return sched_group_rt_runtime(css_tg(css));
}

static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
				    struct cftype *cftype, u64 rt_period_us)
{
	return sched_group_set_rt_period(css_tg(css), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	return sched_group_rt_period(css_tg(css));
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_FAIR_GROUP_SCHED
static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	return css_tg(css)->idle;
}

static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
			      struct cftype *cft, s64 idle)
{
	return sched_group_set_idle(css_tg(css), idle);
}
#endif

static struct cftype cpu_legacy_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
	{
		.name = "idle",
		.read_s64 = cpu_idle_read_s64,
		.write_s64 = cpu_idle_write_s64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "cfs_quota_us",
		.read_s64 = cpu_cfs_quota_read_s64,
		.write_s64 = cpu_cfs_quota_write_s64,
	},
	{
		.name = "cfs_period_us",
		.read_u64 = cpu_cfs_period_read_u64,
		.write_u64 = cpu_cfs_period_write_u64,
	},
	{
		.name = "cfs_burst_us",
		.read_u64 = cpu_cfs_burst_read_u64,
		.write_u64 = cpu_cfs_burst_write_u64,
	},
	{
		.name = "stat",
		.seq_show = cpu_cfs_stat_show,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
#ifdef CONFIG_UCLAMP_TASK_GROUP
	{
		.name = "uclamp.min",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_min_show,
		.write = cpu_uclamp_min_write,
	},
	{
		.name = "uclamp.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_max_show,
		.write = cpu_uclamp_max_write,
	},
#endif
	{ }	/* Terminate */
};

static int cpu_extra_stat_show(struct seq_file *sf,
			       struct cgroup_subsys_state *css)
{
#ifdef CONFIG_CFS_BANDWIDTH
	{
		struct task_group *tg = css_tg(css);
		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
		u64 throttled_usec, burst_usec;

		throttled_usec = cfs_b->throttled_time;
		do_div(throttled_usec, NSEC_PER_USEC);
		burst_usec = cfs_b->burst_time;
		do_div(burst_usec, NSEC_PER_USEC);

		seq_printf(sf, "nr_periods %d\n"
			   "nr_throttled %d\n"
			   "throttled_usec %llu\n"
			   "nr_bursts %d\n"
			   "burst_usec %llu\n",
			   cfs_b->nr_periods, cfs_b->nr_throttled,
			   throttled_usec, cfs_b->nr_burst, burst_usec);
	}
#endif
	return 0;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct task_group *tg = css_tg(css);
	u64 weight = scale_load_down(tg->shares);

	return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
}

static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 weight)
{
	/*
	 * cgroup weight knobs should use the common MIN, DFL and MAX
	 * values which are 1, 100 and 10000 respectively. While it loses
	 * a bit of range on both ends, it maps pretty well onto the shares
	 * value used by the scheduler and the round-trip conversions
	 * preserve the original value over the entire range.
	 */
	if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
		return -ERANGE;

	weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);

	return sched_group_set_shares(css_tg(css), scale_load(weight));
}
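
/*
 * Worked example: the default cgroup v2 weight of 100 maps to the default
 * shares value of 1024 and back without loss:
 *
 *	shares = DIV_ROUND_CLOSEST_ULL(100 * 1024, 100)  = 1024
 *	weight = DIV_ROUND_CLOSEST_ULL(1024 * 100, 1024) = 100
 *
 * and the maximum weight of 10000 maps to 102400 shares, so reads and
 * writes round-trip exactly across the whole range.
 */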

static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
				    struct cftype *cft)
{
	unsigned long weight = scale_load_down(css_tg(css)->shares);
	int last_delta = INT_MAX;
	int prio, delta;

	/* find the closest nice value to the current weight */
	for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
		delta = abs(sched_prio_to_weight[prio] - weight);
		if (delta >= last_delta)
			break;
		last_delta = delta;
	}

	return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
}

static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
				     struct cftype *cft, s64 nice)
{
	unsigned long weight;
	int idx;

	if (nice < MIN_NICE || nice > MAX_NICE)
		return -ERANGE;

	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
	idx = array_index_nospec(idx, 40);
	weight = sched_prio_to_weight[idx];

	return sched_group_set_shares(css_tg(css), scale_load(weight));
}
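
/*
 * Worked example: writing nice = 0 to cpu.weight.nice computes index
 * NICE_TO_PRIO(0) - MAX_RT_PRIO == 20 into sched_prio_to_weight[] (see the
 * table at the end of this file) and sets 1024 shares; reading a group
 * with 1024 shares finds entry 20 as the closest weight and reports 0.
 */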
#endif

static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
						  long period, long quota)
{
	if (quota < 0)
		seq_puts(sf, "max");
	else
		seq_printf(sf, "%ld", quota);

	seq_printf(sf, " %ld\n", period);
}

/* caller should put the current value in *@periodp before calling */
static int __maybe_unused cpu_period_quota_parse(char *buf,
						 u64 *periodp, u64 *quotap)
{
	char tok[21];	/* U64_MAX */

	if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
		return -EINVAL;

	*periodp *= NSEC_PER_USEC;

	if (sscanf(tok, "%llu", quotap))
		*quotap *= NSEC_PER_USEC;
	else if (!strcmp(tok, "max"))
		*quotap = RUNTIME_INF;
	else
		return -EINVAL;

	return 0;
}
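
/*
 * Parse examples for the cgroup v2 "cpu.max" syntax handled above:
 *
 *	"50000 100000"	-> quota = 50000us, period = 100000us
 *	"max"		-> quota = RUNTIME_INF, period left as passed in
 *
 * Both values are converted from microseconds to nanoseconds before use.
 */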

#ifdef CONFIG_CFS_BANDWIDTH
static int cpu_max_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));

	cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
	return 0;
}

static ssize_t cpu_max_write(struct kernfs_open_file *of,
			     char *buf, size_t nbytes, loff_t off)
{
	struct task_group *tg = css_tg(of_css(of));
	u64 period = tg_get_cfs_period(tg);
	u64 burst = tg_get_cfs_burst(tg);
	u64 quota;
	int ret;

	ret = cpu_period_quota_parse(buf, &period, &quota);
	if (!ret)
		ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
	return ret ?: nbytes;
}
#endif

static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = cpu_weight_read_u64,
		.write_u64 = cpu_weight_write_u64,
	},
	{
		.name = "weight.nice",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_s64 = cpu_weight_nice_read_s64,
		.write_s64 = cpu_weight_nice_write_s64,
	},
	{
		.name = "idle",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_s64 = cpu_idle_read_s64,
		.write_s64 = cpu_idle_write_s64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_max_show,
		.write = cpu_max_write,
	},
	{
		.name = "max.burst",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = cpu_cfs_burst_read_u64,
		.write_u64 = cpu_cfs_burst_write_u64,
	},
#endif
#ifdef CONFIG_UCLAMP_TASK_GROUP
	{
		.name = "uclamp.min",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_min_show,
		.write = cpu_uclamp_min_write,
	},
	{
		.name = "uclamp.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_max_show,
		.write = cpu_uclamp_max_write,
	},
#endif
	{ }	/* terminate */
};

struct cgroup_subsys cpu_cgrp_subsys = {
	.css_alloc	= cpu_cgroup_css_alloc,
	.css_online	= cpu_cgroup_css_online,
	.css_released	= cpu_cgroup_css_released,
	.css_free	= cpu_cgroup_css_free,
	.css_extra_stat_show = cpu_extra_stat_show,
#ifdef CONFIG_RT_GROUP_SCHED
	.can_attach	= cpu_cgroup_can_attach,
#endif
	.attach		= cpu_cgroup_attach,
	.legacy_cftypes	= cpu_legacy_files,
	.dfl_cftypes	= cpu_files,
	.early_init	= true,
	.threaded	= true,
};

#endif /* CONFIG_CGROUP_SCHED */

void dump_cpu_task(int cpu)
{
	if (cpu == smp_processor_id() && in_hardirq()) {
		struct pt_regs *regs;

		regs = get_irq_regs();
		if (regs) {
			show_regs(regs);
			return;
		}
	}

	if (trigger_single_cpu_backtrace(cpu))
		return;

	pr_info("Task dump for CPU %d:\n", cpu);
	sched_show_task(cpu_curr(cpu));
}

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
const int sched_prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
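
/*
 * Spot check of the ~1.25 multiplier described above: adjacent entries
 * differ by roughly 25%, e.g. 1024 / 820 ~= 1.249 around nice 0 and
 * 88761 / 71755 ~= 1.237 at the top of the table, which yields the
 * -10%/+10% relative CPU split between tasks one nice level apart.
 */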

/*
 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
const u32 sched_prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
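
/*
 * Spot check of the 2^32/x relation: for nice 0 the weight is 1024 and
 *
 *	2^32 / 1024 = 4294967296 / 1024 = 4194304
 *
 * which is exactly the nice-0 entry above, so dividing by the weight can
 * be replaced by multiplying by wmult and shifting right by 32.
 */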

void call_trace_sched_update_nr_running(struct rq *rq, int count)
{
	trace_sched_update_nr_running_tp(rq, count);
}
1/*
2 * kernel/sched/core.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
19 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
25 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
27 */
28
29#include <linux/kasan.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/nmi.h>
33#include <linux/init.h>
34#include <linux/uaccess.h>
35#include <linux/highmem.h>
36#include <asm/mmu_context.h>
37#include <linux/interrupt.h>
38#include <linux/capability.h>
39#include <linux/completion.h>
40#include <linux/kernel_stat.h>
41#include <linux/debug_locks.h>
42#include <linux/perf_event.h>
43#include <linux/security.h>
44#include <linux/notifier.h>
45#include <linux/profile.h>
46#include <linux/freezer.h>
47#include <linux/vmalloc.h>
48#include <linux/blkdev.h>
49#include <linux/delay.h>
50#include <linux/pid_namespace.h>
51#include <linux/smp.h>
52#include <linux/threads.h>
53#include <linux/timer.h>
54#include <linux/rcupdate.h>
55#include <linux/cpu.h>
56#include <linux/cpuset.h>
57#include <linux/percpu.h>
58#include <linux/proc_fs.h>
59#include <linux/seq_file.h>
60#include <linux/sysctl.h>
61#include <linux/syscalls.h>
62#include <linux/times.h>
63#include <linux/tsacct_kern.h>
64#include <linux/kprobes.h>
65#include <linux/delayacct.h>
66#include <linux/unistd.h>
67#include <linux/pagemap.h>
68#include <linux/hrtimer.h>
69#include <linux/tick.h>
70#include <linux/ctype.h>
71#include <linux/ftrace.h>
72#include <linux/slab.h>
73#include <linux/init_task.h>
74#include <linux/context_tracking.h>
75#include <linux/compiler.h>
76#include <linux/frame.h>
77
78#include <asm/switch_to.h>
79#include <asm/tlb.h>
80#include <asm/irq_regs.h>
81#include <asm/mutex.h>
82#ifdef CONFIG_PARAVIRT
83#include <asm/paravirt.h>
84#endif
85
86#include "sched.h"
87#include "../workqueue_internal.h"
88#include "../smpboot.h"
89
90#define CREATE_TRACE_POINTS
91#include <trace/events/sched.h>
92
93DEFINE_MUTEX(sched_domains_mutex);
94DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
95
96static void update_rq_clock_task(struct rq *rq, s64 delta);
97
98void update_rq_clock(struct rq *rq)
99{
100 s64 delta;
101
102 lockdep_assert_held(&rq->lock);
103
104 if (rq->clock_skip_update & RQCF_ACT_SKIP)
105 return;
106
107 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
108 if (delta < 0)
109 return;
110 rq->clock += delta;
111 update_rq_clock_task(rq, delta);
112}
113
114/*
115 * Debugging: various feature bits
116 */
117
118#define SCHED_FEAT(name, enabled) \
119 (1UL << __SCHED_FEAT_##name) * enabled |
120
121const_debug unsigned int sysctl_sched_features =
122#include "features.h"
123 0;
124
125#undef SCHED_FEAT
126
127/*
128 * Number of tasks to iterate in a single balance run.
129 * Limited because this is done with IRQs disabled.
130 */
131const_debug unsigned int sysctl_sched_nr_migrate = 32;
132
133/*
134 * period over which we average the RT time consumption, measured
135 * in ms.
136 *
137 * default: 1s
138 */
139const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
140
141/*
142 * period over which we measure -rt task cpu usage in us.
143 * default: 1s
144 */
145unsigned int sysctl_sched_rt_period = 1000000;
146
147__read_mostly int scheduler_running;
148
149/*
150 * part of the period that we allow rt tasks to run in us.
151 * default: 0.95s
152 */
153int sysctl_sched_rt_runtime = 950000;
154
155/* cpus with isolated domains */
156cpumask_var_t cpu_isolated_map;
157
158/*
159 * this_rq_lock - lock this runqueue and disable interrupts.
160 */
161static struct rq *this_rq_lock(void)
162 __acquires(rq->lock)
163{
164 struct rq *rq;
165
166 local_irq_disable();
167 rq = this_rq();
168 raw_spin_lock(&rq->lock);
169
170 return rq;
171}
172
173#ifdef CONFIG_SCHED_HRTICK
174/*
175 * Use HR-timers to deliver accurate preemption points.
176 */
177
178static void hrtick_clear(struct rq *rq)
179{
180 if (hrtimer_active(&rq->hrtick_timer))
181 hrtimer_cancel(&rq->hrtick_timer);
182}
183
184/*
185 * High-resolution timer tick.
186 * Runs from hardirq context with interrupts disabled.
187 */
188static enum hrtimer_restart hrtick(struct hrtimer *timer)
189{
190 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
191
192 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
193
194 raw_spin_lock(&rq->lock);
195 update_rq_clock(rq);
196 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
197 raw_spin_unlock(&rq->lock);
198
199 return HRTIMER_NORESTART;
200}
201
202#ifdef CONFIG_SMP
203
204static void __hrtick_restart(struct rq *rq)
205{
206 struct hrtimer *timer = &rq->hrtick_timer;
207
208 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
209}
210
211/*
212 * called from hardirq (IPI) context
213 */
214static void __hrtick_start(void *arg)
215{
216 struct rq *rq = arg;
217
218 raw_spin_lock(&rq->lock);
219 __hrtick_restart(rq);
220 rq->hrtick_csd_pending = 0;
221 raw_spin_unlock(&rq->lock);
222}
223
224/*
225 * Called to set the hrtick timer state.
226 *
227 * called with rq->lock held and irqs disabled
228 */
229void hrtick_start(struct rq *rq, u64 delay)
230{
231 struct hrtimer *timer = &rq->hrtick_timer;
232 ktime_t time;
233 s64 delta;
234
235 /*
236 * Don't schedule slices shorter than 10000ns, that just
237 * doesn't make sense and can cause timer DoS.
238 */
239 delta = max_t(s64, delay, 10000LL);
240 time = ktime_add_ns(timer->base->get_time(), delta);
241
242 hrtimer_set_expires(timer, time);
243
244 if (rq == this_rq()) {
245 __hrtick_restart(rq);
246 } else if (!rq->hrtick_csd_pending) {
247 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
248 rq->hrtick_csd_pending = 1;
249 }
250}
251
252static int
253hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
254{
255 int cpu = (int)(long)hcpu;
256
257 switch (action) {
258 case CPU_UP_CANCELED:
259 case CPU_UP_CANCELED_FROZEN:
260 case CPU_DOWN_PREPARE:
261 case CPU_DOWN_PREPARE_FROZEN:
262 case CPU_DEAD:
263 case CPU_DEAD_FROZEN:
264 hrtick_clear(cpu_rq(cpu));
265 return NOTIFY_OK;
266 }
267
268 return NOTIFY_DONE;
269}
270
271static __init void init_hrtick(void)
272{
273 hotcpu_notifier(hotplug_hrtick, 0);
274}
275#else
276/*
277 * Called to set the hrtick timer state.
278 *
279 * called with rq->lock held and irqs disabled
280 */
281void hrtick_start(struct rq *rq, u64 delay)
282{
283 /*
284 * Don't schedule slices shorter than 10000ns, that just
285 * doesn't make sense. Rely on vruntime for fairness.
286 */
287 delay = max_t(u64, delay, 10000LL);
288 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
289 HRTIMER_MODE_REL_PINNED);
290}
291
292static inline void init_hrtick(void)
293{
294}
295#endif /* CONFIG_SMP */
296
297static void init_rq_hrtick(struct rq *rq)
298{
299#ifdef CONFIG_SMP
300 rq->hrtick_csd_pending = 0;
301
302 rq->hrtick_csd.flags = 0;
303 rq->hrtick_csd.func = __hrtick_start;
304 rq->hrtick_csd.info = rq;
305#endif
306
307 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
308 rq->hrtick_timer.function = hrtick;
309}
310#else /* CONFIG_SCHED_HRTICK */
311static inline void hrtick_clear(struct rq *rq)
312{
313}
314
315static inline void init_rq_hrtick(struct rq *rq)
316{
317}
318
319static inline void init_hrtick(void)
320{
321}
322#endif /* CONFIG_SCHED_HRTICK */
323
324/*
325 * cmpxchg based fetch_or, macro so it works for different integer types
326 */
327#define fetch_or(ptr, mask) \
328 ({ \
329 typeof(ptr) _ptr = (ptr); \
330 typeof(mask) _mask = (mask); \
331 typeof(*_ptr) _old, _val = *_ptr; \
332 \
333 for (;;) { \
334 _old = cmpxchg(_ptr, _val, _val | _mask); \
335 if (_old == _val) \
336 break; \
337 _val = _old; \
338 } \
339 _old; \
340})
341
342#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
343/*
344 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
345 * this avoids any races wrt polling state changes and thereby avoids
346 * spurious IPIs.
347 */
348static bool set_nr_and_not_polling(struct task_struct *p)
349{
350 struct thread_info *ti = task_thread_info(p);
351 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
352}
353
354/*
355 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
356 *
357 * If this returns true, then the idle task promises to call
358 * sched_ttwu_pending() and reschedule soon.
359 */
360static bool set_nr_if_polling(struct task_struct *p)
361{
362 struct thread_info *ti = task_thread_info(p);
363 typeof(ti->flags) old, val = READ_ONCE(ti->flags);
364
365 for (;;) {
366 if (!(val & _TIF_POLLING_NRFLAG))
367 return false;
368 if (val & _TIF_NEED_RESCHED)
369 return true;
370 old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
371 if (old == val)
372 break;
373 val = old;
374 }
375 return true;
376}
377
378#else
379static bool set_nr_and_not_polling(struct task_struct *p)
380{
381 set_tsk_need_resched(p);
382 return true;
383}
384
385#ifdef CONFIG_SMP
386static bool set_nr_if_polling(struct task_struct *p)
387{
388 return false;
389}
390#endif
391#endif
392
393void wake_q_add(struct wake_q_head *head, struct task_struct *task)
394{
395 struct wake_q_node *node = &task->wake_q;
396
397 /*
	 * Atomically grab the task; if ->wake_q is non-NULL already, the task
	 * is already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * This cmpxchg() implies a full barrier, which pairs with the write
	 * barrier implied by the wakeup in wake_up_q().
404 */
405 if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
406 return;
407
408 get_task_struct(task);
409
410 /*
	 * The head is context local; there can be no concurrency.
412 */
413 *head->lastp = node;
414 head->lastp = &node->next;
415}
416
417void wake_up_q(struct wake_q_head *head)
418{
419 struct wake_q_node *node = head->first;
420
421 while (node != WAKE_Q_TAIL) {
422 struct task_struct *task;
423
424 task = container_of(node, struct task_struct, wake_q);
425 BUG_ON(!task);
426 /* task can safely be re-inserted now */
427 node = node->next;
428 task->wake_q.next = NULL;
429
430 /*
431 * wake_up_process() implies a wmb() to pair with the queueing
432 * in wake_q_add() so as not to miss wakeups.
433 */
434 wake_up_process(task);
435 put_task_struct(task);
436 }
437}
438
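/*
 * Typical usage (an illustrative sketch; this assumes the WAKE_Q()
 * on-stack initializer from sched.h): collect wakeups while holding a
 * lock, then issue them after dropping it so the woken tasks never
 * contend on that lock:
 *
 *	WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	...
 *	wake_q_add(&wake_q, task);
 *	raw_spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);
 */
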
439/*
440 * resched_curr - mark rq's current task 'to be rescheduled now'.
441 *
442 * On UP this means the setting of the need_resched flag, on SMP it
443 * might also involve a cross-CPU call to trigger the scheduler on
444 * the target CPU.
445 */
446void resched_curr(struct rq *rq)
447{
448 struct task_struct *curr = rq->curr;
449 int cpu;
450
451 lockdep_assert_held(&rq->lock);
452
453 if (test_tsk_need_resched(curr))
454 return;
455
456 cpu = cpu_of(rq);
457
458 if (cpu == smp_processor_id()) {
459 set_tsk_need_resched(curr);
460 set_preempt_need_resched();
461 return;
462 }
463
464 if (set_nr_and_not_polling(curr))
465 smp_send_reschedule(cpu);
466 else
467 trace_sched_wake_idle_without_ipi(cpu);
468}
469
470void resched_cpu(int cpu)
471{
472 struct rq *rq = cpu_rq(cpu);
473 unsigned long flags;
474
475 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
476 return;
477 resched_curr(rq);
478 raw_spin_unlock_irqrestore(&rq->lock, flags);
479}
480
481#ifdef CONFIG_SMP
482#ifdef CONFIG_NO_HZ_COMMON
483/*
 * In the semi-idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU will add more delay to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc.).
490 */
491int get_nohz_timer_target(void)
492{
493 int i, cpu = smp_processor_id();
494 struct sched_domain *sd;
495
496 if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
497 return cpu;
498
499 rcu_read_lock();
500 for_each_domain(cpu, sd) {
501 for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
503 cpu = i;
504 goto unlock;
505 }
506 }
507 }
508
509 if (!is_housekeeping_cpu(cpu))
510 cpu = housekeeping_any_cpu();
511unlock:
512 rcu_read_unlock();
513 return cpu;
514}
515/*
516 * When add_timer_on() enqueues a timer into the timer wheel of an
517 * idle CPU then this timer might expire before the next timer event
518 * which is scheduled to wake up that CPU. In case of a completely
519 * idle system the next event might even be infinite time into the
520 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
521 * leaves the inner idle loop so the newly added timer is taken into
522 * account when the CPU goes back to idle and evaluates the timer
523 * wheel for the next timer event.
524 */
525static void wake_up_idle_cpu(int cpu)
526{
527 struct rq *rq = cpu_rq(cpu);
528
529 if (cpu == smp_processor_id())
530 return;
531
532 if (set_nr_and_not_polling(rq->idle))
533 smp_send_reschedule(cpu);
534 else
535 trace_sched_wake_idle_without_ipi(cpu);
536}
537
538static bool wake_up_full_nohz_cpu(int cpu)
539{
540 /*
541 * We just need the target to call irq_exit() and re-evaluate
542 * the next tick. The nohz full kick at least implies that.
543 * If needed we can still optimize that later with an
544 * empty IRQ.
545 */
546 if (tick_nohz_full_cpu(cpu)) {
547 if (cpu != smp_processor_id() ||
548 tick_nohz_tick_stopped())
549 tick_nohz_full_kick_cpu(cpu);
550 return true;
551 }
552
553 return false;
554}
555
556void wake_up_nohz_cpu(int cpu)
557{
558 if (!wake_up_full_nohz_cpu(cpu))
559 wake_up_idle_cpu(cpu);
560}
561
562static inline bool got_nohz_idle_kick(void)
563{
564 int cpu = smp_processor_id();
565
566 if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
567 return false;
568
569 if (idle_cpu(cpu) && !need_resched())
570 return true;
571
572 /*
	 * We can't run the idle load balance on this CPU at this time, so we
	 * cancel it and clear NOHZ_BALANCE_KICK.
575 */
576 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
577 return false;
578}
579
580#else /* CONFIG_NO_HZ_COMMON */
581
582static inline bool got_nohz_idle_kick(void)
583{
584 return false;
585}
586
587#endif /* CONFIG_NO_HZ_COMMON */
588
589#ifdef CONFIG_NO_HZ_FULL
590bool sched_can_stop_tick(struct rq *rq)
591{
592 int fifo_nr_running;
593
594 /* Deadline tasks, even if single, need the tick */
595 if (rq->dl.dl_nr_running)
596 return false;
597
598 /*
 * If there is more than one RR task, we need the tick to effect the
600 * actual RR behaviour.
601 */
	if (rq->rt.rr_nr_running)
		return rq->rt.rr_nr_running == 1;
608
609 /*
	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
	 * tick: there is no forced preemption between FIFO tasks.
612 */
613 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
614 if (fifo_nr_running)
615 return true;
616
617 /*
	 * If there are no DL, RR or FIFO tasks, there must only be CFS tasks
	 * left; if there's more than one we need the tick for involuntary
620 * preemption.
621 */
622 if (rq->nr_running > 1)
623 return false;
624
625 return true;
626}
627#endif /* CONFIG_NO_HZ_FULL */
628
629void sched_avg_update(struct rq *rq)
630{
631 s64 period = sched_avg_period();
632
633 while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
634 /*
635 * Inline assembly required to prevent the compiler
636 * optimising this loop into a divmod call.
637 * See __iter_div_u64_rem() for another example of this.
638 */
639 asm("" : "+rm" (rq->age_stamp));
640 rq->age_stamp += period;
641 rq->rt_avg /= 2;
642 }
643}
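
/*
 * In other words, rt_avg decays geometrically: every elapsed
 * sched_avg_period() halves the accumulated RT/IRQ time, so old activity
 * fades out with a half-life of one period.
 */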
644
645#endif /* CONFIG_SMP */
646
647#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
648 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
649/*
650 * Iterate task_group tree rooted at *from, calling @down when first entering a
651 * node and @up when leaving it for the final time.
652 *
653 * Caller must hold rcu_lock or sufficient equivalent.
654 */
655int walk_tg_tree_from(struct task_group *from,
656 tg_visitor down, tg_visitor up, void *data)
657{
658 struct task_group *parent, *child;
659 int ret;
660
661 parent = from;
662
663down:
664 ret = (*down)(parent, data);
665 if (ret)
666 goto out;
667 list_for_each_entry_rcu(child, &parent->children, siblings) {
668 parent = child;
669 goto down;
670
671up:
672 continue;
673 }
674 ret = (*up)(parent, data);
675 if (ret || parent == from)
676 goto out;
677
678 child = parent;
679 parent = parent->parent;
680 if (parent)
681 goto up;
682out:
683 return ret;
684}
685
686int tg_nop(struct task_group *tg, void *data)
687{
688 return 0;
689}
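
/*
 * Example (an illustrative sketch; my_down_visitor and data are
 * hypothetical, tg_nop() is the no-op visitor above): visit every group
 * below the root, doing work on the way down and nothing on the way up:
 *
 *	rcu_read_lock();
 *	ret = walk_tg_tree_from(&root_task_group, my_down_visitor, tg_nop, data);
 *	rcu_read_unlock();
 */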
690#endif
691
692static void set_load_weight(struct task_struct *p)
693{
694 int prio = p->static_prio - MAX_RT_PRIO;
695 struct load_weight *load = &p->se.load;
696
697 /*
698 * SCHED_IDLE tasks get minimal weight:
699 */
700 if (idle_policy(p->policy)) {
701 load->weight = scale_load(WEIGHT_IDLEPRIO);
702 load->inv_weight = WMULT_IDLEPRIO;
703 return;
704 }
705
706 load->weight = scale_load(sched_prio_to_weight[prio]);
707 load->inv_weight = sched_prio_to_wmult[prio];
708}
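
/*
 * Worked example: a nice-0 task has static_prio = NICE_TO_PRIO(0) = 120,
 * so prio = 120 - MAX_RT_PRIO = 20 and sched_prio_to_weight[20] = 1024,
 * the NICE_0_LOAD reference weight that all other weights are relative to.
 */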
709
710static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
711{
712 update_rq_clock(rq);
713 if (!(flags & ENQUEUE_RESTORE))
714 sched_info_queued(rq, p);
715 p->sched_class->enqueue_task(rq, p, flags);
716}
717
718static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
719{
720 update_rq_clock(rq);
721 if (!(flags & DEQUEUE_SAVE))
722 sched_info_dequeued(rq, p);
723 p->sched_class->dequeue_task(rq, p, flags);
724}
725
726void activate_task(struct rq *rq, struct task_struct *p, int flags)
727{
728 if (task_contributes_to_load(p))
729 rq->nr_uninterruptible--;
730
731 enqueue_task(rq, p, flags);
732}
733
734void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
735{
736 if (task_contributes_to_load(p))
737 rq->nr_uninterruptible++;
738
739 dequeue_task(rq, p, flags);
740}
741
742static void update_rq_clock_task(struct rq *rq, s64 delta)
743{
744/*
 * In theory, the compiler should just see 0 here, and optimize out the call
746 * to sched_rt_avg_update. But I don't trust it...
747 */
748#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
749 s64 steal = 0, irq_delta = 0;
750#endif
751#ifdef CONFIG_IRQ_TIME_ACCOUNTING
752 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
753
754 /*
755 * Since irq_time is only updated on {soft,}irq_exit, we might run into
756 * this case when a previous update_rq_clock() happened inside a
757 * {soft,}irq region.
758 *
759 * When this happens, we stop ->clock_task and only update the
760 * prev_irq_time stamp to account for the part that fit, so that a next
761 * update will consume the rest. This ensures ->clock_task is
762 * monotonic.
763 *
	 * It does however cause some slight misattribution of {soft,}irq
765 * time, a more accurate solution would be to update the irq_time using
766 * the current rq->clock timestamp, except that would require using
767 * atomic ops.
768 */
769 if (irq_delta > delta)
770 irq_delta = delta;
771
772 rq->prev_irq_time += irq_delta;
773 delta -= irq_delta;
774#endif
775#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
777 steal = paravirt_steal_clock(cpu_of(rq));
778 steal -= rq->prev_steal_time_rq;
779
780 if (unlikely(steal > delta))
781 steal = delta;
782
783 rq->prev_steal_time_rq += steal;
784 delta -= steal;
785 }
786#endif
787
788 rq->clock_task += delta;
789
790#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
791 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
792 sched_rt_avg_update(rq, irq_delta + steal);
793#endif
794}
795
796void sched_set_stop_task(int cpu, struct task_struct *stop)
797{
798 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
799 struct task_struct *old_stop = cpu_rq(cpu)->stop;
800
801 if (stop) {
802 /*
		 * Make it appear like a SCHED_FIFO task; it's something
804 * userspace knows about and won't get confused about.
805 *
806 * Also, it will make PI more or less work without too
807 * much confusion -- but then, stop work should not
808 * rely on PI working anyway.
809 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
811
812 stop->sched_class = &stop_sched_class;
813 }
814
815 cpu_rq(cpu)->stop = stop;
816
817 if (old_stop) {
818 /*
819 * Reset it back to a normal scheduling class so that
		 * it can die in peace.
821 */
822 old_stop->sched_class = &rt_sched_class;
823 }
824}
825
826/*
827 * __normal_prio - return the priority that is based on the static prio
828 */
829static inline int __normal_prio(struct task_struct *p)
830{
831 return p->static_prio;
832}
833
834/*
835 * Calculate the expected normal priority: i.e. priority
836 * without taking RT-inheritance into account. Might be
837 * boosted by interactivity modifiers. Changes upon fork,
838 * setprio syscalls, and whenever the interactivity
839 * estimator recalculates.
840 */
841static inline int normal_prio(struct task_struct *p)
842{
843 int prio;
844
845 if (task_has_dl_policy(p))
846 prio = MAX_DL_PRIO-1;
847 else if (task_has_rt_policy(p))
848 prio = MAX_RT_PRIO-1 - p->rt_priority;
849 else
850 prio = __normal_prio(p);
851 return prio;
852}
853
854/*
855 * Calculate the current priority, i.e. the priority
856 * taken into account by the scheduler. This value might
857 * be boosted by RT tasks, or might be boosted by
858 * interactivity modifiers. Will be RT if the task got
859 * RT-boosted. If not then it returns p->normal_prio.
860 */
861static int effective_prio(struct task_struct *p)
862{
863 p->normal_prio = normal_prio(p);
864 /*
865 * If we are RT tasks or we were boosted to RT priority,
866 * keep the priority unchanged. Otherwise, update priority
867 * to the normal priority:
868 */
869 if (!rt_prio(p->prio))
870 return p->normal_prio;
871 return p->prio;
872}
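
/*
 * Worked examples: an unboosted nice-0 CFS task ends up with prio ==
 * static_prio == 120; a SCHED_FIFO task with rt_priority 50 gets
 * MAX_RT_PRIO-1 - 50 = 49; a SCHED_DEADLINE task gets MAX_DL_PRIO-1 = -1,
 * i.e. above every RT priority. Lower numbers mean higher priority.
 */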
873
874/**
875 * task_curr - is this task currently executing on a CPU?
876 * @p: the task in question.
877 *
878 * Return: 1 if the task is currently executing. 0 otherwise.
879 */
880inline int task_curr(const struct task_struct *p)
881{
882 return cpu_curr(task_cpu(p)) == p;
883}
884
885/*
886 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
887 * use the balance_callback list if you want balancing.
888 *
 * This means any call to check_class_changed() must be followed by a call to
890 * balance_callback().
891 */
892static inline void check_class_changed(struct rq *rq, struct task_struct *p,
893 const struct sched_class *prev_class,
894 int oldprio)
895{
896 if (prev_class != p->sched_class) {
897 if (prev_class->switched_from)
898 prev_class->switched_from(rq, p);
899
900 p->sched_class->switched_to(rq, p);
901 } else if (oldprio != p->prio || dl_task(p))
902 p->sched_class->prio_changed(rq, p, oldprio);
903}
904
905void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
906{
907 const struct sched_class *class;
908
909 if (p->sched_class == rq->curr->sched_class) {
910 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
911 } else {
912 for_each_class(class) {
913 if (class == rq->curr->sched_class)
914 break;
915 if (class == p->sched_class) {
916 resched_curr(rq);
917 break;
918 }
919 }
920 }
921
922 /*
923 * A queue event has occurred, and we're going to schedule. In
924 * this case, we can save a useless back to back clock update.
925 */
926 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
927 rq_clock_skip_update(rq, true);
928}
929
930#ifdef CONFIG_SMP
931/*
932 * This is how migration works:
933 *
934 * 1) we invoke migration_cpu_stop() on the target CPU using
935 * stop_one_cpu().
936 * 2) stopper starts to run (implicitly forcing the migrated thread
937 * off the CPU)
938 * 3) it checks whether the migrated task is still in the wrong runqueue.
939 * 4) if it's in the wrong runqueue then the migration thread removes
940 * it and puts it into the right queue.
941 * 5) stopper completes and stop_one_cpu() returns and the migration
942 * is done.
943 */
944
945/*
946 * move_queued_task - move a queued task to new rq.
947 *
948 * Returns (locked) new rq. Old rq's lock is released.
949 */
950static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
951{
952 lockdep_assert_held(&rq->lock);
953
954 p->on_rq = TASK_ON_RQ_MIGRATING;
955 dequeue_task(rq, p, 0);
956 set_task_cpu(p, new_cpu);
957 raw_spin_unlock(&rq->lock);
958
959 rq = cpu_rq(new_cpu);
960
961 raw_spin_lock(&rq->lock);
962 BUG_ON(task_cpu(p) != new_cpu);
963 enqueue_task(rq, p, 0);
964 p->on_rq = TASK_ON_RQ_QUEUED;
965 check_preempt_curr(rq, p, 0);
966
967 return rq;
968}
969
970struct migration_arg {
971 struct task_struct *task;
972 int dest_cpu;
973};
974
975/*
976 * Move (not current) task off this cpu, onto dest cpu. We're doing
977 * this because either it can't run here any more (set_cpus_allowed()
978 * away from this CPU, or CPU going down), or because we're
979 * attempting to rebalance this task on exec (sched_exec).
980 *
981 * So we race with normal scheduler movements, but that's OK, as long
982 * as the task is no longer on this CPU.
983 */
984static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
985{
986 if (unlikely(!cpu_active(dest_cpu)))
987 return rq;
988
989 /* Affinity changed (again). */
990 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
991 return rq;
992
993 rq = move_queued_task(rq, p, dest_cpu);
994
995 return rq;
996}
997
998/*
999 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping the thread off the CPU and then
 * 'pushing' it onto another runqueue.
1002 */
1003static int migration_cpu_stop(void *data)
1004{
1005 struct migration_arg *arg = data;
1006 struct task_struct *p = arg->task;
1007 struct rq *rq = this_rq();
1008
1009 /*
1010 * The original target cpu might have gone down and we might
1011 * be on another cpu but it doesn't matter.
1012 */
1013 local_irq_disable();
1014 /*
1015 * We need to explicitly wake pending tasks before running
1016 * __migrate_task() such that we will not miss enforcing cpus_allowed
1017 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
1018 */
1019 sched_ttwu_pending();
1020
1021 raw_spin_lock(&p->pi_lock);
1022 raw_spin_lock(&rq->lock);
1023 /*
1024 * If task_rq(p) != rq, it cannot be migrated here, because we're
1025 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
1026 * we're holding p->pi_lock.
1027 */
1028 if (task_rq(p) == rq && task_on_rq_queued(p))
1029 rq = __migrate_task(rq, p, arg->dest_cpu);
1030 raw_spin_unlock(&rq->lock);
1031 raw_spin_unlock(&p->pi_lock);
1032
1033 local_irq_enable();
1034 return 0;
1035}
1036
1037/*
1038 * sched_class::set_cpus_allowed must do the below, but is not required to
1039 * actually call this function.
1040 */
1041void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
1042{
1043 cpumask_copy(&p->cpus_allowed, new_mask);
1044 p->nr_cpus_allowed = cpumask_weight(new_mask);
1045}
1046
1047void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1048{
1049 struct rq *rq = task_rq(p);
1050 bool queued, running;
1051
1052 lockdep_assert_held(&p->pi_lock);
1053
1054 queued = task_on_rq_queued(p);
1055 running = task_current(rq, p);
1056
1057 if (queued) {
1058 /*
1059 * Because __kthread_bind() calls this on blocked tasks without
1060 * holding rq->lock.
1061 */
1062 lockdep_assert_held(&rq->lock);
1063 dequeue_task(rq, p, DEQUEUE_SAVE);
1064 }
1065 if (running)
1066 put_prev_task(rq, p);
1067
1068 p->sched_class->set_cpus_allowed(p, new_mask);
1069
1070 if (running)
1071 p->sched_class->set_curr_task(rq);
1072 if (queued)
1073 enqueue_task(rq, p, ENQUEUE_RESTORE);
1074}
1075
1076/*
1077 * Change a given task's CPU affinity. Migrate the thread to a
1078 * proper CPU and schedule it away if the CPU it's executing on
1079 * is removed from the allowed bitmask.
1080 *
1081 * NOTE: the caller must have a valid reference to the task, the
1082 * task must not exit() & deallocate itself prematurely. The
1083 * call is not atomic; no spinlocks may be held.
1084 */
1085static int __set_cpus_allowed_ptr(struct task_struct *p,
1086 const struct cpumask *new_mask, bool check)
1087{
1088 unsigned long flags;
1089 struct rq *rq;
1090 unsigned int dest_cpu;
1091 int ret = 0;
1092
1093 rq = task_rq_lock(p, &flags);
1094
1095 /*
1096 * Must re-check here, to close a race against __kthread_bind(),
1097 * sched_setaffinity() is not guaranteed to observe the flag.
1098 */
1099 if (check && (p->flags & PF_NO_SETAFFINITY)) {
1100 ret = -EINVAL;
1101 goto out;
1102 }
1103
1104 if (cpumask_equal(&p->cpus_allowed, new_mask))
1105 goto out;
1106
1107 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
1108 ret = -EINVAL;
1109 goto out;
1110 }
1111
1112 do_set_cpus_allowed(p, new_mask);
1113
1114 /* Can the task run on the task's current CPU? If so, we're done */
1115 if (cpumask_test_cpu(task_cpu(p), new_mask))
1116 goto out;
1117
1118 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
1119 if (task_running(rq, p) || p->state == TASK_WAKING) {
1120 struct migration_arg arg = { p, dest_cpu };
1121 /* Need help from migration thread: drop lock and wait. */
1122 task_rq_unlock(rq, p, &flags);
1123 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
1124 tlb_migrate_finish(p->mm);
1125 return 0;
1126 } else if (task_on_rq_queued(p)) {
1127 /*
1128 * OK, since we're going to drop the lock immediately
1129 * afterwards anyway.
1130 */
1131 lockdep_unpin_lock(&rq->lock);
1132 rq = move_queued_task(rq, p, dest_cpu);
1133 lockdep_pin_lock(&rq->lock);
1134 }
1135out:
1136 task_rq_unlock(rq, p, &flags);
1137
1138 return ret;
1139}
1140
1141int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1142{
1143 return __set_cpus_allowed_ptr(p, new_mask, false);
1144}
1145EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
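
/*
 * Example (illustrative): pin a task to CPU 2 using the single-CPU mask
 * from cpumask_of():
 *
 *	ret = set_cpus_allowed_ptr(p, cpumask_of(2));
 *
 * A nonzero return means the new mask did not intersect cpu_active_mask.
 */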
1146
1147void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1148{
1149#ifdef CONFIG_SCHED_DEBUG
1150 /*
1151 * We should never call set_task_cpu() on a blocked task,
1152 * ttwu() will sort out the placement.
1153 */
1154 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
1155 !p->on_rq);
1156
1157 /*
1158 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
1159 * because schedstat_wait_{start,end} rebase migrating task's wait_start
1160 * time relying on p->on_rq.
1161 */
1162 WARN_ON_ONCE(p->state == TASK_RUNNING &&
1163 p->sched_class == &fair_sched_class &&
1164 (p->on_rq && !task_on_rq_migrating(p)));
1165
1166#ifdef CONFIG_LOCKDEP
1167 /*
1168 * The caller should hold either p->pi_lock or rq->lock, when changing
1169 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
1170 *
1171 * sched_move_task() holds both and thus holding either pins the cgroup,
1172 * see task_group().
1173 *
1174 * Furthermore, all task_rq users should acquire both locks, see
1175 * task_rq_lock().
1176 */
1177 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
1178 lockdep_is_held(&task_rq(p)->lock)));
1179#endif
1180#endif
1181
1182 trace_sched_migrate_task(p, new_cpu);
1183
1184 if (task_cpu(p) != new_cpu) {
1185 if (p->sched_class->migrate_task_rq)
1186 p->sched_class->migrate_task_rq(p);
1187 p->se.nr_migrations++;
1188 perf_event_task_migrate(p);
1189 }
1190
1191 __set_task_cpu(p, new_cpu);
1192}
1193
1194static void __migrate_swap_task(struct task_struct *p, int cpu)
1195{
1196 if (task_on_rq_queued(p)) {
1197 struct rq *src_rq, *dst_rq;
1198
1199 src_rq = task_rq(p);
1200 dst_rq = cpu_rq(cpu);
1201
1202 p->on_rq = TASK_ON_RQ_MIGRATING;
1203 deactivate_task(src_rq, p, 0);
1204 set_task_cpu(p, cpu);
1205 activate_task(dst_rq, p, 0);
1206 p->on_rq = TASK_ON_RQ_QUEUED;
1207 check_preempt_curr(dst_rq, p, 0);
1208 } else {
1209 /*
1210 * Task isn't running anymore; make it appear like we migrated
1211 * it before it went to sleep. This means on wakeup we make the
		 * previous CPU our target instead of where it really is.
1213 */
1214 p->wake_cpu = cpu;
1215 }
1216}
1217
1218struct migration_swap_arg {
1219 struct task_struct *src_task, *dst_task;
1220 int src_cpu, dst_cpu;
1221};
1222
1223static int migrate_swap_stop(void *data)
1224{
1225 struct migration_swap_arg *arg = data;
1226 struct rq *src_rq, *dst_rq;
1227 int ret = -EAGAIN;
1228
1229 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
1230 return -EAGAIN;
1231
1232 src_rq = cpu_rq(arg->src_cpu);
1233 dst_rq = cpu_rq(arg->dst_cpu);
1234
1235 double_raw_lock(&arg->src_task->pi_lock,
1236 &arg->dst_task->pi_lock);
1237 double_rq_lock(src_rq, dst_rq);
1238
1239 if (task_cpu(arg->dst_task) != arg->dst_cpu)
1240 goto unlock;
1241
1242 if (task_cpu(arg->src_task) != arg->src_cpu)
1243 goto unlock;
1244
1245 if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
1246 goto unlock;
1247
1248 if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
1249 goto unlock;
1250
1251 __migrate_swap_task(arg->src_task, arg->dst_cpu);
1252 __migrate_swap_task(arg->dst_task, arg->src_cpu);
1253
1254 ret = 0;
1255
1256unlock:
1257 double_rq_unlock(src_rq, dst_rq);
1258 raw_spin_unlock(&arg->dst_task->pi_lock);
1259 raw_spin_unlock(&arg->src_task->pi_lock);
1260
1261 return ret;
1262}
1263
1264/*
1265 * Cross migrate two tasks
1266 */
1267int migrate_swap(struct task_struct *cur, struct task_struct *p)
1268{
1269 struct migration_swap_arg arg;
1270 int ret = -EINVAL;
1271
1272 arg = (struct migration_swap_arg){
1273 .src_task = cur,
1274 .src_cpu = task_cpu(cur),
1275 .dst_task = p,
1276 .dst_cpu = task_cpu(p),
1277 };
1278
1279 if (arg.src_cpu == arg.dst_cpu)
1280 goto out;
1281
1282 /*
1283 * These three tests are all lockless; this is OK since all of them
1284 * will be re-checked with proper locks held further down the line.
1285 */
1286 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
1287 goto out;
1288
1289 if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
1290 goto out;
1291
1292 if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
1293 goto out;
1294
1295 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
1296 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
1297
1298out:
1299 return ret;
1300}
1301
1302/*
1303 * wait_task_inactive - wait for a thread to unschedule.
1304 *
1305 * If @match_state is nonzero, it's the @p->state value just checked and
1306 * not expected to change. If it changes, i.e. @p might have woken up,
1307 * then return zero. When we succeed in waiting for @p to be off its CPU,
1308 * we return a positive number (its total switch count). If a second call
1309 * a short while later returns the same number, the caller can be sure that
1310 * @p has remained unscheduled the whole time.
1311 *
1312 * The caller must ensure that the task *will* unschedule sometime soon,
1313 * else this function might spin for a *long* time. This function can't
1314 * be called with interrupts off, or it may introduce deadlock with
1315 * smp_call_function() if an IPI is sent by the same process we are
1316 * waiting to become inactive.
1317 */
1318unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1319{
1320 unsigned long flags;
1321 int running, queued;
1322 unsigned long ncsw;
1323 struct rq *rq;
1324
1325 for (;;) {
1326 /*
1327 * We do the initial early heuristics without holding
1328 * any task-queue locks at all. We'll only try to get
1329 * the runqueue lock when things look like they will
1330 * work out!
1331 */
1332 rq = task_rq(p);
1333
1334 /*
1335 * If the task is actively running on another CPU
1336 * still, just relax and busy-wait without holding
1337 * any locks.
1338 *
1339 * NOTE! Since we don't hold any locks, it's not
1340 * even sure that "rq" stays as the right runqueue!
1341 * But we don't care, since "task_running()" will
1342 * return false if the runqueue has changed and p
1343 * is actually now running somewhere else!
1344 */
1345 while (task_running(rq, p)) {
1346 if (match_state && unlikely(p->state != match_state))
1347 return 0;
1348 cpu_relax();
1349 }
1350
1351 /*
1352 * Ok, time to look more closely! We need the rq
1353 * lock now, to be *sure*. If we're wrong, we'll
1354 * just go back and repeat.
1355 */
1356 rq = task_rq_lock(p, &flags);
1357 trace_sched_wait_task(p);
1358 running = task_running(rq, p);
1359 queued = task_on_rq_queued(p);
1360 ncsw = 0;
1361 if (!match_state || p->state == match_state)
1362 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1363 task_rq_unlock(rq, p, &flags);
1364
1365 /*
1366 * If it changed from the expected state, bail out now.
1367 */
1368 if (unlikely(!ncsw))
1369 break;
1370
1371 /*
1372 * Was it really running after all now that we
1373 * checked with the proper locks actually held?
1374 *
1375 * Oops. Go back and try again..
1376 */
1377 if (unlikely(running)) {
1378 cpu_relax();
1379 continue;
1380 }
1381
1382 /*
1383 * It's not enough that it's not actively running,
1384 * it must be off the runqueue _entirely_, and not
1385 * preempted!
1386 *
1387 * So if it was still runnable (but just not actively
1388 * running right now), it's preempted, and we should
1389 * yield - it could be a while.
1390 */
1391 if (unlikely(queued)) {
1392 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
1393
1394 set_current_state(TASK_UNINTERRUPTIBLE);
1395 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
1396 continue;
1397 }
1398
1399 /*
1400 * Ahh, all good. It wasn't running, and it wasn't
1401 * runnable, which means that it will never become
1402 * running in the future either. We're all done!
1403 */
1404 break;
1405 }
1406
1407 return ncsw;
1408}
1409
1410/***
1411 * kick_process - kick a running thread to enter/exit the kernel
1412 * @p: the to-be-kicked thread
1413 *
1414 * Cause a process which is running on another CPU to enter
1415 * kernel-mode, without any delay. (to get signals handled.)
1416 *
1417 * NOTE: this function doesn't have to take the runqueue lock,
1418 * because all it wants to ensure is that the remote task enters
1419 * the kernel. If the IPI races and the task has been migrated
1420 * to another CPU then no harm is done and the purpose has been
1421 * achieved as well.
1422 */
1423void kick_process(struct task_struct *p)
1424{
1425 int cpu;
1426
1427 preempt_disable();
1428 cpu = task_cpu(p);
1429 if ((cpu != smp_processor_id()) && task_curr(p))
1430 smp_send_reschedule(cpu);
1431 preempt_enable();
1432}
1433EXPORT_SYMBOL_GPL(kick_process);
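
/*
 * For example, signal delivery uses this: after setting TIF_SIGPENDING on
 * a task running in userspace on another CPU, kick_process() makes it
 * enter the kernel and notice the signal without waiting for the next
 * tick.
 */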
1434
1435/*
1436 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
1437 */
1438static int select_fallback_rq(int cpu, struct task_struct *p)
1439{
1440 int nid = cpu_to_node(cpu);
1441 const struct cpumask *nodemask = NULL;
1442 enum { cpuset, possible, fail } state = cpuset;
1443 int dest_cpu;
1444
1445 /*
	 * If the node that the CPU is on has been offlined, cpu_to_node()
	 * will return -1. There is no CPU on the node, and we should
	 * select a CPU on another node.
1449 */
1450 if (nid != -1) {
1451 nodemask = cpumask_of_node(nid);
1452
1453 /* Look for allowed, online CPU in same node. */
1454 for_each_cpu(dest_cpu, nodemask) {
1455 if (!cpu_online(dest_cpu))
1456 continue;
1457 if (!cpu_active(dest_cpu))
1458 continue;
1459 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
1460 return dest_cpu;
1461 }
1462 }
1463
1464 for (;;) {
1465 /* Any allowed, online CPU? */
1466 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
1467 if (!cpu_online(dest_cpu))
1468 continue;
1469 if (!cpu_active(dest_cpu))
1470 continue;
1471 goto out;
1472 }
1473
1474 /* No more Mr. Nice Guy. */
1475 switch (state) {
1476 case cpuset:
1477 if (IS_ENABLED(CONFIG_CPUSETS)) {
1478 cpuset_cpus_allowed_fallback(p);
1479 state = possible;
1480 break;
1481 }
1482 /* fall-through */
1483 case possible:
1484 do_set_cpus_allowed(p, cpu_possible_mask);
1485 state = fail;
1486 break;
1487
1488 case fail:
1489 BUG();
1490 break;
1491 }
1492 }
1493
1494out:
1495 if (state != cpuset) {
1496 /*
1497 * Don't tell them about moving exiting tasks or
1498 * kernel threads (both mm NULL), since they never
		 * leave the kernel.
1500 */
1501 if (p->mm && printk_ratelimit()) {
1502 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
1503 task_pid_nr(p), p->comm, cpu);
1504 }
1505 }
1506
1507 return dest_cpu;
1508}
1509
1510/*
1511 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
1512 */
1513static inline
1514int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
1515{
1516 lockdep_assert_held(&p->pi_lock);
1517
1518 if (p->nr_cpus_allowed > 1)
1519 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
1520
1521 /*
1522 * In order not to call set_task_cpu() on a blocking task we need
1523 * to rely on ttwu() to place the task on a valid ->cpus_allowed
1524 * cpu.
1525 *
1526 * Since this is common to all placement strategies, this lives here.
1527 *
1528 * [ this allows ->select_task() to simply return task_cpu(p) and
1529 * not worry about this generic constraint ]
1530 */
1531 if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
1532 !cpu_online(cpu)))
1533 cpu = select_fallback_rq(task_cpu(p), p);
1534
1535 return cpu;
1536}
1537
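/*
 * A cheap exponentially weighted moving average: each sample contributes
 * with weight 1/8, i.e.
 *
 *	avg' = 7/8 * avg + 1/8 * sample
 *
 * implemented with a shift rather than a divide.
 */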
1538static void update_avg(u64 *avg, u64 sample)
1539{
1540 s64 diff = sample - *avg;
1541 *avg += diff >> 3;
1542}
1543
1544#else
1545
1546static inline int __set_cpus_allowed_ptr(struct task_struct *p,
1547 const struct cpumask *new_mask, bool check)
1548{
1549 return set_cpus_allowed_ptr(p, new_mask);
1550}
1551
1552#endif /* CONFIG_SMP */
1553
1554static void
1555ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
1556{
1557#ifdef CONFIG_SCHEDSTATS
1558 struct rq *rq = this_rq();
1559
1560#ifdef CONFIG_SMP
1561 int this_cpu = smp_processor_id();
1562
1563 if (cpu == this_cpu) {
1564 schedstat_inc(rq, ttwu_local);
1565 schedstat_inc(p, se.statistics.nr_wakeups_local);
1566 } else {
1567 struct sched_domain *sd;
1568
1569 schedstat_inc(p, se.statistics.nr_wakeups_remote);
1570 rcu_read_lock();
1571 for_each_domain(this_cpu, sd) {
1572 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
1573 schedstat_inc(sd, ttwu_wake_remote);
1574 break;
1575 }
1576 }
1577 rcu_read_unlock();
1578 }
1579
1580 if (wake_flags & WF_MIGRATED)
1581 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
1582
1583#endif /* CONFIG_SMP */
1584
1585 schedstat_inc(rq, ttwu_count);
1586 schedstat_inc(p, se.statistics.nr_wakeups);
1587
1588 if (wake_flags & WF_SYNC)
1589 schedstat_inc(p, se.statistics.nr_wakeups_sync);
1590
1591#endif /* CONFIG_SCHEDSTATS */
1592}
1593
1594static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
1595{
1596 activate_task(rq, p, en_flags);
1597 p->on_rq = TASK_ON_RQ_QUEUED;
1598
1599 /* if a worker is waking up, notify workqueue */
1600 if (p->flags & PF_WQ_WORKER)
1601 wq_worker_waking_up(p, cpu_of(rq));
1602}
1603
1604/*
1605 * Mark the task runnable and perform wakeup-preemption.
1606 */
1607static void
1608ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1609{
1610 check_preempt_curr(rq, p, wake_flags);
1611 p->state = TASK_RUNNING;
1612 trace_sched_wakeup(p);
1613
1614#ifdef CONFIG_SMP
1615 if (p->sched_class->task_woken) {
1616 /*
		 * Our task @p is fully woken up and running; so it's safe to
		 * drop the rq->lock; hereafter rq is only used for statistics.
1619 */
1620 lockdep_unpin_lock(&rq->lock);
1621 p->sched_class->task_woken(rq, p);
1622 lockdep_pin_lock(&rq->lock);
1623 }
1624
1625 if (rq->idle_stamp) {
1626 u64 delta = rq_clock(rq) - rq->idle_stamp;
1627 u64 max = 2*rq->max_idle_balance_cost;
1628
1629 update_avg(&rq->avg_idle, delta);
1630
1631 if (rq->avg_idle > max)
1632 rq->avg_idle = max;
1633
1634 rq->idle_stamp = 0;
1635 }
1636#endif
1637}
1638
1639static void
1640ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
1641{
1642 lockdep_assert_held(&rq->lock);
1643
1644#ifdef CONFIG_SMP
1645 if (p->sched_contributes_to_load)
1646 rq->nr_uninterruptible--;
1647#endif
1648
1649 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
1650 ttwu_do_wakeup(rq, p, wake_flags);
1651}
1652
1653/*
 * Called in case the task @p isn't fully descheduled from its runqueue;
 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING, because
1657 * the task is still ->on_rq.
1658 */
1659static int ttwu_remote(struct task_struct *p, int wake_flags)
1660{
1661 struct rq *rq;
1662 int ret = 0;
1663
1664 rq = __task_rq_lock(p);
1665 if (task_on_rq_queued(p)) {
1666 /* check_preempt_curr() may use rq clock */
1667 update_rq_clock(rq);
1668 ttwu_do_wakeup(rq, p, wake_flags);
1669 ret = 1;
1670 }
1671 __task_rq_unlock(rq);
1672
1673 return ret;
1674}
1675
1676#ifdef CONFIG_SMP
1677void sched_ttwu_pending(void)
1678{
1679 struct rq *rq = this_rq();
1680 struct llist_node *llist = llist_del_all(&rq->wake_list);
1681 struct task_struct *p;
1682 unsigned long flags;
1683
1684 if (!llist)
1685 return;
1686
1687 raw_spin_lock_irqsave(&rq->lock, flags);
1688 lockdep_pin_lock(&rq->lock);
1689
1690 while (llist) {
1691 p = llist_entry(llist, struct task_struct, wake_entry);
1692 llist = llist_next(llist);
1693 ttwu_do_activate(rq, p, 0);
1694 }
1695
1696 lockdep_unpin_lock(&rq->lock);
1697 raw_spin_unlock_irqrestore(&rq->lock, flags);
1698}
1699
1700void scheduler_ipi(void)
1701{
1702 /*
1703 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1704 * TIF_NEED_RESCHED remotely (for the first time) will also send
1705 * this IPI.
1706 */
1707 preempt_fold_need_resched();
1708
1709 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
1710 return;
1711
1712 /*
1713 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1714 * traditionally all their work was done from the interrupt return
1715 * path. Now that we actually do some work, we need to make sure
1716 * we do call them.
1717 *
1718 * Some archs already do call them, luckily irq_enter/exit nest
1719 * properly.
1720 *
1721 * Arguably we should visit all archs and update all handlers,
1722 * however a fair share of IPIs are still resched only so this would
1723 * somewhat pessimize the simple resched case.
1724 */
1725 irq_enter();
1726 sched_ttwu_pending();
1727
1728 /*
1729 * Check if someone kicked us for doing the nohz idle load balance.
1730 */
1731 if (unlikely(got_nohz_idle_kick())) {
1732 this_rq()->idle_balance = 1;
1733 raise_softirq_irqoff(SCHED_SOFTIRQ);
1734 }
1735 irq_exit();
1736}
1737
1738static void ttwu_queue_remote(struct task_struct *p, int cpu)
1739{
1740 struct rq *rq = cpu_rq(cpu);
1741
1742 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
1743 if (!set_nr_if_polling(rq->idle))
1744 smp_send_reschedule(cpu);
1745 else
1746 trace_sched_wake_idle_without_ipi(cpu);
1747 }
1748}
1749
1750void wake_up_if_idle(int cpu)
1751{
1752 struct rq *rq = cpu_rq(cpu);
1753 unsigned long flags;
1754
1755 rcu_read_lock();
1756
1757 if (!is_idle_task(rcu_dereference(rq->curr)))
1758 goto out;
1759
1760 if (set_nr_if_polling(rq->idle)) {
1761 trace_sched_wake_idle_without_ipi(cpu);
1762 } else {
1763 raw_spin_lock_irqsave(&rq->lock, flags);
1764 if (is_idle_task(rq->curr))
1765 smp_send_reschedule(cpu);
		/* Else the CPU is not idle; do nothing here */
1767 raw_spin_unlock_irqrestore(&rq->lock, flags);
1768 }
1769
1770out:
1771 rcu_read_unlock();
1772}
1773
1774bool cpus_share_cache(int this_cpu, int that_cpu)
1775{
1776 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1777}
1778#endif /* CONFIG_SMP */
1779
1780static void ttwu_queue(struct task_struct *p, int cpu)
1781{
1782 struct rq *rq = cpu_rq(cpu);
1783
1784#if defined(CONFIG_SMP)
1785 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
1786 sched_clock_cpu(cpu); /* sync clocks x-cpu */
1787 ttwu_queue_remote(p, cpu);
1788 return;
1789 }
1790#endif
1791
1792 raw_spin_lock(&rq->lock);
1793 lockdep_pin_lock(&rq->lock);
1794 ttwu_do_activate(rq, p, 0);
1795 lockdep_unpin_lock(&rq->lock);
1796 raw_spin_unlock(&rq->lock);
1797}
1798
1799/*
1800 * Notes on Program-Order guarantees on SMP systems.
1801 *
1802 * MIGRATION
1803 *
1804 * The basic program-order guarantee on SMP systems is that when a task [t]
1805 * migrates, all its activity on its old cpu [c0] happens-before any subsequent
1806 * execution on its new cpu [c1].
1807 *
1808 * For migration (of runnable tasks) this is provided by the following means:
1809 *
1810 * A) UNLOCK of the rq(c0)->lock scheduling out task t
1811 * B) migration for t is required to synchronize *both* rq(c0)->lock and
1812 * rq(c1)->lock (if not at the same time, then in that order).
1813 * C) LOCK of the rq(c1)->lock scheduling in task
1814 *
1815 * Transitivity guarantees that B happens after A and C after B.
1816 * Note: we only require RCpc transitivity.
1817 * Note: the cpu doing B need not be c0 or c1
1818 *
1819 * Example:
1820 *
1821 * CPU0 CPU1 CPU2
1822 *
1823 * LOCK rq(0)->lock
1824 * sched-out X
1825 * sched-in Y
1826 * UNLOCK rq(0)->lock
1827 *
1828 * LOCK rq(0)->lock // orders against CPU0
1829 * dequeue X
1830 * UNLOCK rq(0)->lock
1831 *
1832 * LOCK rq(1)->lock
1833 * enqueue X
1834 * UNLOCK rq(1)->lock
1835 *
1836 * LOCK rq(1)->lock // orders against CPU2
1837 * sched-out Z
1838 * sched-in X
1839 * UNLOCK rq(1)->lock
1840 *
1841 *
1842 * BLOCKING -- aka. SLEEP + WAKEUP
1843 *
1844 * For blocking we (obviously) need to provide the same guarantee as for
1845 * migration. However the means are completely different as there is no lock
1846 * chain to provide order. Instead we do:
1847 *
1848 * 1) smp_store_release(X->on_cpu, 0)
1849 * 2) smp_cond_acquire(!X->on_cpu)
1850 *
1851 * Example:
1852 *
1853 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
1854 *
1855 * LOCK rq(0)->lock LOCK X->pi_lock
1856 * dequeue X
1857 * sched-out X
1858 * smp_store_release(X->on_cpu, 0);
1859 *
1860 * smp_cond_acquire(!X->on_cpu);
1861 * X->state = WAKING
1862 * set_task_cpu(X,2)
1863 *
1864 * LOCK rq(2)->lock
1865 * enqueue X
1866 * X->state = RUNNING
1867 * UNLOCK rq(2)->lock
1868 *
1869 * LOCK rq(2)->lock // orders against CPU1
1870 * sched-out Z
1871 * sched-in X
1872 * UNLOCK rq(2)->lock
1873 *
1874 * UNLOCK X->pi_lock
1875 * UNLOCK rq(0)->lock
1876 *
1877 *
 * However, for wakeups there is a second guarantee we must provide, namely we
 * must observe the state that led to our wakeup. That is, not only must our
1880 * task observe its own prior state, it must also observe the stores prior to
1881 * its wakeup.
1882 *
1883 * This means that any means of doing remote wakeups must order the CPU doing
1884 * the wakeup against the CPU the task is going to end up running on. This,
1885 * however, is already required for the regular Program-Order guarantee above,
 * since the waking CPU is the one issuing the ACQUIRE (smp_cond_acquire).
1887 *
1888 */
1889
1890/**
1891 * try_to_wake_up - wake up a thread
1892 * @p: the thread to be awakened
1893 * @state: the mask of task states that can be woken
1894 * @wake_flags: wake modifier flags (WF_*)
1895 *
1896 * Put it on the run-queue if it's not already there. The "current"
1897 * thread is always on the run-queue (except when the actual
1898 * re-schedule is in progress), and as such you're allowed to do
1899 * the simpler "current->state = TASK_RUNNING" to mark yourself
1900 * runnable without the overhead of this.
1901 *
 * Return: %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
1904 */
1905static int
1906try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1907{
1908 unsigned long flags;
1909 int cpu, success = 0;
1910
1911 /*
1912 * If we are going to wake up a thread waiting for CONDITION we
	 * need to ensure that CONDITION=1 done by the caller cannot be
	 * reordered with the p->state check below. This pairs with the mb()
	 * in set_current_state() that the waiting thread does.
1916 */
1917 smp_mb__before_spinlock();
1918 raw_spin_lock_irqsave(&p->pi_lock, flags);
1919 if (!(p->state & state))
1920 goto out;
1921
1922 trace_sched_waking(p);
1923
1924 success = 1; /* we're going to change ->state */
1925 cpu = task_cpu(p);
1926
1927 if (p->on_rq && ttwu_remote(p, wake_flags))
1928 goto stat;
1929
1930#ifdef CONFIG_SMP
1931 /*
1932 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
1933 * possible to, falsely, observe p->on_cpu == 0.
1934 *
1935 * One must be running (->on_cpu == 1) in order to remove oneself
1936 * from the runqueue.
1937 *
1938 * [S] ->on_cpu = 1; [L] ->on_rq
1939 * UNLOCK rq->lock
1940 * RMB
1941 * LOCK rq->lock
1942 * [S] ->on_rq = 0; [L] ->on_cpu
1943 *
1944 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
1945 * from the consecutive calls to schedule(); the first switching to our
1946 * task, the second putting it to sleep.
1947 */
1948 smp_rmb();
1949
1950 /*
1951 * If the owning (remote) cpu is still in the middle of schedule() with
	 * this task as prev, wait until it's done referencing the task.
1953 *
1954 * Pairs with the smp_store_release() in finish_lock_switch().
1955 *
1956 * This ensures that tasks getting woken will be fully ordered against
1957 * their previous state and preserve Program Order.
1958 */
1959 smp_cond_acquire(!p->on_cpu);
1960
1961 p->sched_contributes_to_load = !!task_contributes_to_load(p);
1962 p->state = TASK_WAKING;
1963
1964 if (p->sched_class->task_waking)
1965 p->sched_class->task_waking(p);
1966
1967 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
1968 if (task_cpu(p) != cpu) {
1969 wake_flags |= WF_MIGRATED;
1970 set_task_cpu(p, cpu);
1971 }
1972#endif /* CONFIG_SMP */
1973
1974 ttwu_queue(p, cpu);
1975stat:
1976 if (schedstat_enabled())
1977 ttwu_stat(p, cpu, wake_flags);
1978out:
1979 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1980
1981 return success;
1982}
1983
1984/**
1985 * try_to_wake_up_local - try to wake up a local task with rq lock held
1986 * @p: the thread to be awakened
1987 *
1988 * Put @p on the run-queue if it's not already there. The caller must
1989 * ensure that this_rq() is locked, @p is bound to this_rq() and not
1990 * the current task.
1991 */
1992static void try_to_wake_up_local(struct task_struct *p)
1993{
1994 struct rq *rq = task_rq(p);
1995
1996 if (WARN_ON_ONCE(rq != this_rq()) ||
1997 WARN_ON_ONCE(p == current))
1998 return;
1999
2000 lockdep_assert_held(&rq->lock);
2001
2002 if (!raw_spin_trylock(&p->pi_lock)) {
2003 /*
2004 * This is OK, because current is on_cpu, which avoids it being
2005 * picked for load-balance and preemption/IRQs are still
2006 * disabled avoiding further scheduler activity on it and we've
2007 * not yet picked a replacement task.
2008 */
2009 lockdep_unpin_lock(&rq->lock);
2010 raw_spin_unlock(&rq->lock);
2011 raw_spin_lock(&p->pi_lock);
2012 raw_spin_lock(&rq->lock);
2013 lockdep_pin_lock(&rq->lock);
2014 }
2015
2016 if (!(p->state & TASK_NORMAL))
2017 goto out;
2018
2019 trace_sched_waking(p);
2020
2021 if (!task_on_rq_queued(p))
2022 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2023
2024 ttwu_do_wakeup(rq, p, 0);
2025 if (schedstat_enabled())
2026 ttwu_stat(p, smp_processor_id(), 0);
2027out:
2028 raw_spin_unlock(&p->pi_lock);
2029}
2030
2031/**
2032 * wake_up_process - Wake up a specific process
2033 * @p: The process to be woken up.
2034 *
2035 * Attempt to wake up the nominated process and move it to the set of runnable
2036 * processes.
2037 *
2038 * Return: 1 if the process was woken up, 0 if it was already running.
2039 *
2040 * It may be assumed that this function implies a write memory barrier before
2041 * changing the task state if and only if any tasks are woken up.
2042 */
2043int wake_up_process(struct task_struct *p)
2044{
2045 return try_to_wake_up(p, TASK_NORMAL, 0);
2046}
2047EXPORT_SYMBOL(wake_up_process);
2048
2049int wake_up_state(struct task_struct *p, unsigned int state)
2050{
2051 return try_to_wake_up(p, state, 0);
2052}
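
/*
 * Illustrative wait/wake pairing (an editor's sketch, not new API): the
 * waiter's set_current_state() provides the barrier that the waker's
 * CONDITION store pairs with, so the wakeup cannot be missed:
 *
 *	waiter:
 *		for (;;) {
 *			set_current_state(TASK_UNINTERRUPTIBLE);
 *			if (CONDITION)
 *				break;
 *			schedule();
 *		}
 *		__set_current_state(TASK_RUNNING);
 *
 *	waker:
 *		CONDITION = 1;
 *		wake_up_process(p);
 */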
2053
2054/*
2055 * This function clears the sched_dl_entity static params.
2056 */
2057void __dl_clear_params(struct task_struct *p)
2058{
2059 struct sched_dl_entity *dl_se = &p->dl;
2060
2061 dl_se->dl_runtime = 0;
2062 dl_se->dl_deadline = 0;
2063 dl_se->dl_period = 0;
2064 dl_se->flags = 0;
2065 dl_se->dl_bw = 0;
2066
2067 dl_se->dl_throttled = 0;
2068 dl_se->dl_yielded = 0;
2069}
2070
2071/*
 * Perform scheduler-related setup for a newly forked process p.
2073 * p is forked by current.
2074 *
2075 * __sched_fork() is basic setup used by init_idle() too:
2076 */
2077static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
2078{
2079 p->on_rq = 0;
2080
2081 p->se.on_rq = 0;
2082 p->se.exec_start = 0;
2083 p->se.sum_exec_runtime = 0;
2084 p->se.prev_sum_exec_runtime = 0;
2085 p->se.nr_migrations = 0;
2086 p->se.vruntime = 0;
2087 INIT_LIST_HEAD(&p->se.group_node);
2088
2089#ifdef CONFIG_FAIR_GROUP_SCHED
2090 p->se.cfs_rq = NULL;
2091#endif
2092
2093#ifdef CONFIG_SCHEDSTATS
2094 /* Even if schedstat is disabled, there should not be garbage */
2095 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
2096#endif
2097
2098 RB_CLEAR_NODE(&p->dl.rb_node);
2099 init_dl_task_timer(&p->dl);
2100 __dl_clear_params(p);
2101
2102 INIT_LIST_HEAD(&p->rt.run_list);
2103 p->rt.timeout = 0;
2104 p->rt.time_slice = sched_rr_timeslice;
2105 p->rt.on_rq = 0;
2106 p->rt.on_list = 0;
2107
2108#ifdef CONFIG_PREEMPT_NOTIFIERS
2109 INIT_HLIST_HEAD(&p->preempt_notifiers);
2110#endif
2111
2112#ifdef CONFIG_NUMA_BALANCING
2113 if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
2114 p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2115 p->mm->numa_scan_seq = 0;
2116 }
2117
2118 if (clone_flags & CLONE_VM)
2119 p->numa_preferred_nid = current->numa_preferred_nid;
2120 else
2121 p->numa_preferred_nid = -1;
2122
2123 p->node_stamp = 0ULL;
2124 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
2125 p->numa_scan_period = sysctl_numa_balancing_scan_delay;
2126 p->numa_work.next = &p->numa_work;
2127 p->numa_faults = NULL;
2128 p->last_task_numa_placement = 0;
2129 p->last_sum_exec_runtime = 0;
2130
2131 p->numa_group = NULL;
2132#endif /* CONFIG_NUMA_BALANCING */
2133}
2134
2135DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
2136
2137#ifdef CONFIG_NUMA_BALANCING
2138
2139void set_numabalancing_state(bool enabled)
2140{
2141 if (enabled)
2142 static_branch_enable(&sched_numa_balancing);
2143 else
2144 static_branch_disable(&sched_numa_balancing);
2145}
2146
2147#ifdef CONFIG_PROC_SYSCTL
2148int sysctl_numa_balancing(struct ctl_table *table, int write,
2149 void __user *buffer, size_t *lenp, loff_t *ppos)
2150{
2151 struct ctl_table t;
2152 int err;
2153 int state = static_branch_likely(&sched_numa_balancing);
2154
2155 if (write && !capable(CAP_SYS_ADMIN))
2156 return -EPERM;
2157
2158 t = *table;
2159 t.data = &state;
2160 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2161 if (err < 0)
2162 return err;
2163 if (write)
2164 set_numabalancing_state(state);
2165 return err;
2166}
2167#endif
2168#endif
2169
2170DEFINE_STATIC_KEY_FALSE(sched_schedstats);
2171
2172#ifdef CONFIG_SCHEDSTATS
2173static void set_schedstats(bool enabled)
2174{
2175 if (enabled)
2176 static_branch_enable(&sched_schedstats);
2177 else
2178 static_branch_disable(&sched_schedstats);
2179}
2180
2181void force_schedstat_enabled(void)
2182{
2183 if (!schedstat_enabled()) {
2184 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
2185 static_branch_enable(&sched_schedstats);
2186 }
2187}
2188
2189static int __init setup_schedstats(char *str)
2190{
2191 int ret = 0;
2192 if (!str)
2193 goto out;
2194
2195 if (!strcmp(str, "enable")) {
2196 set_schedstats(true);
2197 ret = 1;
2198 } else if (!strcmp(str, "disable")) {
2199 set_schedstats(false);
2200 ret = 1;
2201 }
2202out:
2203 if (!ret)
2204 pr_warn("Unable to parse schedstats=\n");
2205
2206 return ret;
2207}
2208__setup("schedstats=", setup_schedstats);
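
/* For example, booting with "schedstats=enable" turns the static key on early. */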
2209
2210#ifdef CONFIG_PROC_SYSCTL
2211int sysctl_schedstats(struct ctl_table *table, int write,
2212 void __user *buffer, size_t *lenp, loff_t *ppos)
2213{
2214 struct ctl_table t;
2215 int err;
2216 int state = static_branch_likely(&sched_schedstats);
2217
2218 if (write && !capable(CAP_SYS_ADMIN))
2219 return -EPERM;
2220
2221 t = *table;
2222 t.data = &state;
2223 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2224 if (err < 0)
2225 return err;
2226 if (write)
2227 set_schedstats(state);
2228 return err;
2229}
2230#endif
2231#endif
2232
2233/*
2234 * fork()/clone()-time setup:
2235 */
2236int sched_fork(unsigned long clone_flags, struct task_struct *p)
2237{
2238 unsigned long flags;
2239 int cpu = get_cpu();
2240
2241 __sched_fork(clone_flags, p);
2242 /*
2243 * We mark the process as running here. This guarantees that
2244 * nobody will actually run it, and a signal or other external
2245 * event cannot wake it up and insert it on the runqueue either.
2246 */
2247 p->state = TASK_RUNNING;
2248
2249 /*
2250 * Make sure we do not leak PI boosting priority to the child.
2251 */
2252 p->prio = current->normal_prio;
2253
2254 /*
2255 * Revert to default priority/policy on fork if requested.
2256 */
2257 if (unlikely(p->sched_reset_on_fork)) {
2258 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
2259 p->policy = SCHED_NORMAL;
2260 p->static_prio = NICE_TO_PRIO(0);
2261 p->rt_priority = 0;
2262 } else if (PRIO_TO_NICE(p->static_prio) < 0)
2263 p->static_prio = NICE_TO_PRIO(0);
2264
2265 p->prio = p->normal_prio = __normal_prio(p);
2266 set_load_weight(p);
2267
2268 /*
2269 * We don't need the reset flag anymore after the fork. It has
2270 * fulfilled its duty:
2271 */
2272 p->sched_reset_on_fork = 0;
2273 }
2274
2275 if (dl_prio(p->prio)) {
2276 put_cpu();
2277 return -EAGAIN;
2278 } else if (rt_prio(p->prio)) {
2279 p->sched_class = &rt_sched_class;
2280 } else {
2281 p->sched_class = &fair_sched_class;
2282 }
2283
2284 if (p->sched_class->task_fork)
2285 p->sched_class->task_fork(p);
2286
2287 /*
2288 * The child is not yet in the pid-hash so no cgroup attach races,
	 * and the cgroup is pinned to this child because cgroup_fork()
	 * is run before sched_fork().
2291 *
2292 * Silence PROVE_RCU.
2293 */
2294 raw_spin_lock_irqsave(&p->pi_lock, flags);
2295 set_task_cpu(p, cpu);
2296 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2297
2298#ifdef CONFIG_SCHED_INFO
2299 if (likely(sched_info_on()))
2300 memset(&p->sched_info, 0, sizeof(p->sched_info));
2301#endif
2302#if defined(CONFIG_SMP)
2303 p->on_cpu = 0;
2304#endif
2305 init_task_preempt_count(p);
2306#ifdef CONFIG_SMP
2307 plist_node_init(&p->pushable_tasks, MAX_PRIO);
2308 RB_CLEAR_NODE(&p->pushable_dl_tasks);
2309#endif
2310
2311 put_cpu();
2312 return 0;
2313}
2314
2315unsigned long to_ratio(u64 period, u64 runtime)
2316{
2317 if (runtime == RUNTIME_INF)
2318 return 1ULL << 20;
2319
2320 /*
2321 * Doing this here saves a lot of checks in all
2322 * the calling paths, and returning zero seems
2323 * safe for them anyway.
2324 */
2325 if (period == 0)
2326 return 0;
2327
2328 return div64_u64(runtime << 20, period);
2329}
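
/*
 * Worked example: runtime = 10ms and period = 100ms (both in ns) give
 * (10000000 << 20) / 100000000 = 104857, i.e. ~10% utilization in the
 * 20-bit fixed-point format where 1 << 20 represents 100%.
 */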
2330
2331#ifdef CONFIG_SMP
2332inline struct dl_bw *dl_bw_of(int i)
2333{
2334 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2335 "sched RCU must be held");
2336 return &cpu_rq(i)->rd->dl_bw;
2337}
2338
2339static inline int dl_bw_cpus(int i)
2340{
2341 struct root_domain *rd = cpu_rq(i)->rd;
2342 int cpus = 0;
2343
2344 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2345 "sched RCU must be held");
2346 for_each_cpu_and(i, rd->span, cpu_active_mask)
2347 cpus++;
2348
2349 return cpus;
2350}
2351#else
2352inline struct dl_bw *dl_bw_of(int i)
2353{
2354 return &cpu_rq(i)->dl.dl_bw;
2355}
2356
2357static inline int dl_bw_cpus(int i)
2358{
2359 return 1;
2360}
2361#endif
2362
2363/*
2364 * We must be sure that accepting a new task (or allowing changing the
2365 * parameters of an existing one) is consistent with the bandwidth
2366 * constraints. If yes, this function also accordingly updates the currently
2367 * allocated bandwidth to reflect the new situation.
2368 *
2369 * This function is called while holding p's rq->lock.
2370 *
2371 * XXX we should delay bw change until the task's 0-lag point, see
2372 * __setparam_dl().
2373 */
2374static int dl_overflow(struct task_struct *p, int policy,
2375 const struct sched_attr *attr)
2376{
2377
2378 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2379 u64 period = attr->sched_period ?: attr->sched_deadline;
2380 u64 runtime = attr->sched_runtime;
2381 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2382 int cpus, err = -1;
2383
2384 if (new_bw == p->dl.dl_bw)
2385 return 0;
2386
2387 /*
	 * Whether a task enters, leaves, or stays -deadline but changes
	 * its parameters, we may need to update the total allocated
	 * bandwidth of the container accordingly.
2391 */
2392 raw_spin_lock(&dl_b->lock);
2393 cpus = dl_bw_cpus(task_cpu(p));
2394 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2395 !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2396 __dl_add(dl_b, new_bw);
2397 err = 0;
2398 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2399 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2400 __dl_clear(dl_b, p->dl.dl_bw);
2401 __dl_add(dl_b, new_bw);
2402 err = 0;
2403 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2404 __dl_clear(dl_b, p->dl.dl_bw);
2405 err = 0;
2406 }
2407 raw_spin_unlock(&dl_b->lock);
2408
2409 return err;
2410}
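
/*
 * Illustrative numbers (a sketch, assuming __dl_overflow() compares
 * dl_b->bw * cpus against the new total bandwidth): with the default
 * 95% cap, dl_b->bw == to_ratio(1s, 950ms) ~= 996147, so a 4-CPU root
 * domain keeps admitting -deadline tasks while their summed to_ratio()
 * bandwidths stay below ~4 * 996147. A task with runtime 10ms and
 * period 100ms consumes 104857 of that budget.
 */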
2411
2412extern void init_dl_bw(struct dl_bw *dl_b);
2413
2414/*
2415 * wake_up_new_task - wake up a newly created task for the first time.
2416 *
2417 * This function will do some initial scheduler statistics housekeeping
2418 * that must be done for every newly created context, then puts the task
2419 * on the runqueue and wakes it.
2420 */
2421void wake_up_new_task(struct task_struct *p)
2422{
2423 unsigned long flags;
2424 struct rq *rq;
2425
2426 raw_spin_lock_irqsave(&p->pi_lock, flags);
2427 /* Initialize new task's runnable average */
2428 init_entity_runnable_average(&p->se);
2429#ifdef CONFIG_SMP
2430 /*
2431 * Fork balancing, do it here and not earlier because:
2432 * - cpus_allowed can change in the fork path
2433 * - any previously selected cpu might disappear through hotplug
2434 */
2435 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
2436#endif
2437
2438 rq = __task_rq_lock(p);
2439 activate_task(rq, p, 0);
2440 p->on_rq = TASK_ON_RQ_QUEUED;
2441 trace_sched_wakeup_new(p);
2442 check_preempt_curr(rq, p, WF_FORK);
2443#ifdef CONFIG_SMP
2444 if (p->sched_class->task_woken) {
2445 /*
		 * Nothing relies on rq->lock after this, so it's fine to
		 * drop it.
2448 */
2449 lockdep_unpin_lock(&rq->lock);
2450 p->sched_class->task_woken(rq, p);
2451 lockdep_pin_lock(&rq->lock);
2452 }
2453#endif
2454 task_rq_unlock(rq, p, &flags);
2455}
2456
2457#ifdef CONFIG_PREEMPT_NOTIFIERS
2458
2459static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
2460
2461void preempt_notifier_inc(void)
2462{
2463 static_key_slow_inc(&preempt_notifier_key);
2464}
2465EXPORT_SYMBOL_GPL(preempt_notifier_inc);
2466
2467void preempt_notifier_dec(void)
2468{
2469 static_key_slow_dec(&preempt_notifier_key);
2470}
2471EXPORT_SYMBOL_GPL(preempt_notifier_dec);
2472
2473/**
2474 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2475 * @notifier: notifier struct to register
2476 */
2477void preempt_notifier_register(struct preempt_notifier *notifier)
2478{
2479 if (!static_key_false(&preempt_notifier_key))
2480 WARN(1, "registering preempt_notifier while notifiers disabled\n");
2481
	hlist_add_head(&notifier->link, &current->preempt_notifiers);
2483}
2484EXPORT_SYMBOL_GPL(preempt_notifier_register);
2485
2486/**
2487 * preempt_notifier_unregister - no longer interested in preemption notifications
2488 * @notifier: notifier struct to unregister
2489 *
2490 * This is *not* safe to call from within a preemption notifier.
2491 */
2492void preempt_notifier_unregister(struct preempt_notifier *notifier)
2493{
	hlist_del(&notifier->link);
2495}
2496EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
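
/*
 * Illustrative usage (not part of this file; all names below are
 * hypothetical): a subsystem such as a hypervisor can use preempt
 * notifiers to learn when its current task is scheduled out and in:
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu);
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next);
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	struct preempt_notifier pn;
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&pn, &my_ops);
 *	preempt_notifier_register(&pn);		(affects current only)
 *	...
 *	preempt_notifier_unregister(&pn);
 *	preempt_notifier_dec();
 */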
2497
2498static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
2499{
2500 struct preempt_notifier *notifier;
2501
2502 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
2503 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2504}
2505
2506static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2507{
2508 if (static_key_false(&preempt_notifier_key))
2509 __fire_sched_in_preempt_notifiers(curr);
2510}
2511
2512static void
2513__fire_sched_out_preempt_notifiers(struct task_struct *curr,
2514 struct task_struct *next)
2515{
2516 struct preempt_notifier *notifier;
2517
2518 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
2519 notifier->ops->sched_out(notifier, next);
2520}
2521
2522static __always_inline void
2523fire_sched_out_preempt_notifiers(struct task_struct *curr,
2524 struct task_struct *next)
2525{
2526 if (static_key_false(&preempt_notifier_key))
2527 __fire_sched_out_preempt_notifiers(curr, next);
2528}
2529
2530#else /* !CONFIG_PREEMPT_NOTIFIERS */
2531
2532static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2533{
2534}
2535
2536static inline void
2537fire_sched_out_preempt_notifiers(struct task_struct *curr,
2538 struct task_struct *next)
2539{
2540}
2541
2542#endif /* CONFIG_PREEMPT_NOTIFIERS */
2543
2544/**
2545 * prepare_task_switch - prepare to switch tasks
2546 * @rq: the runqueue preparing to switch
2547 * @prev: the current task that is being switched out
2548 * @next: the task we are going to switch to.
2549 *
2550 * This is called with the rq lock held and interrupts off. It must
2551 * be paired with a subsequent finish_task_switch after the context
2552 * switch.
2553 *
2554 * prepare_task_switch sets up locking and calls architecture specific
2555 * hooks.
2556 */
2557static inline void
2558prepare_task_switch(struct rq *rq, struct task_struct *prev,
2559 struct task_struct *next)
2560{
2561 sched_info_switch(rq, prev, next);
2562 perf_event_task_sched_out(prev, next);
2563 fire_sched_out_preempt_notifiers(prev, next);
2564 prepare_lock_switch(rq, next);
2565 prepare_arch_switch(next);
2566}
2567
2568/**
2569 * finish_task_switch - clean up after a task-switch
2570 * @prev: the thread we just switched away from.
2571 *
2572 * finish_task_switch must be called after the context switch, paired
2573 * with a prepare_task_switch call before the context switch.
2574 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2575 * and do any other architecture-specific cleanup actions.
2576 *
2577 * Note that we may have delayed dropping an mm in context_switch(). If
2578 * so, we finish that here outside of the runqueue lock. (Doing it
2579 * with the lock held can cause deadlocks; see schedule() for
2580 * details.)
2581 *
 * The context switch has flipped the stack from under us and restored the
2583 * local variables which were saved when this task called schedule() in the
2584 * past. prev == current is still correct but we need to recalculate this_rq
2585 * because prev may have moved to another CPU.
2586 */
2587static struct rq *finish_task_switch(struct task_struct *prev)
2588 __releases(rq->lock)
2589{
2590 struct rq *rq = this_rq();
2591 struct mm_struct *mm = rq->prev_mm;
2592 long prev_state;
2593
2594 /*
2595 * The previous task will have left us with a preempt_count of 2
2596 * because it left us after:
2597 *
2598 * schedule()
2599 * preempt_disable(); // 1
2600 * __schedule()
2601 * raw_spin_lock_irq(&rq->lock) // 2
2602 *
2603 * Also, see FORK_PREEMPT_COUNT.
2604 */
2605 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
2606 "corrupted preempt_count: %s/%d/0x%x\n",
2607 current->comm, current->pid, preempt_count()))
2608 preempt_count_set(FORK_PREEMPT_COUNT);
2609
2610 rq->prev_mm = NULL;
2611
2612 /*
2613 * A task struct has one reference for the use as "current".
2614 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
2615 * schedule one last time. The schedule call will never return, and
2616 * the scheduled task must drop that reference.
2617 *
2618 * We must observe prev->state before clearing prev->on_cpu (in
2619 * finish_lock_switch), otherwise a concurrent wakeup can get prev
	 * running on another CPU and we could race with its RUNNING -> DEAD
2621 * transition, resulting in a double drop.
2622 */
2623 prev_state = prev->state;
2624 vtime_task_switch(prev);
2625 perf_event_task_sched_in(prev, current);
2626 finish_lock_switch(rq, prev);
2627 finish_arch_post_lock_switch();
2628
2629 fire_sched_in_preempt_notifiers(current);
2630 if (mm)
2631 mmdrop(mm);
2632 if (unlikely(prev_state == TASK_DEAD)) {
2633 if (prev->sched_class->task_dead)
2634 prev->sched_class->task_dead(prev);
2635
2636 /*
2637 * Remove function-return probe instances associated with this
2638 * task and put them back on the free list.
2639 */
2640 kprobe_flush_task(prev);
2641 put_task_struct(prev);
2642 }
2643
2644 tick_nohz_task_switch();
2645 return rq;
2646}
2647
2648#ifdef CONFIG_SMP
2649
2650/* rq->lock is NOT held, but preemption is disabled */
2651static void __balance_callback(struct rq *rq)
2652{
2653 struct callback_head *head, *next;
2654 void (*func)(struct rq *rq);
2655 unsigned long flags;
2656
2657 raw_spin_lock_irqsave(&rq->lock, flags);
2658 head = rq->balance_callback;
2659 rq->balance_callback = NULL;
2660 while (head) {
2661 func = (void (*)(struct rq *))head->func;
2662 next = head->next;
2663 head->next = NULL;
2664 head = next;
2665
2666 func(rq);
2667 }
2668 raw_spin_unlock_irqrestore(&rq->lock, flags);
2669}
2670
2671static inline void balance_callback(struct rq *rq)
2672{
2673 if (unlikely(rq->balance_callback))
2674 __balance_callback(rq);
2675}
2676
2677#else
2678
2679static inline void balance_callback(struct rq *rq)
2680{
2681}
2682
2683#endif
2684
2685/**
2686 * schedule_tail - first thing a freshly forked thread must call.
2687 * @prev: the thread we just switched away from.
2688 */
2689asmlinkage __visible void schedule_tail(struct task_struct *prev)
2690 __releases(rq->lock)
2691{
2692 struct rq *rq;
2693
2694 /*
2695 * New tasks start with FORK_PREEMPT_COUNT, see there and
2696 * finish_task_switch() for details.
2697 *
	 * finish_task_switch() will drop rq->lock and lower the preempt count,
	 * and the subsequent preempt_enable() will end up enabling preemption
	 * (on PREEMPT_COUNT kernels).
2701 */
2702
2703 rq = finish_task_switch(prev);
2704 balance_callback(rq);
2705 preempt_enable();
2706
2707 if (current->set_child_tid)
2708 put_user(task_pid_vnr(current), current->set_child_tid);
2709}
2710
2711/*
2712 * context_switch - switch to the new MM and the new thread's register state.
2713 */
2714static __always_inline struct rq *
2715context_switch(struct rq *rq, struct task_struct *prev,
2716 struct task_struct *next)
2717{
2718 struct mm_struct *mm, *oldmm;
2719
2720 prepare_task_switch(rq, prev, next);
2721
2722 mm = next->mm;
2723 oldmm = prev->active_mm;
2724 /*
2725 * For paravirt, this is coupled with an exit in switch_to to
2726 * combine the page table reload and the switch backend into
2727 * one hypercall.
2728 */
2729 arch_start_context_switch(prev);
2730
2731 if (!mm) {
2732 next->active_mm = oldmm;
2733 atomic_inc(&oldmm->mm_count);
2734 enter_lazy_tlb(oldmm, next);
2735 } else
2736 switch_mm(oldmm, mm, next);
2737
2738 if (!prev->mm) {
2739 prev->active_mm = NULL;
2740 rq->prev_mm = oldmm;
2741 }
2742 /*
	 * The runqueue lock will be released by the next task (which is
	 * an invalid locking op, but in the case of the scheduler it's an
	 * obvious special-case), so we do an early lockdep release here:
2747 */
2748 lockdep_unpin_lock(&rq->lock);
2749 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2750
2751 /* Here we just switch the register state and the stack. */
2752 switch_to(prev, next, prev);
2753 barrier();
2754
2755 return finish_task_switch(prev);
2756}
2757
2758/*
2759 * nr_running and nr_context_switches:
2760 *
2761 * externally visible scheduler statistics: current number of runnable
2762 * threads, total number of context switches performed since bootup.
2763 */
2764unsigned long nr_running(void)
2765{
2766 unsigned long i, sum = 0;
2767
2768 for_each_online_cpu(i)
2769 sum += cpu_rq(i)->nr_running;
2770
2771 return sum;
2772}
2773
2774/*
2775 * Check if only the current task is running on the cpu.
2776 *
2777 * Caution: this function does not check that the caller has disabled
2778 * preemption, thus the result might have a time-of-check-to-time-of-use
 * race. The caller is responsible for using it correctly, for example:
2780 *
2781 * - from a non-preemptable section (of course)
2782 *
2783 * - from a thread that is bound to a single CPU
2784 *
2785 * - in a loop with very short iterations (e.g. a polling loop)
2786 */
2787bool single_task_running(void)
2788{
2789 return raw_rq()->nr_running == 1;
2790}
2791EXPORT_SYMBOL(single_task_running);
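
/*
 * Illustrative use in a polling loop (a sketch; condition_met() is
 * hypothetical): back off as soon as another task becomes runnable on
 * this CPU instead of burning the rest of the timeslice spinning.
 *
 *	while (!condition_met()) {
 *		if (!single_task_running())
 *			break;		(yield the CPU to the other task)
 *		cpu_relax();
 *	}
 */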
2792
2793unsigned long long nr_context_switches(void)
2794{
2795 int i;
2796 unsigned long long sum = 0;
2797
2798 for_each_possible_cpu(i)
2799 sum += cpu_rq(i)->nr_switches;
2800
2801 return sum;
2802}
2803
2804unsigned long nr_iowait(void)
2805{
2806 unsigned long i, sum = 0;
2807
2808 for_each_possible_cpu(i)
2809 sum += atomic_read(&cpu_rq(i)->nr_iowait);
2810
2811 return sum;
2812}
2813
2814unsigned long nr_iowait_cpu(int cpu)
2815{
2816 struct rq *this = cpu_rq(cpu);
2817 return atomic_read(&this->nr_iowait);
2818}
2819
2820void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
2821{
2822 struct rq *rq = this_rq();
2823 *nr_waiters = atomic_read(&rq->nr_iowait);
2824 *load = rq->load.weight;
2825}
2826
2827#ifdef CONFIG_SMP
2828
2829/*
2830 * sched_exec - execve() is a valuable balancing opportunity, because at
2831 * this point the task has the smallest effective memory and cache footprint.
2832 */
2833void sched_exec(void)
2834{
2835 struct task_struct *p = current;
2836 unsigned long flags;
2837 int dest_cpu;
2838
2839 raw_spin_lock_irqsave(&p->pi_lock, flags);
2840 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
2841 if (dest_cpu == smp_processor_id())
2842 goto unlock;
2843
2844 if (likely(cpu_active(dest_cpu))) {
2845 struct migration_arg arg = { p, dest_cpu };
2846
2847 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2848 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
2849 return;
2850 }
2851unlock:
2852 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2853}
2854
2855#endif
2856
2857DEFINE_PER_CPU(struct kernel_stat, kstat);
2858DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
2859
2860EXPORT_PER_CPU_SYMBOL(kstat);
2861EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
2862
2863/*
2864 * Return accounted runtime for the task.
2865 * In case the task is currently running, return the runtime plus current's
 * pending runtime that has not been accounted yet.
2867 */
2868unsigned long long task_sched_runtime(struct task_struct *p)
2869{
2870 unsigned long flags;
2871 struct rq *rq;
2872 u64 ns;
2873
2874#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
2875 /*
	 * 64-bit doesn't need locks to atomically read a 64-bit value.
	 * So we have an optimization chance when the task's delta_exec is 0.
2878 * Reading ->on_cpu is racy, but this is ok.
2879 *
2880 * If we race with it leaving cpu, we'll take a lock. So we're correct.
2881 * If we race with it entering cpu, unaccounted time is 0. This is
2882 * indistinguishable from the read occurring a few cycles earlier.
2883 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
2884 * been accounted, so we're correct here as well.
2885 */
2886 if (!p->on_cpu || !task_on_rq_queued(p))
2887 return p->se.sum_exec_runtime;
2888#endif
2889
2890 rq = task_rq_lock(p, &flags);
2891 /*
2892 * Must be ->curr _and_ ->on_rq. If dequeued, we would
2893 * project cycles that may never be accounted to this
2894 * thread, breaking clock_gettime().
2895 */
2896 if (task_current(rq, p) && task_on_rq_queued(p)) {
2897 update_rq_clock(rq);
2898 p->sched_class->update_curr(rq);
2899 }
2900 ns = p->se.sum_exec_runtime;
2901 task_rq_unlock(rq, p, &flags);
2902
2903 return ns;
2904}
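
/*
 * Illustrative consumer (a sketch): task_sched_runtime() ultimately
 * backs the per-thread CPU clock, so from user space the accounted
 * runtime surfaces as, e.g.:
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *
 * which is why the update_curr() call above matters: without it, a
 * running thread would appear frozen between scheduler ticks.
 */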
2905
2906/*
2907 * This function gets called by the timer code, with HZ frequency.
2908 * We call it with interrupts disabled.
2909 */
2910void scheduler_tick(void)
2911{
2912 int cpu = smp_processor_id();
2913 struct rq *rq = cpu_rq(cpu);
2914 struct task_struct *curr = rq->curr;
2915
2916 sched_clock_tick();
2917
2918 raw_spin_lock(&rq->lock);
2919 update_rq_clock(rq);
2920 curr->sched_class->task_tick(rq, curr, 0);
2921 update_cpu_load_active(rq);
2922 calc_global_load_tick(rq);
2923 raw_spin_unlock(&rq->lock);
2924
2925 perf_event_task_tick();
2926
2927#ifdef CONFIG_SMP
2928 rq->idle_balance = idle_cpu(cpu);
2929 trigger_load_balance(rq);
2930#endif
2931 rq_last_tick_reset(rq);
2932}
2933
2934#ifdef CONFIG_NO_HZ_FULL
2935/**
2936 * scheduler_tick_max_deferment
2937 *
2938 * Keep at least one tick per second when a single
2939 * active task is running because the scheduler doesn't
 * yet completely support a full dynticks environment.
2941 *
2942 * This makes sure that uptime, CFS vruntime, load
2943 * balancing, etc... continue to move forward, even
2944 * with a very low granularity.
2945 *
2946 * Return: Maximum deferment in nanoseconds.
2947 */
2948u64 scheduler_tick_max_deferment(void)
2949{
2950 struct rq *rq = this_rq();
2951 unsigned long next, now = READ_ONCE(jiffies);
2952
2953 next = rq->last_sched_tick + HZ;
2954
2955 if (time_before_eq(next, now))
2956 return 0;
2957
2958 return jiffies_to_nsecs(next - now);
2959}
2960#endif
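
/*
 * Worked example (illustrative only): with HZ == 1000, if the last
 * tick on this runqueue was 400 jiffies ago, then:
 *
 *	next = rq->last_sched_tick + 1000
 *	next - now = 600 jiffies
 *	return jiffies_to_nsecs(600) == 600000000ns
 *
 * i.e. the tick may be deferred for at most another 600ms before the
 * one-tick-per-second guarantee above would be violated.
 */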
2961
2962#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2963 defined(CONFIG_PREEMPT_TRACER))
2964
2965void preempt_count_add(int val)
2966{
2967#ifdef CONFIG_DEBUG_PREEMPT
2968 /*
2969 * Underflow?
2970 */
2971 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2972 return;
2973#endif
2974 __preempt_count_add(val);
2975#ifdef CONFIG_DEBUG_PREEMPT
2976 /*
2977 * Spinlock count overflowing soon?
2978 */
2979 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
2980 PREEMPT_MASK - 10);
2981#endif
2982 if (preempt_count() == val) {
2983 unsigned long ip = get_lock_parent_ip();
2984#ifdef CONFIG_DEBUG_PREEMPT
2985 current->preempt_disable_ip = ip;
2986#endif
2987 trace_preempt_off(CALLER_ADDR0, ip);
2988 }
2989}
2990EXPORT_SYMBOL(preempt_count_add);
2991NOKPROBE_SYMBOL(preempt_count_add);
2992
2993void preempt_count_sub(int val)
2994{
2995#ifdef CONFIG_DEBUG_PREEMPT
2996 /*
2997 * Underflow?
2998 */
2999 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3000 return;
3001 /*
3002 * Is the spinlock portion underflowing?
3003 */
3004 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3005 !(preempt_count() & PREEMPT_MASK)))
3006 return;
3007#endif
3008
3009 if (preempt_count() == val)
3010 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
3011 __preempt_count_sub(val);
3012}
3013EXPORT_SYMBOL(preempt_count_sub);
3014NOKPROBE_SYMBOL(preempt_count_sub);
3015
3016#endif
3017
3018/*
3019 * Print scheduling while atomic bug:
3020 */
3021static noinline void __schedule_bug(struct task_struct *prev)
3022{
3023 if (oops_in_progress)
3024 return;
3025
3026 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3027 prev->comm, prev->pid, preempt_count());
3028
3029 debug_show_held_locks(prev);
3030 print_modules();
3031 if (irqs_disabled())
3032 print_irqtrace_events(prev);
3033#ifdef CONFIG_DEBUG_PREEMPT
3034 if (in_atomic_preempt_off()) {
3035 pr_err("Preemption disabled at:");
3036 print_ip_sym(current->preempt_disable_ip);
3037 pr_cont("\n");
3038 }
3039#endif
3040 dump_stack();
3041 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
3042}
3043
3044/*
3045 * Various schedule()-time debugging checks and statistics:
3046 */
3047static inline void schedule_debug(struct task_struct *prev)
3048{
3049#ifdef CONFIG_SCHED_STACK_END_CHECK
3050 BUG_ON(task_stack_end_corrupted(prev));
3051#endif
3052
3053 if (unlikely(in_atomic_preempt_off())) {
3054 __schedule_bug(prev);
3055 preempt_count_set(PREEMPT_DISABLED);
3056 }
3057 rcu_sleep_check();
3058
3059 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3060
3061 schedstat_inc(this_rq(), sched_count);
3062}
3063
3064/*
3065 * Pick up the highest-prio task:
3066 */
3067static inline struct task_struct *
3068pick_next_task(struct rq *rq, struct task_struct *prev)
3069{
3070 const struct sched_class *class = &fair_sched_class;
3071 struct task_struct *p;
3072
3073 /*
3074 * Optimization: we know that if all tasks are in
3075 * the fair class we can call that function directly:
3076 */
3077 if (likely(prev->sched_class == class &&
3078 rq->nr_running == rq->cfs.h_nr_running)) {
3079 p = fair_sched_class.pick_next_task(rq, prev);
3080 if (unlikely(p == RETRY_TASK))
3081 goto again;
3082
3083 /* assumes fair_sched_class->next == idle_sched_class */
3084 if (unlikely(!p))
3085 p = idle_sched_class.pick_next_task(rq, prev);
3086
3087 return p;
3088 }
3089
3090again:
3091 for_each_class(class) {
3092 p = class->pick_next_task(rq, prev);
3093 if (p) {
3094 if (unlikely(p == RETRY_TASK))
3095 goto again;
3096 return p;
3097 }
3098 }
3099
3100 BUG(); /* the idle class will always have a runnable task */
3101}
3102
3103/*
3104 * __schedule() is the main scheduler function.
3105 *
3106 * The main means of driving the scheduler and thus entering this function are:
3107 *
3108 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
3109 *
3110 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
3111 * paths. For example, see arch/x86/entry_64.S.
3112 *
3113 * To drive preemption between tasks, the scheduler sets the flag in timer
3114 * interrupt handler scheduler_tick().
3115 *
3116 * 3. Wakeups don't really cause entry into schedule(). They add a
3117 * task to the run-queue and that's it.
3118 *
3119 * Now, if the new task added to the run-queue preempts the current
3120 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
3121 * called on the nearest possible occasion:
3122 *
3123 * - If the kernel is preemptible (CONFIG_PREEMPT=y):
3124 *
 *       - in syscall or exception context, at the next outermost
3126 * preempt_enable(). (this might be as soon as the wake_up()'s
3127 * spin_unlock()!)
3128 *
3129 * - in IRQ context, return from interrupt-handler to
3130 * preemptible context
3131 *
3132 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
3133 * then at the next:
3134 *
3135 * - cond_resched() call
3136 * - explicit schedule() call
3137 * - return from syscall or exception to user-space
3138 * - return from interrupt-handler to user-space
3139 *
3140 * WARNING: must be called with preemption disabled!
3141 */
3142static void __sched notrace __schedule(bool preempt)
3143{
3144 struct task_struct *prev, *next;
3145 unsigned long *switch_count;
3146 struct rq *rq;
3147 int cpu;
3148
3149 cpu = smp_processor_id();
3150 rq = cpu_rq(cpu);
3151 prev = rq->curr;
3152
3153 /*
3154 * do_exit() calls schedule() with preemption disabled as an exception;
3155 * however we must fix that up, otherwise the next task will see an
3156 * inconsistent (higher) preempt count.
3157 *
3158 * It also avoids the below schedule_debug() test from complaining
3159 * about this.
3160 */
3161 if (unlikely(prev->state == TASK_DEAD))
3162 preempt_enable_no_resched_notrace();
3163
3164 schedule_debug(prev);
3165
3166 if (sched_feat(HRTICK))
3167 hrtick_clear(rq);
3168
3169 local_irq_disable();
3170 rcu_note_context_switch();
3171
3172 /*
3173 * Make sure that signal_pending_state()->signal_pending() below
3174 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
3175 * done by the caller to avoid the race with signal_wake_up().
3176 */
3177 smp_mb__before_spinlock();
3178 raw_spin_lock(&rq->lock);
3179 lockdep_pin_lock(&rq->lock);
3180
3181 rq->clock_skip_update <<= 1; /* promote REQ to ACT */
3182
3183 switch_count = &prev->nivcsw;
3184 if (!preempt && prev->state) {
3185 if (unlikely(signal_pending_state(prev->state, prev))) {
3186 prev->state = TASK_RUNNING;
3187 } else {
3188 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3189 prev->on_rq = 0;
3190
3191 /*
3192 * If a worker went to sleep, notify and ask workqueue
3193 * whether it wants to wake up a task to maintain
3194 * concurrency.
3195 */
3196 if (prev->flags & PF_WQ_WORKER) {
3197 struct task_struct *to_wakeup;
3198
3199 to_wakeup = wq_worker_sleeping(prev);
3200 if (to_wakeup)
3201 try_to_wake_up_local(to_wakeup);
3202 }
3203 }
3204 switch_count = &prev->nvcsw;
3205 }
3206
3207 if (task_on_rq_queued(prev))
3208 update_rq_clock(rq);
3209
3210 next = pick_next_task(rq, prev);
3211 clear_tsk_need_resched(prev);
3212 clear_preempt_need_resched();
3213 rq->clock_skip_update = 0;
3214
3215 if (likely(prev != next)) {
3216 rq->nr_switches++;
3217 rq->curr = next;
3218 ++*switch_count;
3219
3220 trace_sched_switch(preempt, prev, next);
3221 rq = context_switch(rq, prev, next); /* unlocks the rq */
3222 } else {
3223 lockdep_unpin_lock(&rq->lock);
3224 raw_spin_unlock_irq(&rq->lock);
3225 }
3226
3227 balance_callback(rq);
3228}
3229STACK_FRAME_NON_STANDARD(__schedule); /* switch_to() */
3230
3231static inline void sched_submit_work(struct task_struct *tsk)
3232{
3233 if (!tsk->state || tsk_is_pi_blocked(tsk))
3234 return;
3235 /*
3236 * If we are going to sleep and we have plugged IO queued,
3237 * make sure to submit it to avoid deadlocks.
3238 */
3239 if (blk_needs_flush_plug(tsk))
3240 blk_schedule_flush_plug(tsk);
3241}
3242
3243asmlinkage __visible void __sched schedule(void)
3244{
3245 struct task_struct *tsk = current;
3246
3247 sched_submit_work(tsk);
3248 do {
3249 preempt_disable();
3250 __schedule(false);
3251 sched_preempt_enable_no_resched();
3252 } while (need_resched());
3253}
3254EXPORT_SYMBOL(schedule);
3255
3256#ifdef CONFIG_CONTEXT_TRACKING
3257asmlinkage __visible void __sched schedule_user(void)
3258{
3259 /*
3260 * If we come here after a random call to set_need_resched(),
3261 * or we have been woken up remotely but the IPI has not yet arrived,
3262 * we haven't yet exited the RCU idle mode. Do it here manually until
3263 * we find a better solution.
3264 *
3265 * NB: There are buggy callers of this function. Ideally we
3266 * should warn if prev_state != CONTEXT_USER, but that will trigger
3267 * too frequently to make sense yet.
3268 */
3269 enum ctx_state prev_state = exception_enter();
3270 schedule();
3271 exception_exit(prev_state);
3272}
3273#endif
3274
3275/**
3276 * schedule_preempt_disabled - called with preemption disabled
3277 *
3278 * Returns with preemption disabled. Note: preempt_count must be 1
3279 */
3280void __sched schedule_preempt_disabled(void)
3281{
3282 sched_preempt_enable_no_resched();
3283 schedule();
3284 preempt_disable();
3285}
3286
3287static void __sched notrace preempt_schedule_common(void)
3288{
3289 do {
3290 preempt_disable_notrace();
3291 __schedule(true);
3292 preempt_enable_no_resched_notrace();
3293
3294 /*
3295 * Check again in case we missed a preemption opportunity
3296 * between schedule and now.
3297 */
3298 } while (need_resched());
3299}
3300
3301#ifdef CONFIG_PREEMPT
3302/*
 * This is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable(). Kernel preemption off of return-from-interrupt
 * is handled separately by preempt_schedule_irq(), which calls the
 * scheduler directly.
3306 */
3307asmlinkage __visible void __sched notrace preempt_schedule(void)
3308{
3309 /*
3310 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return.
3312 */
3313 if (likely(!preemptible()))
3314 return;
3315
3316 preempt_schedule_common();
3317}
3318NOKPROBE_SYMBOL(preempt_schedule);
3319EXPORT_SYMBOL(preempt_schedule);
3320
3321/**
3322 * preempt_schedule_notrace - preempt_schedule called by tracing
3323 *
3324 * The tracing infrastructure uses preempt_enable_notrace to prevent
3325 * recursion and tracing preempt enabling caused by the tracing
3326 * infrastructure itself. But as tracing can happen in areas coming
3327 * from userspace or just about to enter userspace, a preempt enable
3328 * can occur before user_exit() is called. This will cause the scheduler
3329 * to be called when the system is still in usermode.
3330 *
3331 * To prevent this, the preempt_enable_notrace will use this function
3332 * instead of preempt_schedule() to exit user context if needed before
3333 * calling the scheduler.
3334 */
3335asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
3336{
3337 enum ctx_state prev_ctx;
3338
3339 if (likely(!preemptible()))
3340 return;
3341
3342 do {
3343 preempt_disable_notrace();
3344 /*
3345 * Needs preempt disabled in case user_exit() is traced
3346 * and the tracer calls preempt_enable_notrace() causing
3347 * an infinite recursion.
3348 */
3349 prev_ctx = exception_enter();
3350 __schedule(true);
3351 exception_exit(prev_ctx);
3352
3353 preempt_enable_no_resched_notrace();
3354 } while (need_resched());
3355}
3356EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
3357
3358#endif /* CONFIG_PREEMPT */
3359
3360/*
 * This is the entry point to schedule() from kernel preemption
 * off of IRQ context.
 * Note that this is called and returns with IRQs disabled. This
 * protects us against recursive calls from IRQ context.
3365 */
3366asmlinkage __visible void __sched preempt_schedule_irq(void)
3367{
3368 enum ctx_state prev_state;
3369
3370 /* Catch callers which need to be fixed */
3371 BUG_ON(preempt_count() || !irqs_disabled());
3372
3373 prev_state = exception_enter();
3374
3375 do {
3376 preempt_disable();
3377 local_irq_enable();
3378 __schedule(true);
3379 local_irq_disable();
3380 sched_preempt_enable_no_resched();
3381 } while (need_resched());
3382
3383 exception_exit(prev_state);
3384}
3385
3386int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
3387 void *key)
3388{
3389 return try_to_wake_up(curr->private, mode, wake_flags);
3390}
3391EXPORT_SYMBOL(default_wake_function);
3392
3393#ifdef CONFIG_RT_MUTEXES
3394
3395/*
3396 * rt_mutex_setprio - set the current priority of a task
3397 * @p: task
3398 * @prio: prio value (kernel-internal form)
3399 *
3400 * This function changes the 'effective' priority of a task. It does
3401 * not touch ->normal_prio like __setscheduler().
3402 *
3403 * Used by the rt_mutex code to implement priority inheritance
 * logic. The call site only calls this if the task's priority actually changed.
3405 */
3406void rt_mutex_setprio(struct task_struct *p, int prio)
3407{
3408 int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
3409 struct rq *rq;
3410 const struct sched_class *prev_class;
3411
3412 BUG_ON(prio > MAX_PRIO);
3413
3414 rq = __task_rq_lock(p);
3415
3416 /*
	 * Idle task boosting is a no-no in general. There is one
	 * exception, when PREEMPT_RT and NOHZ are active:
3419 *
3420 * The idle task calls get_next_timer_interrupt() and holds
3421 * the timer wheel base->lock on the CPU and another CPU wants
3422 * to access the timer (probably to cancel it). We can safely
3423 * ignore the boosting request, as the idle CPU runs this code
3424 * with interrupts disabled and will complete the lock
3425 * protected section without being interrupted. So there is no
3426 * real need to boost.
3427 */
3428 if (unlikely(p == rq->idle)) {
3429 WARN_ON(p != rq->curr);
3430 WARN_ON(p->pi_blocked_on);
3431 goto out_unlock;
3432 }
3433
3434 trace_sched_pi_setprio(p, prio);
3435 oldprio = p->prio;
3436
3437 if (oldprio == prio)
3438 queue_flag &= ~DEQUEUE_MOVE;
3439
3440 prev_class = p->sched_class;
3441 queued = task_on_rq_queued(p);
3442 running = task_current(rq, p);
3443 if (queued)
3444 dequeue_task(rq, p, queue_flag);
3445 if (running)
3446 put_prev_task(rq, p);
3447
3448 /*
	 * Boosting conditions are:
3450 * 1. -rt task is running and holds mutex A
3451 * --> -dl task blocks on mutex A
3452 *
3453 * 2. -dl task is running and holds mutex A
3454 * --> -dl task blocks on mutex A and could preempt the
3455 * running task
3456 */
3457 if (dl_prio(prio)) {
3458 struct task_struct *pi_task = rt_mutex_get_top_task(p);
3459 if (!dl_prio(p->normal_prio) ||
3460 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
3461 p->dl.dl_boosted = 1;
3462 queue_flag |= ENQUEUE_REPLENISH;
3463 } else
3464 p->dl.dl_boosted = 0;
3465 p->sched_class = &dl_sched_class;
3466 } else if (rt_prio(prio)) {
3467 if (dl_prio(oldprio))
3468 p->dl.dl_boosted = 0;
3469 if (oldprio < prio)
3470 queue_flag |= ENQUEUE_HEAD;
3471 p->sched_class = &rt_sched_class;
3472 } else {
3473 if (dl_prio(oldprio))
3474 p->dl.dl_boosted = 0;
3475 if (rt_prio(oldprio))
3476 p->rt.timeout = 0;
3477 p->sched_class = &fair_sched_class;
3478 }
3479
3480 p->prio = prio;
3481
3482 if (running)
3483 p->sched_class->set_curr_task(rq);
3484 if (queued)
3485 enqueue_task(rq, p, queue_flag);
3486
3487 check_class_changed(rq, p, prev_class, oldprio);
3488out_unlock:
3489 preempt_disable(); /* avoid rq from going away on us */
3490 __task_rq_unlock(rq);
3491
3492 balance_callback(rq);
3493 preempt_enable();
3494}
3495#endif
3496
3497void set_user_nice(struct task_struct *p, long nice)
3498{
3499 int old_prio, delta, queued;
3500 unsigned long flags;
3501 struct rq *rq;
3502
3503 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
3504 return;
3505 /*
3506 * We have to be careful, if called from sys_setpriority(),
3507 * the task might be in the middle of scheduling on another CPU.
3508 */
3509 rq = task_rq_lock(p, &flags);
3510 /*
3511 * The RT priorities are set via sched_setscheduler(), but we still
3512 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling as long as the task is
	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
3515 */
3516 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
3517 p->static_prio = NICE_TO_PRIO(nice);
3518 goto out_unlock;
3519 }
3520 queued = task_on_rq_queued(p);
3521 if (queued)
3522 dequeue_task(rq, p, DEQUEUE_SAVE);
3523
3524 p->static_prio = NICE_TO_PRIO(nice);
3525 set_load_weight(p);
3526 old_prio = p->prio;
3527 p->prio = effective_prio(p);
3528 delta = p->prio - old_prio;
3529
3530 if (queued) {
3531 enqueue_task(rq, p, ENQUEUE_RESTORE);
3532 /*
3533 * If the task increased its priority or is running and
3534 * lowered its priority, then reschedule its CPU:
3535 */
3536 if (delta < 0 || (delta > 0 && task_running(rq, p)))
3537 resched_curr(rq);
3538 }
3539out_unlock:
3540 task_rq_unlock(rq, p, &flags);
3541}
3542EXPORT_SYMBOL(set_user_nice);
3543
3544/*
3545 * can_nice - check if a task can reduce its nice value
3546 * @p: task
3547 * @nice: nice value
3548 */
3549int can_nice(const struct task_struct *p, const int nice)
3550{
3551 /* convert nice value [19,-20] to rlimit style value [1,40] */
3552 int nice_rlim = nice_to_rlimit(nice);
3553
3554 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
3555 capable(CAP_SYS_NICE));
3556}
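
/*
 * Worked example (illustrative only): nice_to_rlimit() maps the nice
 * range [19, -20] onto the rlimit-style range [1, 40], i.e.
 * nice_to_rlimit(nice) == 20 - nice. So a task with RLIMIT_NICE == 25
 * may lower its nice value down to 20 - 25 == -5, but going to -6
 * requires CAP_SYS_NICE.
 */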
3557
3558#ifdef __ARCH_WANT_SYS_NICE
3559
3560/*
3561 * sys_nice - change the priority of the current process.
3562 * @increment: priority increment
3563 *
3564 * sys_setpriority is a more generic, but much slower function that
3565 * does similar things.
3566 */
3567SYSCALL_DEFINE1(nice, int, increment)
3568{
3569 long nice, retval;
3570
3571 /*
3572 * Setpriority might change our priority at the same moment.
3573 * We don't have to worry. Conceptually one call occurs first
3574 * and we have a single winner.
3575 */
3576 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
3577 nice = task_nice(current) + increment;
3578
3579 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
3580 if (increment < 0 && !can_nice(current, nice))
3581 return -EPERM;
3582
3583 retval = security_task_setnice(current, nice);
3584 if (retval)
3585 return retval;
3586
3587 set_user_nice(current, nice);
3588 return 0;
3589}
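
/*
 * Worked example (illustrative only): the increment is first clamped
 * to [-NICE_WIDTH, NICE_WIDTH] == [-40, 40], then the resulting nice
 * value to [MIN_NICE, MAX_NICE] == [-20, 19]. So a nice-10 task
 * calling nice(100) ends up at 19, and calling nice(-100) ends up at
 * -20 (permissions allowing).
 */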
3590
3591#endif
3592
3593/**
3594 * task_prio - return the priority value of a given task.
3595 * @p: the task in question.
3596 *
 * Return: The priority value as seen by users in /proc.
 * Normal tasks map to [0 ... 39] (nice value + 20); RT tasks map to
 * negative values [-100 ... -2], and SCHED_DEADLINE tasks to -101.
3600 */
3601int task_prio(const struct task_struct *p)
3602{
3603 return p->prio - MAX_RT_PRIO;
3604}
3605
3606/**
3607 * idle_cpu - is a given cpu idle currently?
3608 * @cpu: the processor in question.
3609 *
3610 * Return: 1 if the CPU is currently idle. 0 otherwise.
3611 */
3612int idle_cpu(int cpu)
3613{
3614 struct rq *rq = cpu_rq(cpu);
3615
3616 if (rq->curr != rq->idle)
3617 return 0;
3618
3619 if (rq->nr_running)
3620 return 0;
3621
3622#ifdef CONFIG_SMP
3623 if (!llist_empty(&rq->wake_list))
3624 return 0;
3625#endif
3626
3627 return 1;
3628}
3629
3630/**
3631 * idle_task - return the idle task for a given cpu.
3632 * @cpu: the processor in question.
3633 *
3634 * Return: The idle task for the cpu @cpu.
3635 */
3636struct task_struct *idle_task(int cpu)
3637{
3638 return cpu_rq(cpu)->idle;
3639}
3640
3641/**
3642 * find_process_by_pid - find a process with a matching PID value.
3643 * @pid: the pid in question.
3644 *
3645 * The task of @pid, if found. %NULL otherwise.
3646 */
3647static struct task_struct *find_process_by_pid(pid_t pid)
3648{
3649 return pid ? find_task_by_vpid(pid) : current;
3650}
3651
3652/*
 * This function initializes the sched_dl_entity of a task that is
 * becoming SCHED_DEADLINE.
3655 *
3656 * Only the static values are considered here, the actual runtime and the
3657 * absolute deadline will be properly calculated when the task is enqueued
3658 * for the first time with its new policy.
3659 */
3660static void
3661__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3662{
3663 struct sched_dl_entity *dl_se = &p->dl;
3664
3665 dl_se->dl_runtime = attr->sched_runtime;
3666 dl_se->dl_deadline = attr->sched_deadline;
3667 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
3668 dl_se->flags = attr->sched_flags;
3669 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3670
3671 /*
3672 * Changing the parameters of a task is 'tricky' and we're not doing
3673 * the correct thing -- also see task_dead_dl() and switched_from_dl().
3674 *
3675 * What we SHOULD do is delay the bandwidth release until the 0-lag
3676 * point. This would include retaining the task_struct until that time
3677 * and change dl_overflow() to not immediately decrement the current
3678 * amount.
3679 *
3680 * Instead we retain the current runtime/deadline and let the new
3681 * parameters take effect after the current reservation period lapses.
3682 * This is safe (albeit pessimistic) because the 0-lag point is always
3683 * before the current scheduling deadline.
3684 *
3685 * We can still have temporary overloads because we do not delay the
3686 * change in bandwidth until that time; so admission control is
3687 * not on the safe side. It does however guarantee tasks will never
3688 * consume more than promised.
3689 */
3690}
3691
3692/*
3693 * sched_setparam() passes in -1 for its policy, to let the functions
3694 * it calls know not to change it.
3695 */
3696#define SETPARAM_POLICY -1
3697
3698static void __setscheduler_params(struct task_struct *p,
3699 const struct sched_attr *attr)
3700{
3701 int policy = attr->sched_policy;
3702
3703 if (policy == SETPARAM_POLICY)
3704 policy = p->policy;
3705
3706 p->policy = policy;
3707
3708 if (dl_policy(policy))
3709 __setparam_dl(p, attr);
3710 else if (fair_policy(policy))
3711 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
3712
3713 /*
3714 * __sched_setscheduler() ensures attr->sched_priority == 0 when
3715 * !rt_policy. Always setting this ensures that things like
3716 * getparam()/getattr() don't report silly values for !rt tasks.
3717 */
3718 p->rt_priority = attr->sched_priority;
3719 p->normal_prio = normal_prio(p);
3720 set_load_weight(p);
3721}
3722
3723/* Actually do priority change: must hold pi & rq lock. */
3724static void __setscheduler(struct rq *rq, struct task_struct *p,
3725 const struct sched_attr *attr, bool keep_boost)
3726{
3727 __setscheduler_params(p, attr);
3728
3729 /*
3730 * Keep a potential priority boosting if called from
3731 * sched_setscheduler().
3732 */
3733 if (keep_boost)
3734 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
3735 else
3736 p->prio = normal_prio(p);
3737
3738 if (dl_prio(p->prio))
3739 p->sched_class = &dl_sched_class;
3740 else if (rt_prio(p->prio))
3741 p->sched_class = &rt_sched_class;
3742 else
3743 p->sched_class = &fair_sched_class;
3744}
3745
3746static void
3747__getparam_dl(struct task_struct *p, struct sched_attr *attr)
3748{
3749 struct sched_dl_entity *dl_se = &p->dl;
3750
3751 attr->sched_priority = p->rt_priority;
3752 attr->sched_runtime = dl_se->dl_runtime;
3753 attr->sched_deadline = dl_se->dl_deadline;
3754 attr->sched_period = dl_se->dl_period;
3755 attr->sched_flags = dl_se->flags;
3756}
3757
3758/*
3759 * This function validates the new parameters of a -deadline task.
 * We require the deadline to be non-zero and greater than or equal
 * to the runtime, and the period to be either zero or greater than
 * or equal to the deadline. Furthermore, we have to be sure that
3763 * user parameters are above the internal resolution of 1us (we
3764 * check sched_runtime only since it is always the smaller one) and
3765 * below 2^63 ns (we have to check both sched_deadline and
3766 * sched_period, as the latter can be zero).
3767 */
3768static bool
3769__checkparam_dl(const struct sched_attr *attr)
3770{
3771 /* deadline != 0 */
3772 if (attr->sched_deadline == 0)
3773 return false;
3774
3775 /*
3776 * Since we truncate DL_SCALE bits, make sure we're at least
3777 * that big.
3778 */
3779 if (attr->sched_runtime < (1ULL << DL_SCALE))
3780 return false;
3781
3782 /*
3783 * Since we use the MSB for wrap-around and sign issues, make
3784 * sure it's not set (mind that period can be equal to zero).
3785 */
3786 if (attr->sched_deadline & (1ULL << 63) ||
3787 attr->sched_period & (1ULL << 63))
3788 return false;
3789
3790 /* runtime <= deadline <= period (if period != 0) */
3791 if ((attr->sched_period != 0 &&
3792 attr->sched_period < attr->sched_deadline) ||
3793 attr->sched_deadline < attr->sched_runtime)
3794 return false;
3795
3796 return true;
3797}
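
/*
 * Illustrative parameter sets (sketch values):
 *
 *	runtime 10ms, deadline 50ms, period 100ms  -> valid
 *	runtime 10ms, deadline 50ms, period 0      -> valid, the period
 *						defaults to the deadline
 *	runtime 500ns, deadline 50ms, period 100ms -> rejected, runtime
 *						below the ~1us resolution
 *	runtime 10ms, deadline 5ms, period 100ms   -> rejected, runtime
 *						exceeds the deadline
 */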
3798
3799/*
 * Check whether the target process has a UID that matches the current process's
3801 */
3802static bool check_same_owner(struct task_struct *p)
3803{
3804 const struct cred *cred = current_cred(), *pcred;
3805 bool match;
3806
3807 rcu_read_lock();
3808 pcred = __task_cred(p);
3809 match = (uid_eq(cred->euid, pcred->euid) ||
3810 uid_eq(cred->euid, pcred->uid));
3811 rcu_read_unlock();
3812 return match;
3813}
3814
3815static bool dl_param_changed(struct task_struct *p,
3816 const struct sched_attr *attr)
3817{
3818 struct sched_dl_entity *dl_se = &p->dl;
3819
3820 if (dl_se->dl_runtime != attr->sched_runtime ||
3821 dl_se->dl_deadline != attr->sched_deadline ||
3822 dl_se->dl_period != attr->sched_period ||
3823 dl_se->flags != attr->sched_flags)
3824 return true;
3825
3826 return false;
3827}
3828
3829static int __sched_setscheduler(struct task_struct *p,
3830 const struct sched_attr *attr,
3831 bool user, bool pi)
3832{
3833 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
3834 MAX_RT_PRIO - 1 - attr->sched_priority;
3835 int retval, oldprio, oldpolicy = -1, queued, running;
3836 int new_effective_prio, policy = attr->sched_policy;
3837 unsigned long flags;
3838 const struct sched_class *prev_class;
3839 struct rq *rq;
3840 int reset_on_fork;
3841 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
3842
3843 /* may grab non-irq protected spin_locks */
3844 BUG_ON(in_interrupt());
3845recheck:
3846 /* double check policy once rq lock held */
3847 if (policy < 0) {
3848 reset_on_fork = p->sched_reset_on_fork;
3849 policy = oldpolicy = p->policy;
3850 } else {
3851 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
3852
3853 if (!valid_policy(policy))
3854 return -EINVAL;
3855 }
3856
3857 if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
3858 return -EINVAL;
3859
3860 /*
3861 * Valid priorities for SCHED_FIFO and SCHED_RR are
3862 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3863 * SCHED_BATCH and SCHED_IDLE is 0.
3864 */
3865 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
3866 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
3867 return -EINVAL;
3868 if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
3869 (rt_policy(policy) != (attr->sched_priority != 0)))
3870 return -EINVAL;
3871
3872 /*
3873 * Allow unprivileged RT tasks to decrease priority:
3874 */
3875 if (user && !capable(CAP_SYS_NICE)) {
3876 if (fair_policy(policy)) {
3877 if (attr->sched_nice < task_nice(p) &&
3878 !can_nice(p, attr->sched_nice))
3879 return -EPERM;
3880 }
3881
3882 if (rt_policy(policy)) {
3883 unsigned long rlim_rtprio =
3884 task_rlimit(p, RLIMIT_RTPRIO);
3885
3886 /* can't set/change the rt policy */
3887 if (policy != p->policy && !rlim_rtprio)
3888 return -EPERM;
3889
3890 /* can't increase priority */
3891 if (attr->sched_priority > p->rt_priority &&
3892 attr->sched_priority > rlim_rtprio)
3893 return -EPERM;
3894 }
3895
3896 /*
3897 * Can't set/change SCHED_DEADLINE policy at all for now
3898 * (safest behavior); in the future we would like to allow
3899 * unprivileged DL tasks to increase their relative deadline
3900 * or reduce their runtime (both ways reducing utilization)
3901 */
3902 if (dl_policy(policy))
3903 return -EPERM;
3904
3905 /*
3906 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3907 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
3908 */
3909 if (idle_policy(p->policy) && !idle_policy(policy)) {
3910 if (!can_nice(p, task_nice(p)))
3911 return -EPERM;
3912 }
3913
3914 /* can't change other user's priorities */
3915 if (!check_same_owner(p))
3916 return -EPERM;
3917
3918 /* Normal users shall not reset the sched_reset_on_fork flag */
3919 if (p->sched_reset_on_fork && !reset_on_fork)
3920 return -EPERM;
3921 }
3922
3923 if (user) {
3924 retval = security_task_setscheduler(p);
3925 if (retval)
3926 return retval;
3927 }
3928
3929 /*
3930 * make sure no PI-waiters arrive (or leave) while we are
3931 * changing the priority of the task:
3932 *
3933 * To be able to change p->policy safely, the appropriate
3934 * runqueue lock must be held.
3935 */
3936 rq = task_rq_lock(p, &flags);
3937
3938 /*
	 * Changing the policy of the stop threads is a very bad idea.
3940 */
3941 if (p == rq->stop) {
3942 task_rq_unlock(rq, p, &flags);
3943 return -EINVAL;
3944 }
3945
3946 /*
3947 * If not changing anything there's no need to proceed further,
3948 * but store a possible modification of reset_on_fork.
3949 */
3950 if (unlikely(policy == p->policy)) {
3951 if (fair_policy(policy) && attr->sched_nice != task_nice(p))
3952 goto change;
3953 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
3954 goto change;
3955 if (dl_policy(policy) && dl_param_changed(p, attr))
3956 goto change;
3957
3958 p->sched_reset_on_fork = reset_on_fork;
3959 task_rq_unlock(rq, p, &flags);
3960 return 0;
3961 }
3962change:
3963
3964 if (user) {
3965#ifdef CONFIG_RT_GROUP_SCHED
3966 /*
3967 * Do not allow realtime tasks into groups that have no runtime
3968 * assigned.
3969 */
3970 if (rt_bandwidth_enabled() && rt_policy(policy) &&
3971 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
3972 !task_group_is_autogroup(task_group(p))) {
3973 task_rq_unlock(rq, p, &flags);
3974 return -EPERM;
3975 }
3976#endif
3977#ifdef CONFIG_SMP
3978 if (dl_bandwidth_enabled() && dl_policy(policy)) {
3979 cpumask_t *span = rq->rd->span;
3980
3981 /*
3982 * Don't allow tasks with an affinity mask smaller than
3983 * the entire root_domain to become SCHED_DEADLINE. We
3984 * will also fail if there's no bandwidth available.
3985 */
3986 if (!cpumask_subset(span, &p->cpus_allowed) ||
3987 rq->rd->dl_bw.bw == 0) {
3988 task_rq_unlock(rq, p, &flags);
3989 return -EPERM;
3990 }
3991 }
3992#endif
3993 }
3994
3995 /* recheck policy now with rq lock held */
3996 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3997 policy = oldpolicy = -1;
3998 task_rq_unlock(rq, p, &flags);
3999 goto recheck;
4000 }
4001
4002 /*
4003 * If setscheduling to SCHED_DEADLINE (or changing the parameters
4004 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
4005 * is available.
4006 */
4007 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
4008 task_rq_unlock(rq, p, &flags);
4009 return -EBUSY;
4010 }
4011
4012 p->sched_reset_on_fork = reset_on_fork;
4013 oldprio = p->prio;
4014
4015 if (pi) {
4016 /*
4017 * Take priority boosted tasks into account. If the new
4018 * effective priority is unchanged, we just store the new
4019 * normal parameters and do not touch the scheduler class and
		 * the runqueue. This will be done when the task deboosts
		 * itself.
4022 */
4023 new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
4024 if (new_effective_prio == oldprio)
4025 queue_flags &= ~DEQUEUE_MOVE;
4026 }
4027
4028 queued = task_on_rq_queued(p);
4029 running = task_current(rq, p);
4030 if (queued)
4031 dequeue_task(rq, p, queue_flags);
4032 if (running)
4033 put_prev_task(rq, p);
4034
4035 prev_class = p->sched_class;
4036 __setscheduler(rq, p, attr, pi);
4037
4038 if (running)
4039 p->sched_class->set_curr_task(rq);
4040 if (queued) {
4041 /*
4042 * We enqueue to tail when the priority of a task is
4043 * increased (user space view).
4044 */
4045 if (oldprio < p->prio)
4046 queue_flags |= ENQUEUE_HEAD;
4047
4048 enqueue_task(rq, p, queue_flags);
4049 }
4050
4051 check_class_changed(rq, p, prev_class, oldprio);
4052 preempt_disable(); /* avoid rq from going away on us */
4053 task_rq_unlock(rq, p, &flags);
4054
4055 if (pi)
4056 rt_mutex_adjust_pi(p);
4057
4058 /*
4059 * Run balance callbacks after we've adjusted the PI chain.
4060 */
4061 balance_callback(rq);
4062 preempt_enable();
4063
4064 return 0;
4065}
4066
4067static int _sched_setscheduler(struct task_struct *p, int policy,
4068 const struct sched_param *param, bool check)
4069{
4070 struct sched_attr attr = {
4071 .sched_policy = policy,
4072 .sched_priority = param->sched_priority,
4073 .sched_nice = PRIO_TO_NICE(p->static_prio),
4074 };
4075
4076 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
4077 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
4078 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
4079 policy &= ~SCHED_RESET_ON_FORK;
4080 attr.sched_policy = policy;
4081 }
4082
4083 return __sched_setscheduler(p, &attr, check, true);
4084}
4085/**
4086 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4087 * @p: the task in question.
4088 * @policy: new policy.
4089 * @param: structure containing the new RT priority.
4090 *
4091 * Return: 0 on success. An error code otherwise.
4092 *
 * NOTE that the task may already be dead.
4094 */
4095int sched_setscheduler(struct task_struct *p, int policy,
4096 const struct sched_param *param)
4097{
4098 return _sched_setscheduler(p, policy, param, true);
4099}
4100EXPORT_SYMBOL_GPL(sched_setscheduler);
4101
4102int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
4103{
4104 return __sched_setscheduler(p, attr, true, true);
4105}
4106EXPORT_SYMBOL_GPL(sched_setattr);
4107
4108/**
4109 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4110 * @p: the task in question.
4111 * @policy: new policy.
4112 * @param: structure containing the new RT priority.
4113 *
4114 * Just like sched_setscheduler, only don't bother checking if the
4115 * current context has permission. For example, this is needed in
4116 * stop_machine(): we create temporary high priority worker threads,
4117 * but our caller might not have that capability.
4118 *
4119 * Return: 0 on success. An error code otherwise.
4120 */
4121int sched_setscheduler_nocheck(struct task_struct *p, int policy,
4122 const struct sched_param *param)
4123{
4124 return _sched_setscheduler(p, policy, param, false);
4125}
4126EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
4127
4128static int
4129do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4130{
4131 struct sched_param lparam;
4132 struct task_struct *p;
4133 int retval;
4134
4135 if (!param || pid < 0)
4136 return -EINVAL;
4137 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4138 return -EFAULT;
4139
4140 rcu_read_lock();
4141 retval = -ESRCH;
4142 p = find_process_by_pid(pid);
4143 if (p != NULL)
4144 retval = sched_setscheduler(p, policy, &lparam);
4145 rcu_read_unlock();
4146
4147 return retval;
4148}
4149
4150/*
4151 * Mimics kernel/events/core.c perf_copy_attr().
4152 */
4153static int sched_copy_attr(struct sched_attr __user *uattr,
4154 struct sched_attr *attr)
4155{
4156 u32 size;
4157 int ret;
4158
4159 if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
4160 return -EFAULT;
4161
4162 /*
4163 * zero the full structure, so that a short copy will be nice.
4164 */
4165 memset(attr, 0, sizeof(*attr));
4166
4167 ret = get_user(size, &uattr->size);
4168 if (ret)
4169 return ret;
4170
4171 if (size > PAGE_SIZE) /* silly large */
4172 goto err_size;
4173
4174 if (!size) /* abi compat */
4175 size = SCHED_ATTR_SIZE_VER0;
4176
4177 if (size < SCHED_ATTR_SIZE_VER0)
4178 goto err_size;
4179
4180 /*
4181 * If we're handed a bigger struct than we know of,
4182 * ensure all the unknown bits are 0 - i.e. new
4183 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
4185 */
4186 if (size > sizeof(*attr)) {
4187 unsigned char __user *addr;
4188 unsigned char __user *end;
4189 unsigned char val;
4190
4191 addr = (void __user *)uattr + sizeof(*attr);
4192 end = (void __user *)uattr + size;
4193
4194 for (; addr < end; addr++) {
4195 ret = get_user(val, addr);
4196 if (ret)
4197 return ret;
4198 if (val)
4199 goto err_size;
4200 }
4201 size = sizeof(*attr);
4202 }
4203
4204 ret = copy_from_user(attr, uattr, size);
4205 if (ret)
4206 return -EFAULT;
4207
4208 /*
4209 * XXX: do we want to be lenient like existing syscalls; or do we want
4210 * to be strict and return an error on out-of-bounds values?
4211 */
4212 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
4213
4214 return 0;
4215
4216err_size:
4217 put_user(sizeof(*attr), &uattr->size);
4218 return -E2BIG;
4219}
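
/*
 * Illustrative ABI situations (sketch sizes; sizeof(*attr) is whatever
 * this kernel was built with):
 *
 *	user size == 0                            -> treated as
 *						SCHED_ATTR_SIZE_VER0
 *	user size <  SCHED_ATTR_SIZE_VER0         -> -E2BIG, and
 *						uattr->size is rewritten
 *						to the kernel's size
 *	user size >  sizeof(*attr), tail all zero -> accepted, truncated
 *	user size >  sizeof(*attr), tail non-zero -> -E2BIG
 */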
4220
4221/**
4222 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4223 * @pid: the pid in question.
4224 * @policy: new policy.
4225 * @param: structure containing the new RT priority.
4226 *
4227 * Return: 0 on success. An error code otherwise.
4228 */
4229SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4230 struct sched_param __user *, param)
4231{
4232 /* negative values for policy are not valid */
4233 if (policy < 0)
4234 return -EINVAL;
4235
4236 return do_sched_setscheduler(pid, policy, param);
4237}
4238
4239/**
4240 * sys_sched_setparam - set/change the RT priority of a thread
4241 * @pid: the pid in question.
4242 * @param: structure containing the new RT priority.
4243 *
4244 * Return: 0 on success. An error code otherwise.
4245 */
4246SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
4247{
4248 return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
4249}
4250
4251/**
4252 * sys_sched_setattr - same as above, but with extended sched_attr
4253 * @pid: the pid in question.
4254 * @uattr: structure containing the extended parameters.
4255 * @flags: for future extension.
4256 */
4257SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
4258 unsigned int, flags)
4259{
4260 struct sched_attr attr;
4261 struct task_struct *p;
4262 int retval;
4263
4264 if (!uattr || pid < 0 || flags)
4265 return -EINVAL;
4266
4267 retval = sched_copy_attr(uattr, &attr);
4268 if (retval)
4269 return retval;
4270
4271 if ((int)attr.sched_policy < 0)
4272 return -EINVAL;
4273
4274 rcu_read_lock();
4275 retval = -ESRCH;
4276 p = find_process_by_pid(pid);
4277 if (p != NULL)
4278 retval = sched_setattr(p, &attr);
4279 rcu_read_unlock();
4280
4281 return retval;
4282}
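
/*
 * Illustrative user-space call (a sketch; glibc provides no wrapper,
 * so raw syscall() is the usual route):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	(10ms)
 *		.sched_deadline	=  30 * 1000 * 1000,	(30ms)
 *		.sched_period	= 100 * 1000 * 1000,	(100ms)
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 *
 * pid 0 means the calling thread; flags must be 0 for now.
 */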
4283
4284/**
4285 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4286 * @pid: the pid in question.
4287 *
4288 * Return: On success, the policy of the thread. Otherwise, a negative error
4289 * code.
4290 */
4291SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
4292{
4293 struct task_struct *p;
4294 int retval;
4295
4296 if (pid < 0)
4297 return -EINVAL;
4298
4299 retval = -ESRCH;
4300 rcu_read_lock();
4301 p = find_process_by_pid(pid);
4302 if (p) {
4303 retval = security_task_getscheduler(p);
4304 if (!retval)
4305 retval = p->policy
4306 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
4307 }
4308 rcu_read_unlock();
4309 return retval;
4310}
4311
4312/**
4313 * sys_sched_getparam - get the RT priority of a thread
4314 * @pid: the pid in question.
4315 * @param: structure containing the RT priority.
4316 *
4317 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
4318 * code.
4319 */
4320SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
4321{
4322 struct sched_param lp = { .sched_priority = 0 };
4323 struct task_struct *p;
4324 int retval;
4325
4326 if (!param || pid < 0)
4327 return -EINVAL;
4328
4329 rcu_read_lock();
4330 p = find_process_by_pid(pid);
4331 retval = -ESRCH;
4332 if (!p)
4333 goto out_unlock;
4334
4335 retval = security_task_getscheduler(p);
4336 if (retval)
4337 goto out_unlock;
4338
4339 if (task_has_rt_policy(p))
4340 lp.sched_priority = p->rt_priority;
4341 rcu_read_unlock();
4342
4343 /*
4344 * This one might sleep; we cannot do it with a spinlock held ...
4345 */
4346 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4347
4348 return retval;
4349
4350out_unlock:
4351 rcu_read_unlock();
4352 return retval;
4353}
4354
4355static int sched_read_attr(struct sched_attr __user *uattr,
4356 struct sched_attr *attr,
4357 unsigned int usize)
4358{
4359 int ret;
4360
4361 if (!access_ok(VERIFY_WRITE, uattr, usize))
4362 return -EFAULT;
4363
4364 /*
4365 * If we're handed a smaller struct than we know of,
4366 * ensure all the unknown bits are 0 - i.e. old
4367 * user-space does not get incomplete information.
4368 */
4369 if (usize < sizeof(*attr)) {
4370 unsigned char *addr;
4371 unsigned char *end;
4372
4373 addr = (void *)attr + usize;
4374 end = (void *)attr + sizeof(*attr);
4375
4376 for (; addr < end; addr++) {
4377 if (*addr)
4378 return -EFBIG;
4379 }
4380
4381 attr->size = usize;
4382 }
4383
4384 ret = copy_to_user(uattr, attr, attr->size);
4385 if (ret)
4386 return -EFAULT;
4387
4388 return 0;
4389}
4390
4391/**
4392 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
4393 * @pid: the pid in question.
4394 * @uattr: structure containing the extended parameters.
4395 * @size: sizeof(attr) for fwd/bwd comp.
4396 * @flags: for future extension.
4397 */
4398SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
4399 unsigned int, size, unsigned int, flags)
4400{
4401 struct sched_attr attr = {
4402 .size = sizeof(struct sched_attr),
4403 };
4404 struct task_struct *p;
4405 int retval;
4406
4407 if (!uattr || pid < 0 || size > PAGE_SIZE ||
4408 size < SCHED_ATTR_SIZE_VER0 || flags)
4409 return -EINVAL;
4410
4411 rcu_read_lock();
4412 p = find_process_by_pid(pid);
4413 retval = -ESRCH;
4414 if (!p)
4415 goto out_unlock;
4416
4417 retval = security_task_getscheduler(p);
4418 if (retval)
4419 goto out_unlock;
4420
4421 attr.sched_policy = p->policy;
4422 if (p->sched_reset_on_fork)
4423 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
4424 if (task_has_dl_policy(p))
4425 __getparam_dl(p, &attr);
4426 else if (task_has_rt_policy(p))
4427 attr.sched_priority = p->rt_priority;
4428 else
4429 attr.sched_nice = task_nice(p);
4430
4431 rcu_read_unlock();
4432
4433 retval = sched_read_attr(uattr, &attr, size);
4434 return retval;
4435
4436out_unlock:
4437 rcu_read_unlock();
4438 return retval;
4439}
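
/*
 * Companion user-space sketch (illustrative): reading the attributes back.
 * Any size from SCHED_ATTR_SIZE_VER0 up to PAGE_SIZE is accepted, and
 * sched_read_attr() above guarantees an old caller never sees fields it
 * did not ask for:
 *
 *	struct sched_attr attr;
 *
 *	if (!syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0))
 *		printf("policy=%u nice=%d\n",
 *		       attr.sched_policy, attr.sched_nice);
 */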
4440
4441long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
4442{
4443 cpumask_var_t cpus_allowed, new_mask;
4444 struct task_struct *p;
4445 int retval;
4446
4447 rcu_read_lock();
4448
4449 p = find_process_by_pid(pid);
4450 if (!p) {
4451 rcu_read_unlock();
4452 return -ESRCH;
4453 }
4454
4455 /* Prevent p going away */
4456 get_task_struct(p);
4457 rcu_read_unlock();
4458
4459 if (p->flags & PF_NO_SETAFFINITY) {
4460 retval = -EINVAL;
4461 goto out_put_task;
4462 }
4463 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4464 retval = -ENOMEM;
4465 goto out_put_task;
4466 }
4467 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4468 retval = -ENOMEM;
4469 goto out_free_cpus_allowed;
4470 }
4471 retval = -EPERM;
4472 if (!check_same_owner(p)) {
4473 rcu_read_lock();
4474 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
4475 rcu_read_unlock();
4476 goto out_free_new_mask;
4477 }
4478 rcu_read_unlock();
4479 }
4480
4481 retval = security_task_setscheduler(p);
4482 if (retval)
4483 goto out_free_new_mask;
4484
4485
4486 cpuset_cpus_allowed(p, cpus_allowed);
4487 cpumask_and(new_mask, in_mask, cpus_allowed);
4488
4489 /*
4490 * Since bandwidth control happens on a root_domain basis,
4491 * if admission testing is enabled, we only admit -deadline
4492 * tasks that are allowed to run on all the CPUs in the task's
4493 * root_domain.
4494 */
4495#ifdef CONFIG_SMP
4496 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
4497 rcu_read_lock();
4498 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
4499 retval = -EBUSY;
4500 rcu_read_unlock();
4501 goto out_free_new_mask;
4502 }
4503 rcu_read_unlock();
4504 }
4505#endif
4506again:
4507 retval = __set_cpus_allowed_ptr(p, new_mask, true);
4508
4509 if (!retval) {
4510 cpuset_cpus_allowed(p, cpus_allowed);
4511 if (!cpumask_subset(new_mask, cpus_allowed)) {
4512 /*
4513 * We must have raced with a concurrent cpuset
4514 * update. Just reset the cpus_allowed to the
4515 * cpuset's cpus_allowed.
4516 */
4517 cpumask_copy(new_mask, cpus_allowed);
4518 goto again;
4519 }
4520 }
4521out_free_new_mask:
4522 free_cpumask_var(new_mask);
4523out_free_cpus_allowed:
4524 free_cpumask_var(cpus_allowed);
4525out_put_task:
4526 put_task_struct(p);
4527 return retval;
4528}
4529
4530static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4531 struct cpumask *new_mask)
4532{
4533 if (len < cpumask_size())
4534 cpumask_clear(new_mask);
4535 else if (len > cpumask_size())
4536 len = cpumask_size();
4537
4538 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4539}
4540
4541/**
4542 * sys_sched_setaffinity - set the cpu affinity of a process
4543 * @pid: pid of the process
4544 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4545 * @user_mask_ptr: user-space pointer to the new cpu mask
4546 *
4547 * Return: 0 on success. An error code otherwise.
4548 */
4549SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4550 unsigned long __user *, user_mask_ptr)
4551{
4552 cpumask_var_t new_mask;
4553 int retval;
4554
4555 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4556 return -ENOMEM;
4557
4558 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4559 if (retval == 0)
4560 retval = sched_setaffinity(pid, new_mask);
4561 free_cpumask_var(new_mask);
4562 return retval;
4563}
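
/*
 * User-space sketch (illustrative): the glibc wrapper hides the raw bitmask
 * handling performed by get_user_cpu_mask() above.
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);	// pin the calling thread to CPU 2
 *	if (sched_setaffinity(0, sizeof(set), &set))
 *		perror("sched_setaffinity");
 */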
4564
4565long sched_getaffinity(pid_t pid, struct cpumask *mask)
4566{
4567 struct task_struct *p;
4568 unsigned long flags;
4569 int retval;
4570
4571 rcu_read_lock();
4572
4573 retval = -ESRCH;
4574 p = find_process_by_pid(pid);
4575 if (!p)
4576 goto out_unlock;
4577
4578 retval = security_task_getscheduler(p);
4579 if (retval)
4580 goto out_unlock;
4581
4582 raw_spin_lock_irqsave(&p->pi_lock, flags);
4583 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
4584 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4585
4586out_unlock:
4587 rcu_read_unlock();
4588
4589 return retval;
4590}
4591
4592/**
4593 * sys_sched_getaffinity - get the cpu affinity of a process
4594 * @pid: pid of the process
4595 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4596 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4597 *
4598 * Return: 0 on success. An error code otherwise.
4599 */
4600SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4601 unsigned long __user *, user_mask_ptr)
4602{
4603 int ret;
4604 cpumask_var_t mask;
4605
4606 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4607 return -EINVAL;
4608 if (len & (sizeof(unsigned long)-1))
4609 return -EINVAL;
4610
4611 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4612 return -ENOMEM;
4613
4614 ret = sched_getaffinity(pid, mask);
4615 if (ret == 0) {
4616 size_t retlen = min_t(size_t, len, cpumask_size());
4617
4618 if (copy_to_user(user_mask_ptr, mask, retlen))
4619 ret = -EFAULT;
4620 else
4621 ret = retlen;
4622 }
4623 free_cpumask_var(mask);
4624
4625 return ret;
4626}
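
/*
 * Note for raw-syscall users (illustrative sketch): unlike the glibc
 * wrapper, which returns 0 on success, the bare syscall returns the number
 * of bytes copied into the mask (retlen above):
 *
 *	cpu_set_t set;
 *	long n = syscall(SYS_sched_getaffinity, 0, sizeof(set), &set);
 *	// n > 0 is the byte count; glibc's wrapper maps this to 0
 */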
4627
4628/**
4629 * sys_sched_yield - yield the current processor to other threads.
4630 *
4631 * This function yields the current CPU to other tasks. If there are no
4632 * other threads running on this CPU then this function will return.
4633 *
4634 * Return: 0.
4635 */
4636SYSCALL_DEFINE0(sched_yield)
4637{
4638 struct rq *rq = this_rq_lock();
4639
4640 schedstat_inc(rq, yld_count);
4641 current->sched_class->yield_task(rq);
4642
4643 /*
4644 * Since we are going to call schedule() anyway, there's
4645 * no need to preempt or enable interrupts:
4646 */
4647 __release(rq->lock);
4648 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
4649 do_raw_spin_unlock(&rq->lock);
4650 sched_preempt_enable_no_resched();
4651
4652 schedule();
4653
4654 return 0;
4655}
4656
4657int __sched _cond_resched(void)
4658{
4659 if (should_resched(0)) {
4660 preempt_schedule_common();
4661 return 1;
4662 }
4663 return 0;
4664}
4665EXPORT_SYMBOL(_cond_resched);
4666
4667/*
4668 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
4669 * call schedule, and on return reacquire the lock.
4670 *
4671 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
4672 * operations here to prevent schedule() from being called twice (once via
4673 * spin_unlock(), once by hand).
4674 */
4675int __cond_resched_lock(spinlock_t *lock)
4676{
4677 int resched = should_resched(PREEMPT_LOCK_OFFSET);
4678 int ret = 0;
4679
4680 lockdep_assert_held(lock);
4681
4682 if (spin_needbreak(lock) || resched) {
4683 spin_unlock(lock);
4684 if (resched)
4685 preempt_schedule_common();
4686 else
4687 cpu_relax();
4688 ret = 1;
4689 spin_lock(lock);
4690 }
4691 return ret;
4692}
4693EXPORT_SYMBOL(__cond_resched_lock);
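
/*
 * Typical usage sketch (hypothetical caller; foo_lock, foo_list and
 * process() are invented names): walk a long list under a spinlock while
 * periodically letting other tasks and lock waiters in. Because
 * cond_resched_lock() may drop and retake the lock, the iteration must
 * tolerate the list changing underneath it, e.g. by restarting:
 *
 *	restart:
 *	spin_lock(&foo_lock);
 *	list_for_each_entry(obj, &foo_list, node) {
 *		process(obj);
 *		if (cond_resched_lock(&foo_lock)) {
 *			spin_unlock(&foo_lock);
 *			goto restart;	// lock was dropped meanwhile
 *		}
 *	}
 *	spin_unlock(&foo_lock);
 */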
4694
4695int __sched __cond_resched_softirq(void)
4696{
4697 BUG_ON(!in_softirq());
4698
4699 if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
4700 local_bh_enable();
4701 preempt_schedule_common();
4702 local_bh_disable();
4703 return 1;
4704 }
4705 return 0;
4706}
4707EXPORT_SYMBOL(__cond_resched_softirq);
4708
4709/**
4710 * yield - yield the current processor to other threads.
4711 *
4712 * Do not ever use this function, there's a 99% chance you're doing it wrong.
4713 *
4714 * The scheduler is at all times free to pick the calling task as the most
4715 * eligible task to run, if removing the yield() call from your code breaks
4716 * it, it's already broken.
4717 *
4718 * Typical broken usage is:
4719 *
4720 * while (!event)
4721 * yield();
4722 *
4723 * where one assumes that yield() will let 'the other' process run that will
4724 * make event true. If the current task is a SCHED_FIFO task that will never
4725 * happen. Never use yield() as a progress guarantee!!
4726 *
4727 * If you want to use yield() to wait for something, use wait_event().
4728 * If you want to use yield() to be 'nice' for others, use cond_resched().
4729 * If you still want to use yield(), do not!
4730 */
4731void __sched yield(void)
4732{
4733 set_current_state(TASK_RUNNING);
4734 sys_sched_yield();
4735}
4736EXPORT_SYMBOL(yield);
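
/*
 * The correct replacement for the broken pattern documented above (sketch;
 * foo_wq and 'event' are invented names): sleep until the condition holds
 * instead of spinning on yield():
 *
 *	// waiter
 *	wait_event(foo_wq, event);
 *
 *	// waker
 *	event = true;
 *	wake_up(&foo_wq);
 */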
4737
4738/**
4739 * yield_to - yield the current processor to another thread in
4740 * your thread group, or accelerate that thread toward the
4741 * processor it's on.
4742 * @p: target task
4743 * @preempt: whether task preemption is allowed or not
4744 *
4745 * It's the caller's job to ensure that the target task struct
4746 * can't go away on us before we can do any checks.
4747 *
4748 * Return:
4749 * true (>0) if we indeed boosted the target task.
4750 * false (0) if we failed to boost the target.
4751 * -ESRCH if there's no task to yield to.
4752 */
4753int __sched yield_to(struct task_struct *p, bool preempt)
4754{
4755 struct task_struct *curr = current;
4756 struct rq *rq, *p_rq;
4757 unsigned long flags;
4758 int yielded = 0;
4759
4760 local_irq_save(flags);
4761 rq = this_rq();
4762
4763again:
4764 p_rq = task_rq(p);
4765 /*
4766 * If we're the only runnable task on the rq and target rq also
4767 * has only one task, there's absolutely no point in yielding.
4768 */
4769 if (rq->nr_running == 1 && p_rq->nr_running == 1) {
4770 yielded = -ESRCH;
4771 goto out_irq;
4772 }
4773
4774 double_rq_lock(rq, p_rq);
4775 if (task_rq(p) != p_rq) {
4776 double_rq_unlock(rq, p_rq);
4777 goto again;
4778 }
4779
4780 if (!curr->sched_class->yield_to_task)
4781 goto out_unlock;
4782
4783 if (curr->sched_class != p->sched_class)
4784 goto out_unlock;
4785
4786 if (task_running(p_rq, p) || p->state)
4787 goto out_unlock;
4788
4789 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
4790 if (yielded) {
4791 schedstat_inc(rq, yld_count);
4792 /*
4793 * Make p's CPU reschedule; pick_next_entity takes care of
4794 * fairness.
4795 */
4796 if (preempt && rq != p_rq)
4797 resched_curr(p_rq);
4798 }
4799
4800out_unlock:
4801 double_rq_unlock(rq, p_rq);
4802out_irq:
4803 local_irq_restore(flags);
4804
4805 if (yielded > 0)
4806 schedule();
4807
4808 return yielded;
4809}
4810EXPORT_SYMBOL_GPL(yield_to);
4811
4812/*
4813 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
4814 * that process accounting knows that this is a task in IO wait state.
4815 */
4816long __sched io_schedule_timeout(long timeout)
4817{
4818 int old_iowait = current->in_iowait;
4819 struct rq *rq;
4820 long ret;
4821
4822 current->in_iowait = 1;
4823 blk_schedule_flush_plug(current);
4824
4825 delayacct_blkio_start();
4826 rq = raw_rq();
4827 atomic_inc(&rq->nr_iowait);
4828 ret = schedule_timeout(timeout);
4829 current->in_iowait = old_iowait;
4830 atomic_dec(&rq->nr_iowait);
4831 delayacct_blkio_end();
4832
4833 return ret;
4834}
4835EXPORT_SYMBOL(io_schedule_timeout);
4836
4837/**
4838 * sys_sched_get_priority_max - return maximum RT priority.
4839 * @policy: scheduling class.
4840 *
4841 * Return: On success, this syscall returns the maximum
4842 * rt_priority that can be used by a given scheduling class.
4843 * On failure, a negative error code is returned.
4844 */
4845SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
4846{
4847 int ret = -EINVAL;
4848
4849 switch (policy) {
4850 case SCHED_FIFO:
4851 case SCHED_RR:
4852 ret = MAX_USER_RT_PRIO-1;
4853 break;
4854 case SCHED_DEADLINE:
4855 case SCHED_NORMAL:
4856 case SCHED_BATCH:
4857 case SCHED_IDLE:
4858 ret = 0;
4859 break;
4860 }
4861 return ret;
4862}
4863
4864/**
4865 * sys_sched_get_priority_min - return minimum RT priority.
4866 * @policy: scheduling class.
4867 *
4868 * Return: On success, this syscall returns the minimum
4869 * rt_priority that can be used by a given scheduling class.
4870 * On failure, a negative error code is returned.
4871 */
4872SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4873{
4874 int ret = -EINVAL;
4875
4876 switch (policy) {
4877 case SCHED_FIFO:
4878 case SCHED_RR:
4879 ret = 1;
4880 break;
4881 case SCHED_DEADLINE:
4882 case SCHED_NORMAL:
4883 case SCHED_BATCH:
4884 case SCHED_IDLE:
4885 ret = 0;
4886 }
4887 return ret;
4888}
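
/*
 * User-space sketch (illustrative): probing the valid sched_priority range
 * before switching policy. On a default build SCHED_FIFO reports 1..99,
 * since MAX_USER_RT_PRIO is 100.
 *
 *	int lo = sched_get_priority_min(SCHED_FIFO);
 *	int hi = sched_get_priority_max(SCHED_FIFO);
 *	struct sched_param sp = { .sched_priority = (lo + hi) / 2 };
 *
 *	if (sched_setscheduler(0, SCHED_FIFO, &sp))
 *		perror("sched_setscheduler");
 */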
4889
4890/**
4891 * sys_sched_rr_get_interval - return the default timeslice of a process.
4892 * @pid: pid of the process.
4893 * @interval: userspace pointer to the timeslice value.
4894 *
4895 * This syscall writes the default timeslice value of a given process
4896 * into the user-space timespec buffer. A value of '0' means infinity.
4897 *
4898 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
4899 * an error code.
4900 */
4901SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
4902 struct timespec __user *, interval)
4903{
4904 struct task_struct *p;
4905 unsigned int time_slice;
4906 unsigned long flags;
4907 struct rq *rq;
4908 int retval;
4909 struct timespec t;
4910
4911 if (pid < 0)
4912 return -EINVAL;
4913
4914 retval = -ESRCH;
4915 rcu_read_lock();
4916 p = find_process_by_pid(pid);
4917 if (!p)
4918 goto out_unlock;
4919
4920 retval = security_task_getscheduler(p);
4921 if (retval)
4922 goto out_unlock;
4923
4924 rq = task_rq_lock(p, &flags);
4925 time_slice = 0;
4926 if (p->sched_class->get_rr_interval)
4927 time_slice = p->sched_class->get_rr_interval(rq, p);
4928 task_rq_unlock(rq, p, &flags);
4929
4930 rcu_read_unlock();
4931 jiffies_to_timespec(time_slice, &t);
4932 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
4933 return retval;
4934
4935out_unlock:
4936 rcu_read_unlock();
4937 return retval;
4938}
4939
4940static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
4941
4942void sched_show_task(struct task_struct *p)
4943{
4944 unsigned long free = 0;
4945 int ppid;
4946 unsigned long state = p->state;
4947
4948 if (state)
4949 state = __ffs(state) + 1;
4950 printk(KERN_INFO "%-15.15s %c", p->comm,
4951 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4952#if BITS_PER_LONG == 32
4953 if (state == TASK_RUNNING)
4954 printk(KERN_CONT " running ");
4955 else
4956 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
4957#else
4958 if (state == TASK_RUNNING)
4959 printk(KERN_CONT " running task ");
4960 else
4961 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
4962#endif
4963#ifdef CONFIG_DEBUG_STACK_USAGE
4964 free = stack_not_used(p);
4965#endif
4966 ppid = 0;
4967 rcu_read_lock();
4968 if (pid_alive(p))
4969 ppid = task_pid_nr(rcu_dereference(p->real_parent));
4970 rcu_read_unlock();
4971 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4972 task_pid_nr(p), ppid,
4973 (unsigned long)task_thread_info(p)->flags);
4974
4975 print_worker_info(KERN_INFO, p);
4976 show_stack(p, NULL);
4977}
4978
4979void show_state_filter(unsigned long state_filter)
4980{
4981 struct task_struct *g, *p;
4982
4983#if BITS_PER_LONG == 32
4984 printk(KERN_INFO
4985 "  task                PC stack   pid father\n");
4986#else
4987 printk(KERN_INFO
4988 "  task                        PC stack   pid father\n");
4989#endif
4990 rcu_read_lock();
4991 for_each_process_thread(g, p) {
4992 /*
4993 * reset the NMI-timeout, listing all files on a slow
4994 * console might take a lot of time:
4995 */
4996 touch_nmi_watchdog();
4997 if (!state_filter || (p->state & state_filter))
4998 sched_show_task(p);
4999 }
5000
5001 touch_all_softlockup_watchdogs();
5002
5003#ifdef CONFIG_SCHED_DEBUG
5004 sysrq_sched_debug_show();
5005#endif
5006 rcu_read_unlock();
5007 /*
5008 * Only show locks if all tasks are dumped:
5009 */
5010 if (!state_filter)
5011 debug_show_all_locks();
5012}
5013
5014void init_idle_bootup_task(struct task_struct *idle)
5015{
5016 idle->sched_class = &idle_sched_class;
5017}
5018
5019/**
5020 * init_idle - set up an idle thread for a given CPU
5021 * @idle: task in question
5022 * @cpu: cpu the idle task belongs to
5023 *
5024 * NOTE: this function does not set the idle thread's NEED_RESCHED
5025 * flag, to make booting more robust.
5026 */
5027void init_idle(struct task_struct *idle, int cpu)
5028{
5029 struct rq *rq = cpu_rq(cpu);
5030 unsigned long flags;
5031
5032 raw_spin_lock_irqsave(&idle->pi_lock, flags);
5033 raw_spin_lock(&rq->lock);
5034
5035 __sched_fork(0, idle);
5036 idle->state = TASK_RUNNING;
5037 idle->se.exec_start = sched_clock();
5038
5039 kasan_unpoison_task_stack(idle);
5040
5041#ifdef CONFIG_SMP
5042 /*
5043 * It's possible that init_idle() gets called multiple times on a task;
5044 * in that case do_set_cpus_allowed() will not do the right thing.
5045 *
5046 * And since this is boot we can forgo the serialization.
5047 */
5048 set_cpus_allowed_common(idle, cpumask_of(cpu));
5049#endif
5050 /*
5051 * We have a chicken-and-egg problem here: even though we are
5052 * holding rq->lock, the CPU isn't yet set to this CPU, so the
5053 * lockdep check in task_group() will fail.
5054 *
5055 * Similar case to sched_fork(). / Alternatively we could
5056 * use task_rq_lock() here and obtain the other rq->lock.
5057 *
5058 * Silence PROVE_RCU
5059 */
5060 rcu_read_lock();
5061 __set_task_cpu(idle, cpu);
5062 rcu_read_unlock();
5063
5064 rq->curr = rq->idle = idle;
5065 idle->on_rq = TASK_ON_RQ_QUEUED;
5066#ifdef CONFIG_SMP
5067 idle->on_cpu = 1;
5068#endif
5069 raw_spin_unlock(&rq->lock);
5070 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
5071
5072 /* Set the preempt count _outside_ the spinlocks! */
5073 init_idle_preempt_count(idle, cpu);
5074
5075 /*
5076 * The idle tasks have their own, simple scheduling class:
5077 */
5078 idle->sched_class = &idle_sched_class;
5079 ftrace_graph_init_idle_task(idle, cpu);
5080 vtime_init_idle(idle, cpu);
5081#ifdef CONFIG_SMP
5082 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
5083#endif
5084}
5085
5086int cpuset_cpumask_can_shrink(const struct cpumask *cur,
5087 const struct cpumask *trial)
5088{
5089 int ret = 1, trial_cpus;
5090 struct dl_bw *cur_dl_b;
5091 unsigned long flags;
5092
5093 if (!cpumask_weight(cur))
5094 return ret;
5095
5096 rcu_read_lock_sched();
5097 cur_dl_b = dl_bw_of(cpumask_any(cur));
5098 trial_cpus = cpumask_weight(trial);
5099
5100 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
5101 if (cur_dl_b->bw != -1 &&
5102 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
5103 ret = 0;
5104 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
5105 rcu_read_unlock_sched();
5106
5107 return ret;
5108}
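
/*
 * Worked example of the check above (numbers invented; shown as plain
 * fractions rather than the kernel's fixed-point encoding): with a per-CPU
 * deadline bandwidth limit of 0.5, shrinking to trial_cpus = 2 leaves
 * 0.5 * 2 = 1.0 of capacity; if already-admitted tasks account for
 * total_bw = 1.2, then 1.0 < 1.2 and the shrink is refused (ret = 0).
 */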
5109
5110int task_can_attach(struct task_struct *p,
5111 const struct cpumask *cs_cpus_allowed)
5112{
5113 int ret = 0;
5114
5115 /*
5116 * Kthreads which disallow setaffinity shouldn't be moved
5117 * to a new cpuset; we don't want to change their cpu
5118 * affinity and isolating such threads by their set of
5119 * allowed nodes is unnecessary. Thus, cpusets are not
5120 * applicable for such threads. This prevents checking for
5121 * success of set_cpus_allowed_ptr() on all attached tasks
5122 * before cpus_allowed may be changed.
5123 */
5124 if (p->flags & PF_NO_SETAFFINITY) {
5125 ret = -EINVAL;
5126 goto out;
5127 }
5128
5129#ifdef CONFIG_SMP
5130 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
5131 cs_cpus_allowed)) {
5132 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
5133 cs_cpus_allowed);
5134 struct dl_bw *dl_b;
5135 bool overflow;
5136 int cpus;
5137 unsigned long flags;
5138
5139 rcu_read_lock_sched();
5140 dl_b = dl_bw_of(dest_cpu);
5141 raw_spin_lock_irqsave(&dl_b->lock, flags);
5142 cpus = dl_bw_cpus(dest_cpu);
5143 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
5144 if (overflow)
5145 ret = -EBUSY;
5146 else {
5147 /*
5148 * We reserve space for this task in the destination
5149 * root_domain, as we can't fail after this point.
5150 * We will free resources in the source root_domain
5151 * later on (see set_cpus_allowed_dl()).
5152 */
5153 __dl_add(dl_b, p->dl.dl_bw);
5154 }
5155 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
5156 rcu_read_unlock_sched();
5157
5158 }
5159#endif
5160out:
5161 return ret;
5162}
5163
5164#ifdef CONFIG_SMP
5165
5166#ifdef CONFIG_NUMA_BALANCING
5167/* Migrate current task p to target_cpu */
5168int migrate_task_to(struct task_struct *p, int target_cpu)
5169{
5170 struct migration_arg arg = { p, target_cpu };
5171 int curr_cpu = task_cpu(p);
5172
5173 if (curr_cpu == target_cpu)
5174 return 0;
5175
5176 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
5177 return -EINVAL;
5178
5179 /* TODO: This is not properly updating schedstats */
5180
5181 trace_sched_move_numa(p, curr_cpu, target_cpu);
5182 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
5183}
5184
5185/*
5186 * Requeue a task on a given node and accurately track the number of NUMA
5187 * tasks on the runqueues
5188 */
5189void sched_setnuma(struct task_struct *p, int nid)
5190{
5191 struct rq *rq;
5192 unsigned long flags;
5193 bool queued, running;
5194
5195 rq = task_rq_lock(p, &flags);
5196 queued = task_on_rq_queued(p);
5197 running = task_current(rq, p);
5198
5199 if (queued)
5200 dequeue_task(rq, p, DEQUEUE_SAVE);
5201 if (running)
5202 put_prev_task(rq, p);
5203
5204 p->numa_preferred_nid = nid;
5205
5206 if (running)
5207 p->sched_class->set_curr_task(rq);
5208 if (queued)
5209 enqueue_task(rq, p, ENQUEUE_RESTORE);
5210 task_rq_unlock(rq, p, &flags);
5211}
5212#endif /* CONFIG_NUMA_BALANCING */
5213
5214#ifdef CONFIG_HOTPLUG_CPU
5215/*
5216 * Ensures that the idle task is using init_mm right before its cpu goes
5217 * offline.
5218 */
5219void idle_task_exit(void)
5220{
5221 struct mm_struct *mm = current->active_mm;
5222
5223 BUG_ON(cpu_online(smp_processor_id()));
5224
5225 if (mm != &init_mm) {
5226 switch_mm(mm, &init_mm, current);
5227 finish_arch_post_lock_switch();
5228 }
5229 mmdrop(mm);
5230}
5231
5232/*
5233 * Since this CPU is going 'away' for a while, fold any nr_active delta
5234 * we might have. Assumes we're called after migrate_tasks() so that the
5235 * nr_active count is stable.
5236 *
5237 * Also see the comment "Global load-average calculations".
5238 */
5239static void calc_load_migrate(struct rq *rq)
5240{
5241 long delta = calc_load_fold_active(rq);
5242 if (delta)
5243 atomic_long_add(delta, &calc_load_tasks);
5244}
5245
5246static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
5247{
5248}
5249
5250static const struct sched_class fake_sched_class = {
5251 .put_prev_task = put_prev_task_fake,
5252};
5253
5254static struct task_struct fake_task = {
5255 /*
5256 * Avoid pull_{rt,dl}_task()
5257 */
5258 .prio = MAX_PRIO + 1,
5259 .sched_class = &fake_sched_class,
5260};
5261
5262/*
5263 * Migrate all tasks from the rq; sleeping tasks will be migrated by
5264 * try_to_wake_up()->select_task_rq().
5265 *
5266 * Called with rq->lock held. Even though we're in stop_machine() and
5267 * there's no concurrency possible, we hold the required locks anyway
5268 * because of lock validation efforts.
5269 */
5270static void migrate_tasks(struct rq *dead_rq)
5271{
5272 struct rq *rq = dead_rq;
5273 struct task_struct *next, *stop = rq->stop;
5274 int dest_cpu;
5275
5276 /*
5277 * Fudge the rq selection such that the below task selection loop
5278 * doesn't get stuck on the currently eligible stop task.
5279 *
5280 * We're currently inside stop_machine() and the rq is either stuck
5281 * in the stop_machine_cpu_stop() loop, or we're executing this code,
5282 * either way we should never end up calling schedule() until we're
5283 * done here.
5284 */
5285 rq->stop = NULL;
5286
5287 /*
5288 * The put_prev_task() and pick_next_task() sched
5289 * class methods both need an up-to-date
5290 * value of rq->clock[_task].
5291 */
5292 update_rq_clock(rq);
5293
5294 for (;;) {
5295 /*
5296 * There's this thread running, bail when that's the only
5297 * remaining thread.
5298 */
5299 if (rq->nr_running == 1)
5300 break;
5301
5302 /*
5303 * pick_next_task assumes pinned rq->lock.
5304 */
5305 lockdep_pin_lock(&rq->lock);
5306 next = pick_next_task(rq, &fake_task);
5307 BUG_ON(!next);
5308 next->sched_class->put_prev_task(rq, next);
5309
5310 /*
5311 * Rules for changing task_struct::cpus_allowed are holding
5312 * both pi_lock and rq->lock, such that holding either
5313 * stabilizes the mask.
5314 *
5315 * Dropping rq->lock is not quite as disastrous as it usually is
5316 * because !cpu_active at this point, which means load-balance
5317 * will not interfere. Also, stop-machine.
5318 */
5319 lockdep_unpin_lock(&rq->lock);
5320 raw_spin_unlock(&rq->lock);
5321 raw_spin_lock(&next->pi_lock);
5322 raw_spin_lock(&rq->lock);
5323
5324 /*
5325 * Since we're inside stop-machine, _nothing_ should have
5326 * changed the task; WARN if weird stuff happened, because in
5327 * that case the above rq->lock drop is a fail too.
5328 */
5329 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
5330 raw_spin_unlock(&next->pi_lock);
5331 continue;
5332 }
5333
5334 /* Find suitable destination for @next, with force if needed. */
5335 dest_cpu = select_fallback_rq(dead_rq->cpu, next);
5336
5337 rq = __migrate_task(rq, next, dest_cpu);
5338 if (rq != dead_rq) {
5339 raw_spin_unlock(&rq->lock);
5340 rq = dead_rq;
5341 raw_spin_lock(&rq->lock);
5342 }
5343 raw_spin_unlock(&next->pi_lock);
5344 }
5345
5346 rq->stop = stop;
5347}
5348#endif /* CONFIG_HOTPLUG_CPU */
5349
5350static void set_rq_online(struct rq *rq)
5351{
5352 if (!rq->online) {
5353 const struct sched_class *class;
5354
5355 cpumask_set_cpu(rq->cpu, rq->rd->online);
5356 rq->online = 1;
5357
5358 for_each_class(class) {
5359 if (class->rq_online)
5360 class->rq_online(rq);
5361 }
5362 }
5363}
5364
5365static void set_rq_offline(struct rq *rq)
5366{
5367 if (rq->online) {
5368 const struct sched_class *class;
5369
5370 for_each_class(class) {
5371 if (class->rq_offline)
5372 class->rq_offline(rq);
5373 }
5374
5375 cpumask_clear_cpu(rq->cpu, rq->rd->online);
5376 rq->online = 0;
5377 }
5378}
5379
5380/*
5381 * migration_call - callback that gets triggered when a CPU is added.
5382 * Here we can start up the necessary migration thread for the new CPU.
5383 */
5384static int
5385migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5386{
5387 int cpu = (long)hcpu;
5388 unsigned long flags;
5389 struct rq *rq = cpu_rq(cpu);
5390
5391 switch (action & ~CPU_TASKS_FROZEN) {
5392
5393 case CPU_UP_PREPARE:
5394 rq->calc_load_update = calc_load_update;
5395 account_reset_rq(rq);
5396 break;
5397
5398 case CPU_ONLINE:
5399 /* Update our root-domain */
5400 raw_spin_lock_irqsave(&rq->lock, flags);
5401 if (rq->rd) {
5402 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5403
5404 set_rq_online(rq);
5405 }
5406 raw_spin_unlock_irqrestore(&rq->lock, flags);
5407 break;
5408
5409#ifdef CONFIG_HOTPLUG_CPU
5410 case CPU_DYING:
5411 sched_ttwu_pending();
5412 /* Update our root-domain */
5413 raw_spin_lock_irqsave(&rq->lock, flags);
5414 if (rq->rd) {
5415 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5416 set_rq_offline(rq);
5417 }
5418 migrate_tasks(rq);
5419 BUG_ON(rq->nr_running != 1); /* the migration thread */
5420 raw_spin_unlock_irqrestore(&rq->lock, flags);
5421 break;
5422
5423 case CPU_DEAD:
5424 calc_load_migrate(rq);
5425 break;
5426#endif
5427 }
5428
5429 update_max_interval();
5430
5431 return NOTIFY_OK;
5432}
5433
5434/*
5435 * Register at high priority so that task migration (migrate_all_tasks)
5436 * happens before everything else. This has to be lower priority than
5437 * the notifier in the perf_event subsystem, though.
5438 */
5439static struct notifier_block migration_notifier = {
5440 .notifier_call = migration_call,
5441 .priority = CPU_PRI_MIGRATION,
5442};
5443
5444static void set_cpu_rq_start_time(void)
5445{
5446 int cpu = smp_processor_id();
5447 struct rq *rq = cpu_rq(cpu);
5448 rq->age_stamp = sched_clock_cpu(cpu);
5449}
5450
5451static int sched_cpu_active(struct notifier_block *nfb,
5452 unsigned long action, void *hcpu)
5453{
5454 int cpu = (long)hcpu;
5455
5456 switch (action & ~CPU_TASKS_FROZEN) {
5457 case CPU_STARTING:
5458 set_cpu_rq_start_time();
5459 return NOTIFY_OK;
5460
5461 case CPU_DOWN_FAILED:
5462 set_cpu_active(cpu, true);
5463 return NOTIFY_OK;
5464
5465 default:
5466 return NOTIFY_DONE;
5467 }
5468}
5469
5470static int sched_cpu_inactive(struct notifier_block *nfb,
5471 unsigned long action, void *hcpu)
5472{
5473 switch (action & ~CPU_TASKS_FROZEN) {
5474 case CPU_DOWN_PREPARE:
5475 set_cpu_active((long)hcpu, false);
5476 return NOTIFY_OK;
5477 default:
5478 return NOTIFY_DONE;
5479 }
5480}
5481
5482static int __init migration_init(void)
5483{
5484 void *cpu = (void *)(long)smp_processor_id();
5485 int err;
5486
5487 /* Initialize migration for the boot CPU */
5488 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5489 BUG_ON(err == NOTIFY_BAD);
5490 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5491 register_cpu_notifier(&migration_notifier);
5492
5493 /* Register cpu active notifiers */
5494 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5495 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5496
5497 return 0;
5498}
5499early_initcall(migration_init);
5500
5501static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5502
5503#ifdef CONFIG_SCHED_DEBUG
5504
5505static __read_mostly int sched_debug_enabled;
5506
5507static int __init sched_debug_setup(char *str)
5508{
5509 sched_debug_enabled = 1;
5510
5511 return 0;
5512}
5513early_param("sched_debug", sched_debug_setup);
5514
5515static inline bool sched_debug(void)
5516{
5517 return sched_debug_enabled;
5518}
5519
5520static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
5521 struct cpumask *groupmask)
5522{
5523 struct sched_group *group = sd->groups;
5524
5525 cpumask_clear(groupmask);
5526
5527 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5528
5529 if (!(sd->flags & SD_LOAD_BALANCE)) {
5530 printk("does not load-balance\n");
5531 if (sd->parent)
5532 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5533 " has parent");
5534 return -1;
5535 }
5536
5537 printk(KERN_CONT "span %*pbl level %s\n",
5538 cpumask_pr_args(sched_domain_span(sd)), sd->name);
5539
5540 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
5541 printk(KERN_ERR "ERROR: domain->span does not contain "
5542 "CPU%d\n", cpu);
5543 }
5544 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
5545 printk(KERN_ERR "ERROR: domain->groups does not contain"
5546 " CPU%d\n", cpu);
5547 }
5548
5549 printk(KERN_DEBUG "%*s groups:", level + 1, "");
5550 do {
5551 if (!group) {
5552 printk("\n");
5553 printk(KERN_ERR "ERROR: group is NULL\n");
5554 break;
5555 }
5556
5557 if (!cpumask_weight(sched_group_cpus(group))) {
5558 printk(KERN_CONT "\n");
5559 printk(KERN_ERR "ERROR: empty group\n");
5560 break;
5561 }
5562
5563 if (!(sd->flags & SD_OVERLAP) &&
5564 cpumask_intersects(groupmask, sched_group_cpus(group))) {
5565 printk(KERN_CONT "\n");
5566 printk(KERN_ERR "ERROR: repeated CPUs\n");
5567 break;
5568 }
5569
5570 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
5571
5572 printk(KERN_CONT " %*pbl",
5573 cpumask_pr_args(sched_group_cpus(group)));
5574 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
5575 printk(KERN_CONT " (cpu_capacity = %d)",
5576 group->sgc->capacity);
5577 }
5578
5579 group = group->next;
5580 } while (group != sd->groups);
5581 printk(KERN_CONT "\n");
5582
5583 if (!cpumask_equal(sched_domain_span(sd), groupmask))
5584 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
5585
5586 if (sd->parent &&
5587 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
5588 printk(KERN_ERR "ERROR: parent span is not a superset "
5589 "of domain->span\n");
5590 return 0;
5591}
5592
5593static void sched_domain_debug(struct sched_domain *sd, int cpu)
5594{
5595 int level = 0;
5596
5597 if (!sched_debug_enabled)
5598 return;
5599
5600 if (!sd) {
5601 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5602 return;
5603 }
5604
5605 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5606
5607 for (;;) {
5608 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
5609 break;
5610 level++;
5611 sd = sd->parent;
5612 if (!sd)
5613 break;
5614 }
5615}
5616#else /* !CONFIG_SCHED_DEBUG */
5617# define sched_domain_debug(sd, cpu) do { } while (0)
5618static inline bool sched_debug(void)
5619{
5620 return false;
5621}
5622#endif /* CONFIG_SCHED_DEBUG */
5623
5624static int sd_degenerate(struct sched_domain *sd)
5625{
5626 if (cpumask_weight(sched_domain_span(sd)) == 1)
5627 return 1;
5628
5629 /* Following flags need at least 2 groups */
5630 if (sd->flags & (SD_LOAD_BALANCE |
5631 SD_BALANCE_NEWIDLE |
5632 SD_BALANCE_FORK |
5633 SD_BALANCE_EXEC |
5634 SD_SHARE_CPUCAPACITY |
5635 SD_SHARE_PKG_RESOURCES |
5636 SD_SHARE_POWERDOMAIN)) {
5637 if (sd->groups != sd->groups->next)
5638 return 0;
5639 }
5640
5641 /* Following flags don't use groups */
5642 if (sd->flags & (SD_WAKE_AFFINE))
5643 return 0;
5644
5645 return 1;
5646}
5647
5648static int
5649sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
5650{
5651 unsigned long cflags = sd->flags, pflags = parent->flags;
5652
5653 if (sd_degenerate(parent))
5654 return 1;
5655
5656 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
5657 return 0;
5658
5659 /* Flags needing groups don't count if only 1 group in parent */
5660 if (parent->groups == parent->groups->next) {
5661 pflags &= ~(SD_LOAD_BALANCE |
5662 SD_BALANCE_NEWIDLE |
5663 SD_BALANCE_FORK |
5664 SD_BALANCE_EXEC |
5665 SD_SHARE_CPUCAPACITY |
5666 SD_SHARE_PKG_RESOURCES |
5667 SD_PREFER_SIBLING |
5668 SD_SHARE_POWERDOMAIN);
5669 if (nr_node_ids == 1)
5670 pflags &= ~SD_SERIALIZE;
5671 }
5672 if (~cflags & pflags)
5673 return 0;
5674
5675 return 1;
5676}
5677
5678static void free_rootdomain(struct rcu_head *rcu)
5679{
5680 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
5681
5682 cpupri_cleanup(&rd->cpupri);
5683 cpudl_cleanup(&rd->cpudl);
5684 free_cpumask_var(rd->dlo_mask);
5685 free_cpumask_var(rd->rto_mask);
5686 free_cpumask_var(rd->online);
5687 free_cpumask_var(rd->span);
5688 kfree(rd);
5689}
5690
5691static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5692{
5693 struct root_domain *old_rd = NULL;
5694 unsigned long flags;
5695
5696 raw_spin_lock_irqsave(&rq->lock, flags);
5697
5698 if (rq->rd) {
5699 old_rd = rq->rd;
5700
5701 if (cpumask_test_cpu(rq->cpu, old_rd->online))
5702 set_rq_offline(rq);
5703
5704 cpumask_clear_cpu(rq->cpu, old_rd->span);
5705
5706 /*
5707 * If we don't want to free the old_rd yet then
5708 * set old_rd to NULL to skip the freeing later
5709 * in this function:
5710 */
5711 if (!atomic_dec_and_test(&old_rd->refcount))
5712 old_rd = NULL;
5713 }
5714
5715 atomic_inc(&rd->refcount);
5716 rq->rd = rd;
5717
5718 cpumask_set_cpu(rq->cpu, rd->span);
5719 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
5720 set_rq_online(rq);
5721
5722 raw_spin_unlock_irqrestore(&rq->lock, flags);
5723
5724 if (old_rd)
5725 call_rcu_sched(&old_rd->rcu, free_rootdomain);
5726}
5727
5728static int init_rootdomain(struct root_domain *rd)
5729{
5730 memset(rd, 0, sizeof(*rd));
5731
5732 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
5733 goto out;
5734 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
5735 goto free_span;
5736 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
5737 goto free_online;
5738 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5739 goto free_dlo_mask;
5740
5741 init_dl_bw(&rd->dl_bw);
5742 if (cpudl_init(&rd->cpudl) != 0)
5743 goto free_dlo_mask;
5744
5745 if (cpupri_init(&rd->cpupri) != 0)
5746 goto free_rto_mask;
5747 return 0;
5748
5749free_rto_mask:
5750 free_cpumask_var(rd->rto_mask);
5751free_dlo_mask:
5752 free_cpumask_var(rd->dlo_mask);
5753free_online:
5754 free_cpumask_var(rd->online);
5755free_span:
5756 free_cpumask_var(rd->span);
5757out:
5758 return -ENOMEM;
5759}
5760
5761/*
5762 * By default the system creates a single root-domain with all cpus as
5763 * members (mimicking the global state we have today).
5764 */
5765struct root_domain def_root_domain;
5766
5767static void init_defrootdomain(void)
5768{
5769 init_rootdomain(&def_root_domain);
5770
5771 atomic_set(&def_root_domain.refcount, 1);
5772}
5773
5774static struct root_domain *alloc_rootdomain(void)
5775{
5776 struct root_domain *rd;
5777
5778 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5779 if (!rd)
5780 return NULL;
5781
5782 if (init_rootdomain(rd) != 0) {
5783 kfree(rd);
5784 return NULL;
5785 }
5786
5787 return rd;
5788}
5789
5790static void free_sched_groups(struct sched_group *sg, int free_sgc)
5791{
5792 struct sched_group *tmp, *first;
5793
5794 if (!sg)
5795 return;
5796
5797 first = sg;
5798 do {
5799 tmp = sg->next;
5800
5801 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
5802 kfree(sg->sgc);
5803
5804 kfree(sg);
5805 sg = tmp;
5806 } while (sg != first);
5807}
5808
5809static void free_sched_domain(struct rcu_head *rcu)
5810{
5811 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
5812
5813 /*
5814 * If it's an overlapping domain it has private groups; iterate and
5815 * nuke them all.
5816 */
5817 if (sd->flags & SD_OVERLAP) {
5818 free_sched_groups(sd->groups, 1);
5819 } else if (atomic_dec_and_test(&sd->groups->ref)) {
5820 kfree(sd->groups->sgc);
5821 kfree(sd->groups);
5822 }
5823 kfree(sd);
5824}
5825
5826static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5827{
5828 call_rcu(&sd->rcu, free_sched_domain);
5829}
5830
5831static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5832{
5833 for (; sd; sd = sd->parent)
5834 destroy_sched_domain(sd, cpu);
5835}
5836
5837/*
5838 * Keep a special pointer to the highest sched_domain that has
5839 * SD_SHARE_PKG_RESOURCES set (the Last Level Cache domain); this
5840 * allows us to avoid some pointer chasing in select_idle_sibling().
5841 *
5842 * Also keep a unique ID per domain (we use the first cpu number in
5843 * the cpumask of the domain); this allows us to quickly tell if
5844 * two cpus are in the same cache domain, see cpus_share_cache().
5845 */
5846DEFINE_PER_CPU(struct sched_domain *, sd_llc);
5847DEFINE_PER_CPU(int, sd_llc_size);
5848DEFINE_PER_CPU(int, sd_llc_id);
5849DEFINE_PER_CPU(struct sched_domain *, sd_numa);
5850DEFINE_PER_CPU(struct sched_domain *, sd_busy);
5851DEFINE_PER_CPU(struct sched_domain *, sd_asym);
5852
5853static void update_top_cache_domain(int cpu)
5854{
5855 struct sched_domain *sd;
5856 struct sched_domain *busy_sd = NULL;
5857 int id = cpu;
5858 int size = 1;
5859
5860 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
5861 if (sd) {
5862 id = cpumask_first(sched_domain_span(sd));
5863 size = cpumask_weight(sched_domain_span(sd));
5864 busy_sd = sd->parent; /* sd_busy */
5865 }
5866 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
5867
5868 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
5869 per_cpu(sd_llc_size, cpu) = size;
5870 per_cpu(sd_llc_id, cpu) = id;
5871
5872 sd = lowest_flag_domain(cpu, SD_NUMA);
5873 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
5874
5875 sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
5876 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
5877}
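
/*
 * Consumer sketch: cpus_share_cache() is essentially a comparison of the
 * per-CPU LLC ids cached above (a restatement for illustration, not a new
 * helper):
 *
 *	bool cpus_share_cache(int this_cpu, int that_cpu)
 *	{
 *		return per_cpu(sd_llc_id, this_cpu) ==
 *		       per_cpu(sd_llc_id, that_cpu);
 *	}
 */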
5878
5879/*
5880 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
5881 * hold the hotplug lock.
5882 */
5883static void
5884cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
5885{
5886 struct rq *rq = cpu_rq(cpu);
5887 struct sched_domain *tmp;
5888
5889 /* Remove the sched domains which do not contribute to scheduling. */
5890 for (tmp = sd; tmp; ) {
5891 struct sched_domain *parent = tmp->parent;
5892 if (!parent)
5893 break;
5894
5895 if (sd_parent_degenerate(tmp, parent)) {
5896 tmp->parent = parent->parent;
5897 if (parent->parent)
5898 parent->parent->child = tmp;
5899 /*
5900 * Transfer SD_PREFER_SIBLING down in case of a
5901 * degenerate parent; the spans match for this
5902 * so the property transfers.
5903 */
5904 if (parent->flags & SD_PREFER_SIBLING)
5905 tmp->flags |= SD_PREFER_SIBLING;
5906 destroy_sched_domain(parent, cpu);
5907 } else
5908 tmp = tmp->parent;
5909 }
5910
5911 if (sd && sd_degenerate(sd)) {
5912 tmp = sd;
5913 sd = sd->parent;
5914 destroy_sched_domain(tmp, cpu);
5915 if (sd)
5916 sd->child = NULL;
5917 }
5918
5919 sched_domain_debug(sd, cpu);
5920
5921 rq_attach_root(rq, rd);
5922 tmp = rq->sd;
5923 rcu_assign_pointer(rq->sd, sd);
5924 destroy_sched_domains(tmp, cpu);
5925
5926 update_top_cache_domain(cpu);
5927}
5928
5929/* Set up the mask of CPUs configured for isolated domains */
5930static int __init isolated_cpu_setup(char *str)
5931{
5932 int ret;
5933
5934 alloc_bootmem_cpumask_var(&cpu_isolated_map);
5935 ret = cpulist_parse(str, cpu_isolated_map);
5936 if (ret) {
5937 pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids - 1);
5938 return 0;
5939 }
5940 return 1;
5941}
5942__setup("isolcpus=", isolated_cpu_setup);
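
/*
 * Boot-time usage example: "isolcpus=2,4-7" takes CPUs 2 and 4-7 out of the
 * general scheduler domains, so only explicit affinity (e.g. via
 * sched_setaffinity()) places tasks there. Standard cpulist syntax applies.
 */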
5943
5944struct s_data {
5945 struct sched_domain ** __percpu sd;
5946 struct root_domain *rd;
5947};
5948
5949enum s_alloc {
5950 sa_rootdomain,
5951 sa_sd,
5952 sa_sd_storage,
5953 sa_none,
5954};
5955
5956/*
5957 * Build an iteration mask that can exclude certain CPUs from the upwards
5958 * domain traversal.
5959 *
5960 * Asymmetric node setups can result in situations where the domain tree is of
5961 * unequal depth; make sure to skip domains that already cover the entire
5962 * range.
5963 *
5964 * In that case build_sched_domains() will have terminated the iteration early
5965 * and our sibling sd spans will be empty. Domains should always include the
5966 * cpu they're built on, so check that.
5967 *
5968 */
5969static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
5970{
5971 const struct cpumask *span = sched_domain_span(sd);
5972 struct sd_data *sdd = sd->private;
5973 struct sched_domain *sibling;
5974 int i;
5975
5976 for_each_cpu(i, span) {
5977 sibling = *per_cpu_ptr(sdd->sd, i);
5978 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
5979 continue;
5980
5981 cpumask_set_cpu(i, sched_group_mask(sg));
5982 }
5983}
5984
5985/*
5986 * Return the canonical balance cpu for this group; this is the first cpu
5987 * of this group that's also in the iteration mask.
5988 */
5989int group_balance_cpu(struct sched_group *sg)
5990{
5991 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
5992}
5993
5994static int
5995build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5996{
5997 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
5998 const struct cpumask *span = sched_domain_span(sd);
5999 struct cpumask *covered = sched_domains_tmpmask;
6000 struct sd_data *sdd = sd->private;
6001 struct sched_domain *sibling;
6002 int i;
6003
6004 cpumask_clear(covered);
6005
6006 for_each_cpu(i, span) {
6007 struct cpumask *sg_span;
6008
6009 if (cpumask_test_cpu(i, covered))
6010 continue;
6011
6012 sibling = *per_cpu_ptr(sdd->sd, i);
6013
6014 /* See the comment near build_group_mask(). */
6015 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
6016 continue;
6017
6018 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6019 GFP_KERNEL, cpu_to_node(cpu));
6020
6021 if (!sg)
6022 goto fail;
6023
6024 sg_span = sched_group_cpus(sg);
6025 if (sibling->child)
6026 cpumask_copy(sg_span, sched_domain_span(sibling->child));
6027 else
6028 cpumask_set_cpu(i, sg_span);
6029
6030 cpumask_or(covered, covered, sg_span);
6031
6032 sg->sgc = *per_cpu_ptr(sdd->sgc, i);
6033 if (atomic_inc_return(&sg->sgc->ref) == 1)
6034 build_group_mask(sd, sg);
6035
6036 /*
6037 * Initialize sgc->capacity such that even if we mess up the
6038 * domains and no possible iteration will get us here, we won't
6039 * die on a /0 trap.
6040 */
6041 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
6042
6043 /*
6044 * Make sure the first group of this domain contains the
6045 * canonical balance cpu. Otherwise the sched_domain iteration
6046 * breaks. See update_sg_lb_stats().
6047 */
6048 if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
6049 group_balance_cpu(sg) == cpu)
6050 groups = sg;
6051
6052 if (!first)
6053 first = sg;
6054 if (last)
6055 last->next = sg;
6056 last = sg;
6057 last->next = first;
6058 }
6059 sd->groups = groups;
6060
6061 return 0;
6062
6063fail:
6064 free_sched_groups(first, 0);
6065
6066 return -ENOMEM;
6067}
6068
6069static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
6070{
6071 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
6072 struct sched_domain *child = sd->child;
6073
6074 if (child)
6075 cpu = cpumask_first(sched_domain_span(child));
6076
6077 if (sg) {
6078 *sg = *per_cpu_ptr(sdd->sg, cpu);
6079 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
6080 atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
6081 }
6082
6083 return cpu;
6084}
6085
6086/*
6087 * build_sched_groups will build a circular linked list of the groups
6088 * covered by the given span, set each group's ->cpumask correctly,
6089 * and initialize each group's ->cpu_capacity to 0.
6090 *
6091 * Assumes the sched_domain tree is fully constructed.
6092 */
6093static int
6094build_sched_groups(struct sched_domain *sd, int cpu)
6095{
6096 struct sched_group *first = NULL, *last = NULL;
6097 struct sd_data *sdd = sd->private;
6098 const struct cpumask *span = sched_domain_span(sd);
6099 struct cpumask *covered;
6100 int i;
6101
6102 get_group(cpu, sdd, &sd->groups);
6103 atomic_inc(&sd->groups->ref);
6104
6105 if (cpu != cpumask_first(span))
6106 return 0;
6107
6108 lockdep_assert_held(&sched_domains_mutex);
6109 covered = sched_domains_tmpmask;
6110
6111 cpumask_clear(covered);
6112
6113 for_each_cpu(i, span) {
6114 struct sched_group *sg;
6115 int group, j;
6116
6117 if (cpumask_test_cpu(i, covered))
6118 continue;
6119
6120 group = get_group(i, sdd, &sg);
6121 cpumask_setall(sched_group_mask(sg));
6122
6123 for_each_cpu(j, span) {
6124 if (get_group(j, sdd, NULL) != group)
6125 continue;
6126
6127 cpumask_set_cpu(j, covered);
6128 cpumask_set_cpu(j, sched_group_cpus(sg));
6129 }
6130
6131 if (!first)
6132 first = sg;
6133 if (last)
6134 last->next = sg;
6135 last = sg;
6136 }
6137 last->next = first;
6138
6139 return 0;
6140}
6141
6142/*
6143 * Initialize sched groups cpu_capacity.
6144 *
6145 * cpu_capacity indicates the capacity of sched group, which is used while
6146 * distributing the load between different sched groups in a sched domain.
6147 * Typically cpu_capacity for all the groups in a sched domain will be the same
6148 * unless there are asymmetries in the topology. If there are asymmetries,
6149 * the group having more cpu_capacity will pick up more load than the
6150 * group having less cpu_capacity.
6151 */
6152static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
6153{
6154 struct sched_group *sg = sd->groups;
6155
6156 WARN_ON(!sg);
6157
6158 do {
6159 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
6160 sg = sg->next;
6161 } while (sg != sd->groups);
6162
6163 if (cpu != group_balance_cpu(sg))
6164 return;
6165
6166 update_group_capacity(sd, cpu);
6167 atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
6168}
6169
6170/*
6171 * Initializers for sched domains.
6172 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6173 */
6174
6175static int default_relax_domain_level = -1;
6176int sched_domain_level_max;
6177
6178static int __init setup_relax_domain_level(char *str)
6179{
6180 if (kstrtoint(str, 0, &default_relax_domain_level))
6181 pr_warn("Unable to set relax_domain_level\n");
6182
6183 return 1;
6184}
6185__setup("relax_domain_level=", setup_relax_domain_level);
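
/*
 * Boot-time usage example: "relax_domain_level=2" keeps wakeup and newidle
 * balancing enabled for domains up to level 2 and disables them above;
 * set_domain_attribute() below applies the request per domain.
 */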
6186
6187static void set_domain_attribute(struct sched_domain *sd,
6188 struct sched_domain_attr *attr)
6189{
6190 int request;
6191
6192 if (!attr || attr->relax_domain_level < 0) {
6193 if (default_relax_domain_level < 0)
6194 return;
6195 else
6196 request = default_relax_domain_level;
6197 } else
6198 request = attr->relax_domain_level;
6199 if (request < sd->level) {
6200 /* turn off idle balance on this domain */
6201 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6202 } else {
6203 /* turn on idle balance on this domain */
6204 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6205 }
6206}
6207
6208static void __sdt_free(const struct cpumask *cpu_map);
6209static int __sdt_alloc(const struct cpumask *cpu_map);
6210
6211static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6212 const struct cpumask *cpu_map)
6213{
6214 switch (what) {
6215 case sa_rootdomain:
6216 if (!atomic_read(&d->rd->refcount))
6217 free_rootdomain(&d->rd->rcu); /* fall through */
6218 case sa_sd:
6219 free_percpu(d->sd); /* fall through */
6220 case sa_sd_storage:
6221 __sdt_free(cpu_map); /* fall through */
6222 case sa_none:
6223 break;
6224 }
6225}
6226
6227static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6228 const struct cpumask *cpu_map)
6229{
6230 memset(d, 0, sizeof(*d));
6231
6232 if (__sdt_alloc(cpu_map))
6233 return sa_sd_storage;
6234 d->sd = alloc_percpu(struct sched_domain *);
6235 if (!d->sd)
6236 return sa_sd_storage;
6237 d->rd = alloc_rootdomain();
6238 if (!d->rd)
6239 return sa_sd;
6240 return sa_rootdomain;
6241}
6242
6243/*
6244 * NULL the sd_data elements we've used to build the sched_domain and
6245 * sched_group structure so that the subsequent __free_domain_allocs()
6246 * will not free the data we're using.
6247 */
6248static void claim_allocations(int cpu, struct sched_domain *sd)
6249{
6250 struct sd_data *sdd = sd->private;
6251
6252 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6253 *per_cpu_ptr(sdd->sd, cpu) = NULL;
6254
6255 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
6256 *per_cpu_ptr(sdd->sg, cpu) = NULL;
6257
6258 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
6259 *per_cpu_ptr(sdd->sgc, cpu) = NULL;
6260}
6261
6262#ifdef CONFIG_NUMA
6263static int sched_domains_numa_levels;
6264enum numa_topology_type sched_numa_topology_type;
6265static int *sched_domains_numa_distance;
6266int sched_max_numa_distance;
6267static struct cpumask ***sched_domains_numa_masks;
6268static int sched_domains_curr_level;
6269#endif
6270
6271/*
6272 * SD_flags allowed in topology descriptions.
6273 *
6274 * SD_SHARE_CPUCAPACITY - describes SMT topologies
6275 * SD_SHARE_PKG_RESOURCES - describes shared caches
6276 * SD_NUMA - describes NUMA topologies
6277 * SD_SHARE_POWERDOMAIN - describes shared power domain
6278 *
6279 * Odd one out:
6280 * SD_ASYM_PACKING - describes SMT quirks
6281 */
6282#define TOPOLOGY_SD_FLAGS \
6283 (SD_SHARE_CPUCAPACITY | \
6284 SD_SHARE_PKG_RESOURCES | \
6285 SD_NUMA | \
6286 SD_ASYM_PACKING | \
6287 SD_SHARE_POWERDOMAIN)
6288
6289static struct sched_domain *
6290sd_init(struct sched_domain_topology_level *tl, int cpu)
6291{
6292 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
6293 int sd_weight, sd_flags = 0;
6294
6295#ifdef CONFIG_NUMA
6296 /*
6297 * Ugly hack to pass state to sd_numa_mask()...
6298 */
6299 sched_domains_curr_level = tl->numa_level;
6300#endif
6301
6302 sd_weight = cpumask_weight(tl->mask(cpu));
6303
6304 if (tl->sd_flags)
6305 sd_flags = (*tl->sd_flags)();
6306 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
6307 "wrong sd_flags in topology description\n"))
6308 sd_flags &= ~TOPOLOGY_SD_FLAGS;
6309
6310 *sd = (struct sched_domain){
6311 .min_interval = sd_weight,
6312 .max_interval = 2*sd_weight,
6313 .busy_factor = 32,
6314 .imbalance_pct = 125,
6315
6316 .cache_nice_tries = 0,
6317 .busy_idx = 0,
6318 .idle_idx = 0,
6319 .newidle_idx = 0,
6320 .wake_idx = 0,
6321 .forkexec_idx = 0,
6322
6323 .flags = 1*SD_LOAD_BALANCE
6324 | 1*SD_BALANCE_NEWIDLE
6325 | 1*SD_BALANCE_EXEC
6326 | 1*SD_BALANCE_FORK
6327 | 0*SD_BALANCE_WAKE
6328 | 1*SD_WAKE_AFFINE
6329 | 0*SD_SHARE_CPUCAPACITY
6330 | 0*SD_SHARE_PKG_RESOURCES
6331 | 0*SD_SERIALIZE
6332 | 0*SD_PREFER_SIBLING
6333 | 0*SD_NUMA
6334 | sd_flags
6335 ,
6336
6337 .last_balance = jiffies,
6338 .balance_interval = sd_weight,
6339 .smt_gain = 0,
6340 .max_newidle_lb_cost = 0,
6341 .next_decay_max_lb_cost = jiffies,
6342#ifdef CONFIG_SCHED_DEBUG
6343 .name = tl->name,
6344#endif
6345 };
6346
6347 /*
6348 * Convert topological properties into behaviour.
6349 */
6350
6351 if (sd->flags & SD_SHARE_CPUCAPACITY) {
6352 sd->flags |= SD_PREFER_SIBLING;
6353 sd->imbalance_pct = 110;
6354 sd->smt_gain = 1178; /* ~15% */
6355
6356 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
6357 sd->imbalance_pct = 117;
6358 sd->cache_nice_tries = 1;
6359 sd->busy_idx = 2;
6360
6361#ifdef CONFIG_NUMA
6362 } else if (sd->flags & SD_NUMA) {
6363 sd->cache_nice_tries = 2;
6364 sd->busy_idx = 3;
6365 sd->idle_idx = 2;
6366
6367 sd->flags |= SD_SERIALIZE;
6368 if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
6369 sd->flags &= ~(SD_BALANCE_EXEC |
6370 SD_BALANCE_FORK |
6371 SD_WAKE_AFFINE);
6372 }
6373
6374#endif
6375 } else {
6376 sd->flags |= SD_PREFER_SIBLING;
6377 sd->cache_nice_tries = 1;
6378 sd->busy_idx = 2;
6379 sd->idle_idx = 1;
6380 }
6381
6382 sd->private = &tl->data;
6383
6384 return sd;
6385}
6386
6387/*
6388 * Topology list, bottom-up.
6389 */
6390static struct sched_domain_topology_level default_topology[] = {
6391#ifdef CONFIG_SCHED_SMT
6392 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
6393#endif
6394#ifdef CONFIG_SCHED_MC
6395 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
6396#endif
6397 { cpu_cpu_mask, SD_INIT_NAME(DIE) },
6398 { NULL, },
6399};
6400
6401static struct sched_domain_topology_level *sched_domain_topology =
6402 default_topology;
6403
6404#define for_each_sd_topology(tl) \
6405 for (tl = sched_domain_topology; tl->mask; tl++)
6406
6407void set_sched_topology(struct sched_domain_topology_level *tl)
6408{
6409 sched_domain_topology = tl;
6410}
6411
6412#ifdef CONFIG_NUMA
6413
6414static const struct cpumask *sd_numa_mask(int cpu)
6415{
6416 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6417}
6418
6419static void sched_numa_warn(const char *str)
6420{
6421 static bool done = false;
6422 int i, j;
6423
6424 if (done)
6425 return;
6426
6427 done = true;
6428
6429 printk(KERN_WARNING "ERROR: %s\n\n", str);
6430
6431 for (i = 0; i < nr_node_ids; i++) {
6432 printk(KERN_WARNING " ");
6433 for (j = 0; j < nr_node_ids; j++)
6434 printk(KERN_CONT "%02d ", node_distance(i,j));
6435 printk(KERN_CONT "\n");
6436 }
6437 printk(KERN_WARNING "\n");
6438}
6439
6440bool find_numa_distance(int distance)
6441{
6442 int i;
6443
6444 if (distance == node_distance(0, 0))
6445 return true;
6446
6447 for (i = 0; i < sched_domains_numa_levels; i++) {
6448 if (sched_domains_numa_distance[i] == distance)
6449 return true;
6450 }
6451
6452 return false;
6453}
6454
6455/*
6456 * A system can have three types of NUMA topology:
6457 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
6458 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
6459 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
6460 *
6461 * The difference between a glueless mesh topology and a backplane
6462 * topology lies in whether communication between not directly
6463 * connected nodes goes through intermediary nodes (where programs
6464 * could run), or through backplane controllers. This affects
6465 * placement of programs.
6466 *
6467 * The type of topology can be discerned with the following tests:
6468 * - If the maximum distance between any nodes is 1 hop, the system
6469 * is directly connected.
6470 * - If for two nodes A and B, located N > 1 hops away from each other,
6471 * there is an intermediary node C, which is < N hops away from both
6472 * nodes A and B, the system is a glueless mesh.
6473 */
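/*
 * Worked example (assumed distances, 10 = local): for four nodes, the three
 * types might produce node_distance() tables like:
 *
 *	NUMA_DIRECT:		NUMA_GLUELESS_MESH:	NUMA_BACKPLANE:
 *	10 20 20 20		10 20 30 20		10 20 40 40
 *	20 10 20 20		20 10 20 30		20 10 40 40
 *	20 20 10 20		30 20 10 20		40 40 10 20
 *	20 20 20 10		20 30 20 10		40 40 20 10
 *
 * In the mesh case, nodes 0 and 2 are at the maximum distance (30) but node
 * 1 is closer than that to both of them, so it qualifies as an intermediary.
 * In the backplane case, no such node exists for the pair (0, 2).
 */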
6474static void init_numa_topology_type(void)
6475{
6476 int a, b, c, n;
6477
6478 n = sched_max_numa_distance;
6479
6480 if (sched_domains_numa_levels <= 1) {
6481 sched_numa_topology_type = NUMA_DIRECT;
6482 return;
6483 }
6484
6485 for_each_online_node(a) {
6486 for_each_online_node(b) {
6487 /* Find two nodes furthest removed from each other. */
6488 if (node_distance(a, b) < n)
6489 continue;
6490
6491 /* Is there an intermediary node between a and b? */
6492 for_each_online_node(c) {
6493 if (node_distance(a, c) < n &&
6494 node_distance(b, c) < n) {
6495 sched_numa_topology_type =
6496 NUMA_GLUELESS_MESH;
6497 return;
6498 }
6499 }
6500
6501 sched_numa_topology_type = NUMA_BACKPLANE;
6502 return;
6503 }
6504 }
6505}
6506
6507static void sched_init_numa(void)
6508{
6509 int next_distance, curr_distance = node_distance(0, 0);
6510 struct sched_domain_topology_level *tl;
6511 int level = 0;
6512 int i, j, k;
6513
6514 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6515 if (!sched_domains_numa_distance)
6516 return;
6517
6518 /*
6519 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
6520 * unique distances in the node_distance() table.
6521 *
6522 * Assumes node_distance(0,j) includes all distances in
6523 * node_distance(i,j) in order to avoid cubic time.
6524 */
6525 next_distance = curr_distance;
6526 for (i = 0; i < nr_node_ids; i++) {
6527 for (j = 0; j < nr_node_ids; j++) {
6528 for (k = 0; k < nr_node_ids; k++) {
6529 int distance = node_distance(i, k);
6530
6531 if (distance > curr_distance &&
6532 (distance < next_distance ||
6533 next_distance == curr_distance))
6534 next_distance = distance;
6535
				/*
				 * While not a strong assumption, it would be
				 * nice to know about cases where node A is
				 * connected to B but B is not equally
				 * connected to A.
				 */
6541 if (sched_debug() && node_distance(k, i) != distance)
6542 sched_numa_warn("Node-distance not symmetric");
6543
6544 if (sched_debug() && i && !find_numa_distance(distance))
6545 sched_numa_warn("Node-0 not representative");
6546 }
6547 if (next_distance != curr_distance) {
6548 sched_domains_numa_distance[level++] = next_distance;
6549 sched_domains_numa_levels = level;
6550 curr_distance = next_distance;
6551 } else break;
6552 }
6553
6554 /*
6555 * In case of sched_debug() we verify the above assumption.
6556 */
6557 if (!sched_debug())
6558 break;
6559 }
6560
6561 if (!level)
6562 return;
6563
6564 /*
6565 * 'level' contains the number of unique distances, excluding the
6566 * identity distance node_distance(i,i).
6567 *
6568 * The sched_domains_numa_distance[] array includes the actual distance
6569 * numbers.
6570 */
6571
	/*
	 * Temporarily reset sched_domains_numa_levels to 0. If allocating
	 * the sched_domains_numa_masks[][] array fails below, the array
	 * would contain fewer than 'level' members, which is dangerous for
	 * the functions that iterate over it.
	 *
	 * We set it back to 'level' at the end of this function.
	 */
6581 sched_domains_numa_levels = 0;
6582
6583 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
6584 if (!sched_domains_numa_masks)
6585 return;
6586
6587 /*
6588 * Now for each level, construct a mask per node which contains all
6589 * cpus of nodes that are that many hops away from us.
6590 */
6591 for (i = 0; i < level; i++) {
6592 sched_domains_numa_masks[i] =
6593 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
6594 if (!sched_domains_numa_masks[i])
6595 return;
6596
6597 for (j = 0; j < nr_node_ids; j++) {
6598 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
6599 if (!mask)
6600 return;
6601
6602 sched_domains_numa_masks[i][j] = mask;
6603
6604 for_each_node(k) {
6605 if (node_distance(j, k) > sched_domains_numa_distance[i])
6606 continue;
6607
6608 cpumask_or(mask, mask, cpumask_of_node(k));
6609 }
6610 }
6611 }
6612
6613 /* Compute default topology size */
6614 for (i = 0; sched_domain_topology[i].mask; i++);
6615
6616 tl = kzalloc((i + level + 1) *
6617 sizeof(struct sched_domain_topology_level), GFP_KERNEL);
6618 if (!tl)
6619 return;
6620
6621 /*
6622 * Copy the default topology bits..
6623 */
6624 for (i = 0; sched_domain_topology[i].mask; i++)
6625 tl[i] = sched_domain_topology[i];
6626
6627 /*
6628 * .. and append 'j' levels of NUMA goodness.
6629 */
6630 for (j = 0; j < level; i++, j++) {
6631 tl[i] = (struct sched_domain_topology_level){
6632 .mask = sd_numa_mask,
6633 .sd_flags = cpu_numa_flags,
6634 .flags = SDTL_OVERLAP,
6635 .numa_level = j,
6636 SD_INIT_NAME(NUMA)
6637 };
6638 }
6639
6640 sched_domain_topology = tl;
6641
6642 sched_domains_numa_levels = level;
6643 sched_max_numa_distance = sched_domains_numa_distance[level - 1];
6644
6645 init_numa_topology_type();
6646}
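/*
 * Worked example (using the 4-node glueless mesh distances above): the
 * deduplicating sort finds the unique remote distances {20, 30}, so we get
 * two NUMA levels. For node 0, sched_domains_numa_masks[0][0] covers the
 * CPUs of nodes {0, 1, 3} (distance <= 20) and sched_domains_numa_masks[1][0]
 * covers all four nodes. Two SDTL_OVERLAP "NUMA" entries with numa_level 0
 * and 1 are then appended behind the default topology.
 */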
6647
6648static void sched_domains_numa_masks_set(int cpu)
6649{
6650 int i, j;
6651 int node = cpu_to_node(cpu);
6652
6653 for (i = 0; i < sched_domains_numa_levels; i++) {
6654 for (j = 0; j < nr_node_ids; j++) {
6655 if (node_distance(j, node) <= sched_domains_numa_distance[i])
6656 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
6657 }
6658 }
6659}
6660
6661static void sched_domains_numa_masks_clear(int cpu)
6662{
6663 int i, j;
6664 for (i = 0; i < sched_domains_numa_levels; i++) {
6665 for (j = 0; j < nr_node_ids; j++)
6666 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
6667 }
6668}
6669
6670/*
6671 * Update sched_domains_numa_masks[level][node] array when new cpus
6672 * are onlined.
6673 */
6674static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6675 unsigned long action,
6676 void *hcpu)
6677{
6678 int cpu = (long)hcpu;
6679
6680 switch (action & ~CPU_TASKS_FROZEN) {
6681 case CPU_ONLINE:
6682 sched_domains_numa_masks_set(cpu);
6683 break;
6684
6685 case CPU_DEAD:
6686 sched_domains_numa_masks_clear(cpu);
6687 break;
6688
6689 default:
6690 return NOTIFY_DONE;
6691 }
6692
6693 return NOTIFY_OK;
6694}
6695#else
6696static inline void sched_init_numa(void)
6697{
6698}
6699
6700static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6701 unsigned long action,
6702 void *hcpu)
6703{
6704 return 0;
6705}
6706#endif /* CONFIG_NUMA */
6707
6708static int __sdt_alloc(const struct cpumask *cpu_map)
6709{
6710 struct sched_domain_topology_level *tl;
6711 int j;
6712
6713 for_each_sd_topology(tl) {
6714 struct sd_data *sdd = &tl->data;
6715
6716 sdd->sd = alloc_percpu(struct sched_domain *);
6717 if (!sdd->sd)
6718 return -ENOMEM;
6719
6720 sdd->sg = alloc_percpu(struct sched_group *);
6721 if (!sdd->sg)
6722 return -ENOMEM;
6723
6724 sdd->sgc = alloc_percpu(struct sched_group_capacity *);
6725 if (!sdd->sgc)
6726 return -ENOMEM;
6727
6728 for_each_cpu(j, cpu_map) {
6729 struct sched_domain *sd;
6730 struct sched_group *sg;
6731 struct sched_group_capacity *sgc;
6732
6733 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
6734 GFP_KERNEL, cpu_to_node(j));
6735 if (!sd)
6736 return -ENOMEM;
6737
6738 *per_cpu_ptr(sdd->sd, j) = sd;
6739
6740 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6741 GFP_KERNEL, cpu_to_node(j));
6742 if (!sg)
6743 return -ENOMEM;
6744
6745 sg->next = sg;
6746
6747 *per_cpu_ptr(sdd->sg, j) = sg;
6748
6749 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
6750 GFP_KERNEL, cpu_to_node(j));
6751 if (!sgc)
6752 return -ENOMEM;
6753
6754 *per_cpu_ptr(sdd->sgc, j) = sgc;
6755 }
6756 }
6757
6758 return 0;
6759}
6760
6761static void __sdt_free(const struct cpumask *cpu_map)
6762{
6763 struct sched_domain_topology_level *tl;
6764 int j;
6765
6766 for_each_sd_topology(tl) {
6767 struct sd_data *sdd = &tl->data;
6768
6769 for_each_cpu(j, cpu_map) {
6770 struct sched_domain *sd;
6771
6772 if (sdd->sd) {
6773 sd = *per_cpu_ptr(sdd->sd, j);
6774 if (sd && (sd->flags & SD_OVERLAP))
6775 free_sched_groups(sd->groups, 0);
6776 kfree(*per_cpu_ptr(sdd->sd, j));
6777 }
6778
6779 if (sdd->sg)
6780 kfree(*per_cpu_ptr(sdd->sg, j));
6781 if (sdd->sgc)
6782 kfree(*per_cpu_ptr(sdd->sgc, j));
6783 }
6784 free_percpu(sdd->sd);
6785 sdd->sd = NULL;
6786 free_percpu(sdd->sg);
6787 sdd->sg = NULL;
6788 free_percpu(sdd->sgc);
6789 sdd->sgc = NULL;
6790 }
6791}
6792
6793struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6794 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
6795 struct sched_domain *child, int cpu)
6796{
6797 struct sched_domain *sd = sd_init(tl, cpu);
6798 if (!sd)
6799 return child;
6800
6801 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
6802 if (child) {
6803 sd->level = child->level + 1;
6804 sched_domain_level_max = max(sched_domain_level_max, sd->level);
6805 child->parent = sd;
6806 sd->child = child;
6807
6808 if (!cpumask_subset(sched_domain_span(child),
6809 sched_domain_span(sd))) {
6810 pr_err("BUG: arch topology borken\n");
6811#ifdef CONFIG_SCHED_DEBUG
6812 pr_err(" the %s domain not a subset of the %s domain\n",
6813 child->name, sd->name);
6814#endif
6815 /* Fixup, ensure @sd has at least @child cpus. */
6816 cpumask_or(sched_domain_span(sd),
6817 sched_domain_span(sd),
6818 sched_domain_span(child));
6819 }
6820
6821 }
6822 set_domain_attribute(sd, attr);
6823
6824 return sd;
6825}
6826
/*
 * Build sched domains for a given set of CPUs and attach the sched domains
 * to the individual CPUs.
 */
6831static int build_sched_domains(const struct cpumask *cpu_map,
6832 struct sched_domain_attr *attr)
6833{
6834 enum s_alloc alloc_state;
6835 struct sched_domain *sd;
6836 struct s_data d;
6837 int i, ret = -ENOMEM;
6838
6839 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6840 if (alloc_state != sa_rootdomain)
6841 goto error;
6842
6843 /* Set up domains for cpus specified by the cpu_map. */
6844 for_each_cpu(i, cpu_map) {
6845 struct sched_domain_topology_level *tl;
6846
6847 sd = NULL;
6848 for_each_sd_topology(tl) {
6849 sd = build_sched_domain(tl, cpu_map, attr, sd, i);
6850 if (tl == sched_domain_topology)
6851 *per_cpu_ptr(d.sd, i) = sd;
6852 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6853 sd->flags |= SD_OVERLAP;
6854 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6855 break;
6856 }
6857 }
6858
6859 /* Build the groups for the domains */
6860 for_each_cpu(i, cpu_map) {
6861 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6862 sd->span_weight = cpumask_weight(sched_domain_span(sd));
6863 if (sd->flags & SD_OVERLAP) {
6864 if (build_overlap_sched_groups(sd, i))
6865 goto error;
6866 } else {
6867 if (build_sched_groups(sd, i))
6868 goto error;
6869 }
6870 }
6871 }
6872
6873 /* Calculate CPU capacity for physical packages and nodes */
6874 for (i = nr_cpumask_bits-1; i >= 0; i--) {
6875 if (!cpumask_test_cpu(i, cpu_map))
6876 continue;
6877
6878 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6879 claim_allocations(i, sd);
6880 init_sched_groups_capacity(i, sd);
6881 }
6882 }
6883
6884 /* Attach the domains */
6885 rcu_read_lock();
6886 for_each_cpu(i, cpu_map) {
6887 sd = *per_cpu_ptr(d.sd, i);
6888 cpu_attach_domain(sd, d.rd, i);
6889 }
6890 rcu_read_unlock();
6891
6892 ret = 0;
6893error:
6894 __free_domain_allocs(&d, alloc_state, cpu_map);
6895 return ret;
6896}
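/*
 * Illustrative result (assumed machine): on a two-socket box with two cores
 * per socket and two SMT siblings per core, each CPU ends up with the stack
 *
 *	SMT - its two hardware threads		(SD_SHARE_CPUCAPACITY)
 *	MC  - the four threads of its socket	(SD_SHARE_PKG_RESOURCES)
 *	DIE - all eight threads
 *
 * The per-CPU loop above stops stacking levels once a domain spans all of
 * cpu_map (the cpumask_equal() check), so DIE is the root domain level here.
 */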
6897
static cpumask_var_t *doms_cur;	/* current sched domains */
static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
				/* attributes of custom domains in 'doms_cur' */
6902
/*
 * Special case: If a kmalloc() of a doms_cur partition (array of
 * cpumask) fails, then fall back to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
6908static cpumask_var_t fallback_doms;
6909
6910/*
6911 * arch_update_cpu_topology lets virtualized architectures update the
6912 * cpu core maps. It is supposed to return 1 if the topology changed
6913 * or 0 if it stayed the same.
6914 */
6915int __weak arch_update_cpu_topology(void)
6916{
6917 return 0;
6918}
6919
6920cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6921{
6922 int i;
6923 cpumask_var_t *doms;
6924
6925 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6926 if (!doms)
6927 return NULL;
6928 for (i = 0; i < ndoms; i++) {
6929 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
6930 free_sched_domains(doms, i);
6931 return NULL;
6932 }
6933 }
6934 return doms;
6935}
6936
6937void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
6938{
6939 unsigned int i;
6940 for (i = 0; i < ndoms; i++)
6941 free_cpumask_var(doms[i]);
6942 kfree(doms);
6943}
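/*
 * Usage sketch (hedged; mask_a and mask_b are placeholders): a caller
 * building two exclusive partitions, the way the cpuset code does, pairs
 * the two helpers like this:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], mask_a);
 *		cpumask_copy(doms[1], mask_b);
 *		partition_sched_domains(2, doms, NULL);
 *	}
 *
 * partition_sched_domains() takes ownership of 'doms' and will
 * free_sched_domains() it; on allocation failure alloc_sched_domains()
 * itself frees the cpumasks allocated so far and returns NULL.
 */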
6944
6945/*
6946 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
6947 * For now this just excludes isolated cpus, but could be used to
6948 * exclude other special cases in the future.
6949 */
6950static int init_sched_domains(const struct cpumask *cpu_map)
6951{
6952 int err;
6953
6954 arch_update_cpu_topology();
6955 ndoms_cur = 1;
6956 doms_cur = alloc_sched_domains(ndoms_cur);
6957 if (!doms_cur)
6958 doms_cur = &fallback_doms;
6959 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
6960 err = build_sched_domains(doms_cur[0], NULL);
6961 register_sched_domain_sysctl();
6962
6963 return err;
6964}
6965
/*
 * Detach sched domains from a group of CPUs specified in cpu_map.
 * These CPUs will now be attached to the NULL domain.
 */
6970static void detach_destroy_domains(const struct cpumask *cpu_map)
6971{
6972 int i;
6973
6974 rcu_read_lock();
6975 for_each_cpu(i, cpu_map)
6976 cpu_attach_domain(NULL, &def_root_domain, i);
6977 rcu_read_unlock();
6978}
6979
/* Handle NULL as "default": */
6981static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
6982 struct sched_domain_attr *new, int idx_new)
6983{
6984 struct sched_domain_attr tmp;
6985
6986 /* fast path */
6987 if (!new && !cur)
6988 return 1;
6989
6990 tmp = SD_ATTR_INIT;
6991 return !memcmp(cur ? (cur + idx_cur) : &tmp,
6992 new ? (new + idx_new) : &tmp,
6993 sizeof(struct sched_domain_attr));
6994}
6995
/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[]. This compares doms_new[] to the
 * current sched domain partitioning, doms_cur[]. It destroys each
 * deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap). We set up one sched
 * domain for each mask. CPUs not in any of the cpumasks will not be
 * load balanced. If the same cpumask appears both in the current
 * 'doms_cur' domains and in the new 'doms_new', we can leave it as
 * it is.
 *
 * The passed-in 'doms_new' should be allocated using
 * alloc_sched_domains(). This routine takes ownership of it and will
 * free_sched_domains() it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fall back to the single partition
 * 'fallback_doms'; this also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with a single domain
 * spanning cpu_active_mask minus the isolated CPUs. ndoms_new == 0
 * is a special case for destroying existing domains, and it will not
 * create the default domain.
 *
 * Call with the hotplug lock held.
 */
7022void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
7023 struct sched_domain_attr *dattr_new)
7024{
7025 int i, j, n;
7026 int new_topology;
7027
7028 mutex_lock(&sched_domains_mutex);
7029
7030 /* always unregister in case we don't destroy any domains */
7031 unregister_sched_domain_sysctl();
7032
7033 /* Let architecture update cpu core mappings. */
7034 new_topology = arch_update_cpu_topology();
7035
7036 n = doms_new ? ndoms_new : 0;
7037
7038 /* Destroy deleted domains */
7039 for (i = 0; i < ndoms_cur; i++) {
7040 for (j = 0; j < n && !new_topology; j++) {
7041 if (cpumask_equal(doms_cur[i], doms_new[j])
7042 && dattrs_equal(dattr_cur, i, dattr_new, j))
7043 goto match1;
7044 }
7045 /* no match - a current sched domain not in new doms_new[] */
7046 detach_destroy_domains(doms_cur[i]);
7047match1:
7048 ;
7049 }
7050
7051 n = ndoms_cur;
7052 if (doms_new == NULL) {
7053 n = 0;
7054 doms_new = &fallback_doms;
7055 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
7056 WARN_ON_ONCE(dattr_new);
7057 }
7058
7059 /* Build new domains */
7060 for (i = 0; i < ndoms_new; i++) {
7061 for (j = 0; j < n && !new_topology; j++) {
7062 if (cpumask_equal(doms_new[i], doms_cur[j])
7063 && dattrs_equal(dattr_new, i, dattr_cur, j))
7064 goto match2;
7065 }
7066 /* no match - add a new doms_new */
7067 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
7068match2:
7069 ;
7070 }
7071
7072 /* Remember the new sched domains */
7073 if (doms_cur != &fallback_doms)
7074 free_sched_domains(doms_cur, ndoms_cur);
7075 kfree(dattr_cur); /* kfree(NULL) is safe */
7076 doms_cur = doms_new;
7077 dattr_cur = dattr_new;
7078 ndoms_cur = ndoms_new;
7079
7080 register_sched_domain_sysctl();
7081
7082 mutex_unlock(&sched_domains_mutex);
7083}
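/*
 * Illustrative special-case calls (see the comment above for the contract):
 *
 *	partition_sched_domains(1, NULL, NULL);
 *		rebuild one domain spanning cpu_active_mask minus the
 *		isolated CPUs, using 'fallback_doms'
 *
 *	partition_sched_domains(0, NULL, NULL);
 *		destroy all current domains without building a default one
 */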
7084
7085static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
7086
/*
 * Update cpusets according to cpu_active mask. If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 *
 * If we come here as part of a suspend/resume, don't touch cpusets
 * because we want to restore them to their original state upon resume
 * anyway.
 */
7095static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7096 void *hcpu)
7097{
7098 switch (action) {
7099 case CPU_ONLINE_FROZEN:
7100 case CPU_DOWN_FAILED_FROZEN:
7101
		/*
		 * num_cpus_frozen tracks how many CPUs are involved in the
		 * suspend/resume sequence. As long as this is not the last
		 * online operation in the resume sequence, just build a
		 * single sched domain, ignoring cpusets.
		 */
7108 num_cpus_frozen--;
7109 if (likely(num_cpus_frozen)) {
7110 partition_sched_domains(1, NULL, NULL);
7111 break;
7112 }
7113
7114 /*
7115 * This is the last CPU online operation. So fall through and
7116 * restore the original sched domains by considering the
7117 * cpuset configurations.
7118 */
7119
7120 case CPU_ONLINE:
7121 cpuset_update_active_cpus(true);
7122 break;
7123 default:
7124 return NOTIFY_DONE;
7125 }
7126 return NOTIFY_OK;
7127}
7128
7129static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7130 void *hcpu)
7131{
7132 unsigned long flags;
7133 long cpu = (long)hcpu;
7134 struct dl_bw *dl_b;
7135 bool overflow;
7136 int cpus;
7137
7138 switch (action) {
7139 case CPU_DOWN_PREPARE:
7140 rcu_read_lock_sched();
7141 dl_b = dl_bw_of(cpu);
7142
7143 raw_spin_lock_irqsave(&dl_b->lock, flags);
7144 cpus = dl_bw_cpus(cpu);
7145 overflow = __dl_overflow(dl_b, cpus, 0, 0);
7146 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7147
7148 rcu_read_unlock_sched();
7149
7150 if (overflow)
7151 return notifier_from_errno(-EBUSY);
7152 cpuset_update_active_cpus(false);
7153 break;
7154 case CPU_DOWN_PREPARE_FROZEN:
7155 num_cpus_frozen++;
7156 partition_sched_domains(1, NULL, NULL);
7157 break;
7158 default:
7159 return NOTIFY_DONE;
7160 }
7161 return NOTIFY_OK;
7162}
7163
7164void __init sched_init_smp(void)
7165{
7166 cpumask_var_t non_isolated_cpus;
7167
7168 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
7169 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
7170
7171 sched_init_numa();
7172
7173 /*
7174 * There's no userspace yet to cause hotplug operations; hence all the
7175 * cpu masks are stable and all blatant races in the below code cannot
7176 * happen.
7177 */
7178 mutex_lock(&sched_domains_mutex);
7179 init_sched_domains(cpu_active_mask);
7180 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7181 if (cpumask_empty(non_isolated_cpus))
7182 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
7183 mutex_unlock(&sched_domains_mutex);
7184
7185 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
7186 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7187 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
7188
7189 init_hrtick();
7190
7191 /* Move init over to a non-isolated CPU */
7192 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
7193 BUG();
7194 sched_init_granularity();
7195 free_cpumask_var(non_isolated_cpus);
7196
7197 init_sched_rt_class();
7198 init_sched_dl_class();
7199}
7200#else
7201void __init sched_init_smp(void)
7202{
7203 sched_init_granularity();
7204}
7205#endif /* CONFIG_SMP */
7206
7207int in_sched_functions(unsigned long addr)
7208{
7209 return in_lock_functions(addr) ||
7210 (addr >= (unsigned long)__sched_text_start
7211 && addr < (unsigned long)__sched_text_end);
7212}
7213
7214#ifdef CONFIG_CGROUP_SCHED
/*
 * Default task group.
 * Every task in the system belongs to this group at bootup.
 */
7219struct task_group root_task_group;
7220LIST_HEAD(task_groups);
7221
7222/* Cacheline aligned slab cache for task_group */
7223static struct kmem_cache *task_group_cache __read_mostly;
7224#endif
7225
7226DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
7227
7228void __init sched_init(void)
7229{
7230 int i, j;
7231 unsigned long alloc_size = 0, ptr;
7232
7233#ifdef CONFIG_FAIR_GROUP_SCHED
7234 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7235#endif
7236#ifdef CONFIG_RT_GROUP_SCHED
7237 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7238#endif
7239 if (alloc_size) {
7240 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
7241
7242#ifdef CONFIG_FAIR_GROUP_SCHED
7243 root_task_group.se = (struct sched_entity **)ptr;
7244 ptr += nr_cpu_ids * sizeof(void **);
7245
7246 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
7247 ptr += nr_cpu_ids * sizeof(void **);
7248
7249#endif /* CONFIG_FAIR_GROUP_SCHED */
7250#ifdef CONFIG_RT_GROUP_SCHED
7251 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
7252 ptr += nr_cpu_ids * sizeof(void **);
7253
7254 root_task_group.rt_rq = (struct rt_rq **)ptr;
7255 ptr += nr_cpu_ids * sizeof(void **);
7256
7257#endif /* CONFIG_RT_GROUP_SCHED */
7258 }
7259#ifdef CONFIG_CPUMASK_OFFSTACK
7260 for_each_possible_cpu(i) {
7261 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
7262 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
7263 }
7264#endif /* CONFIG_CPUMASK_OFFSTACK */
7265
7266 init_rt_bandwidth(&def_rt_bandwidth,
7267 global_rt_period(), global_rt_runtime());
7268 init_dl_bandwidth(&def_dl_bandwidth,
7269 global_rt_period(), global_rt_runtime());
7270
7271#ifdef CONFIG_SMP
7272 init_defrootdomain();
7273#endif
7274
7275#ifdef CONFIG_RT_GROUP_SCHED
7276 init_rt_bandwidth(&root_task_group.rt_bandwidth,
7277 global_rt_period(), global_rt_runtime());
7278#endif /* CONFIG_RT_GROUP_SCHED */
7279
7280#ifdef CONFIG_CGROUP_SCHED
7281 task_group_cache = KMEM_CACHE(task_group, 0);
7282
7283 list_add(&root_task_group.list, &task_groups);
7284 INIT_LIST_HEAD(&root_task_group.children);
7285 INIT_LIST_HEAD(&root_task_group.siblings);
7286 autogroup_init(&init_task);
7287#endif /* CONFIG_CGROUP_SCHED */
7288
7289 for_each_possible_cpu(i) {
7290 struct rq *rq;
7291
7292 rq = cpu_rq(i);
7293 raw_spin_lock_init(&rq->lock);
7294 rq->nr_running = 0;
7295 rq->calc_load_active = 0;
7296 rq->calc_load_update = jiffies + LOAD_FREQ;
7297 init_cfs_rq(&rq->cfs);
7298 init_rt_rq(&rq->rt);
7299 init_dl_rq(&rq->dl);
7300#ifdef CONFIG_FAIR_GROUP_SCHED
7301 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
7302 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
		/*
		 * How much CPU bandwidth does root_task_group get?
		 *
		 * In case of task-groups formed through the cgroup
		 * filesystem, it gets 100% of the CPU resources in the
		 * system. This overall system CPU resource is divided among
		 * the tasks of root_task_group and its child task-groups in
		 * a fair manner, based on each entity's (task's or
		 * task-group's) weight (se->load.weight).
		 *
		 * In other words, if root_task_group has 10 tasks of weight
		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
		 * then A0's share of the CPU resource is:
		 *
		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
		 *
		 * We achieve this by letting root_task_group's tasks sit
		 * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
		 */
7322 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
7323 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
7324#endif /* CONFIG_FAIR_GROUP_SCHED */
7325
7326 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
7327#ifdef CONFIG_RT_GROUP_SCHED
7328 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
7329#endif
7330
7331 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7332 rq->cpu_load[j] = 0;
7333
7334 rq->last_load_update_tick = jiffies;
7335
7336#ifdef CONFIG_SMP
7337 rq->sd = NULL;
7338 rq->rd = NULL;
7339 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
7340 rq->balance_callback = NULL;
7341 rq->active_balance = 0;
7342 rq->next_balance = jiffies;
7343 rq->push_cpu = 0;
7344 rq->cpu = i;
7345 rq->online = 0;
7346 rq->idle_stamp = 0;
7347 rq->avg_idle = 2*sysctl_sched_migration_cost;
7348 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
7349
7350 INIT_LIST_HEAD(&rq->cfs_tasks);
7351
7352 rq_attach_root(rq, &def_root_domain);
7353#ifdef CONFIG_NO_HZ_COMMON
7354 rq->nohz_flags = 0;
7355#endif
7356#ifdef CONFIG_NO_HZ_FULL
7357 rq->last_sched_tick = 0;
7358#endif
7359#endif
7360 init_rq_hrtick(rq);
7361 atomic_set(&rq->nr_iowait, 0);
7362 }
7363
7364 set_load_weight(&init_task);
7365
7366#ifdef CONFIG_PREEMPT_NOTIFIERS
7367 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7368#endif
7369
7370 /*
7371 * The boot idle thread does lazy MMU switching as well:
7372 */
7373 atomic_inc(&init_mm.mm_count);
7374 enter_lazy_tlb(&init_mm, current);
7375
7376 /*
7377 * During early bootup we pretend to be a normal task:
7378 */
7379 current->sched_class = &fair_sched_class;
7380
	/*
	 * Make us the idle thread. Technically, schedule() should not be
	 * called from this thread; however, somewhere below it might be,
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
7387 init_idle(current, smp_processor_id());
7388
7389 calc_load_update = jiffies + LOAD_FREQ;
7390
7391#ifdef CONFIG_SMP
7392 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
7393 /* May be allocated at isolcpus cmdline parse time */
7394 if (cpu_isolated_map == NULL)
7395 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
7396 idle_thread_set_boot_cpu();
7397 set_cpu_rq_start_time();
7398#endif
7399 init_sched_fair_class();
7400
7401 scheduler_running = 1;
7402}
7403
7404#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
7405static inline int preempt_count_equals(int preempt_offset)
7406{
7407 int nested = preempt_count() + rcu_preempt_depth();
7408
7409 return (nested == preempt_offset);
7410}
7411
7412void __might_sleep(const char *file, int line, int preempt_offset)
7413{
	/*
	 * Blocking primitives will set (and therefore destroy) current->state.
	 * Since we will exit with TASK_RUNNING, make sure we enter with it;
	 * otherwise we will destroy state.
	 */
7419 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
7420 "do not call blocking ops when !TASK_RUNNING; "
7421 "state=%lx set at [<%p>] %pS\n",
7422 current->state,
7423 (void *)current->task_state_change,
7424 (void *)current->task_state_change);
7425
7426 ___might_sleep(file, line, preempt_offset);
7427}
7428EXPORT_SYMBOL(__might_sleep);
7429
7430void ___might_sleep(const char *file, int line, int preempt_offset)
7431{
7432 static unsigned long prev_jiffy; /* ratelimiting */
7433
7434 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
7435 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
7436 !is_idle_task(current)) ||
7437 system_state != SYSTEM_RUNNING || oops_in_progress)
7438 return;
7439 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7440 return;
7441 prev_jiffy = jiffies;
7442
7443 printk(KERN_ERR
7444 "BUG: sleeping function called from invalid context at %s:%d\n",
7445 file, line);
7446 printk(KERN_ERR
7447 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7448 in_atomic(), irqs_disabled(),
7449 current->pid, current->comm);
7450
7451 if (task_stack_end_corrupted(current))
7452 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
7453
7454 debug_show_held_locks(current);
7455 if (irqs_disabled())
7456 print_irqtrace_events(current);
7457#ifdef CONFIG_DEBUG_PREEMPT
7458 if (!preempt_count_equals(preempt_offset)) {
7459 pr_err("Preemption disabled at:");
7460 print_ip_sym(current->preempt_disable_ip);
7461 pr_cont("\n");
7462 }
7463#endif
7464 dump_stack();
7465}
7466EXPORT_SYMBOL(___might_sleep);
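/*
 * Example of a context this check catches (illustrative only): on a kernel
 * where spinlocks disable preemption,
 *
 *	spin_lock(&lock);
 *	ptr = kzalloc(size, GFP_KERNEL);	(may sleep!)
 *	spin_unlock(&lock);
 *
 * GFP_KERNEL allocations call might_sleep(); with preemption disabled by
 * the spinlock, preempt_count_equals(0) fails and the splat above is
 * printed.
 */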
7467#endif
7468
7469#ifdef CONFIG_MAGIC_SYSRQ
7470void normalize_rt_tasks(void)
7471{
7472 struct task_struct *g, *p;
7473 struct sched_attr attr = {
7474 .sched_policy = SCHED_NORMAL,
7475 };
7476
7477 read_lock(&tasklist_lock);
7478 for_each_process_thread(g, p) {
7479 /*
7480 * Only normalize user tasks:
7481 */
7482 if (p->flags & PF_KTHREAD)
7483 continue;
7484
7485 p->se.exec_start = 0;
7486#ifdef CONFIG_SCHEDSTATS
7487 p->se.statistics.wait_start = 0;
7488 p->se.statistics.sleep_start = 0;
7489 p->se.statistics.block_start = 0;
7490#endif
7491
7492 if (!dl_task(p) && !rt_task(p)) {
7493 /*
7494 * Renice negative nice level userspace
7495 * tasks back to 0:
7496 */
7497 if (task_nice(p) < 0)
7498 set_user_nice(p, 0);
7499 continue;
7500 }
7501
7502 __sched_setscheduler(p, &attr, false, false);
7503 }
7504 read_unlock(&tasklist_lock);
7505}
7506
7507#endif /* CONFIG_MAGIC_SYSRQ */
7508
7509#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
7510/*
7511 * These functions are only useful for the IA64 MCA handling, or kdb.
7512 *
7513 * They can only be called when the whole system has been
7514 * stopped - every CPU needs to be quiescent, and no scheduling
7515 * activity can take place. Using them for anything else would
7516 * be a serious bug, and as a result, they aren't even visible
7517 * under any other configuration.
7518 */
7519
7520/**
7521 * curr_task - return the current task for a given cpu.
7522 * @cpu: the processor in question.
7523 *
7524 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7525 *
7526 * Return: The current task for @cpu.
7527 */
7528struct task_struct *curr_task(int cpu)
7529{
7530 return cpu_curr(cpu);
7531}
7532
7533#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7534
7535#ifdef CONFIG_IA64
7536/**
7537 * set_curr_task - set the current task for a given cpu.
7538 * @cpu: the processor in question.
7539 * @p: the task pointer to set.
7540 *
 * Description: This function must only be used when non-maskable interrupts
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a CPU in a non-blocking manner. This function
 * must be called with all CPUs synchronized and interrupts disabled, and the
 * caller must save the original value of the current task (see curr_task()
 * above) and restore that value before re-enabling interrupts and restarting
 * the system.
7548 *
7549 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7550 */
7551void set_curr_task(int cpu, struct task_struct *p)
7552{
7553 cpu_curr(cpu) = p;
7554}
7555
7556#endif
7557
7558#ifdef CONFIG_CGROUP_SCHED
7559/* task_group_lock serializes the addition/removal of task groups */
7560static DEFINE_SPINLOCK(task_group_lock);
7561
7562static void sched_free_group(struct task_group *tg)
7563{
7564 free_fair_sched_group(tg);
7565 free_rt_sched_group(tg);
7566 autogroup_free(tg);
7567 kmem_cache_free(task_group_cache, tg);
7568}
7569
7570/* allocate runqueue etc for a new task group */
7571struct task_group *sched_create_group(struct task_group *parent)
7572{
7573 struct task_group *tg;
7574
7575 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
7576 if (!tg)
7577 return ERR_PTR(-ENOMEM);
7578
7579 if (!alloc_fair_sched_group(tg, parent))
7580 goto err;
7581
7582 if (!alloc_rt_sched_group(tg, parent))
7583 goto err;
7584
7585 return tg;
7586
7587err:
7588 sched_free_group(tg);
7589 return ERR_PTR(-ENOMEM);
7590}
7591
7592void sched_online_group(struct task_group *tg, struct task_group *parent)
7593{
7594 unsigned long flags;
7595
7596 spin_lock_irqsave(&task_group_lock, flags);
7597 list_add_rcu(&tg->list, &task_groups);
7598
7599 WARN_ON(!parent); /* root should already exist */
7600
7601 tg->parent = parent;
7602 INIT_LIST_HEAD(&tg->children);
7603 list_add_rcu(&tg->siblings, &parent->children);
7604 spin_unlock_irqrestore(&task_group_lock, flags);
7605}
7606
7607/* rcu callback to free various structures associated with a task group */
7608static void sched_free_group_rcu(struct rcu_head *rhp)
7609{
7610 /* now it should be safe to free those cfs_rqs */
7611 sched_free_group(container_of(rhp, struct task_group, rcu));
7612}
7613
7614void sched_destroy_group(struct task_group *tg)
7615{
	/* Wait for possible concurrent references to cfs_rqs to complete: */
7617 call_rcu(&tg->rcu, sched_free_group_rcu);
7618}
7619
7620void sched_offline_group(struct task_group *tg)
7621{
7622 unsigned long flags;
7623
7624 /* end participation in shares distribution */
7625 unregister_fair_sched_group(tg);
7626
7627 spin_lock_irqsave(&task_group_lock, flags);
7628 list_del_rcu(&tg->list);
7629 list_del_rcu(&tg->siblings);
7630 spin_unlock_irqrestore(&task_group_lock, flags);
7631}
7632
/*
 * Change a task's runqueue when it moves between groups.
 *
 * The caller of this function should have put the task in its new group by
 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
 * reflect its new group.
 */
7638void sched_move_task(struct task_struct *tsk)
7639{
7640 struct task_group *tg;
7641 int queued, running;
7642 unsigned long flags;
7643 struct rq *rq;
7644
7645 rq = task_rq_lock(tsk, &flags);
7646
7647 running = task_current(rq, tsk);
7648 queued = task_on_rq_queued(tsk);
7649
7650 if (queued)
7651 dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);
7652 if (unlikely(running))
7653 put_prev_task(rq, tsk);
7654
7655 /*
7656 * All callers are synchronized by task_rq_lock(); we do not use RCU
7657 * which is pointless here. Thus, we pass "true" to task_css_check()
7658 * to prevent lockdep warnings.
7659 */
7660 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
7661 struct task_group, css);
7662 tg = autogroup_task_group(tsk, tg);
7663 tsk->sched_task_group = tg;
7664
7665#ifdef CONFIG_FAIR_GROUP_SCHED
7666 if (tsk->sched_class->task_move_group)
7667 tsk->sched_class->task_move_group(tsk);
7668 else
7669#endif
7670 set_task_rq(tsk, task_cpu(tsk));
7671
7672 if (unlikely(running))
7673 tsk->sched_class->set_curr_task(rq);
7674 if (queued)
7675 enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
7676
7677 task_rq_unlock(rq, tsk, &flags);
7678}
7679#endif /* CONFIG_CGROUP_SCHED */
7680
7681#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real-time constraints are schedulable.
 */
7685static DEFINE_MUTEX(rt_constraints_mutex);
7686
7687/* Must be called with tasklist_lock held */
7688static inline int tg_has_rt_tasks(struct task_group *tg)
7689{
7690 struct task_struct *g, *p;
7691
7692 /*
7693 * Autogroups do not have RT tasks; see autogroup_create().
7694 */
7695 if (task_group_is_autogroup(tg))
7696 return 0;
7697
7698 for_each_process_thread(g, p) {
7699 if (rt_task(p) && task_group(p) == tg)
7700 return 1;
7701 }
7702
7703 return 0;
7704}
7705
7706struct rt_schedulable_data {
7707 struct task_group *tg;
7708 u64 rt_period;
7709 u64 rt_runtime;
7710};
7711
7712static int tg_rt_schedulable(struct task_group *tg, void *data)
7713{
7714 struct rt_schedulable_data *d = data;
7715 struct task_group *child;
7716 unsigned long total, sum = 0;
7717 u64 period, runtime;
7718
7719 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7720 runtime = tg->rt_bandwidth.rt_runtime;
7721
7722 if (tg == d->tg) {
7723 period = d->rt_period;
7724 runtime = d->rt_runtime;
7725 }
7726
7727 /*
7728 * Cannot have more runtime than the period.
7729 */
7730 if (runtime > period && runtime != RUNTIME_INF)
7731 return -EINVAL;
7732
7733 /*
7734 * Ensure we don't starve existing RT tasks.
7735 */
7736 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7737 return -EBUSY;
7738
7739 total = to_ratio(period, runtime);
7740
7741 /*
7742 * Nobody can have more than the global setting allows.
7743 */
7744 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7745 return -EINVAL;
7746
7747 /*
7748 * The sum of our children's runtime should not exceed our own.
7749 */
7750 list_for_each_entry_rcu(child, &tg->children, siblings) {
7751 period = ktime_to_ns(child->rt_bandwidth.rt_period);
7752 runtime = child->rt_bandwidth.rt_runtime;
7753
7754 if (child == d->tg) {
7755 period = d->rt_period;
7756 runtime = d->rt_runtime;
7757 }
7758
7759 sum += to_ratio(period, runtime);
7760 }
7761
7762 if (sum > total)
7763 return -EINVAL;
7764
7765 return 0;
7766}
7767
7768static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
7769{
7770 int ret;
7771
7772 struct rt_schedulable_data data = {
7773 .tg = tg,
7774 .rt_period = period,
7775 .rt_runtime = runtime,
7776 };
7777
7778 rcu_read_lock();
7779 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7780 rcu_read_unlock();
7781
7782 return ret;
7783}
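/*
 * Worked example (assumed values): with the global defaults of
 * sched_rt_period = 1s and sched_rt_runtime = 0.95s, the global ratio is
 * to_ratio(1s, 0.95s) ~= 0.95. A group requesting 0.5s of runtime per 1s
 * period (ratio 0.5) passes both the runtime <= period check and the global
 * cap. If two of its children then each request 0.3s per 1s, their summed
 * ratio of 0.6 exceeds the parent's 0.5 and tg_rt_schedulable() returns
 * -EINVAL when the walk reaches the parent.
 */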
7784
7785static int tg_set_rt_bandwidth(struct task_group *tg,
7786 u64 rt_period, u64 rt_runtime)
7787{
7788 int i, err = 0;
7789
	/*
	 * Disallowing the root group RT runtime is BAD; it would disallow the
	 * kernel from creating (and/or operating) RT threads.
	 */
7794 if (tg == &root_task_group && rt_runtime == 0)
7795 return -EINVAL;
7796
	/* A zero period doesn't make any sense. */
7798 if (rt_period == 0)
7799 return -EINVAL;
7800
7801 mutex_lock(&rt_constraints_mutex);
7802 read_lock(&tasklist_lock);
7803 err = __rt_schedulable(tg, rt_period, rt_runtime);
7804 if (err)
7805 goto unlock;
7806
7807 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
7808 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7809 tg->rt_bandwidth.rt_runtime = rt_runtime;
7810
7811 for_each_possible_cpu(i) {
7812 struct rt_rq *rt_rq = tg->rt_rq[i];
7813
7814 raw_spin_lock(&rt_rq->rt_runtime_lock);
7815 rt_rq->rt_runtime = rt_runtime;
7816 raw_spin_unlock(&rt_rq->rt_runtime_lock);
7817 }
7818 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
7819unlock:
7820 read_unlock(&tasklist_lock);
7821 mutex_unlock(&rt_constraints_mutex);
7822
7823 return err;
7824}
7825
7826static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
7827{
7828 u64 rt_runtime, rt_period;
7829
7830 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7831 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7832 if (rt_runtime_us < 0)
7833 rt_runtime = RUNTIME_INF;
7834
7835 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7836}
7837
7838static long sched_group_rt_runtime(struct task_group *tg)
7839{
7840 u64 rt_runtime_us;
7841
7842 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
7843 return -1;
7844
7845 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
7846 do_div(rt_runtime_us, NSEC_PER_USEC);
7847 return rt_runtime_us;
7848}
7849
7850static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
7851{
7852 u64 rt_runtime, rt_period;
7853
7854 rt_period = rt_period_us * NSEC_PER_USEC;
7855 rt_runtime = tg->rt_bandwidth.rt_runtime;
7856
7857 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7858}
7859
7860static long sched_group_rt_period(struct task_group *tg)
7861{
7862 u64 rt_period_us;
7863
7864 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7865 do_div(rt_period_us, NSEC_PER_USEC);
7866 return rt_period_us;
7867}
7868#endif /* CONFIG_RT_GROUP_SCHED */
7869
7870#ifdef CONFIG_RT_GROUP_SCHED
7871static int sched_rt_global_constraints(void)
7872{
7873 int ret = 0;
7874
7875 mutex_lock(&rt_constraints_mutex);
7876 read_lock(&tasklist_lock);
7877 ret = __rt_schedulable(NULL, 0, 0);
7878 read_unlock(&tasklist_lock);
7879 mutex_unlock(&rt_constraints_mutex);
7880
7881 return ret;
7882}
7883
7884static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
7885{
7886 /* Don't accept realtime tasks when there is no way for them to run */
7887 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
7888 return 0;
7889
7890 return 1;
7891}
7892
7893#else /* !CONFIG_RT_GROUP_SCHED */
7894static int sched_rt_global_constraints(void)
7895{
7896 unsigned long flags;
7897 int i, ret = 0;
7898
7899 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
7900 for_each_possible_cpu(i) {
7901 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7902
7903 raw_spin_lock(&rt_rq->rt_runtime_lock);
7904 rt_rq->rt_runtime = global_rt_runtime();
7905 raw_spin_unlock(&rt_rq->rt_runtime_lock);
7906 }
7907 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
7908
7909 return ret;
7910}
7911#endif /* CONFIG_RT_GROUP_SCHED */
7912
7913static int sched_dl_global_validate(void)
7914{
7915 u64 runtime = global_rt_runtime();
7916 u64 period = global_rt_period();
7917 u64 new_bw = to_ratio(period, runtime);
7918 struct dl_bw *dl_b;
7919 int cpu, ret = 0;
7920 unsigned long flags;
7921
	/*
	 * Here we want to check that the bandwidth is not being set to a
	 * value smaller than the currently allocated bandwidth in any of
	 * the root_domains.
	 *
	 * FIXME: Cycling over all the CPUs is overdoing it, but simpler than
	 * cycling over the root_domains... Discussion on different/better
	 * solutions is welcome!
	 */
7931 for_each_possible_cpu(cpu) {
7932 rcu_read_lock_sched();
7933 dl_b = dl_bw_of(cpu);
7934
7935 raw_spin_lock_irqsave(&dl_b->lock, flags);
7936 if (new_bw < dl_b->total_bw)
7937 ret = -EBUSY;
7938 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7939
7940 rcu_read_unlock_sched();
7941
7942 if (ret)
7943 break;
7944 }
7945
7946 return ret;
7947}
7948
7949static void sched_dl_do_global(void)
7950{
7951 u64 new_bw = -1;
7952 struct dl_bw *dl_b;
7953 int cpu;
7954 unsigned long flags;
7955
7956 def_dl_bandwidth.dl_period = global_rt_period();
7957 def_dl_bandwidth.dl_runtime = global_rt_runtime();
7958
7959 if (global_rt_runtime() != RUNTIME_INF)
7960 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
7961
7962 /*
7963 * FIXME: As above...
7964 */
7965 for_each_possible_cpu(cpu) {
7966 rcu_read_lock_sched();
7967 dl_b = dl_bw_of(cpu);
7968
7969 raw_spin_lock_irqsave(&dl_b->lock, flags);
7970 dl_b->bw = new_bw;
7971 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7972
7973 rcu_read_unlock_sched();
7974 }
7975}
7976
7977static int sched_rt_global_validate(void)
7978{
7979 if (sysctl_sched_rt_period <= 0)
7980 return -EINVAL;
7981
7982 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
7983 (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
7984 return -EINVAL;
7985
7986 return 0;
7987}
7988
7989static void sched_rt_do_global(void)
7990{
7991 def_rt_bandwidth.rt_runtime = global_rt_runtime();
7992 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
7993}
7994
7995int sched_rt_handler(struct ctl_table *table, int write,
7996 void __user *buffer, size_t *lenp,
7997 loff_t *ppos)
7998{
7999 int old_period, old_runtime;
8000 static DEFINE_MUTEX(mutex);
8001 int ret;
8002
8003 mutex_lock(&mutex);
8004 old_period = sysctl_sched_rt_period;
8005 old_runtime = sysctl_sched_rt_runtime;
8006
8007 ret = proc_dointvec(table, write, buffer, lenp, ppos);
8008
8009 if (!ret && write) {
8010 ret = sched_rt_global_validate();
8011 if (ret)
8012 goto undo;
8013
8014 ret = sched_dl_global_validate();
8015 if (ret)
8016 goto undo;
8017
8018 ret = sched_rt_global_constraints();
8019 if (ret)
8020 goto undo;
8021
8022 sched_rt_do_global();
8023 sched_dl_do_global();
8024 }
8025 if (0) {
8026undo:
8027 sysctl_sched_rt_period = old_period;
8028 sysctl_sched_rt_runtime = old_runtime;
8029 }
8030 mutex_unlock(&mutex);
8031
8032 return ret;
8033}
8034
8035int sched_rr_handler(struct ctl_table *table, int write,
8036 void __user *buffer, size_t *lenp,
8037 loff_t *ppos)
8038{
8039 int ret;
8040 static DEFINE_MUTEX(mutex);
8041
8042 mutex_lock(&mutex);
8043 ret = proc_dointvec(table, write, buffer, lenp, ppos);
	/*
	 * Make sure that internally we keep jiffies.
	 * Also, writing zero resets the timeslice to default:
	 */
8046 if (!ret && write) {
8047 sched_rr_timeslice = sched_rr_timeslice <= 0 ?
8048 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
8049 }
8050 mutex_unlock(&mutex);
8051 return ret;
8052}
8053
8054#ifdef CONFIG_CGROUP_SCHED
8055
8056static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
8057{
8058 return css ? container_of(css, struct task_group, css) : NULL;
8059}
8060
8061static struct cgroup_subsys_state *
8062cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
8063{
8064 struct task_group *parent = css_tg(parent_css);
8065 struct task_group *tg;
8066
8067 if (!parent) {
8068 /* This is early initialization for the top cgroup */
8069 return &root_task_group.css;
8070 }
8071
8072 tg = sched_create_group(parent);
8073 if (IS_ERR(tg))
8074 return ERR_PTR(-ENOMEM);
8075
8076 sched_online_group(tg, parent);
8077
8078 return &tg->css;
8079}
8080
8081static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
8082{
8083 struct task_group *tg = css_tg(css);
8084
8085 sched_offline_group(tg);
8086}
8087
8088static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
8089{
8090 struct task_group *tg = css_tg(css);
8091
8092 /*
8093 * Relies on the RCU grace period between css_released() and this.
8094 */
8095 sched_free_group(tg);
8096}
8097
8098static void cpu_cgroup_fork(struct task_struct *task)
8099{
8100 sched_move_task(task);
8101}
8102
8103static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
8104{
8105 struct task_struct *task;
8106 struct cgroup_subsys_state *css;
8107
8108 cgroup_taskset_for_each(task, css, tset) {
8109#ifdef CONFIG_RT_GROUP_SCHED
8110 if (!sched_rt_can_attach(css_tg(css), task))
8111 return -EINVAL;
8112#else
8113 /* We don't support RT-tasks being in separate groups */
8114 if (task->sched_class != &fair_sched_class)
8115 return -EINVAL;
8116#endif
8117 }
8118 return 0;
8119}
8120
8121static void cpu_cgroup_attach(struct cgroup_taskset *tset)
8122{
8123 struct task_struct *task;
8124 struct cgroup_subsys_state *css;
8125
8126 cgroup_taskset_for_each(task, css, tset)
8127 sched_move_task(task);
8128}
8129
8130#ifdef CONFIG_FAIR_GROUP_SCHED
8131static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
8132 struct cftype *cftype, u64 shareval)
8133{
8134 return sched_group_set_shares(css_tg(css), scale_load(shareval));
8135}
8136
8137static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
8138 struct cftype *cft)
8139{
8140 struct task_group *tg = css_tg(css);
8141
8142 return (u64) scale_load_down(tg->shares);
8143}
8144
8145#ifdef CONFIG_CFS_BANDWIDTH
8146static DEFINE_MUTEX(cfs_constraints_mutex);
8147
8148const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
8149const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
8150
8151static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8152
8153static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8154{
8155 int i, ret = 0, runtime_enabled, runtime_was_enabled;
8156 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8157
8158 if (tg == &root_task_group)
8159 return -EINVAL;
8160
	/*
	 * Ensure we have some amount of bandwidth every period. This is to
	 * prevent reaching a state of large arrears when throttled via
	 * entity_tick(), resulting in prolonged exit starvation.
	 */
8166 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
8167 return -EINVAL;
8168
	/*
	 * Likewise, bound things on the other side by preventing insane quota
	 * periods. This also allows us to normalize in computing quota
	 * feasibility.
	 */
8174 if (period > max_cfs_quota_period)
8175 return -EINVAL;
8176
8177 /*
8178 * Prevent race between setting of cfs_rq->runtime_enabled and
8179 * unthrottle_offline_cfs_rqs().
8180 */
8181 get_online_cpus();
8182 mutex_lock(&cfs_constraints_mutex);
8183 ret = __cfs_schedulable(tg, period, quota);
8184 if (ret)
8185 goto out_unlock;
8186
8187 runtime_enabled = quota != RUNTIME_INF;
8188 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
8189 /*
8190 * If we need to toggle cfs_bandwidth_used, off->on must occur
8191 * before making related changes, and on->off must occur afterwards
8192 */
8193 if (runtime_enabled && !runtime_was_enabled)
8194 cfs_bandwidth_usage_inc();
8195 raw_spin_lock_irq(&cfs_b->lock);
8196 cfs_b->period = ns_to_ktime(period);
8197 cfs_b->quota = quota;
8198
8199 __refill_cfs_bandwidth_runtime(cfs_b);
8200 /* restart the period timer (if active) to handle new period expiry */
8201 if (runtime_enabled)
8202 start_cfs_bandwidth(cfs_b);
8203 raw_spin_unlock_irq(&cfs_b->lock);
8204
8205 for_each_online_cpu(i) {
8206 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
8207 struct rq *rq = cfs_rq->rq;
8208
8209 raw_spin_lock_irq(&rq->lock);
8210 cfs_rq->runtime_enabled = runtime_enabled;
8211 cfs_rq->runtime_remaining = 0;
8212
8213 if (cfs_rq->throttled)
8214 unthrottle_cfs_rq(cfs_rq);
8215 raw_spin_unlock_irq(&rq->lock);
8216 }
8217 if (runtime_was_enabled && !runtime_enabled)
8218 cfs_bandwidth_usage_dec();
8219out_unlock:
8220 mutex_unlock(&cfs_constraints_mutex);
8221 put_online_cpus();
8222
8223 return ret;
8224}
8225
8226int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
8227{
8228 u64 quota, period;
8229
8230 period = ktime_to_ns(tg->cfs_bandwidth.period);
8231 if (cfs_quota_us < 0)
8232 quota = RUNTIME_INF;
8233 else
8234 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
8235
8236 return tg_set_cfs_bandwidth(tg, period, quota);
8237}
8238
8239long tg_get_cfs_quota(struct task_group *tg)
8240{
8241 u64 quota_us;
8242
8243 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
8244 return -1;
8245
8246 quota_us = tg->cfs_bandwidth.quota;
8247 do_div(quota_us, NSEC_PER_USEC);
8248
8249 return quota_us;
8250}
8251
8252int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
8253{
8254 u64 quota, period;
8255
8256 period = (u64)cfs_period_us * NSEC_PER_USEC;
8257 quota = tg->cfs_bandwidth.quota;
8258
8259 return tg_set_cfs_bandwidth(tg, period, quota);
8260}
8261
8262long tg_get_cfs_period(struct task_group *tg)
8263{
8264 u64 cfs_period_us;
8265
8266 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
8267 do_div(cfs_period_us, NSEC_PER_USEC);
8268
8269 return cfs_period_us;
8270}
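/*
 * Worked example (assumed values): to cap a group at half a CPU with the
 * default 100ms period:
 *
 *	tg_set_cfs_period(tg, 100000);	(period in usecs)
 *	tg_set_cfs_quota(tg, 50000);	(quota in usecs)
 *
 * Both are stored internally in nanoseconds; tg_get_cfs_quota() reads back
 * 50000, and a negative quota means RUNTIME_INF, i.e. no limit. A quota
 * larger than the period (say 200000 here) is valid and lets the group
 * consume up to two CPUs' worth of time per period.
 */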
8271
8272static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
8273 struct cftype *cft)
8274{
8275 return tg_get_cfs_quota(css_tg(css));
8276}
8277
8278static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
8279 struct cftype *cftype, s64 cfs_quota_us)
8280{
8281 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
8282}
8283
8284static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
8285 struct cftype *cft)
8286{
8287 return tg_get_cfs_period(css_tg(css));
8288}
8289
8290static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
8291 struct cftype *cftype, u64 cfs_period_us)
8292{
8293 return tg_set_cfs_period(css_tg(css), cfs_period_us);
8294}
8295
8296struct cfs_schedulable_data {
8297 struct task_group *tg;
8298 u64 period, quota;
8299};
8300
/*
 * Normalize group quota/period to be quota/max_period.
 * Note: units are usecs.
 */
8305static u64 normalize_cfs_quota(struct task_group *tg,
8306 struct cfs_schedulable_data *d)
8307{
8308 u64 quota, period;
8309
8310 if (tg == d->tg) {
8311 period = d->period;
8312 quota = d->quota;
8313 } else {
8314 period = tg_get_cfs_period(tg);
8315 quota = tg_get_cfs_quota(tg);
8316 }
8317
8318 /* note: these should typically be equivalent */
8319 if (quota == RUNTIME_INF || quota == -1)
8320 return RUNTIME_INF;
8321
8322 return to_ratio(period, quota);
8323}
8324
8325static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8326{
8327 struct cfs_schedulable_data *d = data;
8328 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8329 s64 quota = 0, parent_quota = -1;
8330
8331 if (!tg->parent) {
8332 quota = RUNTIME_INF;
8333 } else {
8334 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
8335
8336 quota = normalize_cfs_quota(tg, d);
8337 parent_quota = parent_b->hierarchical_quota;
8338
8339 /*
8340 * ensure max(child_quota) <= parent_quota, inherit when no
8341 * limit is set
8342 */
8343 if (quota == RUNTIME_INF)
8344 quota = parent_quota;
8345 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
8346 return -EINVAL;
8347 }
8348 cfs_b->hierarchical_quota = quota;
8349
8350 return 0;
8351}
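/*
 * Worked example (assumed values): a parent with quota 50ms per 100ms
 * period has a hierarchical_quota ratio of ~0.5. A child with no limit set
 * (RUNTIME_INF) inherits that 0.5; a child explicitly requesting 80ms per
 * 100ms (ratio 0.8 > 0.5) is rejected with -EINVAL by the check above.
 */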
8352
8353static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
8354{
8355 int ret;
8356 struct cfs_schedulable_data data = {
8357 .tg = tg,
8358 .period = period,
8359 .quota = quota,
8360 };
8361
8362 if (quota != RUNTIME_INF) {
8363 do_div(data.period, NSEC_PER_USEC);
8364 do_div(data.quota, NSEC_PER_USEC);
8365 }
8366
8367 rcu_read_lock();
8368 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
8369 rcu_read_unlock();
8370
8371 return ret;
8372}
8373
8374static int cpu_stats_show(struct seq_file *sf, void *v)
8375{
8376 struct task_group *tg = css_tg(seq_css(sf));
8377 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8378
8379 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
8380 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
8381 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
8382
8383 return 0;
8384}
8385#endif /* CONFIG_CFS_BANDWIDTH */
8386#endif /* CONFIG_FAIR_GROUP_SCHED */
8387
8388#ifdef CONFIG_RT_GROUP_SCHED
8389static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
8390 struct cftype *cft, s64 val)
8391{
8392 return sched_group_set_rt_runtime(css_tg(css), val);
8393}
8394
8395static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
8396 struct cftype *cft)
8397{
8398 return sched_group_rt_runtime(css_tg(css));
8399}
8400
8401static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
8402 struct cftype *cftype, u64 rt_period_us)
8403{
8404 return sched_group_set_rt_period(css_tg(css), rt_period_us);
8405}
8406
8407static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
8408 struct cftype *cft)
8409{
8410 return sched_group_rt_period(css_tg(css));
8411}
8412#endif /* CONFIG_RT_GROUP_SCHED */
8413
8414static struct cftype cpu_files[] = {
8415#ifdef CONFIG_FAIR_GROUP_SCHED
8416 {
8417 .name = "shares",
8418 .read_u64 = cpu_shares_read_u64,
8419 .write_u64 = cpu_shares_write_u64,
8420 },
8421#endif
8422#ifdef CONFIG_CFS_BANDWIDTH
8423 {
8424 .name = "cfs_quota_us",
8425 .read_s64 = cpu_cfs_quota_read_s64,
8426 .write_s64 = cpu_cfs_quota_write_s64,
8427 },
8428 {
8429 .name = "cfs_period_us",
8430 .read_u64 = cpu_cfs_period_read_u64,
8431 .write_u64 = cpu_cfs_period_write_u64,
8432 },
8433 {
8434 .name = "stat",
8435 .seq_show = cpu_stats_show,
8436 },
8437#endif
8438#ifdef CONFIG_RT_GROUP_SCHED
8439 {
8440 .name = "rt_runtime_us",
8441 .read_s64 = cpu_rt_runtime_read,
8442 .write_s64 = cpu_rt_runtime_write,
8443 },
8444 {
8445 .name = "rt_period_us",
8446 .read_u64 = cpu_rt_period_read_uint,
8447 .write_u64 = cpu_rt_period_write_uint,
8448 },
8449#endif
8450 { } /* terminate */
8451};
8452
8453struct cgroup_subsys cpu_cgrp_subsys = {
8454 .css_alloc = cpu_cgroup_css_alloc,
8455 .css_released = cpu_cgroup_css_released,
8456 .css_free = cpu_cgroup_css_free,
8457 .fork = cpu_cgroup_fork,
8458 .can_attach = cpu_cgroup_can_attach,
8459 .attach = cpu_cgroup_attach,
8460 .legacy_cftypes = cpu_files,
8461 .early_init = true,
8462};
8463
8464#endif /* CONFIG_CGROUP_SCHED */
8465
8466void dump_cpu_task(int cpu)
8467{
8468 pr_info("Task dump for CPU %d:\n", cpu);
8469 sched_show_task(cpu_curr(cpu));
8470}
8471
8472/*
8473 * Nice levels are multiplicative, with a gentle 10% change for every
8474 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
8475 * nice 1, it will get ~10% less CPU time than another CPU-bound task
8476 * that remained on nice 0.
8477 *
8478 * The "10% effect" is relative and cumulative: from _any_ nice level,
8479 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
8480 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
8481 * If a task goes up by ~10% and another task goes down by ~10% then
8482 * the relative distance between them is ~25%.)
8483 */
8484const int sched_prio_to_weight[40] = {
8485 /* -20 */ 88761, 71755, 56483, 46273, 36291,
8486 /* -15 */ 29154, 23254, 18705, 14949, 11916,
8487 /* -10 */ 9548, 7620, 6100, 4904, 3906,
8488 /* -5 */ 3121, 2501, 1991, 1586, 1277,
8489 /* 0 */ 1024, 820, 655, 526, 423,
8490 /* 5 */ 335, 272, 215, 172, 137,
8491 /* 10 */ 110, 87, 70, 56, 45,
8492 /* 15 */ 36, 29, 23, 18, 15,
8493};
8494
8495/*
8496 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
8497 *
8498 * In cases where the weight does not change often, we can use the
8499 * precalculated inverse to speed up arithmetics by turning divisions
8500 * into multiplications:
8501 */
8502const u32 sched_prio_to_wmult[40] = {
8503 /* -20 */ 48388, 59856, 76040, 92818, 118348,
8504 /* -15 */ 147320, 184698, 229616, 287308, 360437,
8505 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
8506 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
8507 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
8508 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
8509 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
8510 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
8511};
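/*
 * Worked example: two runnable CPU-bound tasks, one at nice 0 (weight 1024)
 * and one at nice 5 (weight 335), split a CPU roughly as
 *
 *	1024 / (1024 + 335) ~= 75%	and	335 / (1024 + 335) ~= 25%
 *
 * The wmult table lets the fair class compute the weighted vruntime delta
 *
 *	delta_exec * NICE_0_LOAD / weight
 *
 * as a multiply and shift instead of a division:
 *
 *	(delta_exec * NICE_0_LOAD * sched_prio_to_wmult[prio]) >> 32
 *
 * e.g. for nice 0, sched_prio_to_wmult[20] == 4194304 == 2^32 / 1024.
 */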