Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * kernel/sched/core.c
4 *
5 * Core kernel CPU scheduler code
6 *
7 * Copyright (C) 1991-2002 Linus Torvalds
8 * Copyright (C) 1998-2024 Ingo Molnar, Red Hat
9 */
10#include <linux/highmem.h>
11#include <linux/hrtimer_api.h>
12#include <linux/ktime_api.h>
13#include <linux/sched/signal.h>
14#include <linux/syscalls_api.h>
15#include <linux/debug_locks.h>
16#include <linux/prefetch.h>
17#include <linux/capability.h>
18#include <linux/pgtable_api.h>
19#include <linux/wait_bit.h>
20#include <linux/jiffies.h>
21#include <linux/spinlock_api.h>
22#include <linux/cpumask_api.h>
23#include <linux/lockdep_api.h>
24#include <linux/hardirq.h>
25#include <linux/softirq.h>
26#include <linux/refcount_api.h>
27#include <linux/topology.h>
28#include <linux/sched/clock.h>
29#include <linux/sched/cond_resched.h>
30#include <linux/sched/cputime.h>
31#include <linux/sched/debug.h>
32#include <linux/sched/hotplug.h>
33#include <linux/sched/init.h>
34#include <linux/sched/isolation.h>
35#include <linux/sched/loadavg.h>
36#include <linux/sched/mm.h>
37#include <linux/sched/nohz.h>
38#include <linux/sched/rseq_api.h>
39#include <linux/sched/rt.h>
40
41#include <linux/blkdev.h>
42#include <linux/context_tracking.h>
43#include <linux/cpuset.h>
44#include <linux/delayacct.h>
45#include <linux/init_task.h>
46#include <linux/interrupt.h>
47#include <linux/ioprio.h>
48#include <linux/kallsyms.h>
49#include <linux/kcov.h>
50#include <linux/kprobes.h>
51#include <linux/llist_api.h>
52#include <linux/mmu_context.h>
53#include <linux/mmzone.h>
54#include <linux/mutex_api.h>
55#include <linux/nmi.h>
56#include <linux/nospec.h>
57#include <linux/perf_event_api.h>
58#include <linux/profile.h>
59#include <linux/psi.h>
60#include <linux/rcuwait_api.h>
61#include <linux/rseq.h>
62#include <linux/sched/wake_q.h>
63#include <linux/scs.h>
64#include <linux/slab.h>
65#include <linux/syscalls.h>
66#include <linux/vtime.h>
67#include <linux/wait_api.h>
68#include <linux/workqueue_api.h>
69
70#ifdef CONFIG_PREEMPT_DYNAMIC
71# ifdef CONFIG_GENERIC_ENTRY
72# include <linux/entry-common.h>
73# endif
74#endif
75
76#include <uapi/linux/sched/types.h>
77
78#include <asm/irq_regs.h>
79#include <asm/switch_to.h>
80#include <asm/tlb.h>
81
82#define CREATE_TRACE_POINTS
83#include <linux/sched/rseq_api.h>
84#include <trace/events/sched.h>
85#include <trace/events/ipi.h>
86#undef CREATE_TRACE_POINTS
87
88#include "sched.h"
89#include "stats.h"
90
91#include "autogroup.h"
92#include "pelt.h"
93#include "smp.h"
94#include "stats.h"
95
96#include "../workqueue_internal.h"
97#include "../../io_uring/io-wq.h"
98#include "../smpboot.h"
99
100EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
101EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
102
103/*
104 * Export tracepoints that act as a bare tracehook (ie: have no trace event
105 * associated with them) to allow external modules to probe them.
106 */
107EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
108EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
109EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
110EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
111EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
112EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
113EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
114EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
115EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
116EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
117EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
118EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
119
120DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
121
122#ifdef CONFIG_SCHED_DEBUG
123/*
124 * Debugging: various feature bits
125 *
126 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
127 * sysctl_sched_features, defined in sched.h, to allow constants propagation
128 * at compile time and compiler optimization based on features default.
129 */
130#define SCHED_FEAT(name, enabled) \
131 (1UL << __SCHED_FEAT_##name) * enabled |
132const_debug unsigned int sysctl_sched_features =
133#include "features.h"
134 0;
135#undef SCHED_FEAT
136
137/*
138 * Print a warning if need_resched is set for the given duration (if
139 * LATENCY_WARN is enabled).
140 *
141 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
142 * per boot.
143 */
144__read_mostly int sysctl_resched_latency_warn_ms = 100;
145__read_mostly int sysctl_resched_latency_warn_once = 1;
146#endif /* CONFIG_SCHED_DEBUG */
147
148/*
149 * Number of tasks to iterate in a single balance run.
150 * Limited because this is done with IRQs disabled.
151 */
152const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
153
154__read_mostly int scheduler_running;
155
156#ifdef CONFIG_SCHED_CORE
157
158DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
159
160/* kernel prio, less is more */
161static inline int __task_prio(const struct task_struct *p)
162{
163 if (p->sched_class == &stop_sched_class) /* trumps deadline */
164 return -2;
165
166 if (p->dl_server)
167 return -1; /* deadline */
168
169 if (rt_or_dl_prio(p->prio))
170 return p->prio; /* [-1, 99] */
171
172 if (p->sched_class == &idle_sched_class)
173 return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
174
175 if (task_on_scx(p))
176 return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */
177
178 return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
179}
180
181/*
182 * l(a,b)
183 * le(a,b) := !l(b,a)
184 * g(a,b) := l(b,a)
185 * ge(a,b) := !l(a,b)
186 */
187
188/* real prio, less is less */
189static inline bool prio_less(const struct task_struct *a,
190 const struct task_struct *b, bool in_fi)
191{
192
193 int pa = __task_prio(a), pb = __task_prio(b);
194
195 if (-pa < -pb)
196 return true;
197
198 if (-pb < -pa)
199 return false;
200
201 if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
202 const struct sched_dl_entity *a_dl, *b_dl;
203
204 a_dl = &a->dl;
205 /*
206 * Since,'a' and 'b' can be CFS tasks served by DL server,
207 * __task_prio() can return -1 (for DL) even for those. In that
208 * case, get to the dl_server's DL entity.
209 */
210 if (a->dl_server)
211 a_dl = a->dl_server;
212
213 b_dl = &b->dl;
214 if (b->dl_server)
215 b_dl = b->dl_server;
216
217 return !dl_time_before(a_dl->deadline, b_dl->deadline);
218 }
219
220 if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
221 return cfs_prio_less(a, b, in_fi);
222
223#ifdef CONFIG_SCHED_CLASS_EXT
224 if (pa == MAX_RT_PRIO + MAX_NICE + 1) /* ext */
225 return scx_prio_less(a, b, in_fi);
226#endif
227
228 return false;
229}
230
231static inline bool __sched_core_less(const struct task_struct *a,
232 const struct task_struct *b)
233{
234 if (a->core_cookie < b->core_cookie)
235 return true;
236
237 if (a->core_cookie > b->core_cookie)
238 return false;
239
240 /* flip prio, so high prio is leftmost */
241 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
242 return true;
243
244 return false;
245}
246
247#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
248
249static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
250{
251 return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
252}
253
254static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
255{
256 const struct task_struct *p = __node_2_sc(node);
257 unsigned long cookie = (unsigned long)key;
258
259 if (cookie < p->core_cookie)
260 return -1;
261
262 if (cookie > p->core_cookie)
263 return 1;
264
265 return 0;
266}
267
268void sched_core_enqueue(struct rq *rq, struct task_struct *p)
269{
270 if (p->se.sched_delayed)
271 return;
272
273 rq->core->core_task_seq++;
274
275 if (!p->core_cookie)
276 return;
277
278 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
279}
280
281void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
282{
283 if (p->se.sched_delayed)
284 return;
285
286 rq->core->core_task_seq++;
287
288 if (sched_core_enqueued(p)) {
289 rb_erase(&p->core_node, &rq->core_tree);
290 RB_CLEAR_NODE(&p->core_node);
291 }
292
293 /*
294 * Migrating the last task off the cpu, with the cpu in forced idle
295 * state. Reschedule to create an accounting edge for forced idle,
296 * and re-examine whether the core is still in forced idle state.
297 */
298 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
299 rq->core->core_forceidle_count && rq->curr == rq->idle)
300 resched_curr(rq);
301}
302
303static int sched_task_is_throttled(struct task_struct *p, int cpu)
304{
305 if (p->sched_class->task_is_throttled)
306 return p->sched_class->task_is_throttled(p, cpu);
307
308 return 0;
309}
310
311static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
312{
313 struct rb_node *node = &p->core_node;
314 int cpu = task_cpu(p);
315
316 do {
317 node = rb_next(node);
318 if (!node)
319 return NULL;
320
321 p = __node_2_sc(node);
322 if (p->core_cookie != cookie)
323 return NULL;
324
325 } while (sched_task_is_throttled(p, cpu));
326
327 return p;
328}
329
330/*
331 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
332 * If no suitable task is found, NULL will be returned.
333 */
334static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
335{
336 struct task_struct *p;
337 struct rb_node *node;
338
339 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
340 if (!node)
341 return NULL;
342
343 p = __node_2_sc(node);
344 if (!sched_task_is_throttled(p, rq->cpu))
345 return p;
346
347 return sched_core_next(p, cookie);
348}
349
350/*
351 * Magic required such that:
352 *
353 * raw_spin_rq_lock(rq);
354 * ...
355 * raw_spin_rq_unlock(rq);
356 *
357 * ends up locking and unlocking the _same_ lock, and all CPUs
358 * always agree on what rq has what lock.
359 *
360 * XXX entirely possible to selectively enable cores, don't bother for now.
361 */
362
363static DEFINE_MUTEX(sched_core_mutex);
364static atomic_t sched_core_count;
365static struct cpumask sched_core_mask;
366
367static void sched_core_lock(int cpu, unsigned long *flags)
368{
369 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
370 int t, i = 0;
371
372 local_irq_save(*flags);
373 for_each_cpu(t, smt_mask)
374 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
375}
376
377static void sched_core_unlock(int cpu, unsigned long *flags)
378{
379 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
380 int t;
381
382 for_each_cpu(t, smt_mask)
383 raw_spin_unlock(&cpu_rq(t)->__lock);
384 local_irq_restore(*flags);
385}
386
387static void __sched_core_flip(bool enabled)
388{
389 unsigned long flags;
390 int cpu, t;
391
392 cpus_read_lock();
393
394 /*
395 * Toggle the online cores, one by one.
396 */
397 cpumask_copy(&sched_core_mask, cpu_online_mask);
398 for_each_cpu(cpu, &sched_core_mask) {
399 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
400
401 sched_core_lock(cpu, &flags);
402
403 for_each_cpu(t, smt_mask)
404 cpu_rq(t)->core_enabled = enabled;
405
406 cpu_rq(cpu)->core->core_forceidle_start = 0;
407
408 sched_core_unlock(cpu, &flags);
409
410 cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
411 }
412
413 /*
414 * Toggle the offline CPUs.
415 */
416 for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
417 cpu_rq(cpu)->core_enabled = enabled;
418
419 cpus_read_unlock();
420}
421
422static void sched_core_assert_empty(void)
423{
424 int cpu;
425
426 for_each_possible_cpu(cpu)
427 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
428}
429
430static void __sched_core_enable(void)
431{
432 static_branch_enable(&__sched_core_enabled);
433 /*
434 * Ensure all previous instances of raw_spin_rq_*lock() have finished
435 * and future ones will observe !sched_core_disabled().
436 */
437 synchronize_rcu();
438 __sched_core_flip(true);
439 sched_core_assert_empty();
440}
441
442static void __sched_core_disable(void)
443{
444 sched_core_assert_empty();
445 __sched_core_flip(false);
446 static_branch_disable(&__sched_core_enabled);
447}
448
449void sched_core_get(void)
450{
451 if (atomic_inc_not_zero(&sched_core_count))
452 return;
453
454 mutex_lock(&sched_core_mutex);
455 if (!atomic_read(&sched_core_count))
456 __sched_core_enable();
457
458 smp_mb__before_atomic();
459 atomic_inc(&sched_core_count);
460 mutex_unlock(&sched_core_mutex);
461}
462
463static void __sched_core_put(struct work_struct *work)
464{
465 if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
466 __sched_core_disable();
467 mutex_unlock(&sched_core_mutex);
468 }
469}
470
471void sched_core_put(void)
472{
473 static DECLARE_WORK(_work, __sched_core_put);
474
475 /*
476 * "There can be only one"
477 *
478 * Either this is the last one, or we don't actually need to do any
479 * 'work'. If it is the last *again*, we rely on
480 * WORK_STRUCT_PENDING_BIT.
481 */
482 if (!atomic_add_unless(&sched_core_count, -1, 1))
483 schedule_work(&_work);
484}
485
486#else /* !CONFIG_SCHED_CORE */
487
488static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
489static inline void
490sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
491
492#endif /* CONFIG_SCHED_CORE */
493
494/*
495 * Serialization rules:
496 *
497 * Lock order:
498 *
499 * p->pi_lock
500 * rq->lock
501 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
502 *
503 * rq1->lock
504 * rq2->lock where: rq1 < rq2
505 *
506 * Regular state:
507 *
508 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
509 * local CPU's rq->lock, it optionally removes the task from the runqueue and
510 * always looks at the local rq data structures to find the most eligible task
511 * to run next.
512 *
513 * Task enqueue is also under rq->lock, possibly taken from another CPU.
514 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
515 * the local CPU to avoid bouncing the runqueue state around [ see
516 * ttwu_queue_wakelist() ]
517 *
518 * Task wakeup, specifically wakeups that involve migration, are horribly
519 * complicated to avoid having to take two rq->locks.
520 *
521 * Special state:
522 *
523 * System-calls and anything external will use task_rq_lock() which acquires
524 * both p->pi_lock and rq->lock. As a consequence the state they change is
525 * stable while holding either lock:
526 *
527 * - sched_setaffinity()/
528 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
529 * - set_user_nice(): p->se.load, p->*prio
530 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
531 * p->se.load, p->rt_priority,
532 * p->dl.dl_{runtime, deadline, period, flags, bw, density}
533 * - sched_setnuma(): p->numa_preferred_nid
534 * - sched_move_task(): p->sched_task_group
535 * - uclamp_update_active() p->uclamp*
536 *
537 * p->state <- TASK_*:
538 *
539 * is changed locklessly using set_current_state(), __set_current_state() or
540 * set_special_state(), see their respective comments, or by
541 * try_to_wake_up(). This latter uses p->pi_lock to serialize against
542 * concurrent self.
543 *
544 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
545 *
546 * is set by activate_task() and cleared by deactivate_task(), under
547 * rq->lock. Non-zero indicates the task is runnable, the special
548 * ON_RQ_MIGRATING state is used for migration without holding both
549 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
550 *
551 * Additionally it is possible to be ->on_rq but still be considered not
552 * runnable when p->se.sched_delayed is true. These tasks are on the runqueue
553 * but will be dequeued as soon as they get picked again. See the
554 * task_is_runnable() helper.
555 *
556 * p->on_cpu <- { 0, 1 }:
557 *
558 * is set by prepare_task() and cleared by finish_task() such that it will be
559 * set before p is scheduled-in and cleared after p is scheduled-out, both
560 * under rq->lock. Non-zero indicates the task is running on its CPU.
561 *
562 * [ The astute reader will observe that it is possible for two tasks on one
563 * CPU to have ->on_cpu = 1 at the same time. ]
564 *
565 * task_cpu(p): is changed by set_task_cpu(), the rules are:
566 *
567 * - Don't call set_task_cpu() on a blocked task:
568 *
569 * We don't care what CPU we're not running on, this simplifies hotplug,
570 * the CPU assignment of blocked tasks isn't required to be valid.
571 *
572 * - for try_to_wake_up(), called under p->pi_lock:
573 *
574 * This allows try_to_wake_up() to only take one rq->lock, see its comment.
575 *
576 * - for migration called under rq->lock:
577 * [ see task_on_rq_migrating() in task_rq_lock() ]
578 *
579 * o move_queued_task()
580 * o detach_task()
581 *
582 * - for migration called under double_rq_lock():
583 *
584 * o __migrate_swap_task()
585 * o push_rt_task() / pull_rt_task()
586 * o push_dl_task() / pull_dl_task()
587 * o dl_task_offline_migration()
588 *
589 */
590
591void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
592{
593 raw_spinlock_t *lock;
594
595 /* Matches synchronize_rcu() in __sched_core_enable() */
596 preempt_disable();
597 if (sched_core_disabled()) {
598 raw_spin_lock_nested(&rq->__lock, subclass);
599 /* preempt_count *MUST* be > 1 */
600 preempt_enable_no_resched();
601 return;
602 }
603
604 for (;;) {
605 lock = __rq_lockp(rq);
606 raw_spin_lock_nested(lock, subclass);
607 if (likely(lock == __rq_lockp(rq))) {
608 /* preempt_count *MUST* be > 1 */
609 preempt_enable_no_resched();
610 return;
611 }
612 raw_spin_unlock(lock);
613 }
614}
615
616bool raw_spin_rq_trylock(struct rq *rq)
617{
618 raw_spinlock_t *lock;
619 bool ret;
620
621 /* Matches synchronize_rcu() in __sched_core_enable() */
622 preempt_disable();
623 if (sched_core_disabled()) {
624 ret = raw_spin_trylock(&rq->__lock);
625 preempt_enable();
626 return ret;
627 }
628
629 for (;;) {
630 lock = __rq_lockp(rq);
631 ret = raw_spin_trylock(lock);
632 if (!ret || (likely(lock == __rq_lockp(rq)))) {
633 preempt_enable();
634 return ret;
635 }
636 raw_spin_unlock(lock);
637 }
638}
639
640void raw_spin_rq_unlock(struct rq *rq)
641{
642 raw_spin_unlock(rq_lockp(rq));
643}
644
645#ifdef CONFIG_SMP
646/*
647 * double_rq_lock - safely lock two runqueues
648 */
649void double_rq_lock(struct rq *rq1, struct rq *rq2)
650{
651 lockdep_assert_irqs_disabled();
652
653 if (rq_order_less(rq2, rq1))
654 swap(rq1, rq2);
655
656 raw_spin_rq_lock(rq1);
657 if (__rq_lockp(rq1) != __rq_lockp(rq2))
658 raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
659
660 double_rq_clock_clear_update(rq1, rq2);
661}
662#endif
663
664/*
665 * __task_rq_lock - lock the rq @p resides on.
666 */
667struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
668 __acquires(rq->lock)
669{
670 struct rq *rq;
671
672 lockdep_assert_held(&p->pi_lock);
673
674 for (;;) {
675 rq = task_rq(p);
676 raw_spin_rq_lock(rq);
677 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
678 rq_pin_lock(rq, rf);
679 return rq;
680 }
681 raw_spin_rq_unlock(rq);
682
683 while (unlikely(task_on_rq_migrating(p)))
684 cpu_relax();
685 }
686}
687
688/*
689 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
690 */
691struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
692 __acquires(p->pi_lock)
693 __acquires(rq->lock)
694{
695 struct rq *rq;
696
697 for (;;) {
698 raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
699 rq = task_rq(p);
700 raw_spin_rq_lock(rq);
701 /*
702 * move_queued_task() task_rq_lock()
703 *
704 * ACQUIRE (rq->lock)
705 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
706 * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
707 * [S] ->cpu = new_cpu [L] task_rq()
708 * [L] ->on_rq
709 * RELEASE (rq->lock)
710 *
711 * If we observe the old CPU in task_rq_lock(), the acquire of
712 * the old rq->lock will fully serialize against the stores.
713 *
714 * If we observe the new CPU in task_rq_lock(), the address
715 * dependency headed by '[L] rq = task_rq()' and the acquire
716 * will pair with the WMB to ensure we then also see migrating.
717 */
718 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
719 rq_pin_lock(rq, rf);
720 return rq;
721 }
722 raw_spin_rq_unlock(rq);
723 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
724
725 while (unlikely(task_on_rq_migrating(p)))
726 cpu_relax();
727 }
728}
729
730/*
731 * RQ-clock updating methods:
732 */
733
734static void update_rq_clock_task(struct rq *rq, s64 delta)
735{
736/*
737 * In theory, the compile should just see 0 here, and optimize out the call
738 * to sched_rt_avg_update. But I don't trust it...
739 */
740 s64 __maybe_unused steal = 0, irq_delta = 0;
741
742#ifdef CONFIG_IRQ_TIME_ACCOUNTING
743 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
744
745 /*
746 * Since irq_time is only updated on {soft,}irq_exit, we might run into
747 * this case when a previous update_rq_clock() happened inside a
748 * {soft,}IRQ region.
749 *
750 * When this happens, we stop ->clock_task and only update the
751 * prev_irq_time stamp to account for the part that fit, so that a next
752 * update will consume the rest. This ensures ->clock_task is
753 * monotonic.
754 *
755 * It does however cause some slight miss-attribution of {soft,}IRQ
756 * time, a more accurate solution would be to update the irq_time using
757 * the current rq->clock timestamp, except that would require using
758 * atomic ops.
759 */
760 if (irq_delta > delta)
761 irq_delta = delta;
762
763 rq->prev_irq_time += irq_delta;
764 delta -= irq_delta;
765 delayacct_irq(rq->curr, irq_delta);
766#endif
767#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
768 if (static_key_false((¶virt_steal_rq_enabled))) {
769 u64 prev_steal;
770
771 steal = prev_steal = paravirt_steal_clock(cpu_of(rq));
772 steal -= rq->prev_steal_time_rq;
773
774 if (unlikely(steal > delta))
775 steal = delta;
776
777 rq->prev_steal_time_rq = prev_steal;
778 delta -= steal;
779 }
780#endif
781
782 rq->clock_task += delta;
783
784#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
785 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
786 update_irq_load_avg(rq, irq_delta + steal);
787#endif
788 update_rq_clock_pelt(rq, delta);
789}
790
791void update_rq_clock(struct rq *rq)
792{
793 s64 delta;
794
795 lockdep_assert_rq_held(rq);
796
797 if (rq->clock_update_flags & RQCF_ACT_SKIP)
798 return;
799
800#ifdef CONFIG_SCHED_DEBUG
801 if (sched_feat(WARN_DOUBLE_CLOCK))
802 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
803 rq->clock_update_flags |= RQCF_UPDATED;
804#endif
805
806 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
807 if (delta < 0)
808 return;
809 rq->clock += delta;
810 update_rq_clock_task(rq, delta);
811}
812
813#ifdef CONFIG_SCHED_HRTICK
814/*
815 * Use HR-timers to deliver accurate preemption points.
816 */
817
818static void hrtick_clear(struct rq *rq)
819{
820 if (hrtimer_active(&rq->hrtick_timer))
821 hrtimer_cancel(&rq->hrtick_timer);
822}
823
824/*
825 * High-resolution timer tick.
826 * Runs from hardirq context with interrupts disabled.
827 */
828static enum hrtimer_restart hrtick(struct hrtimer *timer)
829{
830 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
831 struct rq_flags rf;
832
833 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
834
835 rq_lock(rq, &rf);
836 update_rq_clock(rq);
837 rq->donor->sched_class->task_tick(rq, rq->curr, 1);
838 rq_unlock(rq, &rf);
839
840 return HRTIMER_NORESTART;
841}
842
843#ifdef CONFIG_SMP
844
845static void __hrtick_restart(struct rq *rq)
846{
847 struct hrtimer *timer = &rq->hrtick_timer;
848 ktime_t time = rq->hrtick_time;
849
850 hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
851}
852
853/*
854 * called from hardirq (IPI) context
855 */
856static void __hrtick_start(void *arg)
857{
858 struct rq *rq = arg;
859 struct rq_flags rf;
860
861 rq_lock(rq, &rf);
862 __hrtick_restart(rq);
863 rq_unlock(rq, &rf);
864}
865
866/*
867 * Called to set the hrtick timer state.
868 *
869 * called with rq->lock held and IRQs disabled
870 */
871void hrtick_start(struct rq *rq, u64 delay)
872{
873 struct hrtimer *timer = &rq->hrtick_timer;
874 s64 delta;
875
876 /*
877 * Don't schedule slices shorter than 10000ns, that just
878 * doesn't make sense and can cause timer DoS.
879 */
880 delta = max_t(s64, delay, 10000LL);
881 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
882
883 if (rq == this_rq())
884 __hrtick_restart(rq);
885 else
886 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
887}
888
889#else
890/*
891 * Called to set the hrtick timer state.
892 *
893 * called with rq->lock held and IRQs disabled
894 */
895void hrtick_start(struct rq *rq, u64 delay)
896{
897 /*
898 * Don't schedule slices shorter than 10000ns, that just
899 * doesn't make sense. Rely on vruntime for fairness.
900 */
901 delay = max_t(u64, delay, 10000LL);
902 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
903 HRTIMER_MODE_REL_PINNED_HARD);
904}
905
906#endif /* CONFIG_SMP */
907
908static void hrtick_rq_init(struct rq *rq)
909{
910#ifdef CONFIG_SMP
911 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
912#endif
913 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
914 rq->hrtick_timer.function = hrtick;
915}
916#else /* CONFIG_SCHED_HRTICK */
917static inline void hrtick_clear(struct rq *rq)
918{
919}
920
921static inline void hrtick_rq_init(struct rq *rq)
922{
923}
924#endif /* CONFIG_SCHED_HRTICK */
925
926/*
927 * try_cmpxchg based fetch_or() macro so it works for different integer types:
928 */
929#define fetch_or(ptr, mask) \
930 ({ \
931 typeof(ptr) _ptr = (ptr); \
932 typeof(mask) _mask = (mask); \
933 typeof(*_ptr) _val = *_ptr; \
934 \
935 do { \
936 } while (!try_cmpxchg(_ptr, &_val, _val | _mask)); \
937 _val; \
938})
939
940#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
941/*
942 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
943 * this avoids any races wrt polling state changes and thereby avoids
944 * spurious IPIs.
945 */
946static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
947{
948 return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG);
949}
950
951/*
952 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
953 *
954 * If this returns true, then the idle task promises to call
955 * sched_ttwu_pending() and reschedule soon.
956 */
957static bool set_nr_if_polling(struct task_struct *p)
958{
959 struct thread_info *ti = task_thread_info(p);
960 typeof(ti->flags) val = READ_ONCE(ti->flags);
961
962 do {
963 if (!(val & _TIF_POLLING_NRFLAG))
964 return false;
965 if (val & _TIF_NEED_RESCHED)
966 return true;
967 } while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
968
969 return true;
970}
971
972#else
973static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
974{
975 set_ti_thread_flag(ti, tif);
976 return true;
977}
978
979#ifdef CONFIG_SMP
980static inline bool set_nr_if_polling(struct task_struct *p)
981{
982 return false;
983}
984#endif
985#endif
986
987static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
988{
989 struct wake_q_node *node = &task->wake_q;
990
991 /*
992 * Atomically grab the task, if ->wake_q is !nil already it means
993 * it's already queued (either by us or someone else) and will get the
994 * wakeup due to that.
995 *
996 * In order to ensure that a pending wakeup will observe our pending
997 * state, even in the failed case, an explicit smp_mb() must be used.
998 */
999 smp_mb__before_atomic();
1000 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
1001 return false;
1002
1003 /*
1004 * The head is context local, there can be no concurrency.
1005 */
1006 *head->lastp = node;
1007 head->lastp = &node->next;
1008 return true;
1009}
1010
1011/**
1012 * wake_q_add() - queue a wakeup for 'later' waking.
1013 * @head: the wake_q_head to add @task to
1014 * @task: the task to queue for 'later' wakeup
1015 *
1016 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1017 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1018 * instantly.
1019 *
1020 * This function must be used as-if it were wake_up_process(); IOW the task
1021 * must be ready to be woken at this location.
1022 */
1023void wake_q_add(struct wake_q_head *head, struct task_struct *task)
1024{
1025 if (__wake_q_add(head, task))
1026 get_task_struct(task);
1027}
1028
1029/**
1030 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
1031 * @head: the wake_q_head to add @task to
1032 * @task: the task to queue for 'later' wakeup
1033 *
1034 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1035 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1036 * instantly.
1037 *
1038 * This function must be used as-if it were wake_up_process(); IOW the task
1039 * must be ready to be woken at this location.
1040 *
1041 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1042 * that already hold reference to @task can call the 'safe' version and trust
1043 * wake_q to do the right thing depending whether or not the @task is already
1044 * queued for wakeup.
1045 */
1046void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
1047{
1048 if (!__wake_q_add(head, task))
1049 put_task_struct(task);
1050}
1051
1052void wake_up_q(struct wake_q_head *head)
1053{
1054 struct wake_q_node *node = head->first;
1055
1056 while (node != WAKE_Q_TAIL) {
1057 struct task_struct *task;
1058
1059 task = container_of(node, struct task_struct, wake_q);
1060 /* Task can safely be re-inserted now: */
1061 node = node->next;
1062 task->wake_q.next = NULL;
1063
1064 /*
1065 * wake_up_process() executes a full barrier, which pairs with
1066 * the queueing in wake_q_add() so as not to miss wakeups.
1067 */
1068 wake_up_process(task);
1069 put_task_struct(task);
1070 }
1071}
1072
1073/*
1074 * resched_curr - mark rq's current task 'to be rescheduled now'.
1075 *
1076 * On UP this means the setting of the need_resched flag, on SMP it
1077 * might also involve a cross-CPU call to trigger the scheduler on
1078 * the target CPU.
1079 */
1080static void __resched_curr(struct rq *rq, int tif)
1081{
1082 struct task_struct *curr = rq->curr;
1083 struct thread_info *cti = task_thread_info(curr);
1084 int cpu;
1085
1086 lockdep_assert_rq_held(rq);
1087
1088 /*
1089 * Always immediately preempt the idle task; no point in delaying doing
1090 * actual work.
1091 */
1092 if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
1093 tif = TIF_NEED_RESCHED;
1094
1095 if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
1096 return;
1097
1098 cpu = cpu_of(rq);
1099
1100 if (cpu == smp_processor_id()) {
1101 set_ti_thread_flag(cti, tif);
1102 if (tif == TIF_NEED_RESCHED)
1103 set_preempt_need_resched();
1104 return;
1105 }
1106
1107 if (set_nr_and_not_polling(cti, tif)) {
1108 if (tif == TIF_NEED_RESCHED)
1109 smp_send_reschedule(cpu);
1110 } else {
1111 trace_sched_wake_idle_without_ipi(cpu);
1112 }
1113}
1114
1115void resched_curr(struct rq *rq)
1116{
1117 __resched_curr(rq, TIF_NEED_RESCHED);
1118}
1119
1120#ifdef CONFIG_PREEMPT_DYNAMIC
1121static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy);
1122static __always_inline bool dynamic_preempt_lazy(void)
1123{
1124 return static_branch_unlikely(&sk_dynamic_preempt_lazy);
1125}
1126#else
1127static __always_inline bool dynamic_preempt_lazy(void)
1128{
1129 return IS_ENABLED(CONFIG_PREEMPT_LAZY);
1130}
1131#endif
1132
1133static __always_inline int get_lazy_tif_bit(void)
1134{
1135 if (dynamic_preempt_lazy())
1136 return TIF_NEED_RESCHED_LAZY;
1137
1138 return TIF_NEED_RESCHED;
1139}
1140
1141void resched_curr_lazy(struct rq *rq)
1142{
1143 __resched_curr(rq, get_lazy_tif_bit());
1144}
1145
1146void resched_cpu(int cpu)
1147{
1148 struct rq *rq = cpu_rq(cpu);
1149 unsigned long flags;
1150
1151 raw_spin_rq_lock_irqsave(rq, flags);
1152 if (cpu_online(cpu) || cpu == smp_processor_id())
1153 resched_curr(rq);
1154 raw_spin_rq_unlock_irqrestore(rq, flags);
1155}
1156
1157#ifdef CONFIG_SMP
1158#ifdef CONFIG_NO_HZ_COMMON
1159/*
1160 * In the semi idle case, use the nearest busy CPU for migrating timers
1161 * from an idle CPU. This is good for power-savings.
1162 *
1163 * We don't do similar optimization for completely idle system, as
1164 * selecting an idle CPU will add more delays to the timers than intended
1165 * (as that CPU's timer base may not be up to date wrt jiffies etc).
1166 */
1167int get_nohz_timer_target(void)
1168{
1169 int i, cpu = smp_processor_id(), default_cpu = -1;
1170 struct sched_domain *sd;
1171 const struct cpumask *hk_mask;
1172
1173 if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
1174 if (!idle_cpu(cpu))
1175 return cpu;
1176 default_cpu = cpu;
1177 }
1178
1179 hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
1180
1181 guard(rcu)();
1182
1183 for_each_domain(cpu, sd) {
1184 for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
1185 if (cpu == i)
1186 continue;
1187
1188 if (!idle_cpu(i))
1189 return i;
1190 }
1191 }
1192
1193 if (default_cpu == -1)
1194 default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
1195
1196 return default_cpu;
1197}
1198
1199/*
1200 * When add_timer_on() enqueues a timer into the timer wheel of an
1201 * idle CPU then this timer might expire before the next timer event
1202 * which is scheduled to wake up that CPU. In case of a completely
1203 * idle system the next event might even be infinite time into the
1204 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1205 * leaves the inner idle loop so the newly added timer is taken into
1206 * account when the CPU goes back to idle and evaluates the timer
1207 * wheel for the next timer event.
1208 */
1209static void wake_up_idle_cpu(int cpu)
1210{
1211 struct rq *rq = cpu_rq(cpu);
1212
1213 if (cpu == smp_processor_id())
1214 return;
1215
1216 /*
1217 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
1218 * part of the idle loop. This forces an exit from the idle loop
1219 * and a round trip to schedule(). Now this could be optimized
1220 * because a simple new idle loop iteration is enough to
1221 * re-evaluate the next tick. Provided some re-ordering of tick
1222 * nohz functions that would need to follow TIF_NR_POLLING
1223 * clearing:
1224 *
1225 * - On most architectures, a simple fetch_or on ti::flags with a
1226 * "0" value would be enough to know if an IPI needs to be sent.
1227 *
1228 * - x86 needs to perform a last need_resched() check between
1229 * monitor and mwait which doesn't take timers into account.
1230 * There a dedicated TIF_TIMER flag would be required to
1231 * fetch_or here and be checked along with TIF_NEED_RESCHED
1232 * before mwait().
1233 *
1234 * However, remote timer enqueue is not such a frequent event
1235 * and testing of the above solutions didn't appear to report
1236 * much benefits.
1237 */
1238 if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED))
1239 smp_send_reschedule(cpu);
1240 else
1241 trace_sched_wake_idle_without_ipi(cpu);
1242}
1243
1244static bool wake_up_full_nohz_cpu(int cpu)
1245{
1246 /*
1247 * We just need the target to call irq_exit() and re-evaluate
1248 * the next tick. The nohz full kick at least implies that.
1249 * If needed we can still optimize that later with an
1250 * empty IRQ.
1251 */
1252 if (cpu_is_offline(cpu))
1253 return true; /* Don't try to wake offline CPUs. */
1254 if (tick_nohz_full_cpu(cpu)) {
1255 if (cpu != smp_processor_id() ||
1256 tick_nohz_tick_stopped())
1257 tick_nohz_full_kick_cpu(cpu);
1258 return true;
1259 }
1260
1261 return false;
1262}
1263
1264/*
1265 * Wake up the specified CPU. If the CPU is going offline, it is the
1266 * caller's responsibility to deal with the lost wakeup, for example,
1267 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
1268 */
1269void wake_up_nohz_cpu(int cpu)
1270{
1271 if (!wake_up_full_nohz_cpu(cpu))
1272 wake_up_idle_cpu(cpu);
1273}
1274
1275static void nohz_csd_func(void *info)
1276{
1277 struct rq *rq = info;
1278 int cpu = cpu_of(rq);
1279 unsigned int flags;
1280
1281 /*
1282 * Release the rq::nohz_csd.
1283 */
1284 flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
1285 WARN_ON(!(flags & NOHZ_KICK_MASK));
1286
1287 rq->idle_balance = idle_cpu(cpu);
1288 if (rq->idle_balance) {
1289 rq->nohz_idle_balance = flags;
1290 __raise_softirq_irqoff(SCHED_SOFTIRQ);
1291 }
1292}
1293
1294#endif /* CONFIG_NO_HZ_COMMON */
1295
1296#ifdef CONFIG_NO_HZ_FULL
1297static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
1298{
1299 if (rq->nr_running != 1)
1300 return false;
1301
1302 if (p->sched_class != &fair_sched_class)
1303 return false;
1304
1305 if (!task_on_rq_queued(p))
1306 return false;
1307
1308 return true;
1309}
1310
1311bool sched_can_stop_tick(struct rq *rq)
1312{
1313 int fifo_nr_running;
1314
1315 /* Deadline tasks, even if single, need the tick */
1316 if (rq->dl.dl_nr_running)
1317 return false;
1318
1319 /*
1320 * If there are more than one RR tasks, we need the tick to affect the
1321 * actual RR behaviour.
1322 */
1323 if (rq->rt.rr_nr_running) {
1324 if (rq->rt.rr_nr_running == 1)
1325 return true;
1326 else
1327 return false;
1328 }
1329
1330 /*
1331 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
1332 * forced preemption between FIFO tasks.
1333 */
1334 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
1335 if (fifo_nr_running)
1336 return true;
1337
1338 /*
1339 * If there are no DL,RR/FIFO tasks, there must only be CFS or SCX tasks
1340 * left. For CFS, if there's more than one we need the tick for
1341 * involuntary preemption. For SCX, ask.
1342 */
1343 if (scx_enabled() && !scx_can_stop_tick(rq))
1344 return false;
1345
1346 if (rq->cfs.h_nr_running > 1)
1347 return false;
1348
1349 /*
1350 * If there is one task and it has CFS runtime bandwidth constraints
1351 * and it's on the cpu now we don't want to stop the tick.
1352 * This check prevents clearing the bit if a newly enqueued task here is
1353 * dequeued by migrating while the constrained task continues to run.
1354 * E.g. going from 2->1 without going through pick_next_task().
1355 */
1356 if (__need_bw_check(rq, rq->curr)) {
1357 if (cfs_task_bw_constrained(rq->curr))
1358 return false;
1359 }
1360
1361 return true;
1362}
1363#endif /* CONFIG_NO_HZ_FULL */
1364#endif /* CONFIG_SMP */
1365
1366#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
1367 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
1368/*
1369 * Iterate task_group tree rooted at *from, calling @down when first entering a
1370 * node and @up when leaving it for the final time.
1371 *
1372 * Caller must hold rcu_lock or sufficient equivalent.
1373 */
1374int walk_tg_tree_from(struct task_group *from,
1375 tg_visitor down, tg_visitor up, void *data)
1376{
1377 struct task_group *parent, *child;
1378 int ret;
1379
1380 parent = from;
1381
1382down:
1383 ret = (*down)(parent, data);
1384 if (ret)
1385 goto out;
1386 list_for_each_entry_rcu(child, &parent->children, siblings) {
1387 parent = child;
1388 goto down;
1389
1390up:
1391 continue;
1392 }
1393 ret = (*up)(parent, data);
1394 if (ret || parent == from)
1395 goto out;
1396
1397 child = parent;
1398 parent = parent->parent;
1399 if (parent)
1400 goto up;
1401out:
1402 return ret;
1403}
1404
1405int tg_nop(struct task_group *tg, void *data)
1406{
1407 return 0;
1408}
1409#endif
1410
1411void set_load_weight(struct task_struct *p, bool update_load)
1412{
1413 int prio = p->static_prio - MAX_RT_PRIO;
1414 struct load_weight lw;
1415
1416 if (task_has_idle_policy(p)) {
1417 lw.weight = scale_load(WEIGHT_IDLEPRIO);
1418 lw.inv_weight = WMULT_IDLEPRIO;
1419 } else {
1420 lw.weight = scale_load(sched_prio_to_weight[prio]);
1421 lw.inv_weight = sched_prio_to_wmult[prio];
1422 }
1423
1424 /*
1425 * SCHED_OTHER tasks have to update their load when changing their
1426 * weight
1427 */
1428 if (update_load && p->sched_class->reweight_task)
1429 p->sched_class->reweight_task(task_rq(p), p, &lw);
1430 else
1431 p->se.load = lw;
1432}
1433
1434#ifdef CONFIG_UCLAMP_TASK
1435/*
1436 * Serializes updates of utilization clamp values
1437 *
1438 * The (slow-path) user-space triggers utilization clamp value updates which
1439 * can require updates on (fast-path) scheduler's data structures used to
1440 * support enqueue/dequeue operations.
1441 * While the per-CPU rq lock protects fast-path update operations, user-space
1442 * requests are serialized using a mutex to reduce the risk of conflicting
1443 * updates or API abuses.
1444 */
1445static __maybe_unused DEFINE_MUTEX(uclamp_mutex);
1446
1447/* Max allowed minimum utilization */
1448static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
1449
1450/* Max allowed maximum utilization */
1451static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
1452
1453/*
1454 * By default RT tasks run at the maximum performance point/capacity of the
1455 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
1456 * SCHED_CAPACITY_SCALE.
1457 *
1458 * This knob allows admins to change the default behavior when uclamp is being
1459 * used. In battery powered devices, particularly, running at the maximum
1460 * capacity and frequency will increase energy consumption and shorten the
1461 * battery life.
1462 *
1463 * This knob only affects RT tasks that their uclamp_se->user_defined == false.
1464 *
1465 * This knob will not override the system default sched_util_clamp_min defined
1466 * above.
1467 */
1468unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
1469
1470/* All clamps are required to be less or equal than these values */
1471static struct uclamp_se uclamp_default[UCLAMP_CNT];
1472
1473/*
1474 * This static key is used to reduce the uclamp overhead in the fast path. It
1475 * primarily disables the call to uclamp_rq_{inc, dec}() in
1476 * enqueue/dequeue_task().
1477 *
1478 * This allows users to continue to enable uclamp in their kernel config with
1479 * minimum uclamp overhead in the fast path.
1480 *
1481 * As soon as userspace modifies any of the uclamp knobs, the static key is
1482 * enabled, since we have an actual users that make use of uclamp
1483 * functionality.
1484 *
1485 * The knobs that would enable this static key are:
1486 *
1487 * * A task modifying its uclamp value with sched_setattr().
1488 * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
1489 * * An admin modifying the cgroup cpu.uclamp.{min, max}
1490 */
1491DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
1492
1493static inline unsigned int
1494uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
1495 unsigned int clamp_value)
1496{
1497 /*
1498 * Avoid blocked utilization pushing up the frequency when we go
1499 * idle (which drops the max-clamp) by retaining the last known
1500 * max-clamp.
1501 */
1502 if (clamp_id == UCLAMP_MAX) {
1503 rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1504 return clamp_value;
1505 }
1506
1507 return uclamp_none(UCLAMP_MIN);
1508}
1509
1510static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
1511 unsigned int clamp_value)
1512{
1513 /* Reset max-clamp retention only on idle exit */
1514 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1515 return;
1516
1517 uclamp_rq_set(rq, clamp_id, clamp_value);
1518}
1519
1520static inline
1521unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
1522 unsigned int clamp_value)
1523{
1524 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1525 int bucket_id = UCLAMP_BUCKETS - 1;
1526
1527 /*
1528 * Since both min and max clamps are max aggregated, find the
1529 * top most bucket with tasks in.
1530 */
1531 for ( ; bucket_id >= 0; bucket_id--) {
1532 if (!bucket[bucket_id].tasks)
1533 continue;
1534 return bucket[bucket_id].value;
1535 }
1536
1537 /* No tasks -- default clamp values */
1538 return uclamp_idle_value(rq, clamp_id, clamp_value);
1539}
1540
1541static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1542{
1543 unsigned int default_util_min;
1544 struct uclamp_se *uc_se;
1545
1546 lockdep_assert_held(&p->pi_lock);
1547
1548 uc_se = &p->uclamp_req[UCLAMP_MIN];
1549
1550 /* Only sync if user didn't override the default */
1551 if (uc_se->user_defined)
1552 return;
1553
1554 default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1555 uclamp_se_set(uc_se, default_util_min, false);
1556}
1557
1558static void uclamp_update_util_min_rt_default(struct task_struct *p)
1559{
1560 if (!rt_task(p))
1561 return;
1562
1563 /* Protect updates to p->uclamp_* */
1564 guard(task_rq_lock)(p);
1565 __uclamp_update_util_min_rt_default(p);
1566}
1567
1568static inline struct uclamp_se
1569uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1570{
1571 /* Copy by value as we could modify it */
1572 struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1573#ifdef CONFIG_UCLAMP_TASK_GROUP
1574 unsigned int tg_min, tg_max, value;
1575
1576 /*
1577 * Tasks in autogroups or root task group will be
1578 * restricted by system defaults.
1579 */
1580 if (task_group_is_autogroup(task_group(p)))
1581 return uc_req;
1582 if (task_group(p) == &root_task_group)
1583 return uc_req;
1584
1585 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1586 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1587 value = uc_req.value;
1588 value = clamp(value, tg_min, tg_max);
1589 uclamp_se_set(&uc_req, value, false);
1590#endif
1591
1592 return uc_req;
1593}
1594
1595/*
1596 * The effective clamp bucket index of a task depends on, by increasing
1597 * priority:
1598 * - the task specific clamp value, when explicitly requested from userspace
1599 * - the task group effective clamp value, for tasks not either in the root
1600 * group or in an autogroup
1601 * - the system default clamp value, defined by the sysadmin
1602 */
1603static inline struct uclamp_se
1604uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1605{
1606 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1607 struct uclamp_se uc_max = uclamp_default[clamp_id];
1608
1609 /* System default restrictions always apply */
1610 if (unlikely(uc_req.value > uc_max.value))
1611 return uc_max;
1612
1613 return uc_req;
1614}
1615
1616unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1617{
1618 struct uclamp_se uc_eff;
1619
1620 /* Task currently refcounted: use back-annotated (effective) value */
1621 if (p->uclamp[clamp_id].active)
1622 return (unsigned long)p->uclamp[clamp_id].value;
1623
1624 uc_eff = uclamp_eff_get(p, clamp_id);
1625
1626 return (unsigned long)uc_eff.value;
1627}
1628
1629/*
1630 * When a task is enqueued on a rq, the clamp bucket currently defined by the
1631 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1632 * updates the rq's clamp value if required.
1633 *
1634 * Tasks can have a task-specific value requested from user-space, track
1635 * within each bucket the maximum value for tasks refcounted in it.
1636 * This "local max aggregation" allows to track the exact "requested" value
1637 * for each bucket when all its RUNNABLE tasks require the same clamp.
1638 */
1639static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1640 enum uclamp_id clamp_id)
1641{
1642 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1643 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1644 struct uclamp_bucket *bucket;
1645
1646 lockdep_assert_rq_held(rq);
1647
1648 /* Update task effective clamp */
1649 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1650
1651 bucket = &uc_rq->bucket[uc_se->bucket_id];
1652 bucket->tasks++;
1653 uc_se->active = true;
1654
1655 uclamp_idle_reset(rq, clamp_id, uc_se->value);
1656
1657 /*
1658 * Local max aggregation: rq buckets always track the max
1659 * "requested" clamp value of its RUNNABLE tasks.
1660 */
1661 if (bucket->tasks == 1 || uc_se->value > bucket->value)
1662 bucket->value = uc_se->value;
1663
1664 if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1665 uclamp_rq_set(rq, clamp_id, uc_se->value);
1666}
1667
1668/*
1669 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1670 * is released. If this is the last task reference counting the rq's max
1671 * active clamp value, then the rq's clamp value is updated.
1672 *
1673 * Both refcounted tasks and rq's cached clamp values are expected to be
1674 * always valid. If it's detected they are not, as defensive programming,
1675 * enforce the expected state and warn.
1676 */
1677static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1678 enum uclamp_id clamp_id)
1679{
1680 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1681 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1682 struct uclamp_bucket *bucket;
1683 unsigned int bkt_clamp;
1684 unsigned int rq_clamp;
1685
1686 lockdep_assert_rq_held(rq);
1687
1688 /*
1689 * If sched_uclamp_used was enabled after task @p was enqueued,
1690 * we could end up with unbalanced call to uclamp_rq_dec_id().
1691 *
1692 * In this case the uc_se->active flag should be false since no uclamp
1693 * accounting was performed at enqueue time and we can just return
1694 * here.
1695 *
1696 * Need to be careful of the following enqueue/dequeue ordering
1697 * problem too
1698 *
1699 * enqueue(taskA)
1700 * // sched_uclamp_used gets enabled
1701 * enqueue(taskB)
1702 * dequeue(taskA)
1703 * // Must not decrement bucket->tasks here
1704 * dequeue(taskB)
1705 *
1706 * where we could end up with stale data in uc_se and
1707 * bucket[uc_se->bucket_id].
1708 *
1709 * The following check here eliminates the possibility of such race.
1710 */
1711 if (unlikely(!uc_se->active))
1712 return;
1713
1714 bucket = &uc_rq->bucket[uc_se->bucket_id];
1715
1716 SCHED_WARN_ON(!bucket->tasks);
1717 if (likely(bucket->tasks))
1718 bucket->tasks--;
1719
1720 uc_se->active = false;
1721
1722 /*
1723 * Keep "local max aggregation" simple and accept to (possibly)
1724 * overboost some RUNNABLE tasks in the same bucket.
1725 * The rq clamp bucket value is reset to its base value whenever
1726 * there are no more RUNNABLE tasks refcounting it.
1727 */
1728 if (likely(bucket->tasks))
1729 return;
1730
1731 rq_clamp = uclamp_rq_get(rq, clamp_id);
1732 /*
1733 * Defensive programming: this should never happen. If it happens,
1734 * e.g. due to future modification, warn and fix up the expected value.
1735 */
1736 SCHED_WARN_ON(bucket->value > rq_clamp);
1737 if (bucket->value >= rq_clamp) {
1738 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1739 uclamp_rq_set(rq, clamp_id, bkt_clamp);
1740 }
1741}
1742
1743static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1744{
1745 enum uclamp_id clamp_id;
1746
1747 /*
1748 * Avoid any overhead until uclamp is actually used by the userspace.
1749 *
1750 * The condition is constructed such that a NOP is generated when
1751 * sched_uclamp_used is disabled.
1752 */
1753 if (!static_branch_unlikely(&sched_uclamp_used))
1754 return;
1755
1756 if (unlikely(!p->sched_class->uclamp_enabled))
1757 return;
1758
1759 if (p->se.sched_delayed)
1760 return;
1761
1762 for_each_clamp_id(clamp_id)
1763 uclamp_rq_inc_id(rq, p, clamp_id);
1764
1765 /* Reset clamp idle holding when there is one RUNNABLE task */
1766 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1767 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1768}
1769
1770static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1771{
1772 enum uclamp_id clamp_id;
1773
1774 /*
1775 * Avoid any overhead until uclamp is actually used by the userspace.
1776 *
1777 * The condition is constructed such that a NOP is generated when
1778 * sched_uclamp_used is disabled.
1779 */
1780 if (!static_branch_unlikely(&sched_uclamp_used))
1781 return;
1782
1783 if (unlikely(!p->sched_class->uclamp_enabled))
1784 return;
1785
1786 if (p->se.sched_delayed)
1787 return;
1788
1789 for_each_clamp_id(clamp_id)
1790 uclamp_rq_dec_id(rq, p, clamp_id);
1791}
1792
1793static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1794 enum uclamp_id clamp_id)
1795{
1796 if (!p->uclamp[clamp_id].active)
1797 return;
1798
1799 uclamp_rq_dec_id(rq, p, clamp_id);
1800 uclamp_rq_inc_id(rq, p, clamp_id);
1801
1802 /*
1803 * Make sure to clear the idle flag if we've transiently reached 0
1804 * active tasks on rq.
1805 */
1806 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1807 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1808}
1809
1810static inline void
1811uclamp_update_active(struct task_struct *p)
1812{
1813 enum uclamp_id clamp_id;
1814 struct rq_flags rf;
1815 struct rq *rq;
1816
1817 /*
1818 * Lock the task and the rq where the task is (or was) queued.
1819 *
1820 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1821 * price to pay to safely serialize util_{min,max} updates with
1822 * enqueues, dequeues and migration operations.
1823 * This is the same locking schema used by __set_cpus_allowed_ptr().
1824 */
1825 rq = task_rq_lock(p, &rf);
1826
1827 /*
1828 * Setting the clamp bucket is serialized by task_rq_lock().
1829 * If the task is not yet RUNNABLE and its task_struct is not
1830 * affecting a valid clamp bucket, the next time it's enqueued,
1831 * it will already see the updated clamp bucket value.
1832 */
1833 for_each_clamp_id(clamp_id)
1834 uclamp_rq_reinc_id(rq, p, clamp_id);
1835
1836 task_rq_unlock(rq, p, &rf);
1837}
1838
1839#ifdef CONFIG_UCLAMP_TASK_GROUP
1840static inline void
1841uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1842{
1843 struct css_task_iter it;
1844 struct task_struct *p;
1845
1846 css_task_iter_start(css, 0, &it);
1847 while ((p = css_task_iter_next(&it)))
1848 uclamp_update_active(p);
1849 css_task_iter_end(&it);
1850}
1851
1852static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1853#endif
1854
1855#ifdef CONFIG_SYSCTL
1856#ifdef CONFIG_UCLAMP_TASK_GROUP
1857static void uclamp_update_root_tg(void)
1858{
1859 struct task_group *tg = &root_task_group;
1860
1861 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1862 sysctl_sched_uclamp_util_min, false);
1863 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1864 sysctl_sched_uclamp_util_max, false);
1865
1866 guard(rcu)();
1867 cpu_util_update_eff(&root_task_group.css);
1868}
1869#else
1870static void uclamp_update_root_tg(void) { }
1871#endif
1872
1873static void uclamp_sync_util_min_rt_default(void)
1874{
1875 struct task_struct *g, *p;
1876
1877 /*
1878 * copy_process() sysctl_uclamp
1879 * uclamp_min_rt = X;
1880 * write_lock(&tasklist_lock) read_lock(&tasklist_lock)
1881 * // link thread smp_mb__after_spinlock()
1882 * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
1883 * sched_post_fork() for_each_process_thread()
1884 * __uclamp_sync_rt() __uclamp_sync_rt()
1885 *
1886 * Ensures that either sched_post_fork() will observe the new
1887 * uclamp_min_rt or for_each_process_thread() will observe the new
1888 * task.
1889 */
1890 read_lock(&tasklist_lock);
1891 smp_mb__after_spinlock();
1892 read_unlock(&tasklist_lock);
1893
1894 guard(rcu)();
1895 for_each_process_thread(g, p)
1896 uclamp_update_util_min_rt_default(p);
1897}
1898
1899static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
1900 void *buffer, size_t *lenp, loff_t *ppos)
1901{
1902 bool update_root_tg = false;
1903 int old_min, old_max, old_min_rt;
1904 int result;
1905
1906 guard(mutex)(&uclamp_mutex);
1907
1908 old_min = sysctl_sched_uclamp_util_min;
1909 old_max = sysctl_sched_uclamp_util_max;
1910 old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1911
1912 result = proc_dointvec(table, write, buffer, lenp, ppos);
1913 if (result)
1914 goto undo;
1915 if (!write)
1916 return 0;
1917
1918 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1919 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
1920 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1921
1922 result = -EINVAL;
1923 goto undo;
1924 }
1925
1926 if (old_min != sysctl_sched_uclamp_util_min) {
1927 uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1928 sysctl_sched_uclamp_util_min, false);
1929 update_root_tg = true;
1930 }
1931 if (old_max != sysctl_sched_uclamp_util_max) {
1932 uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1933 sysctl_sched_uclamp_util_max, false);
1934 update_root_tg = true;
1935 }
1936
1937 if (update_root_tg) {
1938 static_branch_enable(&sched_uclamp_used);
1939 uclamp_update_root_tg();
1940 }
1941
1942 if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1943 static_branch_enable(&sched_uclamp_used);
1944 uclamp_sync_util_min_rt_default();
1945 }
1946
1947 /*
1948 * We update all RUNNABLE tasks only when task groups are in use.
1949 * Otherwise, keep it simple and do just a lazy update at each next
1950 * task enqueue time.
1951 */
1952 return 0;
1953
1954undo:
1955 sysctl_sched_uclamp_util_min = old_min;
1956 sysctl_sched_uclamp_util_max = old_max;
1957 sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1958 return result;
1959}
1960#endif
1961
1962static void uclamp_fork(struct task_struct *p)
1963{
1964 enum uclamp_id clamp_id;
1965
1966 /*
1967 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1968 * as the task is still at its early fork stages.
1969 */
1970 for_each_clamp_id(clamp_id)
1971 p->uclamp[clamp_id].active = false;
1972
1973 if (likely(!p->sched_reset_on_fork))
1974 return;
1975
1976 for_each_clamp_id(clamp_id) {
1977 uclamp_se_set(&p->uclamp_req[clamp_id],
1978 uclamp_none(clamp_id), false);
1979 }
1980}
1981
1982static void uclamp_post_fork(struct task_struct *p)
1983{
1984 uclamp_update_util_min_rt_default(p);
1985}
1986
1987static void __init init_uclamp_rq(struct rq *rq)
1988{
1989 enum uclamp_id clamp_id;
1990 struct uclamp_rq *uc_rq = rq->uclamp;
1991
1992 for_each_clamp_id(clamp_id) {
1993 uc_rq[clamp_id] = (struct uclamp_rq) {
1994 .value = uclamp_none(clamp_id)
1995 };
1996 }
1997
1998 rq->uclamp_flags = UCLAMP_FLAG_IDLE;
1999}
2000
2001static void __init init_uclamp(void)
2002{
2003 struct uclamp_se uc_max = {};
2004 enum uclamp_id clamp_id;
2005 int cpu;
2006
2007 for_each_possible_cpu(cpu)
2008 init_uclamp_rq(cpu_rq(cpu));
2009
2010 for_each_clamp_id(clamp_id) {
2011 uclamp_se_set(&init_task.uclamp_req[clamp_id],
2012 uclamp_none(clamp_id), false);
2013 }
2014
2015 /* System defaults allow max clamp values for both indexes */
2016 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2017 for_each_clamp_id(clamp_id) {
2018 uclamp_default[clamp_id] = uc_max;
2019#ifdef CONFIG_UCLAMP_TASK_GROUP
2020 root_task_group.uclamp_req[clamp_id] = uc_max;
2021 root_task_group.uclamp[clamp_id] = uc_max;
2022#endif
2023 }
2024}
2025
2026#else /* !CONFIG_UCLAMP_TASK */
2027static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
2028static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
2029static inline void uclamp_fork(struct task_struct *p) { }
2030static inline void uclamp_post_fork(struct task_struct *p) { }
2031static inline void init_uclamp(void) { }
2032#endif /* CONFIG_UCLAMP_TASK */
2033
2034bool sched_task_on_rq(struct task_struct *p)
2035{
2036 return task_on_rq_queued(p);
2037}
2038
2039unsigned long get_wchan(struct task_struct *p)
2040{
2041 unsigned long ip = 0;
2042 unsigned int state;
2043
2044 if (!p || p == current)
2045 return 0;
2046
2047 /* Only get wchan if task is blocked and we can keep it that way. */
2048 raw_spin_lock_irq(&p->pi_lock);
2049 state = READ_ONCE(p->__state);
2050 smp_rmb(); /* see try_to_wake_up() */
2051 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2052 ip = __get_wchan(p);
2053 raw_spin_unlock_irq(&p->pi_lock);
2054
2055 return ip;
2056}
2057
2058void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2059{
2060 if (!(flags & ENQUEUE_NOCLOCK))
2061 update_rq_clock(rq);
2062
2063 p->sched_class->enqueue_task(rq, p, flags);
2064 /*
2065 * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear
2066 * ->sched_delayed.
2067 */
2068 uclamp_rq_inc(rq, p);
2069
2070 psi_enqueue(p, flags);
2071
2072 if (!(flags & ENQUEUE_RESTORE))
2073 sched_info_enqueue(rq, p);
2074
2075 if (sched_core_enabled(rq))
2076 sched_core_enqueue(rq, p);
2077}
2078
2079/*
2080 * Must only return false when DEQUEUE_SLEEP.
2081 */
2082inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2083{
2084 if (sched_core_enabled(rq))
2085 sched_core_dequeue(rq, p, flags);
2086
2087 if (!(flags & DEQUEUE_NOCLOCK))
2088 update_rq_clock(rq);
2089
2090 if (!(flags & DEQUEUE_SAVE))
2091 sched_info_dequeue(rq, p);
2092
2093 psi_dequeue(p, flags);
2094
2095 /*
2096 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
2097 * and mark the task ->sched_delayed.
2098 */
2099 uclamp_rq_dec(rq, p);
2100 return p->sched_class->dequeue_task(rq, p, flags);
2101}
2102
2103void activate_task(struct rq *rq, struct task_struct *p, int flags)
2104{
2105 if (task_on_rq_migrating(p))
2106 flags |= ENQUEUE_MIGRATED;
2107 if (flags & ENQUEUE_MIGRATED)
2108 sched_mm_cid_migrate_to(rq, p);
2109
2110 enqueue_task(rq, p, flags);
2111
2112 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2113 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2114}
2115
2116void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2117{
2118 SCHED_WARN_ON(flags & DEQUEUE_SLEEP);
2119
2120 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
2121 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2122
2123 /*
2124 * Code explicitly relies on TASK_ON_RQ_MIGRATING begin set *before*
2125 * dequeue_task() and cleared *after* enqueue_task().
2126 */
2127
2128 dequeue_task(rq, p, flags);
2129}
2130
2131static void block_task(struct rq *rq, struct task_struct *p, int flags)
2132{
2133 if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
2134 __block_task(rq, p);
2135}
2136
2137/**
2138 * task_curr - is this task currently executing on a CPU?
2139 * @p: the task in question.
2140 *
2141 * Return: 1 if the task is currently executing. 0 otherwise.
2142 */
2143inline int task_curr(const struct task_struct *p)
2144{
2145 return cpu_curr(task_cpu(p)) == p;
2146}
2147
2148/*
2149 * ->switching_to() is called with the pi_lock and rq_lock held and must not
2150 * mess with locking.
2151 */
2152void check_class_changing(struct rq *rq, struct task_struct *p,
2153 const struct sched_class *prev_class)
2154{
2155 if (prev_class != p->sched_class && p->sched_class->switching_to)
2156 p->sched_class->switching_to(rq, p);
2157}
2158
2159/*
2160 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2161 * use the balance_callback list if you want balancing.
2162 *
2163 * this means any call to check_class_changed() must be followed by a call to
2164 * balance_callback().
2165 */
2166void check_class_changed(struct rq *rq, struct task_struct *p,
2167 const struct sched_class *prev_class,
2168 int oldprio)
2169{
2170 if (prev_class != p->sched_class) {
2171 if (prev_class->switched_from)
2172 prev_class->switched_from(rq, p);
2173
2174 p->sched_class->switched_to(rq, p);
2175 } else if (oldprio != p->prio || dl_task(p))
2176 p->sched_class->prio_changed(rq, p, oldprio);
2177}
2178
2179void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2180{
2181 struct task_struct *donor = rq->donor;
2182
2183 if (p->sched_class == donor->sched_class)
2184 donor->sched_class->wakeup_preempt(rq, p, flags);
2185 else if (sched_class_above(p->sched_class, donor->sched_class))
2186 resched_curr(rq);
2187
2188 /*
2189 * A queue event has occurred, and we're going to schedule. In
2190 * this case, we can save a useless back to back clock update.
2191 */
2192 if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr))
2193 rq_clock_skip_update(rq);
2194}
2195
2196static __always_inline
2197int __task_state_match(struct task_struct *p, unsigned int state)
2198{
2199 if (READ_ONCE(p->__state) & state)
2200 return 1;
2201
2202 if (READ_ONCE(p->saved_state) & state)
2203 return -1;
2204
2205 return 0;
2206}
2207
2208static __always_inline
2209int task_state_match(struct task_struct *p, unsigned int state)
2210{
2211 /*
2212 * Serialize against current_save_and_set_rtlock_wait_state(),
2213 * current_restore_rtlock_saved_state(), and __refrigerator().
2214 */
2215 guard(raw_spinlock_irq)(&p->pi_lock);
2216 return __task_state_match(p, state);
2217}
2218
2219/*
2220 * wait_task_inactive - wait for a thread to unschedule.
2221 *
2222 * Wait for the thread to block in any of the states set in @match_state.
2223 * If it changes, i.e. @p might have woken up, then return zero. When we
2224 * succeed in waiting for @p to be off its CPU, we return a positive number
2225 * (its total switch count). If a second call a short while later returns the
2226 * same number, the caller can be sure that @p has remained unscheduled the
2227 * whole time.
2228 *
2229 * The caller must ensure that the task *will* unschedule sometime soon,
2230 * else this function might spin for a *long* time. This function can't
2231 * be called with interrupts off, or it may introduce deadlock with
2232 * smp_call_function() if an IPI is sent by the same process we are
2233 * waiting to become inactive.
2234 */
2235unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2236{
2237 int running, queued, match;
2238 struct rq_flags rf;
2239 unsigned long ncsw;
2240 struct rq *rq;
2241
2242 for (;;) {
2243 /*
2244 * We do the initial early heuristics without holding
2245 * any task-queue locks at all. We'll only try to get
2246 * the runqueue lock when things look like they will
2247 * work out!
2248 */
2249 rq = task_rq(p);
2250
2251 /*
2252 * If the task is actively running on another CPU
2253 * still, just relax and busy-wait without holding
2254 * any locks.
2255 *
2256 * NOTE! Since we don't hold any locks, it's not
2257 * even sure that "rq" stays as the right runqueue!
2258 * But we don't care, since "task_on_cpu()" will
2259 * return false if the runqueue has changed and p
2260 * is actually now running somewhere else!
2261 */
2262 while (task_on_cpu(rq, p)) {
2263 if (!task_state_match(p, match_state))
2264 return 0;
2265 cpu_relax();
2266 }
2267
2268 /*
2269 * Ok, time to look more closely! We need the rq
2270 * lock now, to be *sure*. If we're wrong, we'll
2271 * just go back and repeat.
2272 */
2273 rq = task_rq_lock(p, &rf);
2274 trace_sched_wait_task(p);
2275 running = task_on_cpu(rq, p);
2276 queued = task_on_rq_queued(p);
2277 ncsw = 0;
2278 if ((match = __task_state_match(p, match_state))) {
2279 /*
2280 * When matching on p->saved_state, consider this task
2281 * still queued so it will wait.
2282 */
2283 if (match < 0)
2284 queued = 1;
2285 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2286 }
2287 task_rq_unlock(rq, p, &rf);
2288
2289 /*
2290 * If it changed from the expected state, bail out now.
2291 */
2292 if (unlikely(!ncsw))
2293 break;
2294
2295 /*
2296 * Was it really running after all now that we
2297 * checked with the proper locks actually held?
2298 *
2299 * Oops. Go back and try again..
2300 */
2301 if (unlikely(running)) {
2302 cpu_relax();
2303 continue;
2304 }
2305
2306 /*
2307 * It's not enough that it's not actively running,
2308 * it must be off the runqueue _entirely_, and not
2309 * preempted!
2310 *
2311 * So if it was still runnable (but just not actively
2312 * running right now), it's preempted, and we should
2313 * yield - it could be a while.
2314 */
2315 if (unlikely(queued)) {
2316 ktime_t to = NSEC_PER_SEC / HZ;
2317
2318 set_current_state(TASK_UNINTERRUPTIBLE);
2319 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2320 continue;
2321 }
2322
2323 /*
2324 * Ahh, all good. It wasn't running, and it wasn't
2325 * runnable, which means that it will never become
2326 * running in the future either. We're all done!
2327 */
2328 break;
2329 }
2330
2331 return ncsw;
2332}
2333
2334#ifdef CONFIG_SMP
2335
2336static void
2337__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2338
2339static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2340{
2341 struct affinity_context ac = {
2342 .new_mask = cpumask_of(rq->cpu),
2343 .flags = SCA_MIGRATE_DISABLE,
2344 };
2345
2346 if (likely(!p->migration_disabled))
2347 return;
2348
2349 if (p->cpus_ptr != &p->cpus_mask)
2350 return;
2351
2352 /*
2353 * Violates locking rules! See comment in __do_set_cpus_allowed().
2354 */
2355 __do_set_cpus_allowed(p, &ac);
2356}
2357
2358void migrate_disable(void)
2359{
2360 struct task_struct *p = current;
2361
2362 if (p->migration_disabled) {
2363#ifdef CONFIG_DEBUG_PREEMPT
2364 /*
2365 *Warn about overflow half-way through the range.
2366 */
2367 WARN_ON_ONCE((s16)p->migration_disabled < 0);
2368#endif
2369 p->migration_disabled++;
2370 return;
2371 }
2372
2373 guard(preempt)();
2374 this_rq()->nr_pinned++;
2375 p->migration_disabled = 1;
2376}
2377EXPORT_SYMBOL_GPL(migrate_disable);
2378
2379void migrate_enable(void)
2380{
2381 struct task_struct *p = current;
2382 struct affinity_context ac = {
2383 .new_mask = &p->cpus_mask,
2384 .flags = SCA_MIGRATE_ENABLE,
2385 };
2386
2387#ifdef CONFIG_DEBUG_PREEMPT
2388 /*
2389 * Check both overflow from migrate_disable() and superfluous
2390 * migrate_enable().
2391 */
2392 if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
2393 return;
2394#endif
2395
2396 if (p->migration_disabled > 1) {
2397 p->migration_disabled--;
2398 return;
2399 }
2400
2401 /*
2402 * Ensure stop_task runs either before or after this, and that
2403 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
2404 */
2405 guard(preempt)();
2406 if (p->cpus_ptr != &p->cpus_mask)
2407 __set_cpus_allowed_ptr(p, &ac);
2408 /*
2409 * Mustn't clear migration_disabled() until cpus_ptr points back at the
2410 * regular cpus_mask, otherwise things that race (eg.
2411 * select_fallback_rq) get confused.
2412 */
2413 barrier();
2414 p->migration_disabled = 0;
2415 this_rq()->nr_pinned--;
2416}
2417EXPORT_SYMBOL_GPL(migrate_enable);
2418
2419static inline bool rq_has_pinned_tasks(struct rq *rq)
2420{
2421 return rq->nr_pinned;
2422}
2423
2424/*
2425 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2426 * __set_cpus_allowed_ptr() and select_fallback_rq().
2427 */
2428static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2429{
2430 /* When not in the task's cpumask, no point in looking further. */
2431 if (!task_allowed_on_cpu(p, cpu))
2432 return false;
2433
2434 /* migrate_disabled() must be allowed to finish. */
2435 if (is_migration_disabled(p))
2436 return cpu_online(cpu);
2437
2438 /* Non kernel threads are not allowed during either online or offline. */
2439 if (!(p->flags & PF_KTHREAD))
2440 return cpu_active(cpu);
2441
2442 /* KTHREAD_IS_PER_CPU is always allowed. */
2443 if (kthread_is_per_cpu(p))
2444 return cpu_online(cpu);
2445
2446 /* Regular kernel threads don't get to stay during offline. */
2447 if (cpu_dying(cpu))
2448 return false;
2449
2450 /* But are allowed during online. */
2451 return cpu_online(cpu);
2452}
2453
2454/*
2455 * This is how migration works:
2456 *
2457 * 1) we invoke migration_cpu_stop() on the target CPU using
2458 * stop_one_cpu().
2459 * 2) stopper starts to run (implicitly forcing the migrated thread
2460 * off the CPU)
2461 * 3) it checks whether the migrated task is still in the wrong runqueue.
2462 * 4) if it's in the wrong runqueue then the migration thread removes
2463 * it and puts it into the right queue.
2464 * 5) stopper completes and stop_one_cpu() returns and the migration
2465 * is done.
2466 */
2467
2468/*
2469 * move_queued_task - move a queued task to new rq.
2470 *
2471 * Returns (locked) new rq. Old rq's lock is released.
2472 */
2473static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2474 struct task_struct *p, int new_cpu)
2475{
2476 lockdep_assert_rq_held(rq);
2477
2478 deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2479 set_task_cpu(p, new_cpu);
2480 rq_unlock(rq, rf);
2481
2482 rq = cpu_rq(new_cpu);
2483
2484 rq_lock(rq, rf);
2485 WARN_ON_ONCE(task_cpu(p) != new_cpu);
2486 activate_task(rq, p, 0);
2487 wakeup_preempt(rq, p, 0);
2488
2489 return rq;
2490}
2491
2492struct migration_arg {
2493 struct task_struct *task;
2494 int dest_cpu;
2495 struct set_affinity_pending *pending;
2496};
2497
2498/*
2499 * @refs: number of wait_for_completion()
2500 * @stop_pending: is @stop_work in use
2501 */
2502struct set_affinity_pending {
2503 refcount_t refs;
2504 unsigned int stop_pending;
2505 struct completion done;
2506 struct cpu_stop_work stop_work;
2507 struct migration_arg arg;
2508};
2509
2510/*
2511 * Move (not current) task off this CPU, onto the destination CPU. We're doing
2512 * this because either it can't run here any more (set_cpus_allowed()
2513 * away from this CPU, or CPU going down), or because we're
2514 * attempting to rebalance this task on exec (sched_exec).
2515 *
2516 * So we race with normal scheduler movements, but that's OK, as long
2517 * as the task is no longer on this CPU.
2518 */
2519static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2520 struct task_struct *p, int dest_cpu)
2521{
2522 /* Affinity changed (again). */
2523 if (!is_cpu_allowed(p, dest_cpu))
2524 return rq;
2525
2526 rq = move_queued_task(rq, rf, p, dest_cpu);
2527
2528 return rq;
2529}
2530
2531/*
2532 * migration_cpu_stop - this will be executed by a high-prio stopper thread
2533 * and performs thread migration by bumping thread off CPU then
2534 * 'pushing' onto another runqueue.
2535 */
2536static int migration_cpu_stop(void *data)
2537{
2538 struct migration_arg *arg = data;
2539 struct set_affinity_pending *pending = arg->pending;
2540 struct task_struct *p = arg->task;
2541 struct rq *rq = this_rq();
2542 bool complete = false;
2543 struct rq_flags rf;
2544
2545 /*
2546 * The original target CPU might have gone down and we might
2547 * be on another CPU but it doesn't matter.
2548 */
2549 local_irq_save(rf.flags);
2550 /*
2551 * We need to explicitly wake pending tasks before running
2552 * __migrate_task() such that we will not miss enforcing cpus_ptr
2553 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2554 */
2555 flush_smp_call_function_queue();
2556
2557 raw_spin_lock(&p->pi_lock);
2558 rq_lock(rq, &rf);
2559
2560 /*
2561 * If we were passed a pending, then ->stop_pending was set, thus
2562 * p->migration_pending must have remained stable.
2563 */
2564 WARN_ON_ONCE(pending && pending != p->migration_pending);
2565
2566 /*
2567 * If task_rq(p) != rq, it cannot be migrated here, because we're
2568 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2569 * we're holding p->pi_lock.
2570 */
2571 if (task_rq(p) == rq) {
2572 if (is_migration_disabled(p))
2573 goto out;
2574
2575 if (pending) {
2576 p->migration_pending = NULL;
2577 complete = true;
2578
2579 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2580 goto out;
2581 }
2582
2583 if (task_on_rq_queued(p)) {
2584 update_rq_clock(rq);
2585 rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2586 } else {
2587 p->wake_cpu = arg->dest_cpu;
2588 }
2589
2590 /*
2591 * XXX __migrate_task() can fail, at which point we might end
2592 * up running on a dodgy CPU, AFAICT this can only happen
2593 * during CPU hotplug, at which point we'll get pushed out
2594 * anyway, so it's probably not a big deal.
2595 */
2596
2597 } else if (pending) {
2598 /*
2599 * This happens when we get migrated between migrate_enable()'s
2600 * preempt_enable() and scheduling the stopper task. At that
2601 * point we're a regular task again and not current anymore.
2602 *
2603 * A !PREEMPT kernel has a giant hole here, which makes it far
2604 * more likely.
2605 */
2606
2607 /*
2608 * The task moved before the stopper got to run. We're holding
2609 * ->pi_lock, so the allowed mask is stable - if it got
2610 * somewhere allowed, we're done.
2611 */
2612 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2613 p->migration_pending = NULL;
2614 complete = true;
2615 goto out;
2616 }
2617
2618 /*
2619 * When migrate_enable() hits a rq mis-match we can't reliably
2620 * determine is_migration_disabled() and so have to chase after
2621 * it.
2622 */
2623 WARN_ON_ONCE(!pending->stop_pending);
2624 preempt_disable();
2625 task_rq_unlock(rq, p, &rf);
2626 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2627 &pending->arg, &pending->stop_work);
2628 preempt_enable();
2629 return 0;
2630 }
2631out:
2632 if (pending)
2633 pending->stop_pending = false;
2634 task_rq_unlock(rq, p, &rf);
2635
2636 if (complete)
2637 complete_all(&pending->done);
2638
2639 return 0;
2640}
2641
2642int push_cpu_stop(void *arg)
2643{
2644 struct rq *lowest_rq = NULL, *rq = this_rq();
2645 struct task_struct *p = arg;
2646
2647 raw_spin_lock_irq(&p->pi_lock);
2648 raw_spin_rq_lock(rq);
2649
2650 if (task_rq(p) != rq)
2651 goto out_unlock;
2652
2653 if (is_migration_disabled(p)) {
2654 p->migration_flags |= MDF_PUSH;
2655 goto out_unlock;
2656 }
2657
2658 p->migration_flags &= ~MDF_PUSH;
2659
2660 if (p->sched_class->find_lock_rq)
2661 lowest_rq = p->sched_class->find_lock_rq(p, rq);
2662
2663 if (!lowest_rq)
2664 goto out_unlock;
2665
2666 // XXX validate p is still the highest prio task
2667 if (task_rq(p) == rq) {
2668 move_queued_task_locked(rq, lowest_rq, p);
2669 resched_curr(lowest_rq);
2670 }
2671
2672 double_unlock_balance(rq, lowest_rq);
2673
2674out_unlock:
2675 rq->push_busy = false;
2676 raw_spin_rq_unlock(rq);
2677 raw_spin_unlock_irq(&p->pi_lock);
2678
2679 put_task_struct(p);
2680 return 0;
2681}
2682
2683/*
2684 * sched_class::set_cpus_allowed must do the below, but is not required to
2685 * actually call this function.
2686 */
2687void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2688{
2689 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2690 p->cpus_ptr = ctx->new_mask;
2691 return;
2692 }
2693
2694 cpumask_copy(&p->cpus_mask, ctx->new_mask);
2695 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2696
2697 /*
2698 * Swap in a new user_cpus_ptr if SCA_USER flag set
2699 */
2700 if (ctx->flags & SCA_USER)
2701 swap(p->user_cpus_ptr, ctx->user_mask);
2702}
2703
2704static void
2705__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2706{
2707 struct rq *rq = task_rq(p);
2708 bool queued, running;
2709
2710 /*
2711 * This here violates the locking rules for affinity, since we're only
2712 * supposed to change these variables while holding both rq->lock and
2713 * p->pi_lock.
2714 *
2715 * HOWEVER, it magically works, because ttwu() is the only code that
2716 * accesses these variables under p->pi_lock and only does so after
2717 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2718 * before finish_task().
2719 *
2720 * XXX do further audits, this smells like something putrid.
2721 */
2722 if (ctx->flags & SCA_MIGRATE_DISABLE)
2723 SCHED_WARN_ON(!p->on_cpu);
2724 else
2725 lockdep_assert_held(&p->pi_lock);
2726
2727 queued = task_on_rq_queued(p);
2728 running = task_current_donor(rq, p);
2729
2730 if (queued) {
2731 /*
2732 * Because __kthread_bind() calls this on blocked tasks without
2733 * holding rq->lock.
2734 */
2735 lockdep_assert_rq_held(rq);
2736 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
2737 }
2738 if (running)
2739 put_prev_task(rq, p);
2740
2741 p->sched_class->set_cpus_allowed(p, ctx);
2742 mm_set_cpus_allowed(p->mm, ctx->new_mask);
2743
2744 if (queued)
2745 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
2746 if (running)
2747 set_next_task(rq, p);
2748}
2749
2750/*
2751 * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2752 * affinity (if any) should be destroyed too.
2753 */
2754void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2755{
2756 struct affinity_context ac = {
2757 .new_mask = new_mask,
2758 .user_mask = NULL,
2759 .flags = SCA_USER, /* clear the user requested mask */
2760 };
2761 union cpumask_rcuhead {
2762 cpumask_t cpumask;
2763 struct rcu_head rcu;
2764 };
2765
2766 __do_set_cpus_allowed(p, &ac);
2767
2768 /*
2769 * Because this is called with p->pi_lock held, it is not possible
2770 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2771 * kfree_rcu().
2772 */
2773 kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2774}
2775
2776int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2777 int node)
2778{
2779 cpumask_t *user_mask;
2780 unsigned long flags;
2781
2782 /*
2783 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
2784 * may differ by now due to racing.
2785 */
2786 dst->user_cpus_ptr = NULL;
2787
2788 /*
2789 * This check is racy and losing the race is a valid situation.
2790 * It is not worth the extra overhead of taking the pi_lock on
2791 * every fork/clone.
2792 */
2793 if (data_race(!src->user_cpus_ptr))
2794 return 0;
2795
2796 user_mask = alloc_user_cpus_ptr(node);
2797 if (!user_mask)
2798 return -ENOMEM;
2799
2800 /*
2801 * Use pi_lock to protect content of user_cpus_ptr
2802 *
2803 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2804 * do_set_cpus_allowed().
2805 */
2806 raw_spin_lock_irqsave(&src->pi_lock, flags);
2807 if (src->user_cpus_ptr) {
2808 swap(dst->user_cpus_ptr, user_mask);
2809 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2810 }
2811 raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2812
2813 if (unlikely(user_mask))
2814 kfree(user_mask);
2815
2816 return 0;
2817}
2818
2819static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2820{
2821 struct cpumask *user_mask = NULL;
2822
2823 swap(p->user_cpus_ptr, user_mask);
2824
2825 return user_mask;
2826}
2827
2828void release_user_cpus_ptr(struct task_struct *p)
2829{
2830 kfree(clear_user_cpus_ptr(p));
2831}
2832
2833/*
2834 * This function is wildly self concurrent; here be dragons.
2835 *
2836 *
2837 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2838 * designated task is enqueued on an allowed CPU. If that task is currently
2839 * running, we have to kick it out using the CPU stopper.
2840 *
2841 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2842 * Consider:
2843 *
2844 * Initial conditions: P0->cpus_mask = [0, 1]
2845 *
2846 * P0@CPU0 P1
2847 *
2848 * migrate_disable();
2849 * <preempted>
2850 * set_cpus_allowed_ptr(P0, [1]);
2851 *
2852 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2853 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2854 * This means we need the following scheme:
2855 *
2856 * P0@CPU0 P1
2857 *
2858 * migrate_disable();
2859 * <preempted>
2860 * set_cpus_allowed_ptr(P0, [1]);
2861 * <blocks>
2862 * <resumes>
2863 * migrate_enable();
2864 * __set_cpus_allowed_ptr();
2865 * <wakes local stopper>
2866 * `--> <woken on migration completion>
2867 *
2868 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2869 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2870 * task p are serialized by p->pi_lock, which we can leverage: the one that
2871 * should come into effect at the end of the Migrate-Disable region is the last
2872 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2873 * but we still need to properly signal those waiting tasks at the appropriate
2874 * moment.
2875 *
2876 * This is implemented using struct set_affinity_pending. The first
2877 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2878 * setup an instance of that struct and install it on the targeted task_struct.
2879 * Any and all further callers will reuse that instance. Those then wait for
2880 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2881 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2882 *
2883 *
2884 * (1) In the cases covered above. There is one more where the completion is
2885 * signaled within affine_move_task() itself: when a subsequent affinity request
2886 * occurs after the stopper bailed out due to the targeted task still being
2887 * Migrate-Disable. Consider:
2888 *
2889 * Initial conditions: P0->cpus_mask = [0, 1]
2890 *
2891 * CPU0 P1 P2
2892 * <P0>
2893 * migrate_disable();
2894 * <preempted>
2895 * set_cpus_allowed_ptr(P0, [1]);
2896 * <blocks>
2897 * <migration/0>
2898 * migration_cpu_stop()
2899 * is_migration_disabled()
2900 * <bails>
2901 * set_cpus_allowed_ptr(P0, [0, 1]);
2902 * <signal completion>
2903 * <awakes>
2904 *
2905 * Note that the above is safe vs a concurrent migrate_enable(), as any
2906 * pending affinity completion is preceded by an uninstallation of
2907 * p->migration_pending done with p->pi_lock held.
2908 */
2909static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2910 int dest_cpu, unsigned int flags)
2911 __releases(rq->lock)
2912 __releases(p->pi_lock)
2913{
2914 struct set_affinity_pending my_pending = { }, *pending = NULL;
2915 bool stop_pending, complete = false;
2916
2917 /* Can the task run on the task's current CPU? If so, we're done */
2918 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
2919 struct task_struct *push_task = NULL;
2920
2921 if ((flags & SCA_MIGRATE_ENABLE) &&
2922 (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2923 rq->push_busy = true;
2924 push_task = get_task_struct(p);
2925 }
2926
2927 /*
2928 * If there are pending waiters, but no pending stop_work,
2929 * then complete now.
2930 */
2931 pending = p->migration_pending;
2932 if (pending && !pending->stop_pending) {
2933 p->migration_pending = NULL;
2934 complete = true;
2935 }
2936
2937 preempt_disable();
2938 task_rq_unlock(rq, p, rf);
2939 if (push_task) {
2940 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2941 p, &rq->push_work);
2942 }
2943 preempt_enable();
2944
2945 if (complete)
2946 complete_all(&pending->done);
2947
2948 return 0;
2949 }
2950
2951 if (!(flags & SCA_MIGRATE_ENABLE)) {
2952 /* serialized by p->pi_lock */
2953 if (!p->migration_pending) {
2954 /* Install the request */
2955 refcount_set(&my_pending.refs, 1);
2956 init_completion(&my_pending.done);
2957 my_pending.arg = (struct migration_arg) {
2958 .task = p,
2959 .dest_cpu = dest_cpu,
2960 .pending = &my_pending,
2961 };
2962
2963 p->migration_pending = &my_pending;
2964 } else {
2965 pending = p->migration_pending;
2966 refcount_inc(&pending->refs);
2967 /*
2968 * Affinity has changed, but we've already installed a
2969 * pending. migration_cpu_stop() *must* see this, else
2970 * we risk a completion of the pending despite having a
2971 * task on a disallowed CPU.
2972 *
2973 * Serialized by p->pi_lock, so this is safe.
2974 */
2975 pending->arg.dest_cpu = dest_cpu;
2976 }
2977 }
2978 pending = p->migration_pending;
2979 /*
2980 * - !MIGRATE_ENABLE:
2981 * we'll have installed a pending if there wasn't one already.
2982 *
2983 * - MIGRATE_ENABLE:
2984 * we're here because the current CPU isn't matching anymore,
2985 * the only way that can happen is because of a concurrent
2986 * set_cpus_allowed_ptr() call, which should then still be
2987 * pending completion.
2988 *
2989 * Either way, we really should have a @pending here.
2990 */
2991 if (WARN_ON_ONCE(!pending)) {
2992 task_rq_unlock(rq, p, rf);
2993 return -EINVAL;
2994 }
2995
2996 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
2997 /*
2998 * MIGRATE_ENABLE gets here because 'p == current', but for
2999 * anything else we cannot do is_migration_disabled(), punt
3000 * and have the stopper function handle it all race-free.
3001 */
3002 stop_pending = pending->stop_pending;
3003 if (!stop_pending)
3004 pending->stop_pending = true;
3005
3006 if (flags & SCA_MIGRATE_ENABLE)
3007 p->migration_flags &= ~MDF_PUSH;
3008
3009 preempt_disable();
3010 task_rq_unlock(rq, p, rf);
3011 if (!stop_pending) {
3012 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
3013 &pending->arg, &pending->stop_work);
3014 }
3015 preempt_enable();
3016
3017 if (flags & SCA_MIGRATE_ENABLE)
3018 return 0;
3019 } else {
3020
3021 if (!is_migration_disabled(p)) {
3022 if (task_on_rq_queued(p))
3023 rq = move_queued_task(rq, rf, p, dest_cpu);
3024
3025 if (!pending->stop_pending) {
3026 p->migration_pending = NULL;
3027 complete = true;
3028 }
3029 }
3030 task_rq_unlock(rq, p, rf);
3031
3032 if (complete)
3033 complete_all(&pending->done);
3034 }
3035
3036 wait_for_completion(&pending->done);
3037
3038 if (refcount_dec_and_test(&pending->refs))
3039 wake_up_var(&pending->refs); /* No UaF, just an address */
3040
3041 /*
3042 * Block the original owner of &pending until all subsequent callers
3043 * have seen the completion and decremented the refcount
3044 */
3045 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
3046
3047 /* ARGH */
3048 WARN_ON_ONCE(my_pending.stop_pending);
3049
3050 return 0;
3051}
3052
3053/*
3054 * Called with both p->pi_lock and rq->lock held; drops both before returning.
3055 */
3056static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
3057 struct affinity_context *ctx,
3058 struct rq *rq,
3059 struct rq_flags *rf)
3060 __releases(rq->lock)
3061 __releases(p->pi_lock)
3062{
3063 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
3064 const struct cpumask *cpu_valid_mask = cpu_active_mask;
3065 bool kthread = p->flags & PF_KTHREAD;
3066 unsigned int dest_cpu;
3067 int ret = 0;
3068
3069 update_rq_clock(rq);
3070
3071 if (kthread || is_migration_disabled(p)) {
3072 /*
3073 * Kernel threads are allowed on online && !active CPUs,
3074 * however, during cpu-hot-unplug, even these might get pushed
3075 * away if not KTHREAD_IS_PER_CPU.
3076 *
3077 * Specifically, migration_disabled() tasks must not fail the
3078 * cpumask_any_and_distribute() pick below, esp. so on
3079 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3080 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3081 */
3082 cpu_valid_mask = cpu_online_mask;
3083 }
3084
3085 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3086 ret = -EINVAL;
3087 goto out;
3088 }
3089
3090 /*
3091 * Must re-check here, to close a race against __kthread_bind(),
3092 * sched_setaffinity() is not guaranteed to observe the flag.
3093 */
3094 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3095 ret = -EINVAL;
3096 goto out;
3097 }
3098
3099 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3100 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3101 if (ctx->flags & SCA_USER)
3102 swap(p->user_cpus_ptr, ctx->user_mask);
3103 goto out;
3104 }
3105
3106 if (WARN_ON_ONCE(p == current &&
3107 is_migration_disabled(p) &&
3108 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3109 ret = -EBUSY;
3110 goto out;
3111 }
3112 }
3113
3114 /*
3115 * Picking a ~random cpu helps in cases where we are changing affinity
3116 * for groups of tasks (ie. cpuset), so that load balancing is not
3117 * immediately required to distribute the tasks within their new mask.
3118 */
3119 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3120 if (dest_cpu >= nr_cpu_ids) {
3121 ret = -EINVAL;
3122 goto out;
3123 }
3124
3125 __do_set_cpus_allowed(p, ctx);
3126
3127 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3128
3129out:
3130 task_rq_unlock(rq, p, rf);
3131
3132 return ret;
3133}
3134
3135/*
3136 * Change a given task's CPU affinity. Migrate the thread to a
3137 * proper CPU and schedule it away if the CPU it's executing on
3138 * is removed from the allowed bitmask.
3139 *
3140 * NOTE: the caller must have a valid reference to the task, the
3141 * task must not exit() & deallocate itself prematurely. The
3142 * call is not atomic; no spinlocks may be held.
3143 */
3144int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
3145{
3146 struct rq_flags rf;
3147 struct rq *rq;
3148
3149 rq = task_rq_lock(p, &rf);
3150 /*
3151 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3152 * flags are set.
3153 */
3154 if (p->user_cpus_ptr &&
3155 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3156 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3157 ctx->new_mask = rq->scratch_mask;
3158
3159 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3160}
3161
3162int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3163{
3164 struct affinity_context ac = {
3165 .new_mask = new_mask,
3166 .flags = 0,
3167 };
3168
3169 return __set_cpus_allowed_ptr(p, &ac);
3170}
3171EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
3172
3173/*
3174 * Change a given task's CPU affinity to the intersection of its current
3175 * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3176 * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3177 * affinity or use cpu_online_mask instead.
3178 *
3179 * If the resulting mask is empty, leave the affinity unchanged and return
3180 * -EINVAL.
3181 */
3182static int restrict_cpus_allowed_ptr(struct task_struct *p,
3183 struct cpumask *new_mask,
3184 const struct cpumask *subset_mask)
3185{
3186 struct affinity_context ac = {
3187 .new_mask = new_mask,
3188 .flags = 0,
3189 };
3190 struct rq_flags rf;
3191 struct rq *rq;
3192 int err;
3193
3194 rq = task_rq_lock(p, &rf);
3195
3196 /*
3197 * Forcefully restricting the affinity of a deadline task is
3198 * likely to cause problems, so fail and noisily override the
3199 * mask entirely.
3200 */
3201 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3202 err = -EPERM;
3203 goto err_unlock;
3204 }
3205
3206 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3207 err = -EINVAL;
3208 goto err_unlock;
3209 }
3210
3211 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3212
3213err_unlock:
3214 task_rq_unlock(rq, p, &rf);
3215 return err;
3216}
3217
3218/*
3219 * Restrict the CPU affinity of task @p so that it is a subset of
3220 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3221 * old affinity mask. If the resulting mask is empty, we warn and walk
3222 * up the cpuset hierarchy until we find a suitable mask.
3223 */
3224void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3225{
3226 cpumask_var_t new_mask;
3227 const struct cpumask *override_mask = task_cpu_possible_mask(p);
3228
3229 alloc_cpumask_var(&new_mask, GFP_KERNEL);
3230
3231 /*
3232 * __migrate_task() can fail silently in the face of concurrent
3233 * offlining of the chosen destination CPU, so take the hotplug
3234 * lock to ensure that the migration succeeds.
3235 */
3236 cpus_read_lock();
3237 if (!cpumask_available(new_mask))
3238 goto out_set_mask;
3239
3240 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3241 goto out_free_mask;
3242
3243 /*
3244 * We failed to find a valid subset of the affinity mask for the
3245 * task, so override it based on its cpuset hierarchy.
3246 */
3247 cpuset_cpus_allowed(p, new_mask);
3248 override_mask = new_mask;
3249
3250out_set_mask:
3251 if (printk_ratelimit()) {
3252 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3253 task_pid_nr(p), p->comm,
3254 cpumask_pr_args(override_mask));
3255 }
3256
3257 WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3258out_free_mask:
3259 cpus_read_unlock();
3260 free_cpumask_var(new_mask);
3261}
3262
3263/*
3264 * Restore the affinity of a task @p which was previously restricted by a
3265 * call to force_compatible_cpus_allowed_ptr().
3266 *
3267 * It is the caller's responsibility to serialise this with any calls to
3268 * force_compatible_cpus_allowed_ptr(@p).
3269 */
3270void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3271{
3272 struct affinity_context ac = {
3273 .new_mask = task_user_cpus(p),
3274 .flags = 0,
3275 };
3276 int ret;
3277
3278 /*
3279 * Try to restore the old affinity mask with __sched_setaffinity().
3280 * Cpuset masking will be done there too.
3281 */
3282 ret = __sched_setaffinity(p, &ac);
3283 WARN_ON_ONCE(ret);
3284}
3285
3286void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3287{
3288#ifdef CONFIG_SCHED_DEBUG
3289 unsigned int state = READ_ONCE(p->__state);
3290
3291 /*
3292 * We should never call set_task_cpu() on a blocked task,
3293 * ttwu() will sort out the placement.
3294 */
3295 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3296
3297 /*
3298 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3299 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3300 * time relying on p->on_rq.
3301 */
3302 WARN_ON_ONCE(state == TASK_RUNNING &&
3303 p->sched_class == &fair_sched_class &&
3304 (p->on_rq && !task_on_rq_migrating(p)));
3305
3306#ifdef CONFIG_LOCKDEP
3307 /*
3308 * The caller should hold either p->pi_lock or rq->lock, when changing
3309 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3310 *
3311 * sched_move_task() holds both and thus holding either pins the cgroup,
3312 * see task_group().
3313 *
3314 * Furthermore, all task_rq users should acquire both locks, see
3315 * task_rq_lock().
3316 */
3317 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3318 lockdep_is_held(__rq_lockp(task_rq(p)))));
3319#endif
3320 /*
3321 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3322 */
3323 WARN_ON_ONCE(!cpu_online(new_cpu));
3324
3325 WARN_ON_ONCE(is_migration_disabled(p));
3326#endif
3327
3328 trace_sched_migrate_task(p, new_cpu);
3329
3330 if (task_cpu(p) != new_cpu) {
3331 if (p->sched_class->migrate_task_rq)
3332 p->sched_class->migrate_task_rq(p, new_cpu);
3333 p->se.nr_migrations++;
3334 rseq_migrate(p);
3335 sched_mm_cid_migrate_from(p);
3336 perf_event_task_migrate(p);
3337 }
3338
3339 __set_task_cpu(p, new_cpu);
3340}
3341
3342#ifdef CONFIG_NUMA_BALANCING
3343static void __migrate_swap_task(struct task_struct *p, int cpu)
3344{
3345 if (task_on_rq_queued(p)) {
3346 struct rq *src_rq, *dst_rq;
3347 struct rq_flags srf, drf;
3348
3349 src_rq = task_rq(p);
3350 dst_rq = cpu_rq(cpu);
3351
3352 rq_pin_lock(src_rq, &srf);
3353 rq_pin_lock(dst_rq, &drf);
3354
3355 move_queued_task_locked(src_rq, dst_rq, p);
3356 wakeup_preempt(dst_rq, p, 0);
3357
3358 rq_unpin_lock(dst_rq, &drf);
3359 rq_unpin_lock(src_rq, &srf);
3360
3361 } else {
3362 /*
3363 * Task isn't running anymore; make it appear like we migrated
3364 * it before it went to sleep. This means on wakeup we make the
3365 * previous CPU our target instead of where it really is.
3366 */
3367 p->wake_cpu = cpu;
3368 }
3369}
3370
3371struct migration_swap_arg {
3372 struct task_struct *src_task, *dst_task;
3373 int src_cpu, dst_cpu;
3374};
3375
3376static int migrate_swap_stop(void *data)
3377{
3378 struct migration_swap_arg *arg = data;
3379 struct rq *src_rq, *dst_rq;
3380
3381 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3382 return -EAGAIN;
3383
3384 src_rq = cpu_rq(arg->src_cpu);
3385 dst_rq = cpu_rq(arg->dst_cpu);
3386
3387 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3388 guard(double_rq_lock)(src_rq, dst_rq);
3389
3390 if (task_cpu(arg->dst_task) != arg->dst_cpu)
3391 return -EAGAIN;
3392
3393 if (task_cpu(arg->src_task) != arg->src_cpu)
3394 return -EAGAIN;
3395
3396 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3397 return -EAGAIN;
3398
3399 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3400 return -EAGAIN;
3401
3402 __migrate_swap_task(arg->src_task, arg->dst_cpu);
3403 __migrate_swap_task(arg->dst_task, arg->src_cpu);
3404
3405 return 0;
3406}
3407
3408/*
3409 * Cross migrate two tasks
3410 */
3411int migrate_swap(struct task_struct *cur, struct task_struct *p,
3412 int target_cpu, int curr_cpu)
3413{
3414 struct migration_swap_arg arg;
3415 int ret = -EINVAL;
3416
3417 arg = (struct migration_swap_arg){
3418 .src_task = cur,
3419 .src_cpu = curr_cpu,
3420 .dst_task = p,
3421 .dst_cpu = target_cpu,
3422 };
3423
3424 if (arg.src_cpu == arg.dst_cpu)
3425 goto out;
3426
3427 /*
3428 * These three tests are all lockless; this is OK since all of them
3429 * will be re-checked with proper locks held further down the line.
3430 */
3431 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3432 goto out;
3433
3434 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3435 goto out;
3436
3437 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3438 goto out;
3439
3440 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3441 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3442
3443out:
3444 return ret;
3445}
3446#endif /* CONFIG_NUMA_BALANCING */
3447
3448/***
3449 * kick_process - kick a running thread to enter/exit the kernel
3450 * @p: the to-be-kicked thread
3451 *
3452 * Cause a process which is running on another CPU to enter
3453 * kernel-mode, without any delay. (to get signals handled.)
3454 *
3455 * NOTE: this function doesn't have to take the runqueue lock,
3456 * because all it wants to ensure is that the remote task enters
3457 * the kernel. If the IPI races and the task has been migrated
3458 * to another CPU then no harm is done and the purpose has been
3459 * achieved as well.
3460 */
3461void kick_process(struct task_struct *p)
3462{
3463 guard(preempt)();
3464 int cpu = task_cpu(p);
3465
3466 if ((cpu != smp_processor_id()) && task_curr(p))
3467 smp_send_reschedule(cpu);
3468}
3469EXPORT_SYMBOL_GPL(kick_process);
3470
3471/*
3472 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3473 *
3474 * A few notes on cpu_active vs cpu_online:
3475 *
3476 * - cpu_active must be a subset of cpu_online
3477 *
3478 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3479 * see __set_cpus_allowed_ptr(). At this point the newly online
3480 * CPU isn't yet part of the sched domains, and balancing will not
3481 * see it.
3482 *
3483 * - on CPU-down we clear cpu_active() to mask the sched domains and
3484 * avoid the load balancer to place new tasks on the to be removed
3485 * CPU. Existing tasks will remain running there and will be taken
3486 * off.
3487 *
3488 * This means that fallback selection must not select !active CPUs.
3489 * And can assume that any active CPU must be online. Conversely
3490 * select_task_rq() below may allow selection of !active CPUs in order
3491 * to satisfy the above rules.
3492 */
3493static int select_fallback_rq(int cpu, struct task_struct *p)
3494{
3495 int nid = cpu_to_node(cpu);
3496 const struct cpumask *nodemask = NULL;
3497 enum { cpuset, possible, fail } state = cpuset;
3498 int dest_cpu;
3499
3500 /*
3501 * If the node that the CPU is on has been offlined, cpu_to_node()
3502 * will return -1. There is no CPU on the node, and we should
3503 * select the CPU on the other node.
3504 */
3505 if (nid != -1) {
3506 nodemask = cpumask_of_node(nid);
3507
3508 /* Look for allowed, online CPU in same node. */
3509 for_each_cpu(dest_cpu, nodemask) {
3510 if (is_cpu_allowed(p, dest_cpu))
3511 return dest_cpu;
3512 }
3513 }
3514
3515 for (;;) {
3516 /* Any allowed, online CPU? */
3517 for_each_cpu(dest_cpu, p->cpus_ptr) {
3518 if (!is_cpu_allowed(p, dest_cpu))
3519 continue;
3520
3521 goto out;
3522 }
3523
3524 /* No more Mr. Nice Guy. */
3525 switch (state) {
3526 case cpuset:
3527 if (cpuset_cpus_allowed_fallback(p)) {
3528 state = possible;
3529 break;
3530 }
3531 fallthrough;
3532 case possible:
3533 /*
3534 * XXX When called from select_task_rq() we only
3535 * hold p->pi_lock and again violate locking order.
3536 *
3537 * More yuck to audit.
3538 */
3539 do_set_cpus_allowed(p, task_cpu_possible_mask(p));
3540 state = fail;
3541 break;
3542 case fail:
3543 BUG();
3544 break;
3545 }
3546 }
3547
3548out:
3549 if (state != cpuset) {
3550 /*
3551 * Don't tell them about moving exiting tasks or
3552 * kernel threads (both mm NULL), since they never
3553 * leave kernel.
3554 */
3555 if (p->mm && printk_ratelimit()) {
3556 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3557 task_pid_nr(p), p->comm, cpu);
3558 }
3559 }
3560
3561 return dest_cpu;
3562}
3563
3564/*
3565 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3566 */
3567static inline
3568int select_task_rq(struct task_struct *p, int cpu, int *wake_flags)
3569{
3570 lockdep_assert_held(&p->pi_lock);
3571
3572 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) {
3573 cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags);
3574 *wake_flags |= WF_RQ_SELECTED;
3575 } else {
3576 cpu = cpumask_any(p->cpus_ptr);
3577 }
3578
3579 /*
3580 * In order not to call set_task_cpu() on a blocking task we need
3581 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3582 * CPU.
3583 *
3584 * Since this is common to all placement strategies, this lives here.
3585 *
3586 * [ this allows ->select_task() to simply return task_cpu(p) and
3587 * not worry about this generic constraint ]
3588 */
3589 if (unlikely(!is_cpu_allowed(p, cpu)))
3590 cpu = select_fallback_rq(task_cpu(p), p);
3591
3592 return cpu;
3593}
3594
3595void sched_set_stop_task(int cpu, struct task_struct *stop)
3596{
3597 static struct lock_class_key stop_pi_lock;
3598 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3599 struct task_struct *old_stop = cpu_rq(cpu)->stop;
3600
3601 if (stop) {
3602 /*
3603 * Make it appear like a SCHED_FIFO task, its something
3604 * userspace knows about and won't get confused about.
3605 *
3606 * Also, it will make PI more or less work without too
3607 * much confusion -- but then, stop work should not
3608 * rely on PI working anyway.
3609 */
3610 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m);
3611
3612 stop->sched_class = &stop_sched_class;
3613
3614 /*
3615 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3616 * adjust the effective priority of a task. As a result,
3617 * rt_mutex_setprio() can trigger (RT) balancing operations,
3618 * which can then trigger wakeups of the stop thread to push
3619 * around the current task.
3620 *
3621 * The stop task itself will never be part of the PI-chain, it
3622 * never blocks, therefore that ->pi_lock recursion is safe.
3623 * Tell lockdep about this by placing the stop->pi_lock in its
3624 * own class.
3625 */
3626 lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3627 }
3628
3629 cpu_rq(cpu)->stop = stop;
3630
3631 if (old_stop) {
3632 /*
3633 * Reset it back to a normal scheduling class so that
3634 * it can die in pieces.
3635 */
3636 old_stop->sched_class = &rt_sched_class;
3637 }
3638}
3639
3640#else /* CONFIG_SMP */
3641
3642static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
3643
3644static inline bool rq_has_pinned_tasks(struct rq *rq)
3645{
3646 return false;
3647}
3648
3649#endif /* !CONFIG_SMP */
3650
3651static void
3652ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3653{
3654 struct rq *rq;
3655
3656 if (!schedstat_enabled())
3657 return;
3658
3659 rq = this_rq();
3660
3661#ifdef CONFIG_SMP
3662 if (cpu == rq->cpu) {
3663 __schedstat_inc(rq->ttwu_local);
3664 __schedstat_inc(p->stats.nr_wakeups_local);
3665 } else {
3666 struct sched_domain *sd;
3667
3668 __schedstat_inc(p->stats.nr_wakeups_remote);
3669
3670 guard(rcu)();
3671 for_each_domain(rq->cpu, sd) {
3672 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3673 __schedstat_inc(sd->ttwu_wake_remote);
3674 break;
3675 }
3676 }
3677 }
3678
3679 if (wake_flags & WF_MIGRATED)
3680 __schedstat_inc(p->stats.nr_wakeups_migrate);
3681#endif /* CONFIG_SMP */
3682
3683 __schedstat_inc(rq->ttwu_count);
3684 __schedstat_inc(p->stats.nr_wakeups);
3685
3686 if (wake_flags & WF_SYNC)
3687 __schedstat_inc(p->stats.nr_wakeups_sync);
3688}
3689
3690/*
3691 * Mark the task runnable.
3692 */
3693static inline void ttwu_do_wakeup(struct task_struct *p)
3694{
3695 WRITE_ONCE(p->__state, TASK_RUNNING);
3696 trace_sched_wakeup(p);
3697}
3698
3699static void
3700ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3701 struct rq_flags *rf)
3702{
3703 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3704
3705 lockdep_assert_rq_held(rq);
3706
3707 if (p->sched_contributes_to_load)
3708 rq->nr_uninterruptible--;
3709
3710#ifdef CONFIG_SMP
3711 if (wake_flags & WF_RQ_SELECTED)
3712 en_flags |= ENQUEUE_RQ_SELECTED;
3713 if (wake_flags & WF_MIGRATED)
3714 en_flags |= ENQUEUE_MIGRATED;
3715 else
3716#endif
3717 if (p->in_iowait) {
3718 delayacct_blkio_end(p);
3719 atomic_dec(&task_rq(p)->nr_iowait);
3720 }
3721
3722 activate_task(rq, p, en_flags);
3723 wakeup_preempt(rq, p, wake_flags);
3724
3725 ttwu_do_wakeup(p);
3726
3727#ifdef CONFIG_SMP
3728 if (p->sched_class->task_woken) {
3729 /*
3730 * Our task @p is fully woken up and running; so it's safe to
3731 * drop the rq->lock, hereafter rq is only used for statistics.
3732 */
3733 rq_unpin_lock(rq, rf);
3734 p->sched_class->task_woken(rq, p);
3735 rq_repin_lock(rq, rf);
3736 }
3737
3738 if (rq->idle_stamp) {
3739 u64 delta = rq_clock(rq) - rq->idle_stamp;
3740 u64 max = 2*rq->max_idle_balance_cost;
3741
3742 update_avg(&rq->avg_idle, delta);
3743
3744 if (rq->avg_idle > max)
3745 rq->avg_idle = max;
3746
3747 rq->idle_stamp = 0;
3748 }
3749#endif
3750}
3751
3752/*
3753 * Consider @p being inside a wait loop:
3754 *
3755 * for (;;) {
3756 * set_current_state(TASK_UNINTERRUPTIBLE);
3757 *
3758 * if (CONDITION)
3759 * break;
3760 *
3761 * schedule();
3762 * }
3763 * __set_current_state(TASK_RUNNING);
3764 *
3765 * between set_current_state() and schedule(). In this case @p is still
3766 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3767 * an atomic manner.
3768 *
3769 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3770 * then schedule() must still happen and p->state can be changed to
3771 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3772 * need to do a full wakeup with enqueue.
3773 *
3774 * Returns: %true when the wakeup is done,
3775 * %false otherwise.
3776 */
3777static int ttwu_runnable(struct task_struct *p, int wake_flags)
3778{
3779 struct rq_flags rf;
3780 struct rq *rq;
3781 int ret = 0;
3782
3783 rq = __task_rq_lock(p, &rf);
3784 if (task_on_rq_queued(p)) {
3785 update_rq_clock(rq);
3786 if (p->se.sched_delayed)
3787 enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
3788 if (!task_on_cpu(rq, p)) {
3789 /*
3790 * When on_rq && !on_cpu the task is preempted, see if
3791 * it should preempt the task that is current now.
3792 */
3793 wakeup_preempt(rq, p, wake_flags);
3794 }
3795 ttwu_do_wakeup(p);
3796 ret = 1;
3797 }
3798 __task_rq_unlock(rq, &rf);
3799
3800 return ret;
3801}
3802
3803#ifdef CONFIG_SMP
3804void sched_ttwu_pending(void *arg)
3805{
3806 struct llist_node *llist = arg;
3807 struct rq *rq = this_rq();
3808 struct task_struct *p, *t;
3809 struct rq_flags rf;
3810
3811 if (!llist)
3812 return;
3813
3814 rq_lock_irqsave(rq, &rf);
3815 update_rq_clock(rq);
3816
3817 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3818 if (WARN_ON_ONCE(p->on_cpu))
3819 smp_cond_load_acquire(&p->on_cpu, !VAL);
3820
3821 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3822 set_task_cpu(p, cpu_of(rq));
3823
3824 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3825 }
3826
3827 /*
3828 * Must be after enqueueing at least once task such that
3829 * idle_cpu() does not observe a false-negative -- if it does,
3830 * it is possible for select_idle_siblings() to stack a number
3831 * of tasks on this CPU during that window.
3832 *
3833 * It is OK to clear ttwu_pending when another task pending.
3834 * We will receive IPI after local IRQ enabled and then enqueue it.
3835 * Since now nr_running > 0, idle_cpu() will always get correct result.
3836 */
3837 WRITE_ONCE(rq->ttwu_pending, 0);
3838 rq_unlock_irqrestore(rq, &rf);
3839}
3840
3841/*
3842 * Prepare the scene for sending an IPI for a remote smp_call
3843 *
3844 * Returns true if the caller can proceed with sending the IPI.
3845 * Returns false otherwise.
3846 */
3847bool call_function_single_prep_ipi(int cpu)
3848{
3849 if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3850 trace_sched_wake_idle_without_ipi(cpu);
3851 return false;
3852 }
3853
3854 return true;
3855}
3856
3857/*
3858 * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
3859 * necessary. The wakee CPU on receipt of the IPI will queue the task
3860 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
3861 * of the wakeup instead of the waker.
3862 */
3863static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3864{
3865 struct rq *rq = cpu_rq(cpu);
3866
3867 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3868
3869 WRITE_ONCE(rq->ttwu_pending, 1);
3870 __smp_call_single_queue(cpu, &p->wake_entry.llist);
3871}
3872
3873void wake_up_if_idle(int cpu)
3874{
3875 struct rq *rq = cpu_rq(cpu);
3876
3877 guard(rcu)();
3878 if (is_idle_task(rcu_dereference(rq->curr))) {
3879 guard(rq_lock_irqsave)(rq);
3880 if (is_idle_task(rq->curr))
3881 resched_curr(rq);
3882 }
3883}
3884
3885bool cpus_equal_capacity(int this_cpu, int that_cpu)
3886{
3887 if (!sched_asym_cpucap_active())
3888 return true;
3889
3890 if (this_cpu == that_cpu)
3891 return true;
3892
3893 return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3894}
3895
3896bool cpus_share_cache(int this_cpu, int that_cpu)
3897{
3898 if (this_cpu == that_cpu)
3899 return true;
3900
3901 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3902}
3903
3904/*
3905 * Whether CPUs are share cache resources, which means LLC on non-cluster
3906 * machines and LLC tag or L2 on machines with clusters.
3907 */
3908bool cpus_share_resources(int this_cpu, int that_cpu)
3909{
3910 if (this_cpu == that_cpu)
3911 return true;
3912
3913 return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3914}
3915
3916static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3917{
3918 /*
3919 * The BPF scheduler may depend on select_task_rq() being invoked during
3920 * wakeups. In addition, @p may end up executing on a different CPU
3921 * regardless of what happens in the wakeup path making the ttwu_queue
3922 * optimization less meaningful. Skip if on SCX.
3923 */
3924 if (task_on_scx(p))
3925 return false;
3926
3927 /*
3928 * Do not complicate things with the async wake_list while the CPU is
3929 * in hotplug state.
3930 */
3931 if (!cpu_active(cpu))
3932 return false;
3933
3934 /* Ensure the task will still be allowed to run on the CPU. */
3935 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3936 return false;
3937
3938 /*
3939 * If the CPU does not share cache, then queue the task on the
3940 * remote rqs wakelist to avoid accessing remote data.
3941 */
3942 if (!cpus_share_cache(smp_processor_id(), cpu))
3943 return true;
3944
3945 if (cpu == smp_processor_id())
3946 return false;
3947
3948 /*
3949 * If the wakee cpu is idle, or the task is descheduling and the
3950 * only running task on the CPU, then use the wakelist to offload
3951 * the task activation to the idle (or soon-to-be-idle) CPU as
3952 * the current CPU is likely busy. nr_running is checked to
3953 * avoid unnecessary task stacking.
3954 *
3955 * Note that we can only get here with (wakee) p->on_rq=0,
3956 * p->on_cpu can be whatever, we've done the dequeue, so
3957 * the wakee has been accounted out of ->nr_running.
3958 */
3959 if (!cpu_rq(cpu)->nr_running)
3960 return true;
3961
3962 return false;
3963}
3964
3965static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3966{
3967 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
3968 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3969 __ttwu_queue_wakelist(p, cpu, wake_flags);
3970 return true;
3971 }
3972
3973 return false;
3974}
3975
3976#else /* !CONFIG_SMP */
3977
3978static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3979{
3980 return false;
3981}
3982
3983#endif /* CONFIG_SMP */
3984
3985static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3986{
3987 struct rq *rq = cpu_rq(cpu);
3988 struct rq_flags rf;
3989
3990 if (ttwu_queue_wakelist(p, cpu, wake_flags))
3991 return;
3992
3993 rq_lock(rq, &rf);
3994 update_rq_clock(rq);
3995 ttwu_do_activate(rq, p, wake_flags, &rf);
3996 rq_unlock(rq, &rf);
3997}
3998
3999/*
4000 * Invoked from try_to_wake_up() to check whether the task can be woken up.
4001 *
4002 * The caller holds p::pi_lock if p != current or has preemption
4003 * disabled when p == current.
4004 *
4005 * The rules of saved_state:
4006 *
4007 * The related locking code always holds p::pi_lock when updating
4008 * p::saved_state, which means the code is fully serialized in both cases.
4009 *
4010 * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
4011 * No other bits set. This allows to distinguish all wakeup scenarios.
4012 *
4013 * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
4014 * allows us to prevent early wakeup of tasks before they can be run on
4015 * asymmetric ISA architectures (eg ARMv9).
4016 */
4017static __always_inline
4018bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
4019{
4020 int match;
4021
4022 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
4023 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
4024 state != TASK_RTLOCK_WAIT);
4025 }
4026
4027 *success = !!(match = __task_state_match(p, state));
4028
4029 /*
4030 * Saved state preserves the task state across blocking on
4031 * an RT lock or TASK_FREEZABLE tasks. If the state matches,
4032 * set p::saved_state to TASK_RUNNING, but do not wake the task
4033 * because it waits for a lock wakeup or __thaw_task(). Also
4034 * indicate success because from the regular waker's point of
4035 * view this has succeeded.
4036 *
4037 * After acquiring the lock the task will restore p::__state
4038 * from p::saved_state which ensures that the regular
4039 * wakeup is not lost. The restore will also set
4040 * p::saved_state to TASK_RUNNING so any further tests will
4041 * not result in false positives vs. @success
4042 */
4043 if (match < 0)
4044 p->saved_state = TASK_RUNNING;
4045
4046 return match > 0;
4047}
4048
4049/*
4050 * Notes on Program-Order guarantees on SMP systems.
4051 *
4052 * MIGRATION
4053 *
4054 * The basic program-order guarantee on SMP systems is that when a task [t]
4055 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4056 * execution on its new CPU [c1].
4057 *
4058 * For migration (of runnable tasks) this is provided by the following means:
4059 *
4060 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4061 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4062 * rq(c1)->lock (if not at the same time, then in that order).
4063 * C) LOCK of the rq(c1)->lock scheduling in task
4064 *
4065 * Release/acquire chaining guarantees that B happens after A and C after B.
4066 * Note: the CPU doing B need not be c0 or c1
4067 *
4068 * Example:
4069 *
4070 * CPU0 CPU1 CPU2
4071 *
4072 * LOCK rq(0)->lock
4073 * sched-out X
4074 * sched-in Y
4075 * UNLOCK rq(0)->lock
4076 *
4077 * LOCK rq(0)->lock // orders against CPU0
4078 * dequeue X
4079 * UNLOCK rq(0)->lock
4080 *
4081 * LOCK rq(1)->lock
4082 * enqueue X
4083 * UNLOCK rq(1)->lock
4084 *
4085 * LOCK rq(1)->lock // orders against CPU2
4086 * sched-out Z
4087 * sched-in X
4088 * UNLOCK rq(1)->lock
4089 *
4090 *
4091 * BLOCKING -- aka. SLEEP + WAKEUP
4092 *
4093 * For blocking we (obviously) need to provide the same guarantee as for
4094 * migration. However the means are completely different as there is no lock
4095 * chain to provide order. Instead we do:
4096 *
4097 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4098 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4099 *
4100 * Example:
4101 *
4102 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
4103 *
4104 * LOCK rq(0)->lock LOCK X->pi_lock
4105 * dequeue X
4106 * sched-out X
4107 * smp_store_release(X->on_cpu, 0);
4108 *
4109 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4110 * X->state = WAKING
4111 * set_task_cpu(X,2)
4112 *
4113 * LOCK rq(2)->lock
4114 * enqueue X
4115 * X->state = RUNNING
4116 * UNLOCK rq(2)->lock
4117 *
4118 * LOCK rq(2)->lock // orders against CPU1
4119 * sched-out Z
4120 * sched-in X
4121 * UNLOCK rq(2)->lock
4122 *
4123 * UNLOCK X->pi_lock
4124 * UNLOCK rq(0)->lock
4125 *
4126 *
4127 * However, for wakeups there is a second guarantee we must provide, namely we
4128 * must ensure that CONDITION=1 done by the caller can not be reordered with
4129 * accesses to the task state; see try_to_wake_up() and set_current_state().
4130 */
4131
4132/**
4133 * try_to_wake_up - wake up a thread
4134 * @p: the thread to be awakened
4135 * @state: the mask of task states that can be woken
4136 * @wake_flags: wake modifier flags (WF_*)
4137 *
4138 * Conceptually does:
4139 *
4140 * If (@state & @p->state) @p->state = TASK_RUNNING.
4141 *
4142 * If the task was not queued/runnable, also place it back on a runqueue.
4143 *
4144 * This function is atomic against schedule() which would dequeue the task.
4145 *
4146 * It issues a full memory barrier before accessing @p->state, see the comment
4147 * with set_current_state().
4148 *
4149 * Uses p->pi_lock to serialize against concurrent wake-ups.
4150 *
4151 * Relies on p->pi_lock stabilizing:
4152 * - p->sched_class
4153 * - p->cpus_ptr
4154 * - p->sched_task_group
4155 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4156 *
4157 * Tries really hard to only take one task_rq(p)->lock for performance.
4158 * Takes rq->lock in:
4159 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4160 * - ttwu_queue() -- new rq, for enqueue of the task;
4161 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4162 *
4163 * As a consequence we race really badly with just about everything. See the
4164 * many memory barriers and their comments for details.
4165 *
4166 * Return: %true if @p->state changes (an actual wakeup was done),
4167 * %false otherwise.
4168 */
4169int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4170{
4171 guard(preempt)();
4172 int cpu, success = 0;
4173
4174 wake_flags |= WF_TTWU;
4175
4176 if (p == current) {
4177 /*
4178 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4179 * == smp_processor_id()'. Together this means we can special
4180 * case the whole 'p->on_rq && ttwu_runnable()' case below
4181 * without taking any locks.
4182 *
4183 * Specifically, given current runs ttwu() we must be before
4184 * schedule()'s block_task(), as such this must not observe
4185 * sched_delayed.
4186 *
4187 * In particular:
4188 * - we rely on Program-Order guarantees for all the ordering,
4189 * - we're serialized against set_special_state() by virtue of
4190 * it disabling IRQs (this allows not taking ->pi_lock).
4191 */
4192 SCHED_WARN_ON(p->se.sched_delayed);
4193 if (!ttwu_state_match(p, state, &success))
4194 goto out;
4195
4196 trace_sched_waking(p);
4197 ttwu_do_wakeup(p);
4198 goto out;
4199 }
4200
4201 /*
4202 * If we are going to wake up a thread waiting for CONDITION we
4203 * need to ensure that CONDITION=1 done by the caller can not be
4204 * reordered with p->state check below. This pairs with smp_store_mb()
4205 * in set_current_state() that the waiting thread does.
4206 */
4207 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4208 smp_mb__after_spinlock();
4209 if (!ttwu_state_match(p, state, &success))
4210 break;
4211
4212 trace_sched_waking(p);
4213
4214 /*
4215 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4216 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4217 * in smp_cond_load_acquire() below.
4218 *
4219 * sched_ttwu_pending() try_to_wake_up()
4220 * STORE p->on_rq = 1 LOAD p->state
4221 * UNLOCK rq->lock
4222 *
4223 * __schedule() (switch to task 'p')
4224 * LOCK rq->lock smp_rmb();
4225 * smp_mb__after_spinlock();
4226 * UNLOCK rq->lock
4227 *
4228 * [task p]
4229 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4230 *
4231 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4232 * __schedule(). See the comment for smp_mb__after_spinlock().
4233 *
4234 * A similar smp_rmb() lives in __task_needs_rq_lock().
4235 */
4236 smp_rmb();
4237 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4238 break;
4239
4240#ifdef CONFIG_SMP
4241 /*
4242 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4243 * possible to, falsely, observe p->on_cpu == 0.
4244 *
4245 * One must be running (->on_cpu == 1) in order to remove oneself
4246 * from the runqueue.
4247 *
4248 * __schedule() (switch to task 'p') try_to_wake_up()
4249 * STORE p->on_cpu = 1 LOAD p->on_rq
4250 * UNLOCK rq->lock
4251 *
4252 * __schedule() (put 'p' to sleep)
4253 * LOCK rq->lock smp_rmb();
4254 * smp_mb__after_spinlock();
4255 * STORE p->on_rq = 0 LOAD p->on_cpu
4256 *
4257 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4258 * __schedule(). See the comment for smp_mb__after_spinlock().
4259 *
4260 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4261 * schedule()'s deactivate_task() has 'happened' and p will no longer
4262 * care about it's own p->state. See the comment in __schedule().
4263 */
4264 smp_acquire__after_ctrl_dep();
4265
4266 /*
4267 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4268 * == 0), which means we need to do an enqueue, change p->state to
4269 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4270 * enqueue, such as ttwu_queue_wakelist().
4271 */
4272 WRITE_ONCE(p->__state, TASK_WAKING);
4273
4274 /*
4275 * If the owning (remote) CPU is still in the middle of schedule() with
4276 * this task as prev, considering queueing p on the remote CPUs wake_list
4277 * which potentially sends an IPI instead of spinning on p->on_cpu to
4278 * let the waker make forward progress. This is safe because IRQs are
4279 * disabled and the IPI will deliver after on_cpu is cleared.
4280 *
4281 * Ensure we load task_cpu(p) after p->on_cpu:
4282 *
4283 * set_task_cpu(p, cpu);
4284 * STORE p->cpu = @cpu
4285 * __schedule() (switch to task 'p')
4286 * LOCK rq->lock
4287 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
4288 * STORE p->on_cpu = 1 LOAD p->cpu
4289 *
4290 * to ensure we observe the correct CPU on which the task is currently
4291 * scheduling.
4292 */
4293 if (smp_load_acquire(&p->on_cpu) &&
4294 ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4295 break;
4296
4297 /*
4298 * If the owning (remote) CPU is still in the middle of schedule() with
4299 * this task as prev, wait until it's done referencing the task.
4300 *
4301 * Pairs with the smp_store_release() in finish_task().
4302 *
4303 * This ensures that tasks getting woken will be fully ordered against
4304 * their previous state and preserve Program Order.
4305 */
4306 smp_cond_load_acquire(&p->on_cpu, !VAL);
4307
4308 cpu = select_task_rq(p, p->wake_cpu, &wake_flags);
4309 if (task_cpu(p) != cpu) {
4310 if (p->in_iowait) {
4311 delayacct_blkio_end(p);
4312 atomic_dec(&task_rq(p)->nr_iowait);
4313 }
4314
4315 wake_flags |= WF_MIGRATED;
4316 psi_ttwu_dequeue(p);
4317 set_task_cpu(p, cpu);
4318 }
4319#else
4320 cpu = task_cpu(p);
4321#endif /* CONFIG_SMP */
4322
4323 ttwu_queue(p, cpu, wake_flags);
4324 }
4325out:
4326 if (success)
4327 ttwu_stat(p, task_cpu(p), wake_flags);
4328
4329 return success;
4330}
4331
4332static bool __task_needs_rq_lock(struct task_struct *p)
4333{
4334 unsigned int state = READ_ONCE(p->__state);
4335
4336 /*
4337 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4338 * the task is blocked. Make sure to check @state since ttwu() can drop
4339 * locks at the end, see ttwu_queue_wakelist().
4340 */
4341 if (state == TASK_RUNNING || state == TASK_WAKING)
4342 return true;
4343
4344 /*
4345 * Ensure we load p->on_rq after p->__state, otherwise it would be
4346 * possible to, falsely, observe p->on_rq == 0.
4347 *
4348 * See try_to_wake_up() for a longer comment.
4349 */
4350 smp_rmb();
4351 if (p->on_rq)
4352 return true;
4353
4354#ifdef CONFIG_SMP
4355 /*
4356 * Ensure the task has finished __schedule() and will not be referenced
4357 * anymore. Again, see try_to_wake_up() for a longer comment.
4358 */
4359 smp_rmb();
4360 smp_cond_load_acquire(&p->on_cpu, !VAL);
4361#endif
4362
4363 return false;
4364}
4365
4366/**
4367 * task_call_func - Invoke a function on task in fixed state
4368 * @p: Process for which the function is to be invoked, can be @current.
4369 * @func: Function to invoke.
4370 * @arg: Argument to function.
4371 *
4372 * Fix the task in it's current state by avoiding wakeups and or rq operations
4373 * and call @func(@arg) on it. This function can use task_is_runnable() and
4374 * task_curr() to work out what the state is, if required. Given that @func
4375 * can be invoked with a runqueue lock held, it had better be quite
4376 * lightweight.
4377 *
4378 * Returns:
4379 * Whatever @func returns
4380 */
4381int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4382{
4383 struct rq *rq = NULL;
4384 struct rq_flags rf;
4385 int ret;
4386
4387 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4388
4389 if (__task_needs_rq_lock(p))
4390 rq = __task_rq_lock(p, &rf);
4391
4392 /*
4393 * At this point the task is pinned; either:
4394 * - blocked and we're holding off wakeups (pi->lock)
4395 * - woken, and we're holding off enqueue (rq->lock)
4396 * - queued, and we're holding off schedule (rq->lock)
4397 * - running, and we're holding off de-schedule (rq->lock)
4398 *
4399 * The called function (@func) can use: task_curr(), p->on_rq and
4400 * p->__state to differentiate between these states.
4401 */
4402 ret = func(p, arg);
4403
4404 if (rq)
4405 rq_unlock(rq, &rf);
4406
4407 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4408 return ret;
4409}
4410
4411/**
4412 * cpu_curr_snapshot - Return a snapshot of the currently running task
4413 * @cpu: The CPU on which to snapshot the task.
4414 *
4415 * Returns the task_struct pointer of the task "currently" running on
4416 * the specified CPU.
4417 *
4418 * If the specified CPU was offline, the return value is whatever it
4419 * is, perhaps a pointer to the task_struct structure of that CPU's idle
4420 * task, but there is no guarantee. Callers wishing a useful return
4421 * value must take some action to ensure that the specified CPU remains
4422 * online throughout.
4423 *
4424 * This function executes full memory barriers before and after fetching
4425 * the pointer, which permits the caller to confine this function's fetch
4426 * with respect to the caller's accesses to other shared variables.
4427 */
4428struct task_struct *cpu_curr_snapshot(int cpu)
4429{
4430 struct rq *rq = cpu_rq(cpu);
4431 struct task_struct *t;
4432 struct rq_flags rf;
4433
4434 rq_lock_irqsave(rq, &rf);
4435 smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4436 t = rcu_dereference(cpu_curr(cpu));
4437 rq_unlock_irqrestore(rq, &rf);
4438 smp_mb(); /* Pairing determined by caller's synchronization design. */
4439
4440 return t;
4441}
4442
4443/**
4444 * wake_up_process - Wake up a specific process
4445 * @p: The process to be woken up.
4446 *
4447 * Attempt to wake up the nominated process and move it to the set of runnable
4448 * processes.
4449 *
4450 * Return: 1 if the process was woken up, 0 if it was already running.
4451 *
4452 * This function executes a full memory barrier before accessing the task state.
4453 */
4454int wake_up_process(struct task_struct *p)
4455{
4456 return try_to_wake_up(p, TASK_NORMAL, 0);
4457}
4458EXPORT_SYMBOL(wake_up_process);
4459
4460int wake_up_state(struct task_struct *p, unsigned int state)
4461{
4462 return try_to_wake_up(p, state, 0);
4463}
4464
4465/*
4466 * Perform scheduler related setup for a newly forked process p.
4467 * p is forked by current.
4468 *
4469 * __sched_fork() is basic setup which is also used by sched_init() to
4470 * initialize the boot CPU's idle task.
4471 */
4472static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
4473{
4474 p->on_rq = 0;
4475
4476 p->se.on_rq = 0;
4477 p->se.exec_start = 0;
4478 p->se.sum_exec_runtime = 0;
4479 p->se.prev_sum_exec_runtime = 0;
4480 p->se.nr_migrations = 0;
4481 p->se.vruntime = 0;
4482 p->se.vlag = 0;
4483 INIT_LIST_HEAD(&p->se.group_node);
4484
4485 /* A delayed task cannot be in clone(). */
4486 SCHED_WARN_ON(p->se.sched_delayed);
4487
4488#ifdef CONFIG_FAIR_GROUP_SCHED
4489 p->se.cfs_rq = NULL;
4490#endif
4491
4492#ifdef CONFIG_SCHEDSTATS
4493 /* Even if schedstat is disabled, there should not be garbage */
4494 memset(&p->stats, 0, sizeof(p->stats));
4495#endif
4496
4497 init_dl_entity(&p->dl);
4498
4499 INIT_LIST_HEAD(&p->rt.run_list);
4500 p->rt.timeout = 0;
4501 p->rt.time_slice = sched_rr_timeslice;
4502 p->rt.on_rq = 0;
4503 p->rt.on_list = 0;
4504
4505#ifdef CONFIG_SCHED_CLASS_EXT
4506 init_scx_entity(&p->scx);
4507#endif
4508
4509#ifdef CONFIG_PREEMPT_NOTIFIERS
4510 INIT_HLIST_HEAD(&p->preempt_notifiers);
4511#endif
4512
4513#ifdef CONFIG_COMPACTION
4514 p->capture_control = NULL;
4515#endif
4516 init_numa_balancing(clone_flags, p);
4517#ifdef CONFIG_SMP
4518 p->wake_entry.u_flags = CSD_TYPE_TTWU;
4519 p->migration_pending = NULL;
4520#endif
4521 init_sched_mm_cid(p);
4522}
4523
4524DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4525
4526#ifdef CONFIG_NUMA_BALANCING
4527
4528int sysctl_numa_balancing_mode;
4529
4530static void __set_numabalancing_state(bool enabled)
4531{
4532 if (enabled)
4533 static_branch_enable(&sched_numa_balancing);
4534 else
4535 static_branch_disable(&sched_numa_balancing);
4536}
4537
4538void set_numabalancing_state(bool enabled)
4539{
4540 if (enabled)
4541 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4542 else
4543 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4544 __set_numabalancing_state(enabled);
4545}
4546
4547#ifdef CONFIG_PROC_SYSCTL
4548static void reset_memory_tiering(void)
4549{
4550 struct pglist_data *pgdat;
4551
4552 for_each_online_pgdat(pgdat) {
4553 pgdat->nbp_threshold = 0;
4554 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4555 pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4556 }
4557}
4558
4559static int sysctl_numa_balancing(const struct ctl_table *table, int write,
4560 void *buffer, size_t *lenp, loff_t *ppos)
4561{
4562 struct ctl_table t;
4563 int err;
4564 int state = sysctl_numa_balancing_mode;
4565
4566 if (write && !capable(CAP_SYS_ADMIN))
4567 return -EPERM;
4568
4569 t = *table;
4570 t.data = &state;
4571 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4572 if (err < 0)
4573 return err;
4574 if (write) {
4575 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4576 (state & NUMA_BALANCING_MEMORY_TIERING))
4577 reset_memory_tiering();
4578 sysctl_numa_balancing_mode = state;
4579 __set_numabalancing_state(state);
4580 }
4581 return err;
4582}
4583#endif
4584#endif
4585
4586#ifdef CONFIG_SCHEDSTATS
4587
4588DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4589
4590static void set_schedstats(bool enabled)
4591{
4592 if (enabled)
4593 static_branch_enable(&sched_schedstats);
4594 else
4595 static_branch_disable(&sched_schedstats);
4596}
4597
4598void force_schedstat_enabled(void)
4599{
4600 if (!schedstat_enabled()) {
4601 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4602 static_branch_enable(&sched_schedstats);
4603 }
4604}
4605
4606static int __init setup_schedstats(char *str)
4607{
4608 int ret = 0;
4609 if (!str)
4610 goto out;
4611
4612 if (!strcmp(str, "enable")) {
4613 set_schedstats(true);
4614 ret = 1;
4615 } else if (!strcmp(str, "disable")) {
4616 set_schedstats(false);
4617 ret = 1;
4618 }
4619out:
4620 if (!ret)
4621 pr_warn("Unable to parse schedstats=\n");
4622
4623 return ret;
4624}
4625__setup("schedstats=", setup_schedstats);
4626
4627#ifdef CONFIG_PROC_SYSCTL
4628static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
4629 size_t *lenp, loff_t *ppos)
4630{
4631 struct ctl_table t;
4632 int err;
4633 int state = static_branch_likely(&sched_schedstats);
4634
4635 if (write && !capable(CAP_SYS_ADMIN))
4636 return -EPERM;
4637
4638 t = *table;
4639 t.data = &state;
4640 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4641 if (err < 0)
4642 return err;
4643 if (write)
4644 set_schedstats(state);
4645 return err;
4646}
4647#endif /* CONFIG_PROC_SYSCTL */
4648#endif /* CONFIG_SCHEDSTATS */
4649
4650#ifdef CONFIG_SYSCTL
4651static struct ctl_table sched_core_sysctls[] = {
4652#ifdef CONFIG_SCHEDSTATS
4653 {
4654 .procname = "sched_schedstats",
4655 .data = NULL,
4656 .maxlen = sizeof(unsigned int),
4657 .mode = 0644,
4658 .proc_handler = sysctl_schedstats,
4659 .extra1 = SYSCTL_ZERO,
4660 .extra2 = SYSCTL_ONE,
4661 },
4662#endif /* CONFIG_SCHEDSTATS */
4663#ifdef CONFIG_UCLAMP_TASK
4664 {
4665 .procname = "sched_util_clamp_min",
4666 .data = &sysctl_sched_uclamp_util_min,
4667 .maxlen = sizeof(unsigned int),
4668 .mode = 0644,
4669 .proc_handler = sysctl_sched_uclamp_handler,
4670 },
4671 {
4672 .procname = "sched_util_clamp_max",
4673 .data = &sysctl_sched_uclamp_util_max,
4674 .maxlen = sizeof(unsigned int),
4675 .mode = 0644,
4676 .proc_handler = sysctl_sched_uclamp_handler,
4677 },
4678 {
4679 .procname = "sched_util_clamp_min_rt_default",
4680 .data = &sysctl_sched_uclamp_util_min_rt_default,
4681 .maxlen = sizeof(unsigned int),
4682 .mode = 0644,
4683 .proc_handler = sysctl_sched_uclamp_handler,
4684 },
4685#endif /* CONFIG_UCLAMP_TASK */
4686#ifdef CONFIG_NUMA_BALANCING
4687 {
4688 .procname = "numa_balancing",
4689 .data = NULL, /* filled in by handler */
4690 .maxlen = sizeof(unsigned int),
4691 .mode = 0644,
4692 .proc_handler = sysctl_numa_balancing,
4693 .extra1 = SYSCTL_ZERO,
4694 .extra2 = SYSCTL_FOUR,
4695 },
4696#endif /* CONFIG_NUMA_BALANCING */
4697};
4698static int __init sched_core_sysctl_init(void)
4699{
4700 register_sysctl_init("kernel", sched_core_sysctls);
4701 return 0;
4702}
4703late_initcall(sched_core_sysctl_init);
4704#endif /* CONFIG_SYSCTL */
4705
4706/*
4707 * fork()/clone()-time setup:
4708 */
4709int sched_fork(unsigned long clone_flags, struct task_struct *p)
4710{
4711 __sched_fork(clone_flags, p);
4712 /*
4713 * We mark the process as NEW here. This guarantees that
4714 * nobody will actually run it, and a signal or other external
4715 * event cannot wake it up and insert it on the runqueue either.
4716 */
4717 p->__state = TASK_NEW;
4718
4719 /*
4720 * Make sure we do not leak PI boosting priority to the child.
4721 */
4722 p->prio = current->normal_prio;
4723
4724 uclamp_fork(p);
4725
4726 /*
4727 * Revert to default priority/policy on fork if requested.
4728 */
4729 if (unlikely(p->sched_reset_on_fork)) {
4730 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4731 p->policy = SCHED_NORMAL;
4732 p->static_prio = NICE_TO_PRIO(0);
4733 p->rt_priority = 0;
4734 } else if (PRIO_TO_NICE(p->static_prio) < 0)
4735 p->static_prio = NICE_TO_PRIO(0);
4736
4737 p->prio = p->normal_prio = p->static_prio;
4738 set_load_weight(p, false);
4739 p->se.custom_slice = 0;
4740 p->se.slice = sysctl_sched_base_slice;
4741
4742 /*
4743 * We don't need the reset flag anymore after the fork. It has
4744 * fulfilled its duty:
4745 */
4746 p->sched_reset_on_fork = 0;
4747 }
4748
4749 if (dl_prio(p->prio))
4750 return -EAGAIN;
4751
4752 scx_pre_fork(p);
4753
4754 if (rt_prio(p->prio)) {
4755 p->sched_class = &rt_sched_class;
4756#ifdef CONFIG_SCHED_CLASS_EXT
4757 } else if (task_should_scx(p->policy)) {
4758 p->sched_class = &ext_sched_class;
4759#endif
4760 } else {
4761 p->sched_class = &fair_sched_class;
4762 }
4763
4764 init_entity_runnable_average(&p->se);
4765
4766
4767#ifdef CONFIG_SCHED_INFO
4768 if (likely(sched_info_on()))
4769 memset(&p->sched_info, 0, sizeof(p->sched_info));
4770#endif
4771#if defined(CONFIG_SMP)
4772 p->on_cpu = 0;
4773#endif
4774 init_task_preempt_count(p);
4775#ifdef CONFIG_SMP
4776 plist_node_init(&p->pushable_tasks, MAX_PRIO);
4777 RB_CLEAR_NODE(&p->pushable_dl_tasks);
4778#endif
4779 return 0;
4780}
4781
4782int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4783{
4784 unsigned long flags;
4785
4786 /*
4787 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4788 * required yet, but lockdep gets upset if rules are violated.
4789 */
4790 raw_spin_lock_irqsave(&p->pi_lock, flags);
4791#ifdef CONFIG_CGROUP_SCHED
4792 if (1) {
4793 struct task_group *tg;
4794 tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4795 struct task_group, css);
4796 tg = autogroup_task_group(p, tg);
4797 p->sched_task_group = tg;
4798 }
4799#endif
4800 rseq_migrate(p);
4801 /*
4802 * We're setting the CPU for the first time, we don't migrate,
4803 * so use __set_task_cpu().
4804 */
4805 __set_task_cpu(p, smp_processor_id());
4806 if (p->sched_class->task_fork)
4807 p->sched_class->task_fork(p);
4808 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4809
4810 return scx_fork(p);
4811}
4812
4813void sched_cancel_fork(struct task_struct *p)
4814{
4815 scx_cancel_fork(p);
4816}
4817
4818void sched_post_fork(struct task_struct *p)
4819{
4820 uclamp_post_fork(p);
4821 scx_post_fork(p);
4822}
4823
4824unsigned long to_ratio(u64 period, u64 runtime)
4825{
4826 if (runtime == RUNTIME_INF)
4827 return BW_UNIT;
4828
4829 /*
4830 * Doing this here saves a lot of checks in all
4831 * the calling paths, and returning zero seems
4832 * safe for them anyway.
4833 */
4834 if (period == 0)
4835 return 0;
4836
4837 return div64_u64(runtime << BW_SHIFT, period);
4838}
4839
4840/*
4841 * wake_up_new_task - wake up a newly created task for the first time.
4842 *
4843 * This function will do some initial scheduler statistics housekeeping
4844 * that must be done for every newly created context, then puts the task
4845 * on the runqueue and wakes it.
4846 */
4847void wake_up_new_task(struct task_struct *p)
4848{
4849 struct rq_flags rf;
4850 struct rq *rq;
4851 int wake_flags = WF_FORK;
4852
4853 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4854 WRITE_ONCE(p->__state, TASK_RUNNING);
4855#ifdef CONFIG_SMP
4856 /*
4857 * Fork balancing, do it here and not earlier because:
4858 * - cpus_ptr can change in the fork path
4859 * - any previously selected CPU might disappear through hotplug
4860 *
4861 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4862 * as we're not fully set-up yet.
4863 */
4864 p->recent_used_cpu = task_cpu(p);
4865 rseq_migrate(p);
4866 __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
4867#endif
4868 rq = __task_rq_lock(p, &rf);
4869 update_rq_clock(rq);
4870 post_init_entity_util_avg(p);
4871
4872 activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
4873 trace_sched_wakeup_new(p);
4874 wakeup_preempt(rq, p, wake_flags);
4875#ifdef CONFIG_SMP
4876 if (p->sched_class->task_woken) {
4877 /*
4878 * Nothing relies on rq->lock after this, so it's fine to
4879 * drop it.
4880 */
4881 rq_unpin_lock(rq, &rf);
4882 p->sched_class->task_woken(rq, p);
4883 rq_repin_lock(rq, &rf);
4884 }
4885#endif
4886 task_rq_unlock(rq, p, &rf);
4887}
4888
4889#ifdef CONFIG_PREEMPT_NOTIFIERS
4890
4891static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4892
4893void preempt_notifier_inc(void)
4894{
4895 static_branch_inc(&preempt_notifier_key);
4896}
4897EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4898
4899void preempt_notifier_dec(void)
4900{
4901 static_branch_dec(&preempt_notifier_key);
4902}
4903EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4904
4905/**
4906 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4907 * @notifier: notifier struct to register
4908 */
4909void preempt_notifier_register(struct preempt_notifier *notifier)
4910{
4911 if (!static_branch_unlikely(&preempt_notifier_key))
4912 WARN(1, "registering preempt_notifier while notifiers disabled\n");
4913
4914 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers);
4915}
4916EXPORT_SYMBOL_GPL(preempt_notifier_register);
4917
4918/**
4919 * preempt_notifier_unregister - no longer interested in preemption notifications
4920 * @notifier: notifier struct to unregister
4921 *
4922 * This is *not* safe to call from within a preemption notifier.
4923 */
4924void preempt_notifier_unregister(struct preempt_notifier *notifier)
4925{
4926 hlist_del(¬ifier->link);
4927}
4928EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
4929
4930static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4931{
4932 struct preempt_notifier *notifier;
4933
4934 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4935 notifier->ops->sched_in(notifier, raw_smp_processor_id());
4936}
4937
4938static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4939{
4940 if (static_branch_unlikely(&preempt_notifier_key))
4941 __fire_sched_in_preempt_notifiers(curr);
4942}
4943
4944static void
4945__fire_sched_out_preempt_notifiers(struct task_struct *curr,
4946 struct task_struct *next)
4947{
4948 struct preempt_notifier *notifier;
4949
4950 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4951 notifier->ops->sched_out(notifier, next);
4952}
4953
4954static __always_inline void
4955fire_sched_out_preempt_notifiers(struct task_struct *curr,
4956 struct task_struct *next)
4957{
4958 if (static_branch_unlikely(&preempt_notifier_key))
4959 __fire_sched_out_preempt_notifiers(curr, next);
4960}
4961
4962#else /* !CONFIG_PREEMPT_NOTIFIERS */
4963
4964static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4965{
4966}
4967
4968static inline void
4969fire_sched_out_preempt_notifiers(struct task_struct *curr,
4970 struct task_struct *next)
4971{
4972}
4973
4974#endif /* CONFIG_PREEMPT_NOTIFIERS */
4975
4976static inline void prepare_task(struct task_struct *next)
4977{
4978#ifdef CONFIG_SMP
4979 /*
4980 * Claim the task as running, we do this before switching to it
4981 * such that any running task will have this set.
4982 *
4983 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4984 * its ordering comment.
4985 */
4986 WRITE_ONCE(next->on_cpu, 1);
4987#endif
4988}
4989
4990static inline void finish_task(struct task_struct *prev)
4991{
4992#ifdef CONFIG_SMP
4993 /*
4994 * This must be the very last reference to @prev from this CPU. After
4995 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4996 * must ensure this doesn't happen until the switch is completely
4997 * finished.
4998 *
4999 * In particular, the load of prev->state in finish_task_switch() must
5000 * happen before this.
5001 *
5002 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
5003 */
5004 smp_store_release(&prev->on_cpu, 0);
5005#endif
5006}
5007
5008#ifdef CONFIG_SMP
5009
5010static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
5011{
5012 void (*func)(struct rq *rq);
5013 struct balance_callback *next;
5014
5015 lockdep_assert_rq_held(rq);
5016
5017 while (head) {
5018 func = (void (*)(struct rq *))head->func;
5019 next = head->next;
5020 head->next = NULL;
5021 head = next;
5022
5023 func(rq);
5024 }
5025}
5026
5027static void balance_push(struct rq *rq);
5028
5029/*
5030 * balance_push_callback is a right abuse of the callback interface and plays
5031 * by significantly different rules.
5032 *
5033 * Where the normal balance_callback's purpose is to be ran in the same context
5034 * that queued it (only later, when it's safe to drop rq->lock again),
5035 * balance_push_callback is specifically targeted at __schedule().
5036 *
5037 * This abuse is tolerated because it places all the unlikely/odd cases behind
5038 * a single test, namely: rq->balance_callback == NULL.
5039 */
5040struct balance_callback balance_push_callback = {
5041 .next = NULL,
5042 .func = balance_push,
5043};
5044
5045static inline struct balance_callback *
5046__splice_balance_callbacks(struct rq *rq, bool split)
5047{
5048 struct balance_callback *head = rq->balance_callback;
5049
5050 if (likely(!head))
5051 return NULL;
5052
5053 lockdep_assert_rq_held(rq);
5054 /*
5055 * Must not take balance_push_callback off the list when
5056 * splice_balance_callbacks() and balance_callbacks() are not
5057 * in the same rq->lock section.
5058 *
5059 * In that case it would be possible for __schedule() to interleave
5060 * and observe the list empty.
5061 */
5062 if (split && head == &balance_push_callback)
5063 head = NULL;
5064 else
5065 rq->balance_callback = NULL;
5066
5067 return head;
5068}
5069
5070struct balance_callback *splice_balance_callbacks(struct rq *rq)
5071{
5072 return __splice_balance_callbacks(rq, true);
5073}
5074
5075static void __balance_callbacks(struct rq *rq)
5076{
5077 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
5078}
5079
5080void balance_callbacks(struct rq *rq, struct balance_callback *head)
5081{
5082 unsigned long flags;
5083
5084 if (unlikely(head)) {
5085 raw_spin_rq_lock_irqsave(rq, flags);
5086 do_balance_callbacks(rq, head);
5087 raw_spin_rq_unlock_irqrestore(rq, flags);
5088 }
5089}
5090
5091#else
5092
5093static inline void __balance_callbacks(struct rq *rq)
5094{
5095}
5096
5097#endif
5098
5099static inline void
5100prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5101{
5102 /*
5103 * Since the runqueue lock will be released by the next
5104 * task (which is an invalid locking op but in the case
5105 * of the scheduler it's an obvious special-case), so we
5106 * do an early lockdep release here:
5107 */
5108 rq_unpin_lock(rq, rf);
5109 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5110#ifdef CONFIG_DEBUG_SPINLOCK
5111 /* this is a valid case when another task releases the spinlock */
5112 rq_lockp(rq)->owner = next;
5113#endif
5114}
5115
5116static inline void finish_lock_switch(struct rq *rq)
5117{
5118 /*
5119 * If we are tracking spinlock dependencies then we have to
5120 * fix up the runqueue lock - which gets 'carried over' from
5121 * prev into current:
5122 */
5123 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5124 __balance_callbacks(rq);
5125 raw_spin_rq_unlock_irq(rq);
5126}
5127
5128/*
5129 * NOP if the arch has not defined these:
5130 */
5131
5132#ifndef prepare_arch_switch
5133# define prepare_arch_switch(next) do { } while (0)
5134#endif
5135
5136#ifndef finish_arch_post_lock_switch
5137# define finish_arch_post_lock_switch() do { } while (0)
5138#endif
5139
5140static inline void kmap_local_sched_out(void)
5141{
5142#ifdef CONFIG_KMAP_LOCAL
5143 if (unlikely(current->kmap_ctrl.idx))
5144 __kmap_local_sched_out();
5145#endif
5146}
5147
5148static inline void kmap_local_sched_in(void)
5149{
5150#ifdef CONFIG_KMAP_LOCAL
5151 if (unlikely(current->kmap_ctrl.idx))
5152 __kmap_local_sched_in();
5153#endif
5154}
5155
5156/**
5157 * prepare_task_switch - prepare to switch tasks
5158 * @rq: the runqueue preparing to switch
5159 * @prev: the current task that is being switched out
5160 * @next: the task we are going to switch to.
5161 *
5162 * This is called with the rq lock held and interrupts off. It must
5163 * be paired with a subsequent finish_task_switch after the context
5164 * switch.
5165 *
5166 * prepare_task_switch sets up locking and calls architecture specific
5167 * hooks.
5168 */
5169static inline void
5170prepare_task_switch(struct rq *rq, struct task_struct *prev,
5171 struct task_struct *next)
5172{
5173 kcov_prepare_switch(prev);
5174 sched_info_switch(rq, prev, next);
5175 perf_event_task_sched_out(prev, next);
5176 rseq_preempt(prev);
5177 fire_sched_out_preempt_notifiers(prev, next);
5178 kmap_local_sched_out();
5179 prepare_task(next);
5180 prepare_arch_switch(next);
5181}
5182
5183/**
5184 * finish_task_switch - clean up after a task-switch
5185 * @prev: the thread we just switched away from.
5186 *
5187 * finish_task_switch must be called after the context switch, paired
5188 * with a prepare_task_switch call before the context switch.
5189 * finish_task_switch will reconcile locking set up by prepare_task_switch,
5190 * and do any other architecture-specific cleanup actions.
5191 *
5192 * Note that we may have delayed dropping an mm in context_switch(). If
5193 * so, we finish that here outside of the runqueue lock. (Doing it
5194 * with the lock held can cause deadlocks; see schedule() for
5195 * details.)
5196 *
5197 * The context switch have flipped the stack from under us and restored the
5198 * local variables which were saved when this task called schedule() in the
5199 * past. 'prev == current' is still correct but we need to recalculate this_rq
5200 * because prev may have moved to another CPU.
5201 */
5202static struct rq *finish_task_switch(struct task_struct *prev)
5203 __releases(rq->lock)
5204{
5205 struct rq *rq = this_rq();
5206 struct mm_struct *mm = rq->prev_mm;
5207 unsigned int prev_state;
5208
5209 /*
5210 * The previous task will have left us with a preempt_count of 2
5211 * because it left us after:
5212 *
5213 * schedule()
5214 * preempt_disable(); // 1
5215 * __schedule()
5216 * raw_spin_lock_irq(&rq->lock) // 2
5217 *
5218 * Also, see FORK_PREEMPT_COUNT.
5219 */
5220 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5221 "corrupted preempt_count: %s/%d/0x%x\n",
5222 current->comm, current->pid, preempt_count()))
5223 preempt_count_set(FORK_PREEMPT_COUNT);
5224
5225 rq->prev_mm = NULL;
5226
5227 /*
5228 * A task struct has one reference for the use as "current".
5229 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5230 * schedule one last time. The schedule call will never return, and
5231 * the scheduled task must drop that reference.
5232 *
5233 * We must observe prev->state before clearing prev->on_cpu (in
5234 * finish_task), otherwise a concurrent wakeup can get prev
5235 * running on another CPU and we could rave with its RUNNING -> DEAD
5236 * transition, resulting in a double drop.
5237 */
5238 prev_state = READ_ONCE(prev->__state);
5239 vtime_task_switch(prev);
5240 perf_event_task_sched_in(prev, current);
5241 finish_task(prev);
5242 tick_nohz_task_switch();
5243 finish_lock_switch(rq);
5244 finish_arch_post_lock_switch();
5245 kcov_finish_switch(current);
5246 /*
5247 * kmap_local_sched_out() is invoked with rq::lock held and
5248 * interrupts disabled. There is no requirement for that, but the
5249 * sched out code does not have an interrupt enabled section.
5250 * Restoring the maps on sched in does not require interrupts being
5251 * disabled either.
5252 */
5253 kmap_local_sched_in();
5254
5255 fire_sched_in_preempt_notifiers(current);
5256 /*
5257 * When switching through a kernel thread, the loop in
5258 * membarrier_{private,global}_expedited() may have observed that
5259 * kernel thread and not issued an IPI. It is therefore possible to
5260 * schedule between user->kernel->user threads without passing though
5261 * switch_mm(). Membarrier requires a barrier after storing to
5262 * rq->curr, before returning to userspace, so provide them here:
5263 *
5264 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5265 * provided by mmdrop_lazy_tlb(),
5266 * - a sync_core for SYNC_CORE.
5267 */
5268 if (mm) {
5269 membarrier_mm_sync_core_before_usermode(mm);
5270 mmdrop_lazy_tlb_sched(mm);
5271 }
5272
5273 if (unlikely(prev_state == TASK_DEAD)) {
5274 if (prev->sched_class->task_dead)
5275 prev->sched_class->task_dead(prev);
5276
5277 /* Task is done with its stack. */
5278 put_task_stack(prev);
5279
5280 put_task_struct_rcu_user(prev);
5281 }
5282
5283 return rq;
5284}
5285
5286/**
5287 * schedule_tail - first thing a freshly forked thread must call.
5288 * @prev: the thread we just switched away from.
5289 */
5290asmlinkage __visible void schedule_tail(struct task_struct *prev)
5291 __releases(rq->lock)
5292{
5293 /*
5294 * New tasks start with FORK_PREEMPT_COUNT, see there and
5295 * finish_task_switch() for details.
5296 *
5297 * finish_task_switch() will drop rq->lock() and lower preempt_count
5298 * and the preempt_enable() will end up enabling preemption (on
5299 * PREEMPT_COUNT kernels).
5300 */
5301
5302 finish_task_switch(prev);
5303 preempt_enable();
5304
5305 if (current->set_child_tid)
5306 put_user(task_pid_vnr(current), current->set_child_tid);
5307
5308 calculate_sigpending();
5309}
5310
5311/*
5312 * context_switch - switch to the new MM and the new thread's register state.
5313 */
5314static __always_inline struct rq *
5315context_switch(struct rq *rq, struct task_struct *prev,
5316 struct task_struct *next, struct rq_flags *rf)
5317{
5318 prepare_task_switch(rq, prev, next);
5319
5320 /*
5321 * For paravirt, this is coupled with an exit in switch_to to
5322 * combine the page table reload and the switch backend into
5323 * one hypercall.
5324 */
5325 arch_start_context_switch(prev);
5326
5327 /*
5328 * kernel -> kernel lazy + transfer active
5329 * user -> kernel lazy + mmgrab_lazy_tlb() active
5330 *
5331 * kernel -> user switch + mmdrop_lazy_tlb() active
5332 * user -> user switch
5333 *
5334 * switch_mm_cid() needs to be updated if the barriers provided
5335 * by context_switch() are modified.
5336 */
5337 if (!next->mm) { // to kernel
5338 enter_lazy_tlb(prev->active_mm, next);
5339
5340 next->active_mm = prev->active_mm;
5341 if (prev->mm) // from user
5342 mmgrab_lazy_tlb(prev->active_mm);
5343 else
5344 prev->active_mm = NULL;
5345 } else { // to user
5346 membarrier_switch_mm(rq, prev->active_mm, next->mm);
5347 /*
5348 * sys_membarrier() requires an smp_mb() between setting
5349 * rq->curr / membarrier_switch_mm() and returning to userspace.
5350 *
5351 * The below provides this either through switch_mm(), or in
5352 * case 'prev->active_mm == next->mm' through
5353 * finish_task_switch()'s mmdrop().
5354 */
5355 switch_mm_irqs_off(prev->active_mm, next->mm, next);
5356 lru_gen_use_mm(next->mm);
5357
5358 if (!prev->mm) { // from kernel
5359 /* will mmdrop_lazy_tlb() in finish_task_switch(). */
5360 rq->prev_mm = prev->active_mm;
5361 prev->active_mm = NULL;
5362 }
5363 }
5364
5365 /* switch_mm_cid() requires the memory barriers above. */
5366 switch_mm_cid(rq, prev, next);
5367
5368 prepare_lock_switch(rq, next, rf);
5369
5370 /* Here we just switch the register state and the stack. */
5371 switch_to(prev, next, prev);
5372 barrier();
5373
5374 return finish_task_switch(prev);
5375}
5376
5377/*
5378 * nr_running and nr_context_switches:
5379 *
5380 * externally visible scheduler statistics: current number of runnable
5381 * threads, total number of context switches performed since bootup.
5382 */
5383unsigned int nr_running(void)
5384{
5385 unsigned int i, sum = 0;
5386
5387 for_each_online_cpu(i)
5388 sum += cpu_rq(i)->nr_running;
5389
5390 return sum;
5391}
5392
5393/*
5394 * Check if only the current task is running on the CPU.
5395 *
5396 * Caution: this function does not check that the caller has disabled
5397 * preemption, thus the result might have a time-of-check-to-time-of-use
5398 * race. The caller is responsible to use it correctly, for example:
5399 *
5400 * - from a non-preemptible section (of course)
5401 *
5402 * - from a thread that is bound to a single CPU
5403 *
5404 * - in a loop with very short iterations (e.g. a polling loop)
5405 */
5406bool single_task_running(void)
5407{
5408 return raw_rq()->nr_running == 1;
5409}
5410EXPORT_SYMBOL(single_task_running);
5411
5412unsigned long long nr_context_switches_cpu(int cpu)
5413{
5414 return cpu_rq(cpu)->nr_switches;
5415}
5416
5417unsigned long long nr_context_switches(void)
5418{
5419 int i;
5420 unsigned long long sum = 0;
5421
5422 for_each_possible_cpu(i)
5423 sum += cpu_rq(i)->nr_switches;
5424
5425 return sum;
5426}
5427
5428/*
5429 * Consumers of these two interfaces, like for example the cpuidle menu
5430 * governor, are using nonsensical data. Preferring shallow idle state selection
5431 * for a CPU that has IO-wait which might not even end up running the task when
5432 * it does become runnable.
5433 */
5434
5435unsigned int nr_iowait_cpu(int cpu)
5436{
5437 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5438}
5439
5440/*
5441 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5442 *
5443 * The idea behind IO-wait account is to account the idle time that we could
5444 * have spend running if it were not for IO. That is, if we were to improve the
5445 * storage performance, we'd have a proportional reduction in IO-wait time.
5446 *
5447 * This all works nicely on UP, where, when a task blocks on IO, we account
5448 * idle time as IO-wait, because if the storage were faster, it could've been
5449 * running and we'd not be idle.
5450 *
5451 * This has been extended to SMP, by doing the same for each CPU. This however
5452 * is broken.
5453 *
5454 * Imagine for instance the case where two tasks block on one CPU, only the one
5455 * CPU will have IO-wait accounted, while the other has regular idle. Even
5456 * though, if the storage were faster, both could've ran at the same time,
5457 * utilising both CPUs.
5458 *
5459 * This means, that when looking globally, the current IO-wait accounting on
5460 * SMP is a lower bound, by reason of under accounting.
5461 *
5462 * Worse, since the numbers are provided per CPU, they are sometimes
5463 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5464 * associated with any one particular CPU, it can wake to another CPU than it
5465 * blocked on. This means the per CPU IO-wait number is meaningless.
5466 *
5467 * Task CPU affinities can make all that even more 'interesting'.
5468 */
5469
5470unsigned int nr_iowait(void)
5471{
5472 unsigned int i, sum = 0;
5473
5474 for_each_possible_cpu(i)
5475 sum += nr_iowait_cpu(i);
5476
5477 return sum;
5478}
5479
5480#ifdef CONFIG_SMP
5481
5482/*
5483 * sched_exec - execve() is a valuable balancing opportunity, because at
5484 * this point the task has the smallest effective memory and cache footprint.
5485 */
5486void sched_exec(void)
5487{
5488 struct task_struct *p = current;
5489 struct migration_arg arg;
5490 int dest_cpu;
5491
5492 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5493 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5494 if (dest_cpu == smp_processor_id())
5495 return;
5496
5497 if (unlikely(!cpu_active(dest_cpu)))
5498 return;
5499
5500 arg = (struct migration_arg){ p, dest_cpu };
5501 }
5502 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5503}
5504
5505#endif
5506
5507DEFINE_PER_CPU(struct kernel_stat, kstat);
5508DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5509
5510EXPORT_PER_CPU_SYMBOL(kstat);
5511EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5512
5513/*
5514 * The function fair_sched_class.update_curr accesses the struct curr
5515 * and its field curr->exec_start; when called from task_sched_runtime(),
5516 * we observe a high rate of cache misses in practice.
5517 * Prefetching this data results in improved performance.
5518 */
5519static inline void prefetch_curr_exec_start(struct task_struct *p)
5520{
5521#ifdef CONFIG_FAIR_GROUP_SCHED
5522 struct sched_entity *curr = p->se.cfs_rq->curr;
5523#else
5524 struct sched_entity *curr = task_rq(p)->cfs.curr;
5525#endif
5526 prefetch(curr);
5527 prefetch(&curr->exec_start);
5528}
5529
5530/*
5531 * Return accounted runtime for the task.
5532 * In case the task is currently running, return the runtime plus current's
5533 * pending runtime that have not been accounted yet.
5534 */
5535unsigned long long task_sched_runtime(struct task_struct *p)
5536{
5537 struct rq_flags rf;
5538 struct rq *rq;
5539 u64 ns;
5540
5541#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5542 /*
5543 * 64-bit doesn't need locks to atomically read a 64-bit value.
5544 * So we have a optimization chance when the task's delta_exec is 0.
5545 * Reading ->on_cpu is racy, but this is OK.
5546 *
5547 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5548 * If we race with it entering CPU, unaccounted time is 0. This is
5549 * indistinguishable from the read occurring a few cycles earlier.
5550 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5551 * been accounted, so we're correct here as well.
5552 */
5553 if (!p->on_cpu || !task_on_rq_queued(p))
5554 return p->se.sum_exec_runtime;
5555#endif
5556
5557 rq = task_rq_lock(p, &rf);
5558 /*
5559 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5560 * project cycles that may never be accounted to this
5561 * thread, breaking clock_gettime().
5562 */
5563 if (task_current_donor(rq, p) && task_on_rq_queued(p)) {
5564 prefetch_curr_exec_start(p);
5565 update_rq_clock(rq);
5566 p->sched_class->update_curr(rq);
5567 }
5568 ns = p->se.sum_exec_runtime;
5569 task_rq_unlock(rq, p, &rf);
5570
5571 return ns;
5572}
5573
5574#ifdef CONFIG_SCHED_DEBUG
5575static u64 cpu_resched_latency(struct rq *rq)
5576{
5577 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5578 u64 resched_latency, now = rq_clock(rq);
5579 static bool warned_once;
5580
5581 if (sysctl_resched_latency_warn_once && warned_once)
5582 return 0;
5583
5584 if (!need_resched() || !latency_warn_ms)
5585 return 0;
5586
5587 if (system_state == SYSTEM_BOOTING)
5588 return 0;
5589
5590 if (!rq->last_seen_need_resched_ns) {
5591 rq->last_seen_need_resched_ns = now;
5592 rq->ticks_without_resched = 0;
5593 return 0;
5594 }
5595
5596 rq->ticks_without_resched++;
5597 resched_latency = now - rq->last_seen_need_resched_ns;
5598 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5599 return 0;
5600
5601 warned_once = true;
5602
5603 return resched_latency;
5604}
5605
5606static int __init setup_resched_latency_warn_ms(char *str)
5607{
5608 long val;
5609
5610 if ((kstrtol(str, 0, &val))) {
5611 pr_warn("Unable to set resched_latency_warn_ms\n");
5612 return 1;
5613 }
5614
5615 sysctl_resched_latency_warn_ms = val;
5616 return 1;
5617}
5618__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
5619#else
5620static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
5621#endif /* CONFIG_SCHED_DEBUG */
5622
5623/*
5624 * This function gets called by the timer code, with HZ frequency.
5625 * We call it with interrupts disabled.
5626 */
5627void sched_tick(void)
5628{
5629 int cpu = smp_processor_id();
5630 struct rq *rq = cpu_rq(cpu);
5631 /* accounting goes to the donor task */
5632 struct task_struct *donor;
5633 struct rq_flags rf;
5634 unsigned long hw_pressure;
5635 u64 resched_latency;
5636
5637 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5638 arch_scale_freq_tick();
5639
5640 sched_clock_tick();
5641
5642 rq_lock(rq, &rf);
5643 donor = rq->donor;
5644
5645 psi_account_irqtime(rq, donor, NULL);
5646
5647 update_rq_clock(rq);
5648 hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
5649 update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5650
5651 if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY))
5652 resched_curr(rq);
5653
5654 donor->sched_class->task_tick(rq, donor, 0);
5655 if (sched_feat(LATENCY_WARN))
5656 resched_latency = cpu_resched_latency(rq);
5657 calc_global_load_tick(rq);
5658 sched_core_tick(rq);
5659 task_tick_mm_cid(rq, donor);
5660 scx_tick(rq);
5661
5662 rq_unlock(rq, &rf);
5663
5664 if (sched_feat(LATENCY_WARN) && resched_latency)
5665 resched_latency_warn(cpu, resched_latency);
5666
5667 perf_event_task_tick();
5668
5669 if (donor->flags & PF_WQ_WORKER)
5670 wq_worker_tick(donor);
5671
5672#ifdef CONFIG_SMP
5673 if (!scx_switched_all()) {
5674 rq->idle_balance = idle_cpu(cpu);
5675 sched_balance_trigger(rq);
5676 }
5677#endif
5678}
5679
5680#ifdef CONFIG_NO_HZ_FULL
5681
5682struct tick_work {
5683 int cpu;
5684 atomic_t state;
5685 struct delayed_work work;
5686};
5687/* Values for ->state, see diagram below. */
5688#define TICK_SCHED_REMOTE_OFFLINE 0
5689#define TICK_SCHED_REMOTE_OFFLINING 1
5690#define TICK_SCHED_REMOTE_RUNNING 2
5691
5692/*
5693 * State diagram for ->state:
5694 *
5695 *
5696 * TICK_SCHED_REMOTE_OFFLINE
5697 * | ^
5698 * | |
5699 * | | sched_tick_remote()
5700 * | |
5701 * | |
5702 * +--TICK_SCHED_REMOTE_OFFLINING
5703 * | ^
5704 * | |
5705 * sched_tick_start() | | sched_tick_stop()
5706 * | |
5707 * V |
5708 * TICK_SCHED_REMOTE_RUNNING
5709 *
5710 *
5711 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5712 * and sched_tick_start() are happy to leave the state in RUNNING.
5713 */
5714
5715static struct tick_work __percpu *tick_work_cpu;
5716
5717static void sched_tick_remote(struct work_struct *work)
5718{
5719 struct delayed_work *dwork = to_delayed_work(work);
5720 struct tick_work *twork = container_of(dwork, struct tick_work, work);
5721 int cpu = twork->cpu;
5722 struct rq *rq = cpu_rq(cpu);
5723 int os;
5724
5725 /*
5726 * Handle the tick only if it appears the remote CPU is running in full
5727 * dynticks mode. The check is racy by nature, but missing a tick or
5728 * having one too much is no big deal because the scheduler tick updates
5729 * statistics and checks timeslices in a time-independent way, regardless
5730 * of when exactly it is running.
5731 */
5732 if (tick_nohz_tick_stopped_cpu(cpu)) {
5733 guard(rq_lock_irq)(rq);
5734 struct task_struct *curr = rq->curr;
5735
5736 if (cpu_online(cpu)) {
5737 /*
5738 * Since this is a remote tick for full dynticks mode,
5739 * we are always sure that there is no proxy (only a
5740 * single task is running).
5741 */
5742 SCHED_WARN_ON(rq->curr != rq->donor);
5743 update_rq_clock(rq);
5744
5745 if (!is_idle_task(curr)) {
5746 /*
5747 * Make sure the next tick runs within a
5748 * reasonable amount of time.
5749 */
5750 u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5751 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5752 }
5753 curr->sched_class->task_tick(rq, curr, 0);
5754
5755 calc_load_nohz_remote(rq);
5756 }
5757 }
5758
5759 /*
5760 * Run the remote tick once per second (1Hz). This arbitrary
5761 * frequency is large enough to avoid overload but short enough
5762 * to keep scheduler internal stats reasonably up to date. But
5763 * first update state to reflect hotplug activity if required.
5764 */
5765 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5766 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5767 if (os == TICK_SCHED_REMOTE_RUNNING)
5768 queue_delayed_work(system_unbound_wq, dwork, HZ);
5769}
5770
5771static void sched_tick_start(int cpu)
5772{
5773 int os;
5774 struct tick_work *twork;
5775
5776 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5777 return;
5778
5779 WARN_ON_ONCE(!tick_work_cpu);
5780
5781 twork = per_cpu_ptr(tick_work_cpu, cpu);
5782 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5783 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5784 if (os == TICK_SCHED_REMOTE_OFFLINE) {
5785 twork->cpu = cpu;
5786 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5787 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5788 }
5789}
5790
5791#ifdef CONFIG_HOTPLUG_CPU
5792static void sched_tick_stop(int cpu)
5793{
5794 struct tick_work *twork;
5795 int os;
5796
5797 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5798 return;
5799
5800 WARN_ON_ONCE(!tick_work_cpu);
5801
5802 twork = per_cpu_ptr(tick_work_cpu, cpu);
5803 /* There cannot be competing actions, but don't rely on stop-machine. */
5804 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5805 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5806 /* Don't cancel, as this would mess up the state machine. */
5807}
5808#endif /* CONFIG_HOTPLUG_CPU */
5809
5810int __init sched_tick_offload_init(void)
5811{
5812 tick_work_cpu = alloc_percpu(struct tick_work);
5813 BUG_ON(!tick_work_cpu);
5814 return 0;
5815}
5816
5817#else /* !CONFIG_NO_HZ_FULL */
5818static inline void sched_tick_start(int cpu) { }
5819static inline void sched_tick_stop(int cpu) { }
5820#endif
5821
5822#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5823 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5824/*
5825 * If the value passed in is equal to the current preempt count
5826 * then we just disabled preemption. Start timing the latency.
5827 */
5828static inline void preempt_latency_start(int val)
5829{
5830 if (preempt_count() == val) {
5831 unsigned long ip = get_lock_parent_ip();
5832#ifdef CONFIG_DEBUG_PREEMPT
5833 current->preempt_disable_ip = ip;
5834#endif
5835 trace_preempt_off(CALLER_ADDR0, ip);
5836 }
5837}
5838
5839void preempt_count_add(int val)
5840{
5841#ifdef CONFIG_DEBUG_PREEMPT
5842 /*
5843 * Underflow?
5844 */
5845 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5846 return;
5847#endif
5848 __preempt_count_add(val);
5849#ifdef CONFIG_DEBUG_PREEMPT
5850 /*
5851 * Spinlock count overflowing soon?
5852 */
5853 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5854 PREEMPT_MASK - 10);
5855#endif
5856 preempt_latency_start(val);
5857}
5858EXPORT_SYMBOL(preempt_count_add);
5859NOKPROBE_SYMBOL(preempt_count_add);
5860
5861/*
5862 * If the value passed in equals to the current preempt count
5863 * then we just enabled preemption. Stop timing the latency.
5864 */
5865static inline void preempt_latency_stop(int val)
5866{
5867 if (preempt_count() == val)
5868 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5869}
5870
5871void preempt_count_sub(int val)
5872{
5873#ifdef CONFIG_DEBUG_PREEMPT
5874 /*
5875 * Underflow?
5876 */
5877 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5878 return;
5879 /*
5880 * Is the spinlock portion underflowing?
5881 */
5882 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5883 !(preempt_count() & PREEMPT_MASK)))
5884 return;
5885#endif
5886
5887 preempt_latency_stop(val);
5888 __preempt_count_sub(val);
5889}
5890EXPORT_SYMBOL(preempt_count_sub);
5891NOKPROBE_SYMBOL(preempt_count_sub);
5892
5893#else
5894static inline void preempt_latency_start(int val) { }
5895static inline void preempt_latency_stop(int val) { }
5896#endif
5897
5898static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5899{
5900#ifdef CONFIG_DEBUG_PREEMPT
5901 return p->preempt_disable_ip;
5902#else
5903 return 0;
5904#endif
5905}
5906
5907/*
5908 * Print scheduling while atomic bug:
5909 */
5910static noinline void __schedule_bug(struct task_struct *prev)
5911{
5912 /* Save this before calling printk(), since that will clobber it */
5913 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5914
5915 if (oops_in_progress)
5916 return;
5917
5918 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5919 prev->comm, prev->pid, preempt_count());
5920
5921 debug_show_held_locks(prev);
5922 print_modules();
5923 if (irqs_disabled())
5924 print_irqtrace_events(prev);
5925 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
5926 pr_err("Preemption disabled at:");
5927 print_ip_sym(KERN_ERR, preempt_disable_ip);
5928 }
5929 check_panic_on_warn("scheduling while atomic");
5930
5931 dump_stack();
5932 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5933}
5934
5935/*
5936 * Various schedule()-time debugging checks and statistics:
5937 */
5938static inline void schedule_debug(struct task_struct *prev, bool preempt)
5939{
5940#ifdef CONFIG_SCHED_STACK_END_CHECK
5941 if (task_stack_end_corrupted(prev))
5942 panic("corrupted stack end detected inside scheduler\n");
5943
5944 if (task_scs_end_corrupted(prev))
5945 panic("corrupted shadow stack detected inside scheduler\n");
5946#endif
5947
5948#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5949 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5950 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5951 prev->comm, prev->pid, prev->non_block_count);
5952 dump_stack();
5953 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5954 }
5955#endif
5956
5957 if (unlikely(in_atomic_preempt_off())) {
5958 __schedule_bug(prev);
5959 preempt_count_set(PREEMPT_DISABLED);
5960 }
5961 rcu_sleep_check();
5962 SCHED_WARN_ON(ct_state() == CT_STATE_USER);
5963
5964 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5965
5966 schedstat_inc(this_rq()->sched_count);
5967}
5968
5969static void prev_balance(struct rq *rq, struct task_struct *prev,
5970 struct rq_flags *rf)
5971{
5972 const struct sched_class *start_class = prev->sched_class;
5973 const struct sched_class *class;
5974
5975#ifdef CONFIG_SCHED_CLASS_EXT
5976 /*
5977 * SCX requires a balance() call before every pick_task() including when
5978 * waking up from SCHED_IDLE. If @start_class is below SCX, start from
5979 * SCX instead. Also, set a flag to detect missing balance() call.
5980 */
5981 if (scx_enabled()) {
5982 rq->scx.flags |= SCX_RQ_BAL_PENDING;
5983 if (sched_class_above(&ext_sched_class, start_class))
5984 start_class = &ext_sched_class;
5985 }
5986#endif
5987
5988 /*
5989 * We must do the balancing pass before put_prev_task(), such
5990 * that when we release the rq->lock the task is in the same
5991 * state as before we took rq->lock.
5992 *
5993 * We can terminate the balance pass as soon as we know there is
5994 * a runnable task of @class priority or higher.
5995 */
5996 for_active_class_range(class, start_class, &idle_sched_class) {
5997 if (class->balance && class->balance(rq, prev, rf))
5998 break;
5999 }
6000}
6001
6002/*
6003 * Pick up the highest-prio task:
6004 */
6005static inline struct task_struct *
6006__pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6007{
6008 const struct sched_class *class;
6009 struct task_struct *p;
6010
6011 rq->dl_server = NULL;
6012
6013 if (scx_enabled())
6014 goto restart;
6015
6016 /*
6017 * Optimization: we know that if all tasks are in the fair class we can
6018 * call that function directly, but only if the @prev task wasn't of a
6019 * higher scheduling class, because otherwise those lose the
6020 * opportunity to pull in more work from other CPUs.
6021 */
6022 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
6023 rq->nr_running == rq->cfs.h_nr_running)) {
6024
6025 p = pick_next_task_fair(rq, prev, rf);
6026 if (unlikely(p == RETRY_TASK))
6027 goto restart;
6028
6029 /* Assume the next prioritized class is idle_sched_class */
6030 if (!p) {
6031 p = pick_task_idle(rq);
6032 put_prev_set_next_task(rq, prev, p);
6033 }
6034
6035 return p;
6036 }
6037
6038restart:
6039 prev_balance(rq, prev, rf);
6040
6041 for_each_active_class(class) {
6042 if (class->pick_next_task) {
6043 p = class->pick_next_task(rq, prev);
6044 if (p)
6045 return p;
6046 } else {
6047 p = class->pick_task(rq);
6048 if (p) {
6049 put_prev_set_next_task(rq, prev, p);
6050 return p;
6051 }
6052 }
6053 }
6054
6055 BUG(); /* The idle class should always have a runnable task. */
6056}
6057
6058#ifdef CONFIG_SCHED_CORE
6059static inline bool is_task_rq_idle(struct task_struct *t)
6060{
6061 return (task_rq(t)->idle == t);
6062}
6063
6064static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
6065{
6066 return is_task_rq_idle(a) || (a->core_cookie == cookie);
6067}
6068
6069static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
6070{
6071 if (is_task_rq_idle(a) || is_task_rq_idle(b))
6072 return true;
6073
6074 return a->core_cookie == b->core_cookie;
6075}
6076
6077static inline struct task_struct *pick_task(struct rq *rq)
6078{
6079 const struct sched_class *class;
6080 struct task_struct *p;
6081
6082 rq->dl_server = NULL;
6083
6084 for_each_active_class(class) {
6085 p = class->pick_task(rq);
6086 if (p)
6087 return p;
6088 }
6089
6090 BUG(); /* The idle class should always have a runnable task. */
6091}
6092
6093extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6094
6095static void queue_core_balance(struct rq *rq);
6096
6097static struct task_struct *
6098pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6099{
6100 struct task_struct *next, *p, *max = NULL;
6101 const struct cpumask *smt_mask;
6102 bool fi_before = false;
6103 bool core_clock_updated = (rq == rq->core);
6104 unsigned long cookie;
6105 int i, cpu, occ = 0;
6106 struct rq *rq_i;
6107 bool need_sync;
6108
6109 if (!sched_core_enabled(rq))
6110 return __pick_next_task(rq, prev, rf);
6111
6112 cpu = cpu_of(rq);
6113
6114 /* Stopper task is switching into idle, no need core-wide selection. */
6115 if (cpu_is_offline(cpu)) {
6116 /*
6117 * Reset core_pick so that we don't enter the fastpath when
6118 * coming online. core_pick would already be migrated to
6119 * another cpu during offline.
6120 */
6121 rq->core_pick = NULL;
6122 rq->core_dl_server = NULL;
6123 return __pick_next_task(rq, prev, rf);
6124 }
6125
6126 /*
6127 * If there were no {en,de}queues since we picked (IOW, the task
6128 * pointers are all still valid), and we haven't scheduled the last
6129 * pick yet, do so now.
6130 *
6131 * rq->core_pick can be NULL if no selection was made for a CPU because
6132 * it was either offline or went offline during a sibling's core-wide
6133 * selection. In this case, do a core-wide selection.
6134 */
6135 if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6136 rq->core->core_pick_seq != rq->core_sched_seq &&
6137 rq->core_pick) {
6138 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6139
6140 next = rq->core_pick;
6141 rq->dl_server = rq->core_dl_server;
6142 rq->core_pick = NULL;
6143 rq->core_dl_server = NULL;
6144 goto out_set_next;
6145 }
6146
6147 prev_balance(rq, prev, rf);
6148
6149 smt_mask = cpu_smt_mask(cpu);
6150 need_sync = !!rq->core->core_cookie;
6151
6152 /* reset state */
6153 rq->core->core_cookie = 0UL;
6154 if (rq->core->core_forceidle_count) {
6155 if (!core_clock_updated) {
6156 update_rq_clock(rq->core);
6157 core_clock_updated = true;
6158 }
6159 sched_core_account_forceidle(rq);
6160 /* reset after accounting force idle */
6161 rq->core->core_forceidle_start = 0;
6162 rq->core->core_forceidle_count = 0;
6163 rq->core->core_forceidle_occupation = 0;
6164 need_sync = true;
6165 fi_before = true;
6166 }
6167
6168 /*
6169 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6170 *
6171 * @task_seq guards the task state ({en,de}queues)
6172 * @pick_seq is the @task_seq we did a selection on
6173 * @sched_seq is the @pick_seq we scheduled
6174 *
6175 * However, preemptions can cause multiple picks on the same task set.
6176 * 'Fix' this by also increasing @task_seq for every pick.
6177 */
6178 rq->core->core_task_seq++;
6179
6180 /*
6181 * Optimize for common case where this CPU has no cookies
6182 * and there are no cookied tasks running on siblings.
6183 */
6184 if (!need_sync) {
6185 next = pick_task(rq);
6186 if (!next->core_cookie) {
6187 rq->core_pick = NULL;
6188 rq->core_dl_server = NULL;
6189 /*
6190 * For robustness, update the min_vruntime_fi for
6191 * unconstrained picks as well.
6192 */
6193 WARN_ON_ONCE(fi_before);
6194 task_vruntime_update(rq, next, false);
6195 goto out_set_next;
6196 }
6197 }
6198
6199 /*
6200 * For each thread: do the regular task pick and find the max prio task
6201 * amongst them.
6202 *
6203 * Tie-break prio towards the current CPU
6204 */
6205 for_each_cpu_wrap(i, smt_mask, cpu) {
6206 rq_i = cpu_rq(i);
6207
6208 /*
6209 * Current cpu always has its clock updated on entrance to
6210 * pick_next_task(). If the current cpu is not the core,
6211 * the core may also have been updated above.
6212 */
6213 if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6214 update_rq_clock(rq_i);
6215
6216 rq_i->core_pick = p = pick_task(rq_i);
6217 rq_i->core_dl_server = rq_i->dl_server;
6218
6219 if (!max || prio_less(max, p, fi_before))
6220 max = p;
6221 }
6222
6223 cookie = rq->core->core_cookie = max->core_cookie;
6224
6225 /*
6226 * For each thread: try and find a runnable task that matches @max or
6227 * force idle.
6228 */
6229 for_each_cpu(i, smt_mask) {
6230 rq_i = cpu_rq(i);
6231 p = rq_i->core_pick;
6232
6233 if (!cookie_equals(p, cookie)) {
6234 p = NULL;
6235 if (cookie)
6236 p = sched_core_find(rq_i, cookie);
6237 if (!p)
6238 p = idle_sched_class.pick_task(rq_i);
6239 }
6240
6241 rq_i->core_pick = p;
6242 rq_i->core_dl_server = NULL;
6243
6244 if (p == rq_i->idle) {
6245 if (rq_i->nr_running) {
6246 rq->core->core_forceidle_count++;
6247 if (!fi_before)
6248 rq->core->core_forceidle_seq++;
6249 }
6250 } else {
6251 occ++;
6252 }
6253 }
6254
6255 if (schedstat_enabled() && rq->core->core_forceidle_count) {
6256 rq->core->core_forceidle_start = rq_clock(rq->core);
6257 rq->core->core_forceidle_occupation = occ;
6258 }
6259
6260 rq->core->core_pick_seq = rq->core->core_task_seq;
6261 next = rq->core_pick;
6262 rq->core_sched_seq = rq->core->core_pick_seq;
6263
6264 /* Something should have been selected for current CPU */
6265 WARN_ON_ONCE(!next);
6266
6267 /*
6268 * Reschedule siblings
6269 *
6270 * NOTE: L1TF -- at this point we're no longer running the old task and
6271 * sending an IPI (below) ensures the sibling will no longer be running
6272 * their task. This ensures there is no inter-sibling overlap between
6273 * non-matching user state.
6274 */
6275 for_each_cpu(i, smt_mask) {
6276 rq_i = cpu_rq(i);
6277
6278 /*
6279 * An online sibling might have gone offline before a task
6280 * could be picked for it, or it might be offline but later
6281 * happen to come online, but its too late and nothing was
6282 * picked for it. That's Ok - it will pick tasks for itself,
6283 * so ignore it.
6284 */
6285 if (!rq_i->core_pick)
6286 continue;
6287
6288 /*
6289 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6290 * fi_before fi update?
6291 * 0 0 1
6292 * 0 1 1
6293 * 1 0 1
6294 * 1 1 0
6295 */
6296 if (!(fi_before && rq->core->core_forceidle_count))
6297 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6298
6299 rq_i->core_pick->core_occupation = occ;
6300
6301 if (i == cpu) {
6302 rq_i->core_pick = NULL;
6303 rq_i->core_dl_server = NULL;
6304 continue;
6305 }
6306
6307 /* Did we break L1TF mitigation requirements? */
6308 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6309
6310 if (rq_i->curr == rq_i->core_pick) {
6311 rq_i->core_pick = NULL;
6312 rq_i->core_dl_server = NULL;
6313 continue;
6314 }
6315
6316 resched_curr(rq_i);
6317 }
6318
6319out_set_next:
6320 put_prev_set_next_task(rq, prev, next);
6321 if (rq->core->core_forceidle_count && next == rq->idle)
6322 queue_core_balance(rq);
6323
6324 return next;
6325}
6326
6327static bool try_steal_cookie(int this, int that)
6328{
6329 struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6330 struct task_struct *p;
6331 unsigned long cookie;
6332 bool success = false;
6333
6334 guard(irq)();
6335 guard(double_rq_lock)(dst, src);
6336
6337 cookie = dst->core->core_cookie;
6338 if (!cookie)
6339 return false;
6340
6341 if (dst->curr != dst->idle)
6342 return false;
6343
6344 p = sched_core_find(src, cookie);
6345 if (!p)
6346 return false;
6347
6348 do {
6349 if (p == src->core_pick || p == src->curr)
6350 goto next;
6351
6352 if (!is_cpu_allowed(p, this))
6353 goto next;
6354
6355 if (p->core_occupation > dst->idle->core_occupation)
6356 goto next;
6357 /*
6358 * sched_core_find() and sched_core_next() will ensure
6359 * that task @p is not throttled now, we also need to
6360 * check whether the runqueue of the destination CPU is
6361 * being throttled.
6362 */
6363 if (sched_task_is_throttled(p, this))
6364 goto next;
6365
6366 move_queued_task_locked(src, dst, p);
6367 resched_curr(dst);
6368
6369 success = true;
6370 break;
6371
6372next:
6373 p = sched_core_next(p, cookie);
6374 } while (p);
6375
6376 return success;
6377}
6378
6379static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6380{
6381 int i;
6382
6383 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6384 if (i == cpu)
6385 continue;
6386
6387 if (need_resched())
6388 break;
6389
6390 if (try_steal_cookie(cpu, i))
6391 return true;
6392 }
6393
6394 return false;
6395}
6396
6397static void sched_core_balance(struct rq *rq)
6398{
6399 struct sched_domain *sd;
6400 int cpu = cpu_of(rq);
6401
6402 guard(preempt)();
6403 guard(rcu)();
6404
6405 raw_spin_rq_unlock_irq(rq);
6406 for_each_domain(cpu, sd) {
6407 if (need_resched())
6408 break;
6409
6410 if (steal_cookie_task(cpu, sd))
6411 break;
6412 }
6413 raw_spin_rq_lock_irq(rq);
6414}
6415
6416static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6417
6418static void queue_core_balance(struct rq *rq)
6419{
6420 if (!sched_core_enabled(rq))
6421 return;
6422
6423 if (!rq->core->core_cookie)
6424 return;
6425
6426 if (!rq->nr_running) /* not forced idle */
6427 return;
6428
6429 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6430}
6431
6432DEFINE_LOCK_GUARD_1(core_lock, int,
6433 sched_core_lock(*_T->lock, &_T->flags),
6434 sched_core_unlock(*_T->lock, &_T->flags),
6435 unsigned long flags)
6436
6437static void sched_core_cpu_starting(unsigned int cpu)
6438{
6439 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6440 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6441 int t;
6442
6443 guard(core_lock)(&cpu);
6444
6445 WARN_ON_ONCE(rq->core != rq);
6446
6447 /* if we're the first, we'll be our own leader */
6448 if (cpumask_weight(smt_mask) == 1)
6449 return;
6450
6451 /* find the leader */
6452 for_each_cpu(t, smt_mask) {
6453 if (t == cpu)
6454 continue;
6455 rq = cpu_rq(t);
6456 if (rq->core == rq) {
6457 core_rq = rq;
6458 break;
6459 }
6460 }
6461
6462 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6463 return;
6464
6465 /* install and validate core_rq */
6466 for_each_cpu(t, smt_mask) {
6467 rq = cpu_rq(t);
6468
6469 if (t == cpu)
6470 rq->core = core_rq;
6471
6472 WARN_ON_ONCE(rq->core != core_rq);
6473 }
6474}
6475
6476static void sched_core_cpu_deactivate(unsigned int cpu)
6477{
6478 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6479 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6480 int t;
6481
6482 guard(core_lock)(&cpu);
6483
6484 /* if we're the last man standing, nothing to do */
6485 if (cpumask_weight(smt_mask) == 1) {
6486 WARN_ON_ONCE(rq->core != rq);
6487 return;
6488 }
6489
6490 /* if we're not the leader, nothing to do */
6491 if (rq->core != rq)
6492 return;
6493
6494 /* find a new leader */
6495 for_each_cpu(t, smt_mask) {
6496 if (t == cpu)
6497 continue;
6498 core_rq = cpu_rq(t);
6499 break;
6500 }
6501
6502 if (WARN_ON_ONCE(!core_rq)) /* impossible */
6503 return;
6504
6505 /* copy the shared state to the new leader */
6506 core_rq->core_task_seq = rq->core_task_seq;
6507 core_rq->core_pick_seq = rq->core_pick_seq;
6508 core_rq->core_cookie = rq->core_cookie;
6509 core_rq->core_forceidle_count = rq->core_forceidle_count;
6510 core_rq->core_forceidle_seq = rq->core_forceidle_seq;
6511 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6512
6513 /*
6514 * Accounting edge for forced idle is handled in pick_next_task().
6515 * Don't need another one here, since the hotplug thread shouldn't
6516 * have a cookie.
6517 */
6518 core_rq->core_forceidle_start = 0;
6519
6520 /* install new leader */
6521 for_each_cpu(t, smt_mask) {
6522 rq = cpu_rq(t);
6523 rq->core = core_rq;
6524 }
6525}
6526
6527static inline void sched_core_cpu_dying(unsigned int cpu)
6528{
6529 struct rq *rq = cpu_rq(cpu);
6530
6531 if (rq->core != rq)
6532 rq->core = rq;
6533}
6534
6535#else /* !CONFIG_SCHED_CORE */
6536
6537static inline void sched_core_cpu_starting(unsigned int cpu) {}
6538static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6539static inline void sched_core_cpu_dying(unsigned int cpu) {}
6540
6541static struct task_struct *
6542pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6543{
6544 return __pick_next_task(rq, prev, rf);
6545}
6546
6547#endif /* CONFIG_SCHED_CORE */
6548
6549/*
6550 * Constants for the sched_mode argument of __schedule().
6551 *
6552 * The mode argument allows RT enabled kernels to differentiate a
6553 * preemption from blocking on an 'sleeping' spin/rwlock.
6554 */
6555#define SM_IDLE (-1)
6556#define SM_NONE 0
6557#define SM_PREEMPT 1
6558#define SM_RTLOCK_WAIT 2
6559
6560/*
6561 * Helper function for __schedule()
6562 *
6563 * If a task does not have signals pending, deactivate it
6564 * Otherwise marks the task's __state as RUNNING
6565 */
6566static bool try_to_block_task(struct rq *rq, struct task_struct *p,
6567 unsigned long task_state)
6568{
6569 int flags = DEQUEUE_NOCLOCK;
6570
6571 if (signal_pending_state(task_state, p)) {
6572 WRITE_ONCE(p->__state, TASK_RUNNING);
6573 return false;
6574 }
6575
6576 p->sched_contributes_to_load =
6577 (task_state & TASK_UNINTERRUPTIBLE) &&
6578 !(task_state & TASK_NOLOAD) &&
6579 !(task_state & TASK_FROZEN);
6580
6581 if (unlikely(is_special_task_state(task_state)))
6582 flags |= DEQUEUE_SPECIAL;
6583
6584 /*
6585 * __schedule() ttwu()
6586 * prev_state = prev->state; if (p->on_rq && ...)
6587 * if (prev_state) goto out;
6588 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
6589 * p->state = TASK_WAKING
6590 *
6591 * Where __schedule() and ttwu() have matching control dependencies.
6592 *
6593 * After this, schedule() must not care about p->state any more.
6594 */
6595 block_task(rq, p, flags);
6596 return true;
6597}
6598
6599/*
6600 * __schedule() is the main scheduler function.
6601 *
6602 * The main means of driving the scheduler and thus entering this function are:
6603 *
6604 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6605 *
6606 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6607 * paths. For example, see arch/x86/entry_64.S.
6608 *
6609 * To drive preemption between tasks, the scheduler sets the flag in timer
6610 * interrupt handler sched_tick().
6611 *
6612 * 3. Wakeups don't really cause entry into schedule(). They add a
6613 * task to the run-queue and that's it.
6614 *
6615 * Now, if the new task added to the run-queue preempts the current
6616 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6617 * called on the nearest possible occasion:
6618 *
6619 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6620 *
6621 * - in syscall or exception context, at the next outmost
6622 * preempt_enable(). (this might be as soon as the wake_up()'s
6623 * spin_unlock()!)
6624 *
6625 * - in IRQ context, return from interrupt-handler to
6626 * preemptible context
6627 *
6628 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6629 * then at the next:
6630 *
6631 * - cond_resched() call
6632 * - explicit schedule() call
6633 * - return from syscall or exception to user-space
6634 * - return from interrupt-handler to user-space
6635 *
6636 * WARNING: must be called with preemption disabled!
6637 */
6638static void __sched notrace __schedule(int sched_mode)
6639{
6640 struct task_struct *prev, *next;
6641 /*
6642 * On PREEMPT_RT kernel, SM_RTLOCK_WAIT is noted
6643 * as a preemption by schedule_debug() and RCU.
6644 */
6645 bool preempt = sched_mode > SM_NONE;
6646 unsigned long *switch_count;
6647 unsigned long prev_state;
6648 struct rq_flags rf;
6649 struct rq *rq;
6650 int cpu;
6651
6652 cpu = smp_processor_id();
6653 rq = cpu_rq(cpu);
6654 prev = rq->curr;
6655
6656 schedule_debug(prev, preempt);
6657
6658 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6659 hrtick_clear(rq);
6660
6661 local_irq_disable();
6662 rcu_note_context_switch(preempt);
6663
6664 /*
6665 * Make sure that signal_pending_state()->signal_pending() below
6666 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6667 * done by the caller to avoid the race with signal_wake_up():
6668 *
6669 * __set_current_state(@state) signal_wake_up()
6670 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
6671 * wake_up_state(p, state)
6672 * LOCK rq->lock LOCK p->pi_state
6673 * smp_mb__after_spinlock() smp_mb__after_spinlock()
6674 * if (signal_pending_state()) if (p->state & @state)
6675 *
6676 * Also, the membarrier system call requires a full memory barrier
6677 * after coming from user-space, before storing to rq->curr; this
6678 * barrier matches a full barrier in the proximity of the membarrier
6679 * system call exit.
6680 */
6681 rq_lock(rq, &rf);
6682 smp_mb__after_spinlock();
6683
6684 /* Promote REQ to ACT */
6685 rq->clock_update_flags <<= 1;
6686 update_rq_clock(rq);
6687 rq->clock_update_flags = RQCF_UPDATED;
6688
6689 switch_count = &prev->nivcsw;
6690
6691 /* Task state changes only considers SM_PREEMPT as preemption */
6692 preempt = sched_mode == SM_PREEMPT;
6693
6694 /*
6695 * We must load prev->state once (task_struct::state is volatile), such
6696 * that we form a control dependency vs deactivate_task() below.
6697 */
6698 prev_state = READ_ONCE(prev->__state);
6699 if (sched_mode == SM_IDLE) {
6700 /* SCX must consult the BPF scheduler to tell if rq is empty */
6701 if (!rq->nr_running && !scx_enabled()) {
6702 next = prev;
6703 goto picked;
6704 }
6705 } else if (!preempt && prev_state) {
6706 try_to_block_task(rq, prev, prev_state);
6707 switch_count = &prev->nvcsw;
6708 }
6709
6710 next = pick_next_task(rq, prev, &rf);
6711 rq_set_donor(rq, next);
6712picked:
6713 clear_tsk_need_resched(prev);
6714 clear_preempt_need_resched();
6715#ifdef CONFIG_SCHED_DEBUG
6716 rq->last_seen_need_resched_ns = 0;
6717#endif
6718
6719 if (likely(prev != next)) {
6720 rq->nr_switches++;
6721 /*
6722 * RCU users of rcu_dereference(rq->curr) may not see
6723 * changes to task_struct made by pick_next_task().
6724 */
6725 RCU_INIT_POINTER(rq->curr, next);
6726 /*
6727 * The membarrier system call requires each architecture
6728 * to have a full memory barrier after updating
6729 * rq->curr, before returning to user-space.
6730 *
6731 * Here are the schemes providing that barrier on the
6732 * various architectures:
6733 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6734 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
6735 * on PowerPC and on RISC-V.
6736 * - finish_lock_switch() for weakly-ordered
6737 * architectures where spin_unlock is a full barrier,
6738 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6739 * is a RELEASE barrier),
6740 *
6741 * The barrier matches a full barrier in the proximity of
6742 * the membarrier system call entry.
6743 *
6744 * On RISC-V, this barrier pairing is also needed for the
6745 * SYNC_CORE command when switching between processes, cf.
6746 * the inline comments in membarrier_arch_switch_mm().
6747 */
6748 ++*switch_count;
6749
6750 migrate_disable_switch(rq, prev);
6751 psi_account_irqtime(rq, prev, next);
6752 psi_sched_switch(prev, next, !task_on_rq_queued(prev) ||
6753 prev->se.sched_delayed);
6754
6755 trace_sched_switch(preempt, prev, next, prev_state);
6756
6757 /* Also unlocks the rq: */
6758 rq = context_switch(rq, prev, next, &rf);
6759 } else {
6760 rq_unpin_lock(rq, &rf);
6761 __balance_callbacks(rq);
6762 raw_spin_rq_unlock_irq(rq);
6763 }
6764}
6765
6766void __noreturn do_task_dead(void)
6767{
6768 /* Causes final put_task_struct in finish_task_switch(): */
6769 set_special_state(TASK_DEAD);
6770
6771 /* Tell freezer to ignore us: */
6772 current->flags |= PF_NOFREEZE;
6773
6774 __schedule(SM_NONE);
6775 BUG();
6776
6777 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6778 for (;;)
6779 cpu_relax();
6780}
6781
6782static inline void sched_submit_work(struct task_struct *tsk)
6783{
6784 static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
6785 unsigned int task_flags;
6786
6787 /*
6788 * Establish LD_WAIT_CONFIG context to ensure none of the code called
6789 * will use a blocking primitive -- which would lead to recursion.
6790 */
6791 lock_map_acquire_try(&sched_map);
6792
6793 task_flags = tsk->flags;
6794 /*
6795 * If a worker goes to sleep, notify and ask workqueue whether it
6796 * wants to wake up a task to maintain concurrency.
6797 */
6798 if (task_flags & PF_WQ_WORKER)
6799 wq_worker_sleeping(tsk);
6800 else if (task_flags & PF_IO_WORKER)
6801 io_wq_worker_sleeping(tsk);
6802
6803 /*
6804 * spinlock and rwlock must not flush block requests. This will
6805 * deadlock if the callback attempts to acquire a lock which is
6806 * already acquired.
6807 */
6808 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
6809
6810 /*
6811 * If we are going to sleep and we have plugged IO queued,
6812 * make sure to submit it to avoid deadlocks.
6813 */
6814 blk_flush_plug(tsk->plug, true);
6815
6816 lock_map_release(&sched_map);
6817}
6818
6819static void sched_update_worker(struct task_struct *tsk)
6820{
6821 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
6822 if (tsk->flags & PF_BLOCK_TS)
6823 blk_plug_invalidate_ts(tsk);
6824 if (tsk->flags & PF_WQ_WORKER)
6825 wq_worker_running(tsk);
6826 else if (tsk->flags & PF_IO_WORKER)
6827 io_wq_worker_running(tsk);
6828 }
6829}
6830
6831static __always_inline void __schedule_loop(int sched_mode)
6832{
6833 do {
6834 preempt_disable();
6835 __schedule(sched_mode);
6836 sched_preempt_enable_no_resched();
6837 } while (need_resched());
6838}
6839
6840asmlinkage __visible void __sched schedule(void)
6841{
6842 struct task_struct *tsk = current;
6843
6844#ifdef CONFIG_RT_MUTEXES
6845 lockdep_assert(!tsk->sched_rt_mutex);
6846#endif
6847
6848 if (!task_is_running(tsk))
6849 sched_submit_work(tsk);
6850 __schedule_loop(SM_NONE);
6851 sched_update_worker(tsk);
6852}
6853EXPORT_SYMBOL(schedule);
6854
6855/*
6856 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6857 * state (have scheduled out non-voluntarily) by making sure that all
6858 * tasks have either left the run queue or have gone into user space.
6859 * As idle tasks do not do either, they must not ever be preempted
6860 * (schedule out non-voluntarily).
6861 *
6862 * schedule_idle() is similar to schedule_preempt_disable() except that it
6863 * never enables preemption because it does not call sched_submit_work().
6864 */
6865void __sched schedule_idle(void)
6866{
6867 /*
6868 * As this skips calling sched_submit_work(), which the idle task does
6869 * regardless because that function is a NOP when the task is in a
6870 * TASK_RUNNING state, make sure this isn't used someplace that the
6871 * current task can be in any other state. Note, idle is always in the
6872 * TASK_RUNNING state.
6873 */
6874 WARN_ON_ONCE(current->__state);
6875 do {
6876 __schedule(SM_IDLE);
6877 } while (need_resched());
6878}
6879
6880#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
6881asmlinkage __visible void __sched schedule_user(void)
6882{
6883 /*
6884 * If we come here after a random call to set_need_resched(),
6885 * or we have been woken up remotely but the IPI has not yet arrived,
6886 * we haven't yet exited the RCU idle mode. Do it here manually until
6887 * we find a better solution.
6888 *
6889 * NB: There are buggy callers of this function. Ideally we
6890 * should warn if prev_state != CT_STATE_USER, but that will trigger
6891 * too frequently to make sense yet.
6892 */
6893 enum ctx_state prev_state = exception_enter();
6894 schedule();
6895 exception_exit(prev_state);
6896}
6897#endif
6898
6899/**
6900 * schedule_preempt_disabled - called with preemption disabled
6901 *
6902 * Returns with preemption disabled. Note: preempt_count must be 1
6903 */
6904void __sched schedule_preempt_disabled(void)
6905{
6906 sched_preempt_enable_no_resched();
6907 schedule();
6908 preempt_disable();
6909}
6910
6911#ifdef CONFIG_PREEMPT_RT
6912void __sched notrace schedule_rtlock(void)
6913{
6914 __schedule_loop(SM_RTLOCK_WAIT);
6915}
6916NOKPROBE_SYMBOL(schedule_rtlock);
6917#endif
6918
6919static void __sched notrace preempt_schedule_common(void)
6920{
6921 do {
6922 /*
6923 * Because the function tracer can trace preempt_count_sub()
6924 * and it also uses preempt_enable/disable_notrace(), if
6925 * NEED_RESCHED is set, the preempt_enable_notrace() called
6926 * by the function tracer will call this function again and
6927 * cause infinite recursion.
6928 *
6929 * Preemption must be disabled here before the function
6930 * tracer can trace. Break up preempt_disable() into two
6931 * calls. One to disable preemption without fear of being
6932 * traced. The other to still record the preemption latency,
6933 * which can also be traced by the function tracer.
6934 */
6935 preempt_disable_notrace();
6936 preempt_latency_start(1);
6937 __schedule(SM_PREEMPT);
6938 preempt_latency_stop(1);
6939 preempt_enable_no_resched_notrace();
6940
6941 /*
6942 * Check again in case we missed a preemption opportunity
6943 * between schedule and now.
6944 */
6945 } while (need_resched());
6946}
6947
6948#ifdef CONFIG_PREEMPTION
6949/*
6950 * This is the entry point to schedule() from in-kernel preemption
6951 * off of preempt_enable.
6952 */
6953asmlinkage __visible void __sched notrace preempt_schedule(void)
6954{
6955 /*
6956 * If there is a non-zero preempt_count or interrupts are disabled,
6957 * we do not want to preempt the current task. Just return..
6958 */
6959 if (likely(!preemptible()))
6960 return;
6961 preempt_schedule_common();
6962}
6963NOKPROBE_SYMBOL(preempt_schedule);
6964EXPORT_SYMBOL(preempt_schedule);
6965
6966#ifdef CONFIG_PREEMPT_DYNAMIC
6967#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6968#ifndef preempt_schedule_dynamic_enabled
6969#define preempt_schedule_dynamic_enabled preempt_schedule
6970#define preempt_schedule_dynamic_disabled NULL
6971#endif
6972DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
6973EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
6974#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6975static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
6976void __sched notrace dynamic_preempt_schedule(void)
6977{
6978 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
6979 return;
6980 preempt_schedule();
6981}
6982NOKPROBE_SYMBOL(dynamic_preempt_schedule);
6983EXPORT_SYMBOL(dynamic_preempt_schedule);
6984#endif
6985#endif
6986
6987/**
6988 * preempt_schedule_notrace - preempt_schedule called by tracing
6989 *
6990 * The tracing infrastructure uses preempt_enable_notrace to prevent
6991 * recursion and tracing preempt enabling caused by the tracing
6992 * infrastructure itself. But as tracing can happen in areas coming
6993 * from userspace or just about to enter userspace, a preempt enable
6994 * can occur before user_exit() is called. This will cause the scheduler
6995 * to be called when the system is still in usermode.
6996 *
6997 * To prevent this, the preempt_enable_notrace will use this function
6998 * instead of preempt_schedule() to exit user context if needed before
6999 * calling the scheduler.
7000 */
7001asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
7002{
7003 enum ctx_state prev_ctx;
7004
7005 if (likely(!preemptible()))
7006 return;
7007
7008 do {
7009 /*
7010 * Because the function tracer can trace preempt_count_sub()
7011 * and it also uses preempt_enable/disable_notrace(), if
7012 * NEED_RESCHED is set, the preempt_enable_notrace() called
7013 * by the function tracer will call this function again and
7014 * cause infinite recursion.
7015 *
7016 * Preemption must be disabled here before the function
7017 * tracer can trace. Break up preempt_disable() into two
7018 * calls. One to disable preemption without fear of being
7019 * traced. The other to still record the preemption latency,
7020 * which can also be traced by the function tracer.
7021 */
7022 preempt_disable_notrace();
7023 preempt_latency_start(1);
7024 /*
7025 * Needs preempt disabled in case user_exit() is traced
7026 * and the tracer calls preempt_enable_notrace() causing
7027 * an infinite recursion.
7028 */
7029 prev_ctx = exception_enter();
7030 __schedule(SM_PREEMPT);
7031 exception_exit(prev_ctx);
7032
7033 preempt_latency_stop(1);
7034 preempt_enable_no_resched_notrace();
7035 } while (need_resched());
7036}
7037EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
7038
7039#ifdef CONFIG_PREEMPT_DYNAMIC
7040#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7041#ifndef preempt_schedule_notrace_dynamic_enabled
7042#define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
7043#define preempt_schedule_notrace_dynamic_disabled NULL
7044#endif
7045DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
7046EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
7047#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7048static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
7049void __sched notrace dynamic_preempt_schedule_notrace(void)
7050{
7051 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
7052 return;
7053 preempt_schedule_notrace();
7054}
7055NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
7056EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
7057#endif
7058#endif
7059
7060#endif /* CONFIG_PREEMPTION */
7061
7062/*
7063 * This is the entry point to schedule() from kernel preemption
7064 * off of IRQ context.
7065 * Note, that this is called and return with IRQs disabled. This will
7066 * protect us against recursive calling from IRQ contexts.
7067 */
7068asmlinkage __visible void __sched preempt_schedule_irq(void)
7069{
7070 enum ctx_state prev_state;
7071
7072 /* Catch callers which need to be fixed */
7073 BUG_ON(preempt_count() || !irqs_disabled());
7074
7075 prev_state = exception_enter();
7076
7077 do {
7078 preempt_disable();
7079 local_irq_enable();
7080 __schedule(SM_PREEMPT);
7081 local_irq_disable();
7082 sched_preempt_enable_no_resched();
7083 } while (need_resched());
7084
7085 exception_exit(prev_state);
7086}
7087
7088int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7089 void *key)
7090{
7091 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7092 return try_to_wake_up(curr->private, mode, wake_flags);
7093}
7094EXPORT_SYMBOL(default_wake_function);
7095
7096const struct sched_class *__setscheduler_class(int policy, int prio)
7097{
7098 if (dl_prio(prio))
7099 return &dl_sched_class;
7100
7101 if (rt_prio(prio))
7102 return &rt_sched_class;
7103
7104#ifdef CONFIG_SCHED_CLASS_EXT
7105 if (task_should_scx(policy))
7106 return &ext_sched_class;
7107#endif
7108
7109 return &fair_sched_class;
7110}
7111
7112#ifdef CONFIG_RT_MUTEXES
7113
7114/*
7115 * Would be more useful with typeof()/auto_type but they don't mix with
7116 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7117 * name such that if someone were to implement this function we get to compare
7118 * notes.
7119 */
7120#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
7121
7122void rt_mutex_pre_schedule(void)
7123{
7124 lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
7125 sched_submit_work(current);
7126}
7127
7128void rt_mutex_schedule(void)
7129{
7130 lockdep_assert(current->sched_rt_mutex);
7131 __schedule_loop(SM_NONE);
7132}
7133
7134void rt_mutex_post_schedule(void)
7135{
7136 sched_update_worker(current);
7137 lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
7138}
7139
7140/*
7141 * rt_mutex_setprio - set the current priority of a task
7142 * @p: task to boost
7143 * @pi_task: donor task
7144 *
7145 * This function changes the 'effective' priority of a task. It does
7146 * not touch ->normal_prio like __setscheduler().
7147 *
7148 * Used by the rt_mutex code to implement priority inheritance
7149 * logic. Call site only calls if the priority of the task changed.
7150 */
7151void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
7152{
7153 int prio, oldprio, queued, running, queue_flag =
7154 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7155 const struct sched_class *prev_class, *next_class;
7156 struct rq_flags rf;
7157 struct rq *rq;
7158
7159 /* XXX used to be waiter->prio, not waiter->task->prio */
7160 prio = __rt_effective_prio(pi_task, p->normal_prio);
7161
7162 /*
7163 * If nothing changed; bail early.
7164 */
7165 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7166 return;
7167
7168 rq = __task_rq_lock(p, &rf);
7169 update_rq_clock(rq);
7170 /*
7171 * Set under pi_lock && rq->lock, such that the value can be used under
7172 * either lock.
7173 *
7174 * Note that there is loads of tricky to make this pointer cache work
7175 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7176 * ensure a task is de-boosted (pi_task is set to NULL) before the
7177 * task is allowed to run again (and can exit). This ensures the pointer
7178 * points to a blocked task -- which guarantees the task is present.
7179 */
7180 p->pi_top_task = pi_task;
7181
7182 /*
7183 * For FIFO/RR we only need to set prio, if that matches we're done.
7184 */
7185 if (prio == p->prio && !dl_prio(prio))
7186 goto out_unlock;
7187
7188 /*
7189 * Idle task boosting is a no-no in general. There is one
7190 * exception, when PREEMPT_RT and NOHZ is active:
7191 *
7192 * The idle task calls get_next_timer_interrupt() and holds
7193 * the timer wheel base->lock on the CPU and another CPU wants
7194 * to access the timer (probably to cancel it). We can safely
7195 * ignore the boosting request, as the idle CPU runs this code
7196 * with interrupts disabled and will complete the lock
7197 * protected section without being interrupted. So there is no
7198 * real need to boost.
7199 */
7200 if (unlikely(p == rq->idle)) {
7201 WARN_ON(p != rq->curr);
7202 WARN_ON(p->pi_blocked_on);
7203 goto out_unlock;
7204 }
7205
7206 trace_sched_pi_setprio(p, pi_task);
7207 oldprio = p->prio;
7208
7209 if (oldprio == prio)
7210 queue_flag &= ~DEQUEUE_MOVE;
7211
7212 prev_class = p->sched_class;
7213 next_class = __setscheduler_class(p->policy, prio);
7214
7215 if (prev_class != next_class && p->se.sched_delayed)
7216 dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
7217
7218 queued = task_on_rq_queued(p);
7219 running = task_current_donor(rq, p);
7220 if (queued)
7221 dequeue_task(rq, p, queue_flag);
7222 if (running)
7223 put_prev_task(rq, p);
7224
7225 /*
7226 * Boosting condition are:
7227 * 1. -rt task is running and holds mutex A
7228 * --> -dl task blocks on mutex A
7229 *
7230 * 2. -dl task is running and holds mutex A
7231 * --> -dl task blocks on mutex A and could preempt the
7232 * running task
7233 */
7234 if (dl_prio(prio)) {
7235 if (!dl_prio(p->normal_prio) ||
7236 (pi_task && dl_prio(pi_task->prio) &&
7237 dl_entity_preempt(&pi_task->dl, &p->dl))) {
7238 p->dl.pi_se = pi_task->dl.pi_se;
7239 queue_flag |= ENQUEUE_REPLENISH;
7240 } else {
7241 p->dl.pi_se = &p->dl;
7242 }
7243 } else if (rt_prio(prio)) {
7244 if (dl_prio(oldprio))
7245 p->dl.pi_se = &p->dl;
7246 if (oldprio < prio)
7247 queue_flag |= ENQUEUE_HEAD;
7248 } else {
7249 if (dl_prio(oldprio))
7250 p->dl.pi_se = &p->dl;
7251 if (rt_prio(oldprio))
7252 p->rt.timeout = 0;
7253 }
7254
7255 p->sched_class = next_class;
7256 p->prio = prio;
7257
7258 check_class_changing(rq, p, prev_class);
7259
7260 if (queued)
7261 enqueue_task(rq, p, queue_flag);
7262 if (running)
7263 set_next_task(rq, p);
7264
7265 check_class_changed(rq, p, prev_class, oldprio);
7266out_unlock:
7267 /* Avoid rq from going away on us: */
7268 preempt_disable();
7269
7270 rq_unpin_lock(rq, &rf);
7271 __balance_callbacks(rq);
7272 raw_spin_rq_unlock(rq);
7273
7274 preempt_enable();
7275}
7276#endif
7277
7278#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
7279int __sched __cond_resched(void)
7280{
7281 if (should_resched(0) && !irqs_disabled()) {
7282 preempt_schedule_common();
7283 return 1;
7284 }
7285 /*
7286 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
7287 * whether the current CPU is in an RCU read-side critical section,
7288 * so the tick can report quiescent states even for CPUs looping
7289 * in kernel context. In contrast, in non-preemptible kernels,
7290 * RCU readers leave no in-memory hints, which means that CPU-bound
7291 * processes executing in kernel context might never report an
7292 * RCU quiescent state. Therefore, the following code causes
7293 * cond_resched() to report a quiescent state, but only when RCU
7294 * is in urgent need of one.
7295 */
7296#ifndef CONFIG_PREEMPT_RCU
7297 rcu_all_qs();
7298#endif
7299 return 0;
7300}
7301EXPORT_SYMBOL(__cond_resched);
7302#endif
7303
7304#ifdef CONFIG_PREEMPT_DYNAMIC
7305#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7306#define cond_resched_dynamic_enabled __cond_resched
7307#define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
7308DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
7309EXPORT_STATIC_CALL_TRAMP(cond_resched);
7310
7311#define might_resched_dynamic_enabled __cond_resched
7312#define might_resched_dynamic_disabled ((void *)&__static_call_return0)
7313DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
7314EXPORT_STATIC_CALL_TRAMP(might_resched);
7315#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7316static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
7317int __sched dynamic_cond_resched(void)
7318{
7319 klp_sched_try_switch();
7320 if (!static_branch_unlikely(&sk_dynamic_cond_resched))
7321 return 0;
7322 return __cond_resched();
7323}
7324EXPORT_SYMBOL(dynamic_cond_resched);
7325
7326static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
7327int __sched dynamic_might_resched(void)
7328{
7329 if (!static_branch_unlikely(&sk_dynamic_might_resched))
7330 return 0;
7331 return __cond_resched();
7332}
7333EXPORT_SYMBOL(dynamic_might_resched);
7334#endif
7335#endif
7336
7337/*
7338 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7339 * call schedule, and on return reacquire the lock.
7340 *
7341 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7342 * operations here to prevent schedule() from being called twice (once via
7343 * spin_unlock(), once by hand).
7344 */
7345int __cond_resched_lock(spinlock_t *lock)
7346{
7347 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7348 int ret = 0;
7349
7350 lockdep_assert_held(lock);
7351
7352 if (spin_needbreak(lock) || resched) {
7353 spin_unlock(lock);
7354 if (!_cond_resched())
7355 cpu_relax();
7356 ret = 1;
7357 spin_lock(lock);
7358 }
7359 return ret;
7360}
7361EXPORT_SYMBOL(__cond_resched_lock);
7362
7363int __cond_resched_rwlock_read(rwlock_t *lock)
7364{
7365 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7366 int ret = 0;
7367
7368 lockdep_assert_held_read(lock);
7369
7370 if (rwlock_needbreak(lock) || resched) {
7371 read_unlock(lock);
7372 if (!_cond_resched())
7373 cpu_relax();
7374 ret = 1;
7375 read_lock(lock);
7376 }
7377 return ret;
7378}
7379EXPORT_SYMBOL(__cond_resched_rwlock_read);
7380
7381int __cond_resched_rwlock_write(rwlock_t *lock)
7382{
7383 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7384 int ret = 0;
7385
7386 lockdep_assert_held_write(lock);
7387
7388 if (rwlock_needbreak(lock) || resched) {
7389 write_unlock(lock);
7390 if (!_cond_resched())
7391 cpu_relax();
7392 ret = 1;
7393 write_lock(lock);
7394 }
7395 return ret;
7396}
7397EXPORT_SYMBOL(__cond_resched_rwlock_write);
7398
7399#ifdef CONFIG_PREEMPT_DYNAMIC
7400
7401#ifdef CONFIG_GENERIC_ENTRY
7402#include <linux/entry-common.h>
7403#endif
7404
7405/*
7406 * SC:cond_resched
7407 * SC:might_resched
7408 * SC:preempt_schedule
7409 * SC:preempt_schedule_notrace
7410 * SC:irqentry_exit_cond_resched
7411 *
7412 *
7413 * NONE:
7414 * cond_resched <- __cond_resched
7415 * might_resched <- RET0
7416 * preempt_schedule <- NOP
7417 * preempt_schedule_notrace <- NOP
7418 * irqentry_exit_cond_resched <- NOP
7419 * dynamic_preempt_lazy <- false
7420 *
7421 * VOLUNTARY:
7422 * cond_resched <- __cond_resched
7423 * might_resched <- __cond_resched
7424 * preempt_schedule <- NOP
7425 * preempt_schedule_notrace <- NOP
7426 * irqentry_exit_cond_resched <- NOP
7427 * dynamic_preempt_lazy <- false
7428 *
7429 * FULL:
7430 * cond_resched <- RET0
7431 * might_resched <- RET0
7432 * preempt_schedule <- preempt_schedule
7433 * preempt_schedule_notrace <- preempt_schedule_notrace
7434 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7435 * dynamic_preempt_lazy <- false
7436 *
7437 * LAZY:
7438 * cond_resched <- RET0
7439 * might_resched <- RET0
7440 * preempt_schedule <- preempt_schedule
7441 * preempt_schedule_notrace <- preempt_schedule_notrace
7442 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7443 * dynamic_preempt_lazy <- true
7444 */
7445
7446enum {
7447 preempt_dynamic_undefined = -1,
7448 preempt_dynamic_none,
7449 preempt_dynamic_voluntary,
7450 preempt_dynamic_full,
7451 preempt_dynamic_lazy,
7452};
7453
7454int preempt_dynamic_mode = preempt_dynamic_undefined;
7455
7456int sched_dynamic_mode(const char *str)
7457{
7458#ifndef CONFIG_PREEMPT_RT
7459 if (!strcmp(str, "none"))
7460 return preempt_dynamic_none;
7461
7462 if (!strcmp(str, "voluntary"))
7463 return preempt_dynamic_voluntary;
7464#endif
7465
7466 if (!strcmp(str, "full"))
7467 return preempt_dynamic_full;
7468
7469#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
7470 if (!strcmp(str, "lazy"))
7471 return preempt_dynamic_lazy;
7472#endif
7473
7474 return -EINVAL;
7475}
7476
7477#define preempt_dynamic_key_enable(f) static_key_enable(&sk_dynamic_##f.key)
7478#define preempt_dynamic_key_disable(f) static_key_disable(&sk_dynamic_##f.key)
7479
7480#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7481#define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
7482#define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
7483#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7484#define preempt_dynamic_enable(f) preempt_dynamic_key_enable(f)
7485#define preempt_dynamic_disable(f) preempt_dynamic_key_disable(f)
7486#else
7487#error "Unsupported PREEMPT_DYNAMIC mechanism"
7488#endif
7489
7490static DEFINE_MUTEX(sched_dynamic_mutex);
7491static bool klp_override;
7492
7493static void __sched_dynamic_update(int mode)
7494{
7495 /*
7496 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
7497 * the ZERO state, which is invalid.
7498 */
7499 if (!klp_override)
7500 preempt_dynamic_enable(cond_resched);
7501 preempt_dynamic_enable(might_resched);
7502 preempt_dynamic_enable(preempt_schedule);
7503 preempt_dynamic_enable(preempt_schedule_notrace);
7504 preempt_dynamic_enable(irqentry_exit_cond_resched);
7505 preempt_dynamic_key_disable(preempt_lazy);
7506
7507 switch (mode) {
7508 case preempt_dynamic_none:
7509 if (!klp_override)
7510 preempt_dynamic_enable(cond_resched);
7511 preempt_dynamic_disable(might_resched);
7512 preempt_dynamic_disable(preempt_schedule);
7513 preempt_dynamic_disable(preempt_schedule_notrace);
7514 preempt_dynamic_disable(irqentry_exit_cond_resched);
7515 preempt_dynamic_key_disable(preempt_lazy);
7516 if (mode != preempt_dynamic_mode)
7517 pr_info("Dynamic Preempt: none\n");
7518 break;
7519
7520 case preempt_dynamic_voluntary:
7521 if (!klp_override)
7522 preempt_dynamic_enable(cond_resched);
7523 preempt_dynamic_enable(might_resched);
7524 preempt_dynamic_disable(preempt_schedule);
7525 preempt_dynamic_disable(preempt_schedule_notrace);
7526 preempt_dynamic_disable(irqentry_exit_cond_resched);
7527 preempt_dynamic_key_disable(preempt_lazy);
7528 if (mode != preempt_dynamic_mode)
7529 pr_info("Dynamic Preempt: voluntary\n");
7530 break;
7531
7532 case preempt_dynamic_full:
7533 if (!klp_override)
7534 preempt_dynamic_disable(cond_resched);
7535 preempt_dynamic_disable(might_resched);
7536 preempt_dynamic_enable(preempt_schedule);
7537 preempt_dynamic_enable(preempt_schedule_notrace);
7538 preempt_dynamic_enable(irqentry_exit_cond_resched);
7539 preempt_dynamic_key_disable(preempt_lazy);
7540 if (mode != preempt_dynamic_mode)
7541 pr_info("Dynamic Preempt: full\n");
7542 break;
7543
7544 case preempt_dynamic_lazy:
7545 if (!klp_override)
7546 preempt_dynamic_disable(cond_resched);
7547 preempt_dynamic_disable(might_resched);
7548 preempt_dynamic_enable(preempt_schedule);
7549 preempt_dynamic_enable(preempt_schedule_notrace);
7550 preempt_dynamic_enable(irqentry_exit_cond_resched);
7551 preempt_dynamic_key_enable(preempt_lazy);
7552 if (mode != preempt_dynamic_mode)
7553 pr_info("Dynamic Preempt: lazy\n");
7554 break;
7555 }
7556
7557 preempt_dynamic_mode = mode;
7558}
7559
7560void sched_dynamic_update(int mode)
7561{
7562 mutex_lock(&sched_dynamic_mutex);
7563 __sched_dynamic_update(mode);
7564 mutex_unlock(&sched_dynamic_mutex);
7565}
7566
7567#ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7568
7569static int klp_cond_resched(void)
7570{
7571 __klp_sched_try_switch();
7572 return __cond_resched();
7573}
7574
7575void sched_dynamic_klp_enable(void)
7576{
7577 mutex_lock(&sched_dynamic_mutex);
7578
7579 klp_override = true;
7580 static_call_update(cond_resched, klp_cond_resched);
7581
7582 mutex_unlock(&sched_dynamic_mutex);
7583}
7584
7585void sched_dynamic_klp_disable(void)
7586{
7587 mutex_lock(&sched_dynamic_mutex);
7588
7589 klp_override = false;
7590 __sched_dynamic_update(preempt_dynamic_mode);
7591
7592 mutex_unlock(&sched_dynamic_mutex);
7593}
7594
7595#endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
7596
7597static int __init setup_preempt_mode(char *str)
7598{
7599 int mode = sched_dynamic_mode(str);
7600 if (mode < 0) {
7601 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
7602 return 0;
7603 }
7604
7605 sched_dynamic_update(mode);
7606 return 1;
7607}
7608__setup("preempt=", setup_preempt_mode);
7609
7610static void __init preempt_dynamic_init(void)
7611{
7612 if (preempt_dynamic_mode == preempt_dynamic_undefined) {
7613 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
7614 sched_dynamic_update(preempt_dynamic_none);
7615 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
7616 sched_dynamic_update(preempt_dynamic_voluntary);
7617 } else if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
7618 sched_dynamic_update(preempt_dynamic_lazy);
7619 } else {
7620 /* Default static call setting, nothing to do */
7621 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
7622 preempt_dynamic_mode = preempt_dynamic_full;
7623 pr_info("Dynamic Preempt: full\n");
7624 }
7625 }
7626}
7627
7628#define PREEMPT_MODEL_ACCESSOR(mode) \
7629 bool preempt_model_##mode(void) \
7630 { \
7631 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
7632 return preempt_dynamic_mode == preempt_dynamic_##mode; \
7633 } \
7634 EXPORT_SYMBOL_GPL(preempt_model_##mode)
7635
7636PREEMPT_MODEL_ACCESSOR(none);
7637PREEMPT_MODEL_ACCESSOR(voluntary);
7638PREEMPT_MODEL_ACCESSOR(full);
7639PREEMPT_MODEL_ACCESSOR(lazy);
7640
7641#else /* !CONFIG_PREEMPT_DYNAMIC: */
7642
7643static inline void preempt_dynamic_init(void) { }
7644
7645#endif /* CONFIG_PREEMPT_DYNAMIC */
7646
7647int io_schedule_prepare(void)
7648{
7649 int old_iowait = current->in_iowait;
7650
7651 current->in_iowait = 1;
7652 blk_flush_plug(current->plug, true);
7653 return old_iowait;
7654}
7655
7656void io_schedule_finish(int token)
7657{
7658 current->in_iowait = token;
7659}
7660
7661/*
7662 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7663 * that process accounting knows that this is a task in IO wait state.
7664 */
7665long __sched io_schedule_timeout(long timeout)
7666{
7667 int token;
7668 long ret;
7669
7670 token = io_schedule_prepare();
7671 ret = schedule_timeout(timeout);
7672 io_schedule_finish(token);
7673
7674 return ret;
7675}
7676EXPORT_SYMBOL(io_schedule_timeout);
7677
7678void __sched io_schedule(void)
7679{
7680 int token;
7681
7682 token = io_schedule_prepare();
7683 schedule();
7684 io_schedule_finish(token);
7685}
7686EXPORT_SYMBOL(io_schedule);
7687
7688void sched_show_task(struct task_struct *p)
7689{
7690 unsigned long free;
7691 int ppid;
7692
7693 if (!try_get_task_stack(p))
7694 return;
7695
7696 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
7697
7698 if (task_is_running(p))
7699 pr_cont(" running task ");
7700 free = stack_not_used(p);
7701 ppid = 0;
7702 rcu_read_lock();
7703 if (pid_alive(p))
7704 ppid = task_pid_nr(rcu_dereference(p->real_parent));
7705 rcu_read_unlock();
7706 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n",
7707 free, task_pid_nr(p), task_tgid_nr(p),
7708 ppid, read_task_thread_flags(p));
7709
7710 print_worker_info(KERN_INFO, p);
7711 print_stop_info(KERN_INFO, p);
7712 print_scx_info(KERN_INFO, p);
7713 show_stack(p, NULL, KERN_INFO);
7714 put_task_stack(p);
7715}
7716EXPORT_SYMBOL_GPL(sched_show_task);
7717
7718static inline bool
7719state_filter_match(unsigned long state_filter, struct task_struct *p)
7720{
7721 unsigned int state = READ_ONCE(p->__state);
7722
7723 /* no filter, everything matches */
7724 if (!state_filter)
7725 return true;
7726
7727 /* filter, but doesn't match */
7728 if (!(state & state_filter))
7729 return false;
7730
7731 /*
7732 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7733 * TASK_KILLABLE).
7734 */
7735 if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
7736 return false;
7737
7738 return true;
7739}
7740
7741
7742void show_state_filter(unsigned int state_filter)
7743{
7744 struct task_struct *g, *p;
7745
7746 rcu_read_lock();
7747 for_each_process_thread(g, p) {
7748 /*
7749 * reset the NMI-timeout, listing all files on a slow
7750 * console might take a lot of time:
7751 * Also, reset softlockup watchdogs on all CPUs, because
7752 * another CPU might be blocked waiting for us to process
7753 * an IPI.
7754 */
7755 touch_nmi_watchdog();
7756 touch_all_softlockup_watchdogs();
7757 if (state_filter_match(state_filter, p))
7758 sched_show_task(p);
7759 }
7760
7761#ifdef CONFIG_SCHED_DEBUG
7762 if (!state_filter)
7763 sysrq_sched_debug_show();
7764#endif
7765 rcu_read_unlock();
7766 /*
7767 * Only show locks if all tasks are dumped:
7768 */
7769 if (!state_filter)
7770 debug_show_all_locks();
7771}
7772
7773/**
7774 * init_idle - set up an idle thread for a given CPU
7775 * @idle: task in question
7776 * @cpu: CPU the idle task belongs to
7777 *
7778 * NOTE: this function does not set the idle thread's NEED_RESCHED
7779 * flag, to make booting more robust.
7780 */
7781void __init init_idle(struct task_struct *idle, int cpu)
7782{
7783#ifdef CONFIG_SMP
7784 struct affinity_context ac = (struct affinity_context) {
7785 .new_mask = cpumask_of(cpu),
7786 .flags = 0,
7787 };
7788#endif
7789 struct rq *rq = cpu_rq(cpu);
7790 unsigned long flags;
7791
7792 raw_spin_lock_irqsave(&idle->pi_lock, flags);
7793 raw_spin_rq_lock(rq);
7794
7795 idle->__state = TASK_RUNNING;
7796 idle->se.exec_start = sched_clock();
7797 /*
7798 * PF_KTHREAD should already be set at this point; regardless, make it
7799 * look like a proper per-CPU kthread.
7800 */
7801 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
7802 kthread_set_per_cpu(idle, cpu);
7803
7804#ifdef CONFIG_SMP
7805 /*
7806 * No validation and serialization required at boot time and for
7807 * setting up the idle tasks of not yet online CPUs.
7808 */
7809 set_cpus_allowed_common(idle, &ac);
7810#endif
7811 /*
7812 * We're having a chicken and egg problem, even though we are
7813 * holding rq->lock, the CPU isn't yet set to this CPU so the
7814 * lockdep check in task_group() will fail.
7815 *
7816 * Similar case to sched_fork(). / Alternatively we could
7817 * use task_rq_lock() here and obtain the other rq->lock.
7818 *
7819 * Silence PROVE_RCU
7820 */
7821 rcu_read_lock();
7822 __set_task_cpu(idle, cpu);
7823 rcu_read_unlock();
7824
7825 rq->idle = idle;
7826 rq_set_donor(rq, idle);
7827 rcu_assign_pointer(rq->curr, idle);
7828 idle->on_rq = TASK_ON_RQ_QUEUED;
7829#ifdef CONFIG_SMP
7830 idle->on_cpu = 1;
7831#endif
7832 raw_spin_rq_unlock(rq);
7833 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
7834
7835 /* Set the preempt count _outside_ the spinlocks! */
7836 init_idle_preempt_count(idle, cpu);
7837
7838 /*
7839 * The idle tasks have their own, simple scheduling class:
7840 */
7841 idle->sched_class = &idle_sched_class;
7842 ftrace_graph_init_idle_task(idle, cpu);
7843 vtime_init_idle(idle, cpu);
7844#ifdef CONFIG_SMP
7845 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
7846#endif
7847}
7848
7849#ifdef CONFIG_SMP
7850
7851int cpuset_cpumask_can_shrink(const struct cpumask *cur,
7852 const struct cpumask *trial)
7853{
7854 int ret = 1;
7855
7856 if (cpumask_empty(cur))
7857 return ret;
7858
7859 ret = dl_cpuset_cpumask_can_shrink(cur, trial);
7860
7861 return ret;
7862}
7863
7864int task_can_attach(struct task_struct *p)
7865{
7866 int ret = 0;
7867
7868 /*
7869 * Kthreads which disallow setaffinity shouldn't be moved
7870 * to a new cpuset; we don't want to change their CPU
7871 * affinity and isolating such threads by their set of
7872 * allowed nodes is unnecessary. Thus, cpusets are not
7873 * applicable for such threads. This prevents checking for
7874 * success of set_cpus_allowed_ptr() on all attached tasks
7875 * before cpus_mask may be changed.
7876 */
7877 if (p->flags & PF_NO_SETAFFINITY)
7878 ret = -EINVAL;
7879
7880 return ret;
7881}
7882
7883bool sched_smp_initialized __read_mostly;
7884
7885#ifdef CONFIG_NUMA_BALANCING
7886/* Migrate current task p to target_cpu */
7887int migrate_task_to(struct task_struct *p, int target_cpu)
7888{
7889 struct migration_arg arg = { p, target_cpu };
7890 int curr_cpu = task_cpu(p);
7891
7892 if (curr_cpu == target_cpu)
7893 return 0;
7894
7895 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
7896 return -EINVAL;
7897
7898 /* TODO: This is not properly updating schedstats */
7899
7900 trace_sched_move_numa(p, curr_cpu, target_cpu);
7901 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
7902}
7903
7904/*
7905 * Requeue a task on a given node and accurately track the number of NUMA
7906 * tasks on the runqueues
7907 */
7908void sched_setnuma(struct task_struct *p, int nid)
7909{
7910 bool queued, running;
7911 struct rq_flags rf;
7912 struct rq *rq;
7913
7914 rq = task_rq_lock(p, &rf);
7915 queued = task_on_rq_queued(p);
7916 running = task_current_donor(rq, p);
7917
7918 if (queued)
7919 dequeue_task(rq, p, DEQUEUE_SAVE);
7920 if (running)
7921 put_prev_task(rq, p);
7922
7923 p->numa_preferred_nid = nid;
7924
7925 if (queued)
7926 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7927 if (running)
7928 set_next_task(rq, p);
7929 task_rq_unlock(rq, p, &rf);
7930}
7931#endif /* CONFIG_NUMA_BALANCING */
7932
7933#ifdef CONFIG_HOTPLUG_CPU
7934/*
7935 * Ensure that the idle task is using init_mm right before its CPU goes
7936 * offline.
7937 */
7938void idle_task_exit(void)
7939{
7940 struct mm_struct *mm = current->active_mm;
7941
7942 BUG_ON(cpu_online(smp_processor_id()));
7943 BUG_ON(current != this_rq()->idle);
7944
7945 if (mm != &init_mm) {
7946 switch_mm(mm, &init_mm, current);
7947 finish_arch_post_lock_switch();
7948 }
7949
7950 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
7951}
7952
7953static int __balance_push_cpu_stop(void *arg)
7954{
7955 struct task_struct *p = arg;
7956 struct rq *rq = this_rq();
7957 struct rq_flags rf;
7958 int cpu;
7959
7960 raw_spin_lock_irq(&p->pi_lock);
7961 rq_lock(rq, &rf);
7962
7963 update_rq_clock(rq);
7964
7965 if (task_rq(p) == rq && task_on_rq_queued(p)) {
7966 cpu = select_fallback_rq(rq->cpu, p);
7967 rq = __migrate_task(rq, &rf, p, cpu);
7968 }
7969
7970 rq_unlock(rq, &rf);
7971 raw_spin_unlock_irq(&p->pi_lock);
7972
7973 put_task_struct(p);
7974
7975 return 0;
7976}
7977
7978static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
7979
7980/*
7981 * Ensure we only run per-cpu kthreads once the CPU goes !active.
7982 *
7983 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
7984 * effective when the hotplug motion is down.
7985 */
7986static void balance_push(struct rq *rq)
7987{
7988 struct task_struct *push_task = rq->curr;
7989
7990 lockdep_assert_rq_held(rq);
7991
7992 /*
7993 * Ensure the thing is persistent until balance_push_set(.on = false);
7994 */
7995 rq->balance_callback = &balance_push_callback;
7996
7997 /*
7998 * Only active while going offline and when invoked on the outgoing
7999 * CPU.
8000 */
8001 if (!cpu_dying(rq->cpu) || rq != this_rq())
8002 return;
8003
8004 /*
8005 * Both the cpu-hotplug and stop task are in this case and are
8006 * required to complete the hotplug process.
8007 */
8008 if (kthread_is_per_cpu(push_task) ||
8009 is_migration_disabled(push_task)) {
8010
8011 /*
8012 * If this is the idle task on the outgoing CPU try to wake
8013 * up the hotplug control thread which might wait for the
8014 * last task to vanish. The rcuwait_active() check is
8015 * accurate here because the waiter is pinned on this CPU
8016 * and can't obviously be running in parallel.
8017 *
8018 * On RT kernels this also has to check whether there are
8019 * pinned and scheduled out tasks on the runqueue. They
8020 * need to leave the migrate disabled section first.
8021 */
8022 if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
8023 rcuwait_active(&rq->hotplug_wait)) {
8024 raw_spin_rq_unlock(rq);
8025 rcuwait_wake_up(&rq->hotplug_wait);
8026 raw_spin_rq_lock(rq);
8027 }
8028 return;
8029 }
8030
8031 get_task_struct(push_task);
8032 /*
8033 * Temporarily drop rq->lock such that we can wake-up the stop task.
8034 * Both preemption and IRQs are still disabled.
8035 */
8036 preempt_disable();
8037 raw_spin_rq_unlock(rq);
8038 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
8039 this_cpu_ptr(&push_work));
8040 preempt_enable();
8041 /*
8042 * At this point need_resched() is true and we'll take the loop in
8043 * schedule(). The next pick is obviously going to be the stop task
8044 * which kthread_is_per_cpu() and will push this task away.
8045 */
8046 raw_spin_rq_lock(rq);
8047}
8048
8049static void balance_push_set(int cpu, bool on)
8050{
8051 struct rq *rq = cpu_rq(cpu);
8052 struct rq_flags rf;
8053
8054 rq_lock_irqsave(rq, &rf);
8055 if (on) {
8056 WARN_ON_ONCE(rq->balance_callback);
8057 rq->balance_callback = &balance_push_callback;
8058 } else if (rq->balance_callback == &balance_push_callback) {
8059 rq->balance_callback = NULL;
8060 }
8061 rq_unlock_irqrestore(rq, &rf);
8062}
8063
8064/*
8065 * Invoked from a CPUs hotplug control thread after the CPU has been marked
8066 * inactive. All tasks which are not per CPU kernel threads are either
8067 * pushed off this CPU now via balance_push() or placed on a different CPU
8068 * during wakeup. Wait until the CPU is quiescent.
8069 */
8070static void balance_hotplug_wait(void)
8071{
8072 struct rq *rq = this_rq();
8073
8074 rcuwait_wait_event(&rq->hotplug_wait,
8075 rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
8076 TASK_UNINTERRUPTIBLE);
8077}
8078
8079#else
8080
8081static inline void balance_push(struct rq *rq)
8082{
8083}
8084
8085static inline void balance_push_set(int cpu, bool on)
8086{
8087}
8088
8089static inline void balance_hotplug_wait(void)
8090{
8091}
8092
8093#endif /* CONFIG_HOTPLUG_CPU */
8094
8095void set_rq_online(struct rq *rq)
8096{
8097 if (!rq->online) {
8098 const struct sched_class *class;
8099
8100 cpumask_set_cpu(rq->cpu, rq->rd->online);
8101 rq->online = 1;
8102
8103 for_each_class(class) {
8104 if (class->rq_online)
8105 class->rq_online(rq);
8106 }
8107 }
8108}
8109
8110void set_rq_offline(struct rq *rq)
8111{
8112 if (rq->online) {
8113 const struct sched_class *class;
8114
8115 update_rq_clock(rq);
8116 for_each_class(class) {
8117 if (class->rq_offline)
8118 class->rq_offline(rq);
8119 }
8120
8121 cpumask_clear_cpu(rq->cpu, rq->rd->online);
8122 rq->online = 0;
8123 }
8124}
8125
8126static inline void sched_set_rq_online(struct rq *rq, int cpu)
8127{
8128 struct rq_flags rf;
8129
8130 rq_lock_irqsave(rq, &rf);
8131 if (rq->rd) {
8132 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8133 set_rq_online(rq);
8134 }
8135 rq_unlock_irqrestore(rq, &rf);
8136}
8137
8138static inline void sched_set_rq_offline(struct rq *rq, int cpu)
8139{
8140 struct rq_flags rf;
8141
8142 rq_lock_irqsave(rq, &rf);
8143 if (rq->rd) {
8144 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8145 set_rq_offline(rq);
8146 }
8147 rq_unlock_irqrestore(rq, &rf);
8148}
8149
8150/*
8151 * used to mark begin/end of suspend/resume:
8152 */
8153static int num_cpus_frozen;
8154
8155/*
8156 * Update cpusets according to cpu_active mask. If cpusets are
8157 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
8158 * around partition_sched_domains().
8159 *
8160 * If we come here as part of a suspend/resume, don't touch cpusets because we
8161 * want to restore it back to its original state upon resume anyway.
8162 */
8163static void cpuset_cpu_active(void)
8164{
8165 if (cpuhp_tasks_frozen) {
8166 /*
8167 * num_cpus_frozen tracks how many CPUs are involved in suspend
8168 * resume sequence. As long as this is not the last online
8169 * operation in the resume sequence, just build a single sched
8170 * domain, ignoring cpusets.
8171 */
8172 partition_sched_domains(1, NULL, NULL);
8173 if (--num_cpus_frozen)
8174 return;
8175 /*
8176 * This is the last CPU online operation. So fall through and
8177 * restore the original sched domains by considering the
8178 * cpuset configurations.
8179 */
8180 cpuset_force_rebuild();
8181 }
8182 cpuset_update_active_cpus();
8183}
8184
8185static int cpuset_cpu_inactive(unsigned int cpu)
8186{
8187 if (!cpuhp_tasks_frozen) {
8188 int ret = dl_bw_check_overflow(cpu);
8189
8190 if (ret)
8191 return ret;
8192 cpuset_update_active_cpus();
8193 } else {
8194 num_cpus_frozen++;
8195 partition_sched_domains(1, NULL, NULL);
8196 }
8197 return 0;
8198}
8199
8200static inline void sched_smt_present_inc(int cpu)
8201{
8202#ifdef CONFIG_SCHED_SMT
8203 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8204 static_branch_inc_cpuslocked(&sched_smt_present);
8205#endif
8206}
8207
8208static inline void sched_smt_present_dec(int cpu)
8209{
8210#ifdef CONFIG_SCHED_SMT
8211 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8212 static_branch_dec_cpuslocked(&sched_smt_present);
8213#endif
8214}
8215
8216int sched_cpu_activate(unsigned int cpu)
8217{
8218 struct rq *rq = cpu_rq(cpu);
8219
8220 /*
8221 * Clear the balance_push callback and prepare to schedule
8222 * regular tasks.
8223 */
8224 balance_push_set(cpu, false);
8225
8226 /*
8227 * When going up, increment the number of cores with SMT present.
8228 */
8229 sched_smt_present_inc(cpu);
8230 set_cpu_active(cpu, true);
8231
8232 if (sched_smp_initialized) {
8233 sched_update_numa(cpu, true);
8234 sched_domains_numa_masks_set(cpu);
8235 cpuset_cpu_active();
8236 }
8237
8238 scx_rq_activate(rq);
8239
8240 /*
8241 * Put the rq online, if not already. This happens:
8242 *
8243 * 1) In the early boot process, because we build the real domains
8244 * after all CPUs have been brought up.
8245 *
8246 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
8247 * domains.
8248 */
8249 sched_set_rq_online(rq, cpu);
8250
8251 return 0;
8252}
8253
8254int sched_cpu_deactivate(unsigned int cpu)
8255{
8256 struct rq *rq = cpu_rq(cpu);
8257 int ret;
8258
8259 /*
8260 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
8261 * load balancing when not active
8262 */
8263 nohz_balance_exit_idle(rq);
8264
8265 set_cpu_active(cpu, false);
8266
8267 /*
8268 * From this point forward, this CPU will refuse to run any task that
8269 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
8270 * push those tasks away until this gets cleared, see
8271 * sched_cpu_dying().
8272 */
8273 balance_push_set(cpu, true);
8274
8275 /*
8276 * We've cleared cpu_active_mask / set balance_push, wait for all
8277 * preempt-disabled and RCU users of this state to go away such that
8278 * all new such users will observe it.
8279 *
8280 * Specifically, we rely on ttwu to no longer target this CPU, see
8281 * ttwu_queue_cond() and is_cpu_allowed().
8282 *
8283 * Do sync before park smpboot threads to take care the RCU boost case.
8284 */
8285 synchronize_rcu();
8286
8287 sched_set_rq_offline(rq, cpu);
8288
8289 scx_rq_deactivate(rq);
8290
8291 /*
8292 * When going down, decrement the number of cores with SMT present.
8293 */
8294 sched_smt_present_dec(cpu);
8295
8296#ifdef CONFIG_SCHED_SMT
8297 sched_core_cpu_deactivate(cpu);
8298#endif
8299
8300 if (!sched_smp_initialized)
8301 return 0;
8302
8303 sched_update_numa(cpu, false);
8304 ret = cpuset_cpu_inactive(cpu);
8305 if (ret) {
8306 sched_smt_present_inc(cpu);
8307 sched_set_rq_online(rq, cpu);
8308 balance_push_set(cpu, false);
8309 set_cpu_active(cpu, true);
8310 sched_update_numa(cpu, true);
8311 return ret;
8312 }
8313 sched_domains_numa_masks_clear(cpu);
8314 return 0;
8315}
8316
8317static void sched_rq_cpu_starting(unsigned int cpu)
8318{
8319 struct rq *rq = cpu_rq(cpu);
8320
8321 rq->calc_load_update = calc_load_update;
8322 update_max_interval();
8323}
8324
8325int sched_cpu_starting(unsigned int cpu)
8326{
8327 sched_core_cpu_starting(cpu);
8328 sched_rq_cpu_starting(cpu);
8329 sched_tick_start(cpu);
8330 return 0;
8331}
8332
8333#ifdef CONFIG_HOTPLUG_CPU
8334
8335/*
8336 * Invoked immediately before the stopper thread is invoked to bring the
8337 * CPU down completely. At this point all per CPU kthreads except the
8338 * hotplug thread (current) and the stopper thread (inactive) have been
8339 * either parked or have been unbound from the outgoing CPU. Ensure that
8340 * any of those which might be on the way out are gone.
8341 *
8342 * If after this point a bound task is being woken on this CPU then the
8343 * responsible hotplug callback has failed to do it's job.
8344 * sched_cpu_dying() will catch it with the appropriate fireworks.
8345 */
8346int sched_cpu_wait_empty(unsigned int cpu)
8347{
8348 balance_hotplug_wait();
8349 return 0;
8350}
8351
8352/*
8353 * Since this CPU is going 'away' for a while, fold any nr_active delta we
8354 * might have. Called from the CPU stopper task after ensuring that the
8355 * stopper is the last running task on the CPU, so nr_active count is
8356 * stable. We need to take the tear-down thread which is calling this into
8357 * account, so we hand in adjust = 1 to the load calculation.
8358 *
8359 * Also see the comment "Global load-average calculations".
8360 */
8361static void calc_load_migrate(struct rq *rq)
8362{
8363 long delta = calc_load_fold_active(rq, 1);
8364
8365 if (delta)
8366 atomic_long_add(delta, &calc_load_tasks);
8367}
8368
8369static void dump_rq_tasks(struct rq *rq, const char *loglvl)
8370{
8371 struct task_struct *g, *p;
8372 int cpu = cpu_of(rq);
8373
8374 lockdep_assert_rq_held(rq);
8375
8376 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
8377 for_each_process_thread(g, p) {
8378 if (task_cpu(p) != cpu)
8379 continue;
8380
8381 if (!task_on_rq_queued(p))
8382 continue;
8383
8384 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8385 }
8386}
8387
8388int sched_cpu_dying(unsigned int cpu)
8389{
8390 struct rq *rq = cpu_rq(cpu);
8391 struct rq_flags rf;
8392
8393 /* Handle pending wakeups and then migrate everything off */
8394 sched_tick_stop(cpu);
8395
8396 rq_lock_irqsave(rq, &rf);
8397 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8398 WARN(true, "Dying CPU not properly vacated!");
8399 dump_rq_tasks(rq, KERN_WARNING);
8400 }
8401 rq_unlock_irqrestore(rq, &rf);
8402
8403 calc_load_migrate(rq);
8404 update_max_interval();
8405 hrtick_clear(rq);
8406 sched_core_cpu_dying(cpu);
8407 return 0;
8408}
8409#endif
8410
8411void __init sched_init_smp(void)
8412{
8413 sched_init_numa(NUMA_NO_NODE);
8414
8415 /*
8416 * There's no userspace yet to cause hotplug operations; hence all the
8417 * CPU masks are stable and all blatant races in the below code cannot
8418 * happen.
8419 */
8420 mutex_lock(&sched_domains_mutex);
8421 sched_init_domains(cpu_active_mask);
8422 mutex_unlock(&sched_domains_mutex);
8423
8424 /* Move init over to a non-isolated CPU */
8425 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
8426 BUG();
8427 current->flags &= ~PF_NO_SETAFFINITY;
8428 sched_init_granularity();
8429
8430 init_sched_rt_class();
8431 init_sched_dl_class();
8432
8433 sched_smp_initialized = true;
8434}
8435
8436static int __init migration_init(void)
8437{
8438 sched_cpu_starting(smp_processor_id());
8439 return 0;
8440}
8441early_initcall(migration_init);
8442
8443#else
8444void __init sched_init_smp(void)
8445{
8446 sched_init_granularity();
8447}
8448#endif /* CONFIG_SMP */
8449
8450int in_sched_functions(unsigned long addr)
8451{
8452 return in_lock_functions(addr) ||
8453 (addr >= (unsigned long)__sched_text_start
8454 && addr < (unsigned long)__sched_text_end);
8455}
8456
8457#ifdef CONFIG_CGROUP_SCHED
8458/*
8459 * Default task group.
8460 * Every task in system belongs to this group at bootup.
8461 */
8462struct task_group root_task_group;
8463LIST_HEAD(task_groups);
8464
8465/* Cacheline aligned slab cache for task_group */
8466static struct kmem_cache *task_group_cache __ro_after_init;
8467#endif
8468
8469void __init sched_init(void)
8470{
8471 unsigned long ptr = 0;
8472 int i;
8473
8474 /* Make sure the linker didn't screw up */
8475#ifdef CONFIG_SMP
8476 BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
8477#endif
8478 BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
8479 BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
8480 BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
8481#ifdef CONFIG_SCHED_CLASS_EXT
8482 BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
8483 BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
8484#endif
8485
8486 wait_bit_init();
8487
8488#ifdef CONFIG_FAIR_GROUP_SCHED
8489 ptr += 2 * nr_cpu_ids * sizeof(void **);
8490#endif
8491#ifdef CONFIG_RT_GROUP_SCHED
8492 ptr += 2 * nr_cpu_ids * sizeof(void **);
8493#endif
8494 if (ptr) {
8495 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
8496
8497#ifdef CONFIG_FAIR_GROUP_SCHED
8498 root_task_group.se = (struct sched_entity **)ptr;
8499 ptr += nr_cpu_ids * sizeof(void **);
8500
8501 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8502 ptr += nr_cpu_ids * sizeof(void **);
8503
8504 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
8505 init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
8506#endif /* CONFIG_FAIR_GROUP_SCHED */
8507#ifdef CONFIG_EXT_GROUP_SCHED
8508 root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
8509#endif /* CONFIG_EXT_GROUP_SCHED */
8510#ifdef CONFIG_RT_GROUP_SCHED
8511 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8512 ptr += nr_cpu_ids * sizeof(void **);
8513
8514 root_task_group.rt_rq = (struct rt_rq **)ptr;
8515 ptr += nr_cpu_ids * sizeof(void **);
8516
8517#endif /* CONFIG_RT_GROUP_SCHED */
8518 }
8519
8520#ifdef CONFIG_SMP
8521 init_defrootdomain();
8522#endif
8523
8524#ifdef CONFIG_RT_GROUP_SCHED
8525 init_rt_bandwidth(&root_task_group.rt_bandwidth,
8526 global_rt_period(), global_rt_runtime());
8527#endif /* CONFIG_RT_GROUP_SCHED */
8528
8529#ifdef CONFIG_CGROUP_SCHED
8530 task_group_cache = KMEM_CACHE(task_group, 0);
8531
8532 list_add(&root_task_group.list, &task_groups);
8533 INIT_LIST_HEAD(&root_task_group.children);
8534 INIT_LIST_HEAD(&root_task_group.siblings);
8535 autogroup_init(&init_task);
8536#endif /* CONFIG_CGROUP_SCHED */
8537
8538 for_each_possible_cpu(i) {
8539 struct rq *rq;
8540
8541 rq = cpu_rq(i);
8542 raw_spin_lock_init(&rq->__lock);
8543 rq->nr_running = 0;
8544 rq->calc_load_active = 0;
8545 rq->calc_load_update = jiffies + LOAD_FREQ;
8546 init_cfs_rq(&rq->cfs);
8547 init_rt_rq(&rq->rt);
8548 init_dl_rq(&rq->dl);
8549#ifdef CONFIG_FAIR_GROUP_SCHED
8550 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8551 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
8552 /*
8553 * How much CPU bandwidth does root_task_group get?
8554 *
8555 * In case of task-groups formed through the cgroup filesystem, it
8556 * gets 100% of the CPU resources in the system. This overall
8557 * system CPU resource is divided among the tasks of
8558 * root_task_group and its child task-groups in a fair manner,
8559 * based on each entity's (task or task-group's) weight
8560 * (se->load.weight).
8561 *
8562 * In other words, if root_task_group has 10 tasks of weight
8563 * 1024) and two child groups A0 and A1 (of weight 1024 each),
8564 * then A0's share of the CPU resource is:
8565 *
8566 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8567 *
8568 * We achieve this by letting root_task_group's tasks sit
8569 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
8570 */
8571 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8572#endif /* CONFIG_FAIR_GROUP_SCHED */
8573
8574#ifdef CONFIG_RT_GROUP_SCHED
8575 /*
8576 * This is required for init cpu because rt.c:__enable_runtime()
8577 * starts working after scheduler_running, which is not the case
8578 * yet.
8579 */
8580 rq->rt.rt_runtime = global_rt_runtime();
8581 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8582#endif
8583#ifdef CONFIG_SMP
8584 rq->sd = NULL;
8585 rq->rd = NULL;
8586 rq->cpu_capacity = SCHED_CAPACITY_SCALE;
8587 rq->balance_callback = &balance_push_callback;
8588 rq->active_balance = 0;
8589 rq->next_balance = jiffies;
8590 rq->push_cpu = 0;
8591 rq->cpu = i;
8592 rq->online = 0;
8593 rq->idle_stamp = 0;
8594 rq->avg_idle = 2*sysctl_sched_migration_cost;
8595 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
8596
8597 INIT_LIST_HEAD(&rq->cfs_tasks);
8598
8599 rq_attach_root(rq, &def_root_domain);
8600#ifdef CONFIG_NO_HZ_COMMON
8601 rq->last_blocked_load_update_tick = jiffies;
8602 atomic_set(&rq->nohz_flags, 0);
8603
8604 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
8605#endif
8606#ifdef CONFIG_HOTPLUG_CPU
8607 rcuwait_init(&rq->hotplug_wait);
8608#endif
8609#endif /* CONFIG_SMP */
8610 hrtick_rq_init(rq);
8611 atomic_set(&rq->nr_iowait, 0);
8612 fair_server_init(rq);
8613
8614#ifdef CONFIG_SCHED_CORE
8615 rq->core = rq;
8616 rq->core_pick = NULL;
8617 rq->core_dl_server = NULL;
8618 rq->core_enabled = 0;
8619 rq->core_tree = RB_ROOT;
8620 rq->core_forceidle_count = 0;
8621 rq->core_forceidle_occupation = 0;
8622 rq->core_forceidle_start = 0;
8623
8624 rq->core_cookie = 0UL;
8625#endif
8626 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
8627 }
8628
8629 set_load_weight(&init_task, false);
8630 init_task.se.slice = sysctl_sched_base_slice,
8631
8632 /*
8633 * The boot idle thread does lazy MMU switching as well:
8634 */
8635 mmgrab_lazy_tlb(&init_mm);
8636 enter_lazy_tlb(&init_mm, current);
8637
8638 /*
8639 * The idle task doesn't need the kthread struct to function, but it
8640 * is dressed up as a per-CPU kthread and thus needs to play the part
8641 * if we want to avoid special-casing it in code that deals with per-CPU
8642 * kthreads.
8643 */
8644 WARN_ON(!set_kthread_struct(current));
8645
8646 /*
8647 * Make us the idle thread. Technically, schedule() should not be
8648 * called from this thread, however somewhere below it might be,
8649 * but because we are the idle thread, we just pick up running again
8650 * when this runqueue becomes "idle".
8651 */
8652 __sched_fork(0, current);
8653 init_idle(current, smp_processor_id());
8654
8655 calc_load_update = jiffies + LOAD_FREQ;
8656
8657#ifdef CONFIG_SMP
8658 idle_thread_set_boot_cpu();
8659 balance_push_set(smp_processor_id(), false);
8660#endif
8661 init_sched_fair_class();
8662 init_sched_ext_class();
8663
8664 psi_init();
8665
8666 init_uclamp();
8667
8668 preempt_dynamic_init();
8669
8670 scheduler_running = 1;
8671}
8672
8673#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
8674
8675void __might_sleep(const char *file, int line)
8676{
8677 unsigned int state = get_current_state();
8678 /*
8679 * Blocking primitives will set (and therefore destroy) current->state,
8680 * since we will exit with TASK_RUNNING make sure we enter with it,
8681 * otherwise we will destroy state.
8682 */
8683 WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
8684 "do not call blocking ops when !TASK_RUNNING; "
8685 "state=%x set at [<%p>] %pS\n", state,
8686 (void *)current->task_state_change,
8687 (void *)current->task_state_change);
8688
8689 __might_resched(file, line, 0);
8690}
8691EXPORT_SYMBOL(__might_sleep);
8692
8693static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
8694{
8695 if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
8696 return;
8697
8698 if (preempt_count() == preempt_offset)
8699 return;
8700
8701 pr_err("Preemption disabled at:");
8702 print_ip_sym(KERN_ERR, ip);
8703}
8704
8705static inline bool resched_offsets_ok(unsigned int offsets)
8706{
8707 unsigned int nested = preempt_count();
8708
8709 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
8710
8711 return nested == offsets;
8712}
8713
8714void __might_resched(const char *file, int line, unsigned int offsets)
8715{
8716 /* Ratelimiting timestamp: */
8717 static unsigned long prev_jiffy;
8718
8719 unsigned long preempt_disable_ip;
8720
8721 /* WARN_ON_ONCE() by default, no rate limit required: */
8722 rcu_sleep_check();
8723
8724 if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
8725 !is_idle_task(current) && !current->non_block_count) ||
8726 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
8727 oops_in_progress)
8728 return;
8729
8730 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8731 return;
8732 prev_jiffy = jiffies;
8733
8734 /* Save this before calling printk(), since that will clobber it: */
8735 preempt_disable_ip = get_preempt_disable_ip(current);
8736
8737 pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
8738 file, line);
8739 pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
8740 in_atomic(), irqs_disabled(), current->non_block_count,
8741 current->pid, current->comm);
8742 pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
8743 offsets & MIGHT_RESCHED_PREEMPT_MASK);
8744
8745 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
8746 pr_err("RCU nest depth: %d, expected: %u\n",
8747 rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
8748 }
8749
8750 if (task_stack_end_corrupted(current))
8751 pr_emerg("Thread overran stack, or stack corrupted\n");
8752
8753 debug_show_held_locks(current);
8754 if (irqs_disabled())
8755 print_irqtrace_events(current);
8756
8757 print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
8758 preempt_disable_ip);
8759
8760 dump_stack();
8761 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8762}
8763EXPORT_SYMBOL(__might_resched);
8764
8765void __cant_sleep(const char *file, int line, int preempt_offset)
8766{
8767 static unsigned long prev_jiffy;
8768
8769 if (irqs_disabled())
8770 return;
8771
8772 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8773 return;
8774
8775 if (preempt_count() > preempt_offset)
8776 return;
8777
8778 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8779 return;
8780 prev_jiffy = jiffies;
8781
8782 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
8783 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8784 in_atomic(), irqs_disabled(),
8785 current->pid, current->comm);
8786
8787 debug_show_held_locks(current);
8788 dump_stack();
8789 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8790}
8791EXPORT_SYMBOL_GPL(__cant_sleep);
8792
8793#ifdef CONFIG_SMP
8794void __cant_migrate(const char *file, int line)
8795{
8796 static unsigned long prev_jiffy;
8797
8798 if (irqs_disabled())
8799 return;
8800
8801 if (is_migration_disabled(current))
8802 return;
8803
8804 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8805 return;
8806
8807 if (preempt_count() > 0)
8808 return;
8809
8810 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8811 return;
8812 prev_jiffy = jiffies;
8813
8814 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
8815 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
8816 in_atomic(), irqs_disabled(), is_migration_disabled(current),
8817 current->pid, current->comm);
8818
8819 debug_show_held_locks(current);
8820 dump_stack();
8821 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8822}
8823EXPORT_SYMBOL_GPL(__cant_migrate);
8824#endif
8825#endif
8826
8827#ifdef CONFIG_MAGIC_SYSRQ
8828void normalize_rt_tasks(void)
8829{
8830 struct task_struct *g, *p;
8831 struct sched_attr attr = {
8832 .sched_policy = SCHED_NORMAL,
8833 };
8834
8835 read_lock(&tasklist_lock);
8836 for_each_process_thread(g, p) {
8837 /*
8838 * Only normalize user tasks:
8839 */
8840 if (p->flags & PF_KTHREAD)
8841 continue;
8842
8843 p->se.exec_start = 0;
8844 schedstat_set(p->stats.wait_start, 0);
8845 schedstat_set(p->stats.sleep_start, 0);
8846 schedstat_set(p->stats.block_start, 0);
8847
8848 if (!rt_or_dl_task(p)) {
8849 /*
8850 * Renice negative nice level userspace
8851 * tasks back to 0:
8852 */
8853 if (task_nice(p) < 0)
8854 set_user_nice(p, 0);
8855 continue;
8856 }
8857
8858 __sched_setscheduler(p, &attr, false, false);
8859 }
8860 read_unlock(&tasklist_lock);
8861}
8862
8863#endif /* CONFIG_MAGIC_SYSRQ */
8864
8865#if defined(CONFIG_KGDB_KDB)
8866/*
8867 * These functions are only useful for KDB.
8868 *
8869 * They can only be called when the whole system has been
8870 * stopped - every CPU needs to be quiescent, and no scheduling
8871 * activity can take place. Using them for anything else would
8872 * be a serious bug, and as a result, they aren't even visible
8873 * under any other configuration.
8874 */
8875
8876/**
8877 * curr_task - return the current task for a given CPU.
8878 * @cpu: the processor in question.
8879 *
8880 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8881 *
8882 * Return: The current task for @cpu.
8883 */
8884struct task_struct *curr_task(int cpu)
8885{
8886 return cpu_curr(cpu);
8887}
8888
8889#endif /* defined(CONFIG_KGDB_KDB) */
8890
8891#ifdef CONFIG_CGROUP_SCHED
8892/* task_group_lock serializes the addition/removal of task groups */
8893static DEFINE_SPINLOCK(task_group_lock);
8894
8895static inline void alloc_uclamp_sched_group(struct task_group *tg,
8896 struct task_group *parent)
8897{
8898#ifdef CONFIG_UCLAMP_TASK_GROUP
8899 enum uclamp_id clamp_id;
8900
8901 for_each_clamp_id(clamp_id) {
8902 uclamp_se_set(&tg->uclamp_req[clamp_id],
8903 uclamp_none(clamp_id), false);
8904 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
8905 }
8906#endif
8907}
8908
8909static void sched_free_group(struct task_group *tg)
8910{
8911 free_fair_sched_group(tg);
8912 free_rt_sched_group(tg);
8913 autogroup_free(tg);
8914 kmem_cache_free(task_group_cache, tg);
8915}
8916
8917static void sched_free_group_rcu(struct rcu_head *rcu)
8918{
8919 sched_free_group(container_of(rcu, struct task_group, rcu));
8920}
8921
8922static void sched_unregister_group(struct task_group *tg)
8923{
8924 unregister_fair_sched_group(tg);
8925 unregister_rt_sched_group(tg);
8926 /*
8927 * We have to wait for yet another RCU grace period to expire, as
8928 * print_cfs_stats() might run concurrently.
8929 */
8930 call_rcu(&tg->rcu, sched_free_group_rcu);
8931}
8932
8933/* allocate runqueue etc for a new task group */
8934struct task_group *sched_create_group(struct task_group *parent)
8935{
8936 struct task_group *tg;
8937
8938 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
8939 if (!tg)
8940 return ERR_PTR(-ENOMEM);
8941
8942 if (!alloc_fair_sched_group(tg, parent))
8943 goto err;
8944
8945 if (!alloc_rt_sched_group(tg, parent))
8946 goto err;
8947
8948 scx_group_set_weight(tg, CGROUP_WEIGHT_DFL);
8949 alloc_uclamp_sched_group(tg, parent);
8950
8951 return tg;
8952
8953err:
8954 sched_free_group(tg);
8955 return ERR_PTR(-ENOMEM);
8956}
8957
8958void sched_online_group(struct task_group *tg, struct task_group *parent)
8959{
8960 unsigned long flags;
8961
8962 spin_lock_irqsave(&task_group_lock, flags);
8963 list_add_rcu(&tg->list, &task_groups);
8964
8965 /* Root should already exist: */
8966 WARN_ON(!parent);
8967
8968 tg->parent = parent;
8969 INIT_LIST_HEAD(&tg->children);
8970 list_add_rcu(&tg->siblings, &parent->children);
8971 spin_unlock_irqrestore(&task_group_lock, flags);
8972
8973 online_fair_sched_group(tg);
8974}
8975
8976/* RCU callback to free various structures associated with a task group */
8977static void sched_unregister_group_rcu(struct rcu_head *rhp)
8978{
8979 /* Now it should be safe to free those cfs_rqs: */
8980 sched_unregister_group(container_of(rhp, struct task_group, rcu));
8981}
8982
8983void sched_destroy_group(struct task_group *tg)
8984{
8985 /* Wait for possible concurrent references to cfs_rqs complete: */
8986 call_rcu(&tg->rcu, sched_unregister_group_rcu);
8987}
8988
8989void sched_release_group(struct task_group *tg)
8990{
8991 unsigned long flags;
8992
8993 /*
8994 * Unlink first, to avoid walk_tg_tree_from() from finding us (via
8995 * sched_cfs_period_timer()).
8996 *
8997 * For this to be effective, we have to wait for all pending users of
8998 * this task group to leave their RCU critical section to ensure no new
8999 * user will see our dying task group any more. Specifically ensure
9000 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
9001 *
9002 * We therefore defer calling unregister_fair_sched_group() to
9003 * sched_unregister_group() which is guarantied to get called only after the
9004 * current RCU grace period has expired.
9005 */
9006 spin_lock_irqsave(&task_group_lock, flags);
9007 list_del_rcu(&tg->list);
9008 list_del_rcu(&tg->siblings);
9009 spin_unlock_irqrestore(&task_group_lock, flags);
9010}
9011
9012static struct task_group *sched_get_task_group(struct task_struct *tsk)
9013{
9014 struct task_group *tg;
9015
9016 /*
9017 * All callers are synchronized by task_rq_lock(); we do not use RCU
9018 * which is pointless here. Thus, we pass "true" to task_css_check()
9019 * to prevent lockdep warnings.
9020 */
9021 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
9022 struct task_group, css);
9023 tg = autogroup_task_group(tsk, tg);
9024
9025 return tg;
9026}
9027
9028static void sched_change_group(struct task_struct *tsk, struct task_group *group)
9029{
9030 tsk->sched_task_group = group;
9031
9032#ifdef CONFIG_FAIR_GROUP_SCHED
9033 if (tsk->sched_class->task_change_group)
9034 tsk->sched_class->task_change_group(tsk);
9035 else
9036#endif
9037 set_task_rq(tsk, task_cpu(tsk));
9038}
9039
9040/*
9041 * Change task's runqueue when it moves between groups.
9042 *
9043 * The caller of this function should have put the task in its new group by
9044 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
9045 * its new group.
9046 */
9047void sched_move_task(struct task_struct *tsk, bool for_autogroup)
9048{
9049 int queued, running, queue_flags =
9050 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
9051 struct task_group *group;
9052 struct rq *rq;
9053
9054 CLASS(task_rq_lock, rq_guard)(tsk);
9055 rq = rq_guard.rq;
9056
9057 /*
9058 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
9059 * group changes.
9060 */
9061 group = sched_get_task_group(tsk);
9062 if (group == tsk->sched_task_group)
9063 return;
9064
9065 update_rq_clock(rq);
9066
9067 running = task_current_donor(rq, tsk);
9068 queued = task_on_rq_queued(tsk);
9069
9070 if (queued)
9071 dequeue_task(rq, tsk, queue_flags);
9072 if (running)
9073 put_prev_task(rq, tsk);
9074
9075 sched_change_group(tsk, group);
9076 if (!for_autogroup)
9077 scx_cgroup_move_task(tsk);
9078
9079 if (queued)
9080 enqueue_task(rq, tsk, queue_flags);
9081 if (running) {
9082 set_next_task(rq, tsk);
9083 /*
9084 * After changing group, the running task may have joined a
9085 * throttled one but it's still the running task. Trigger a
9086 * resched to make sure that task can still run.
9087 */
9088 resched_curr(rq);
9089 }
9090}
9091
9092static struct cgroup_subsys_state *
9093cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
9094{
9095 struct task_group *parent = css_tg(parent_css);
9096 struct task_group *tg;
9097
9098 if (!parent) {
9099 /* This is early initialization for the top cgroup */
9100 return &root_task_group.css;
9101 }
9102
9103 tg = sched_create_group(parent);
9104 if (IS_ERR(tg))
9105 return ERR_PTR(-ENOMEM);
9106
9107 return &tg->css;
9108}
9109
9110/* Expose task group only after completing cgroup initialization */
9111static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
9112{
9113 struct task_group *tg = css_tg(css);
9114 struct task_group *parent = css_tg(css->parent);
9115 int ret;
9116
9117 ret = scx_tg_online(tg);
9118 if (ret)
9119 return ret;
9120
9121 if (parent)
9122 sched_online_group(tg, parent);
9123
9124#ifdef CONFIG_UCLAMP_TASK_GROUP
9125 /* Propagate the effective uclamp value for the new group */
9126 guard(mutex)(&uclamp_mutex);
9127 guard(rcu)();
9128 cpu_util_update_eff(css);
9129#endif
9130
9131 return 0;
9132}
9133
9134static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
9135{
9136 struct task_group *tg = css_tg(css);
9137
9138 scx_tg_offline(tg);
9139}
9140
9141static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
9142{
9143 struct task_group *tg = css_tg(css);
9144
9145 sched_release_group(tg);
9146}
9147
9148static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
9149{
9150 struct task_group *tg = css_tg(css);
9151
9152 /*
9153 * Relies on the RCU grace period between css_released() and this.
9154 */
9155 sched_unregister_group(tg);
9156}
9157
9158static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
9159{
9160#ifdef CONFIG_RT_GROUP_SCHED
9161 struct task_struct *task;
9162 struct cgroup_subsys_state *css;
9163
9164 cgroup_taskset_for_each(task, css, tset) {
9165 if (!sched_rt_can_attach(css_tg(css), task))
9166 return -EINVAL;
9167 }
9168#endif
9169 return scx_cgroup_can_attach(tset);
9170}
9171
9172static void cpu_cgroup_attach(struct cgroup_taskset *tset)
9173{
9174 struct task_struct *task;
9175 struct cgroup_subsys_state *css;
9176
9177 cgroup_taskset_for_each(task, css, tset)
9178 sched_move_task(task, false);
9179
9180 scx_cgroup_finish_attach();
9181}
9182
9183static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
9184{
9185 scx_cgroup_cancel_attach(tset);
9186}
9187
9188#ifdef CONFIG_UCLAMP_TASK_GROUP
9189static void cpu_util_update_eff(struct cgroup_subsys_state *css)
9190{
9191 struct cgroup_subsys_state *top_css = css;
9192 struct uclamp_se *uc_parent = NULL;
9193 struct uclamp_se *uc_se = NULL;
9194 unsigned int eff[UCLAMP_CNT];
9195 enum uclamp_id clamp_id;
9196 unsigned int clamps;
9197
9198 lockdep_assert_held(&uclamp_mutex);
9199 SCHED_WARN_ON(!rcu_read_lock_held());
9200
9201 css_for_each_descendant_pre(css, top_css) {
9202 uc_parent = css_tg(css)->parent
9203 ? css_tg(css)->parent->uclamp : NULL;
9204
9205 for_each_clamp_id(clamp_id) {
9206 /* Assume effective clamps matches requested clamps */
9207 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
9208 /* Cap effective clamps with parent's effective clamps */
9209 if (uc_parent &&
9210 eff[clamp_id] > uc_parent[clamp_id].value) {
9211 eff[clamp_id] = uc_parent[clamp_id].value;
9212 }
9213 }
9214 /* Ensure protection is always capped by limit */
9215 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
9216
9217 /* Propagate most restrictive effective clamps */
9218 clamps = 0x0;
9219 uc_se = css_tg(css)->uclamp;
9220 for_each_clamp_id(clamp_id) {
9221 if (eff[clamp_id] == uc_se[clamp_id].value)
9222 continue;
9223 uc_se[clamp_id].value = eff[clamp_id];
9224 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
9225 clamps |= (0x1 << clamp_id);
9226 }
9227 if (!clamps) {
9228 css = css_rightmost_descendant(css);
9229 continue;
9230 }
9231
9232 /* Immediately update descendants RUNNABLE tasks */
9233 uclamp_update_active_tasks(css);
9234 }
9235}
9236
9237/*
9238 * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
9239 * C expression. Since there is no way to convert a macro argument (N) into a
9240 * character constant, use two levels of macros.
9241 */
9242#define _POW10(exp) ((unsigned int)1e##exp)
9243#define POW10(exp) _POW10(exp)
9244
9245struct uclamp_request {
9246#define UCLAMP_PERCENT_SHIFT 2
9247#define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
9248 s64 percent;
9249 u64 util;
9250 int ret;
9251};
9252
9253static inline struct uclamp_request
9254capacity_from_percent(char *buf)
9255{
9256 struct uclamp_request req = {
9257 .percent = UCLAMP_PERCENT_SCALE,
9258 .util = SCHED_CAPACITY_SCALE,
9259 .ret = 0,
9260 };
9261
9262 buf = strim(buf);
9263 if (strcmp(buf, "max")) {
9264 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
9265 &req.percent);
9266 if (req.ret)
9267 return req;
9268 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
9269 req.ret = -ERANGE;
9270 return req;
9271 }
9272
9273 req.util = req.percent << SCHED_CAPACITY_SHIFT;
9274 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
9275 }
9276
9277 return req;
9278}
9279
9280static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
9281 size_t nbytes, loff_t off,
9282 enum uclamp_id clamp_id)
9283{
9284 struct uclamp_request req;
9285 struct task_group *tg;
9286
9287 req = capacity_from_percent(buf);
9288 if (req.ret)
9289 return req.ret;
9290
9291 static_branch_enable(&sched_uclamp_used);
9292
9293 guard(mutex)(&uclamp_mutex);
9294 guard(rcu)();
9295
9296 tg = css_tg(of_css(of));
9297 if (tg->uclamp_req[clamp_id].value != req.util)
9298 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
9299
9300 /*
9301 * Because of not recoverable conversion rounding we keep track of the
9302 * exact requested value
9303 */
9304 tg->uclamp_pct[clamp_id] = req.percent;
9305
9306 /* Update effective clamps to track the most restrictive value */
9307 cpu_util_update_eff(of_css(of));
9308
9309 return nbytes;
9310}
9311
9312static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
9313 char *buf, size_t nbytes,
9314 loff_t off)
9315{
9316 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
9317}
9318
9319static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
9320 char *buf, size_t nbytes,
9321 loff_t off)
9322{
9323 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
9324}
9325
9326static inline void cpu_uclamp_print(struct seq_file *sf,
9327 enum uclamp_id clamp_id)
9328{
9329 struct task_group *tg;
9330 u64 util_clamp;
9331 u64 percent;
9332 u32 rem;
9333
9334 scoped_guard (rcu) {
9335 tg = css_tg(seq_css(sf));
9336 util_clamp = tg->uclamp_req[clamp_id].value;
9337 }
9338
9339 if (util_clamp == SCHED_CAPACITY_SCALE) {
9340 seq_puts(sf, "max\n");
9341 return;
9342 }
9343
9344 percent = tg->uclamp_pct[clamp_id];
9345 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
9346 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
9347}
9348
9349static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
9350{
9351 cpu_uclamp_print(sf, UCLAMP_MIN);
9352 return 0;
9353}
9354
9355static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
9356{
9357 cpu_uclamp_print(sf, UCLAMP_MAX);
9358 return 0;
9359}
9360#endif /* CONFIG_UCLAMP_TASK_GROUP */
9361
9362#ifdef CONFIG_GROUP_SCHED_WEIGHT
9363static unsigned long tg_weight(struct task_group *tg)
9364{
9365#ifdef CONFIG_FAIR_GROUP_SCHED
9366 return scale_load_down(tg->shares);
9367#else
9368 return sched_weight_from_cgroup(tg->scx_weight);
9369#endif
9370}
9371
9372static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
9373 struct cftype *cftype, u64 shareval)
9374{
9375 int ret;
9376
9377 if (shareval > scale_load_down(ULONG_MAX))
9378 shareval = MAX_SHARES;
9379 ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
9380 if (!ret)
9381 scx_group_set_weight(css_tg(css),
9382 sched_weight_to_cgroup(shareval));
9383 return ret;
9384}
9385
9386static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
9387 struct cftype *cft)
9388{
9389 return tg_weight(css_tg(css));
9390}
9391#endif /* CONFIG_GROUP_SCHED_WEIGHT */
9392
9393#ifdef CONFIG_CFS_BANDWIDTH
9394static DEFINE_MUTEX(cfs_constraints_mutex);
9395
9396const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
9397static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
9398/* More than 203 days if BW_SHIFT equals 20. */
9399static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
9400
9401static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9402
9403static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
9404 u64 burst)
9405{
9406 int i, ret = 0, runtime_enabled, runtime_was_enabled;
9407 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9408
9409 if (tg == &root_task_group)
9410 return -EINVAL;
9411
9412 /*
9413 * Ensure we have at some amount of bandwidth every period. This is
9414 * to prevent reaching a state of large arrears when throttled via
9415 * entity_tick() resulting in prolonged exit starvation.
9416 */
9417 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
9418 return -EINVAL;
9419
9420 /*
9421 * Likewise, bound things on the other side by preventing insane quota
9422 * periods. This also allows us to normalize in computing quota
9423 * feasibility.
9424 */
9425 if (period > max_cfs_quota_period)
9426 return -EINVAL;
9427
9428 /*
9429 * Bound quota to defend quota against overflow during bandwidth shift.
9430 */
9431 if (quota != RUNTIME_INF && quota > max_cfs_runtime)
9432 return -EINVAL;
9433
9434 if (quota != RUNTIME_INF && (burst > quota ||
9435 burst + quota > max_cfs_runtime))
9436 return -EINVAL;
9437
9438 /*
9439 * Prevent race between setting of cfs_rq->runtime_enabled and
9440 * unthrottle_offline_cfs_rqs().
9441 */
9442 guard(cpus_read_lock)();
9443 guard(mutex)(&cfs_constraints_mutex);
9444
9445 ret = __cfs_schedulable(tg, period, quota);
9446 if (ret)
9447 return ret;
9448
9449 runtime_enabled = quota != RUNTIME_INF;
9450 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
9451 /*
9452 * If we need to toggle cfs_bandwidth_used, off->on must occur
9453 * before making related changes, and on->off must occur afterwards
9454 */
9455 if (runtime_enabled && !runtime_was_enabled)
9456 cfs_bandwidth_usage_inc();
9457
9458 scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
9459 cfs_b->period = ns_to_ktime(period);
9460 cfs_b->quota = quota;
9461 cfs_b->burst = burst;
9462
9463 __refill_cfs_bandwidth_runtime(cfs_b);
9464
9465 /*
9466 * Restart the period timer (if active) to handle new
9467 * period expiry:
9468 */
9469 if (runtime_enabled)
9470 start_cfs_bandwidth(cfs_b);
9471 }
9472
9473 for_each_online_cpu(i) {
9474 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
9475 struct rq *rq = cfs_rq->rq;
9476
9477 guard(rq_lock_irq)(rq);
9478 cfs_rq->runtime_enabled = runtime_enabled;
9479 cfs_rq->runtime_remaining = 0;
9480
9481 if (cfs_rq->throttled)
9482 unthrottle_cfs_rq(cfs_rq);
9483 }
9484
9485 if (runtime_was_enabled && !runtime_enabled)
9486 cfs_bandwidth_usage_dec();
9487
9488 return 0;
9489}
9490
9491static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
9492{
9493 u64 quota, period, burst;
9494
9495 period = ktime_to_ns(tg->cfs_bandwidth.period);
9496 burst = tg->cfs_bandwidth.burst;
9497 if (cfs_quota_us < 0)
9498 quota = RUNTIME_INF;
9499 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
9500 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
9501 else
9502 return -EINVAL;
9503
9504 return tg_set_cfs_bandwidth(tg, period, quota, burst);
9505}
9506
9507static long tg_get_cfs_quota(struct task_group *tg)
9508{
9509 u64 quota_us;
9510
9511 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
9512 return -1;
9513
9514 quota_us = tg->cfs_bandwidth.quota;
9515 do_div(quota_us, NSEC_PER_USEC);
9516
9517 return quota_us;
9518}
9519
9520static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
9521{
9522 u64 quota, period, burst;
9523
9524 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
9525 return -EINVAL;
9526
9527 period = (u64)cfs_period_us * NSEC_PER_USEC;
9528 quota = tg->cfs_bandwidth.quota;
9529 burst = tg->cfs_bandwidth.burst;
9530
9531 return tg_set_cfs_bandwidth(tg, period, quota, burst);
9532}
9533
9534static long tg_get_cfs_period(struct task_group *tg)
9535{
9536 u64 cfs_period_us;
9537
9538 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
9539 do_div(cfs_period_us, NSEC_PER_USEC);
9540
9541 return cfs_period_us;
9542}
9543
9544static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
9545{
9546 u64 quota, period, burst;
9547
9548 if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
9549 return -EINVAL;
9550
9551 burst = (u64)cfs_burst_us * NSEC_PER_USEC;
9552 period = ktime_to_ns(tg->cfs_bandwidth.period);
9553 quota = tg->cfs_bandwidth.quota;
9554
9555 return tg_set_cfs_bandwidth(tg, period, quota, burst);
9556}
9557
9558static long tg_get_cfs_burst(struct task_group *tg)
9559{
9560 u64 burst_us;
9561
9562 burst_us = tg->cfs_bandwidth.burst;
9563 do_div(burst_us, NSEC_PER_USEC);
9564
9565 return burst_us;
9566}
9567
9568static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
9569 struct cftype *cft)
9570{
9571 return tg_get_cfs_quota(css_tg(css));
9572}
9573
9574static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
9575 struct cftype *cftype, s64 cfs_quota_us)
9576{
9577 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
9578}
9579
9580static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
9581 struct cftype *cft)
9582{
9583 return tg_get_cfs_period(css_tg(css));
9584}
9585
9586static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
9587 struct cftype *cftype, u64 cfs_period_us)
9588{
9589 return tg_set_cfs_period(css_tg(css), cfs_period_us);
9590}
9591
9592static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
9593 struct cftype *cft)
9594{
9595 return tg_get_cfs_burst(css_tg(css));
9596}
9597
9598static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
9599 struct cftype *cftype, u64 cfs_burst_us)
9600{
9601 return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
9602}
9603
9604struct cfs_schedulable_data {
9605 struct task_group *tg;
9606 u64 period, quota;
9607};
9608
9609/*
9610 * normalize group quota/period to be quota/max_period
9611 * note: units are usecs
9612 */
9613static u64 normalize_cfs_quota(struct task_group *tg,
9614 struct cfs_schedulable_data *d)
9615{
9616 u64 quota, period;
9617
9618 if (tg == d->tg) {
9619 period = d->period;
9620 quota = d->quota;
9621 } else {
9622 period = tg_get_cfs_period(tg);
9623 quota = tg_get_cfs_quota(tg);
9624 }
9625
9626 /* note: these should typically be equivalent */
9627 if (quota == RUNTIME_INF || quota == -1)
9628 return RUNTIME_INF;
9629
9630 return to_ratio(period, quota);
9631}
9632
9633static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9634{
9635 struct cfs_schedulable_data *d = data;
9636 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9637 s64 quota = 0, parent_quota = -1;
9638
9639 if (!tg->parent) {
9640 quota = RUNTIME_INF;
9641 } else {
9642 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
9643
9644 quota = normalize_cfs_quota(tg, d);
9645 parent_quota = parent_b->hierarchical_quota;
9646
9647 /*
9648 * Ensure max(child_quota) <= parent_quota. On cgroup2,
9649 * always take the non-RUNTIME_INF min. On cgroup1, only
9650 * inherit when no limit is set. In both cases this is used
9651 * by the scheduler to determine if a given CFS task has a
9652 * bandwidth constraint at some higher level.
9653 */
9654 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
9655 if (quota == RUNTIME_INF)
9656 quota = parent_quota;
9657 else if (parent_quota != RUNTIME_INF)
9658 quota = min(quota, parent_quota);
9659 } else {
9660 if (quota == RUNTIME_INF)
9661 quota = parent_quota;
9662 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9663 return -EINVAL;
9664 }
9665 }
9666 cfs_b->hierarchical_quota = quota;
9667
9668 return 0;
9669}
9670
9671static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9672{
9673 struct cfs_schedulable_data data = {
9674 .tg = tg,
9675 .period = period,
9676 .quota = quota,
9677 };
9678
9679 if (quota != RUNTIME_INF) {
9680 do_div(data.period, NSEC_PER_USEC);
9681 do_div(data.quota, NSEC_PER_USEC);
9682 }
9683
9684 guard(rcu)();
9685 return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9686}
9687
9688static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
9689{
9690 struct task_group *tg = css_tg(seq_css(sf));
9691 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9692
9693 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9694 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9695 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
9696
9697 if (schedstat_enabled() && tg != &root_task_group) {
9698 struct sched_statistics *stats;
9699 u64 ws = 0;
9700 int i;
9701
9702 for_each_possible_cpu(i) {
9703 stats = __schedstats_from_se(tg->se[i]);
9704 ws += schedstat_val(stats->wait_sum);
9705 }
9706
9707 seq_printf(sf, "wait_sum %llu\n", ws);
9708 }
9709
9710 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
9711 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
9712
9713 return 0;
9714}
9715
9716static u64 throttled_time_self(struct task_group *tg)
9717{
9718 int i;
9719 u64 total = 0;
9720
9721 for_each_possible_cpu(i) {
9722 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
9723 }
9724
9725 return total;
9726}
9727
9728static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
9729{
9730 struct task_group *tg = css_tg(seq_css(sf));
9731
9732 seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
9733
9734 return 0;
9735}
9736#endif /* CONFIG_CFS_BANDWIDTH */
9737
9738#ifdef CONFIG_RT_GROUP_SCHED
9739static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
9740 struct cftype *cft, s64 val)
9741{
9742 return sched_group_set_rt_runtime(css_tg(css), val);
9743}
9744
9745static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
9746 struct cftype *cft)
9747{
9748 return sched_group_rt_runtime(css_tg(css));
9749}
9750
9751static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
9752 struct cftype *cftype, u64 rt_period_us)
9753{
9754 return sched_group_set_rt_period(css_tg(css), rt_period_us);
9755}
9756
9757static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
9758 struct cftype *cft)
9759{
9760 return sched_group_rt_period(css_tg(css));
9761}
9762#endif /* CONFIG_RT_GROUP_SCHED */
9763
9764#ifdef CONFIG_GROUP_SCHED_WEIGHT
9765static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
9766 struct cftype *cft)
9767{
9768 return css_tg(css)->idle;
9769}
9770
9771static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
9772 struct cftype *cft, s64 idle)
9773{
9774 int ret;
9775
9776 ret = sched_group_set_idle(css_tg(css), idle);
9777 if (!ret)
9778 scx_group_set_idle(css_tg(css), idle);
9779 return ret;
9780}
9781#endif
9782
9783static struct cftype cpu_legacy_files[] = {
9784#ifdef CONFIG_GROUP_SCHED_WEIGHT
9785 {
9786 .name = "shares",
9787 .read_u64 = cpu_shares_read_u64,
9788 .write_u64 = cpu_shares_write_u64,
9789 },
9790 {
9791 .name = "idle",
9792 .read_s64 = cpu_idle_read_s64,
9793 .write_s64 = cpu_idle_write_s64,
9794 },
9795#endif
9796#ifdef CONFIG_CFS_BANDWIDTH
9797 {
9798 .name = "cfs_quota_us",
9799 .read_s64 = cpu_cfs_quota_read_s64,
9800 .write_s64 = cpu_cfs_quota_write_s64,
9801 },
9802 {
9803 .name = "cfs_period_us",
9804 .read_u64 = cpu_cfs_period_read_u64,
9805 .write_u64 = cpu_cfs_period_write_u64,
9806 },
9807 {
9808 .name = "cfs_burst_us",
9809 .read_u64 = cpu_cfs_burst_read_u64,
9810 .write_u64 = cpu_cfs_burst_write_u64,
9811 },
9812 {
9813 .name = "stat",
9814 .seq_show = cpu_cfs_stat_show,
9815 },
9816 {
9817 .name = "stat.local",
9818 .seq_show = cpu_cfs_local_stat_show,
9819 },
9820#endif
9821#ifdef CONFIG_RT_GROUP_SCHED
9822 {
9823 .name = "rt_runtime_us",
9824 .read_s64 = cpu_rt_runtime_read,
9825 .write_s64 = cpu_rt_runtime_write,
9826 },
9827 {
9828 .name = "rt_period_us",
9829 .read_u64 = cpu_rt_period_read_uint,
9830 .write_u64 = cpu_rt_period_write_uint,
9831 },
9832#endif
9833#ifdef CONFIG_UCLAMP_TASK_GROUP
9834 {
9835 .name = "uclamp.min",
9836 .flags = CFTYPE_NOT_ON_ROOT,
9837 .seq_show = cpu_uclamp_min_show,
9838 .write = cpu_uclamp_min_write,
9839 },
9840 {
9841 .name = "uclamp.max",
9842 .flags = CFTYPE_NOT_ON_ROOT,
9843 .seq_show = cpu_uclamp_max_show,
9844 .write = cpu_uclamp_max_write,
9845 },
9846#endif
9847 { } /* Terminate */
9848};
9849
9850static int cpu_extra_stat_show(struct seq_file *sf,
9851 struct cgroup_subsys_state *css)
9852{
9853#ifdef CONFIG_CFS_BANDWIDTH
9854 {
9855 struct task_group *tg = css_tg(css);
9856 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9857 u64 throttled_usec, burst_usec;
9858
9859 throttled_usec = cfs_b->throttled_time;
9860 do_div(throttled_usec, NSEC_PER_USEC);
9861 burst_usec = cfs_b->burst_time;
9862 do_div(burst_usec, NSEC_PER_USEC);
9863
9864 seq_printf(sf, "nr_periods %d\n"
9865 "nr_throttled %d\n"
9866 "throttled_usec %llu\n"
9867 "nr_bursts %d\n"
9868 "burst_usec %llu\n",
9869 cfs_b->nr_periods, cfs_b->nr_throttled,
9870 throttled_usec, cfs_b->nr_burst, burst_usec);
9871 }
9872#endif
9873 return 0;
9874}
9875
9876static int cpu_local_stat_show(struct seq_file *sf,
9877 struct cgroup_subsys_state *css)
9878{
9879#ifdef CONFIG_CFS_BANDWIDTH
9880 {
9881 struct task_group *tg = css_tg(css);
9882 u64 throttled_self_usec;
9883
9884 throttled_self_usec = throttled_time_self(tg);
9885 do_div(throttled_self_usec, NSEC_PER_USEC);
9886
9887 seq_printf(sf, "throttled_usec %llu\n",
9888 throttled_self_usec);
9889 }
9890#endif
9891 return 0;
9892}
9893
9894#ifdef CONFIG_GROUP_SCHED_WEIGHT
9895
9896static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
9897 struct cftype *cft)
9898{
9899 return sched_weight_to_cgroup(tg_weight(css_tg(css)));
9900}
9901
9902static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
9903 struct cftype *cft, u64 cgrp_weight)
9904{
9905 unsigned long weight;
9906 int ret;
9907
9908 if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
9909 return -ERANGE;
9910
9911 weight = sched_weight_from_cgroup(cgrp_weight);
9912
9913 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
9914 if (!ret)
9915 scx_group_set_weight(css_tg(css), cgrp_weight);
9916 return ret;
9917}
9918
9919static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
9920 struct cftype *cft)
9921{
9922 unsigned long weight = tg_weight(css_tg(css));
9923 int last_delta = INT_MAX;
9924 int prio, delta;
9925
9926 /* find the closest nice value to the current weight */
9927 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
9928 delta = abs(sched_prio_to_weight[prio] - weight);
9929 if (delta >= last_delta)
9930 break;
9931 last_delta = delta;
9932 }
9933
9934 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
9935}
9936
9937static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
9938 struct cftype *cft, s64 nice)
9939{
9940 unsigned long weight;
9941 int idx, ret;
9942
9943 if (nice < MIN_NICE || nice > MAX_NICE)
9944 return -ERANGE;
9945
9946 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
9947 idx = array_index_nospec(idx, 40);
9948 weight = sched_prio_to_weight[idx];
9949
9950 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
9951 if (!ret)
9952 scx_group_set_weight(css_tg(css),
9953 sched_weight_to_cgroup(weight));
9954 return ret;
9955}
9956#endif /* CONFIG_GROUP_SCHED_WEIGHT */
9957
9958static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
9959 long period, long quota)
9960{
9961 if (quota < 0)
9962 seq_puts(sf, "max");
9963 else
9964 seq_printf(sf, "%ld", quota);
9965
9966 seq_printf(sf, " %ld\n", period);
9967}
9968
9969/* caller should put the current value in *@periodp before calling */
9970static int __maybe_unused cpu_period_quota_parse(char *buf,
9971 u64 *periodp, u64 *quotap)
9972{
9973 char tok[21]; /* U64_MAX */
9974
9975 if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
9976 return -EINVAL;
9977
9978 *periodp *= NSEC_PER_USEC;
9979
9980 if (sscanf(tok, "%llu", quotap))
9981 *quotap *= NSEC_PER_USEC;
9982 else if (!strcmp(tok, "max"))
9983 *quotap = RUNTIME_INF;
9984 else
9985 return -EINVAL;
9986
9987 return 0;
9988}
9989
9990#ifdef CONFIG_CFS_BANDWIDTH
9991static int cpu_max_show(struct seq_file *sf, void *v)
9992{
9993 struct task_group *tg = css_tg(seq_css(sf));
9994
9995 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
9996 return 0;
9997}
9998
9999static ssize_t cpu_max_write(struct kernfs_open_file *of,
10000 char *buf, size_t nbytes, loff_t off)
10001{
10002 struct task_group *tg = css_tg(of_css(of));
10003 u64 period = tg_get_cfs_period(tg);
10004 u64 burst = tg->cfs_bandwidth.burst;
10005 u64 quota;
10006 int ret;
10007
10008 ret = cpu_period_quota_parse(buf, &period, "a);
10009 if (!ret)
10010 ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
10011 return ret ?: nbytes;
10012}
10013#endif
10014
10015static struct cftype cpu_files[] = {
10016#ifdef CONFIG_GROUP_SCHED_WEIGHT
10017 {
10018 .name = "weight",
10019 .flags = CFTYPE_NOT_ON_ROOT,
10020 .read_u64 = cpu_weight_read_u64,
10021 .write_u64 = cpu_weight_write_u64,
10022 },
10023 {
10024 .name = "weight.nice",
10025 .flags = CFTYPE_NOT_ON_ROOT,
10026 .read_s64 = cpu_weight_nice_read_s64,
10027 .write_s64 = cpu_weight_nice_write_s64,
10028 },
10029 {
10030 .name = "idle",
10031 .flags = CFTYPE_NOT_ON_ROOT,
10032 .read_s64 = cpu_idle_read_s64,
10033 .write_s64 = cpu_idle_write_s64,
10034 },
10035#endif
10036#ifdef CONFIG_CFS_BANDWIDTH
10037 {
10038 .name = "max",
10039 .flags = CFTYPE_NOT_ON_ROOT,
10040 .seq_show = cpu_max_show,
10041 .write = cpu_max_write,
10042 },
10043 {
10044 .name = "max.burst",
10045 .flags = CFTYPE_NOT_ON_ROOT,
10046 .read_u64 = cpu_cfs_burst_read_u64,
10047 .write_u64 = cpu_cfs_burst_write_u64,
10048 },
10049#endif
10050#ifdef CONFIG_UCLAMP_TASK_GROUP
10051 {
10052 .name = "uclamp.min",
10053 .flags = CFTYPE_NOT_ON_ROOT,
10054 .seq_show = cpu_uclamp_min_show,
10055 .write = cpu_uclamp_min_write,
10056 },
10057 {
10058 .name = "uclamp.max",
10059 .flags = CFTYPE_NOT_ON_ROOT,
10060 .seq_show = cpu_uclamp_max_show,
10061 .write = cpu_uclamp_max_write,
10062 },
10063#endif
10064 { } /* terminate */
10065};
10066
10067struct cgroup_subsys cpu_cgrp_subsys = {
10068 .css_alloc = cpu_cgroup_css_alloc,
10069 .css_online = cpu_cgroup_css_online,
10070 .css_offline = cpu_cgroup_css_offline,
10071 .css_released = cpu_cgroup_css_released,
10072 .css_free = cpu_cgroup_css_free,
10073 .css_extra_stat_show = cpu_extra_stat_show,
10074 .css_local_stat_show = cpu_local_stat_show,
10075 .can_attach = cpu_cgroup_can_attach,
10076 .attach = cpu_cgroup_attach,
10077 .cancel_attach = cpu_cgroup_cancel_attach,
10078 .legacy_cftypes = cpu_legacy_files,
10079 .dfl_cftypes = cpu_files,
10080 .early_init = true,
10081 .threaded = true,
10082};
10083
10084#endif /* CONFIG_CGROUP_SCHED */
10085
10086void dump_cpu_task(int cpu)
10087{
10088 if (in_hardirq() && cpu == smp_processor_id()) {
10089 struct pt_regs *regs;
10090
10091 regs = get_irq_regs();
10092 if (regs) {
10093 show_regs(regs);
10094 return;
10095 }
10096 }
10097
10098 if (trigger_single_cpu_backtrace(cpu))
10099 return;
10100
10101 pr_info("Task dump for CPU %d:\n", cpu);
10102 sched_show_task(cpu_curr(cpu));
10103}
10104
10105/*
10106 * Nice levels are multiplicative, with a gentle 10% change for every
10107 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
10108 * nice 1, it will get ~10% less CPU time than another CPU-bound task
10109 * that remained on nice 0.
10110 *
10111 * The "10% effect" is relative and cumulative: from _any_ nice level,
10112 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
10113 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
10114 * If a task goes up by ~10% and another task goes down by ~10% then
10115 * the relative distance between them is ~25%.)
10116 */
10117const int sched_prio_to_weight[40] = {
10118 /* -20 */ 88761, 71755, 56483, 46273, 36291,
10119 /* -15 */ 29154, 23254, 18705, 14949, 11916,
10120 /* -10 */ 9548, 7620, 6100, 4904, 3906,
10121 /* -5 */ 3121, 2501, 1991, 1586, 1277,
10122 /* 0 */ 1024, 820, 655, 526, 423,
10123 /* 5 */ 335, 272, 215, 172, 137,
10124 /* 10 */ 110, 87, 70, 56, 45,
10125 /* 15 */ 36, 29, 23, 18, 15,
10126};
10127
10128/*
10129 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
10130 *
10131 * In cases where the weight does not change often, we can use the
10132 * pre-calculated inverse to speed up arithmetics by turning divisions
10133 * into multiplications:
10134 */
10135const u32 sched_prio_to_wmult[40] = {
10136 /* -20 */ 48388, 59856, 76040, 92818, 118348,
10137 /* -15 */ 147320, 184698, 229616, 287308, 360437,
10138 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
10139 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
10140 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
10141 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
10142 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
10143 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
10144};
10145
10146void call_trace_sched_update_nr_running(struct rq *rq, int count)
10147{
10148 trace_sched_update_nr_running_tp(rq, count);
10149}
10150
10151#ifdef CONFIG_SCHED_MM_CID
10152
10153/*
10154 * @cid_lock: Guarantee forward-progress of cid allocation.
10155 *
10156 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
10157 * is only used when contention is detected by the lock-free allocation so
10158 * forward progress can be guaranteed.
10159 */
10160DEFINE_RAW_SPINLOCK(cid_lock);
10161
10162/*
10163 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
10164 *
10165 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
10166 * detected, it is set to 1 to ensure that all newly coming allocations are
10167 * serialized by @cid_lock until the allocation which detected contention
10168 * completes and sets @use_cid_lock back to 0. This guarantees forward progress
10169 * of a cid allocation.
10170 */
10171int use_cid_lock;
10172
10173/*
10174 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
10175 * concurrently with respect to the execution of the source runqueue context
10176 * switch.
10177 *
10178 * There is one basic properties we want to guarantee here:
10179 *
10180 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
10181 * used by a task. That would lead to concurrent allocation of the cid and
10182 * userspace corruption.
10183 *
10184 * Provide this guarantee by introducing a Dekker memory ordering to guarantee
10185 * that a pair of loads observe at least one of a pair of stores, which can be
10186 * shown as:
10187 *
10188 * X = Y = 0
10189 *
10190 * w[X]=1 w[Y]=1
10191 * MB MB
10192 * r[Y]=y r[X]=x
10193 *
10194 * Which guarantees that x==0 && y==0 is impossible. But rather than using
10195 * values 0 and 1, this algorithm cares about specific state transitions of the
10196 * runqueue current task (as updated by the scheduler context switch), and the
10197 * per-mm/cpu cid value.
10198 *
10199 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
10200 * task->mm != mm for the rest of the discussion. There are two scheduler state
10201 * transitions on context switch we care about:
10202 *
10203 * (TSA) Store to rq->curr with transition from (N) to (Y)
10204 *
10205 * (TSB) Store to rq->curr with transition from (Y) to (N)
10206 *
10207 * On the remote-clear side, there is one transition we care about:
10208 *
10209 * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
10210 *
10211 * There is also a transition to UNSET state which can be performed from all
10212 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
10213 * guarantees that only a single thread will succeed:
10214 *
10215 * (TMB) cmpxchg to *pcpu_cid to mark UNSET
10216 *
10217 * Just to be clear, what we do _not_ want to happen is a transition to UNSET
10218 * when a thread is actively using the cid (property (1)).
10219 *
10220 * Let's looks at the relevant combinations of TSA/TSB, and TMA transitions.
10221 *
10222 * Scenario A) (TSA)+(TMA) (from next task perspective)
10223 *
10224 * CPU0 CPU1
10225 *
10226 * Context switch CS-1 Remote-clear
10227 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA)
10228 * (implied barrier after cmpxchg)
10229 * - switch_mm_cid()
10230 * - memory barrier (see switch_mm_cid()
10231 * comment explaining how this barrier
10232 * is combined with other scheduler
10233 * barriers)
10234 * - mm_cid_get (next)
10235 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
10236 *
10237 * This Dekker ensures that either task (Y) is observed by the
10238 * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
10239 * observed.
10240 *
10241 * If task (Y) store is observed by rcu_dereference(), it means that there is
10242 * still an active task on the cpu. Remote-clear will therefore not transition
10243 * to UNSET, which fulfills property (1).
10244 *
10245 * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
10246 * it will move its state to UNSET, which clears the percpu cid perhaps
10247 * uselessly (which is not an issue for correctness). Because task (Y) is not
10248 * observed, CPU1 can move ahead to set the state to UNSET. Because moving
10249 * state to UNSET is done with a cmpxchg expecting that the old state has the
10250 * LAZY flag set, only one thread will successfully UNSET.
10251 *
10252 * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
10253 * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
10254 * CPU1 will observe task (Y) and do nothing more, which is fine.
10255 *
10256 * What we are effectively preventing with this Dekker is a scenario where
10257 * neither LAZY flag nor store (Y) are observed, which would fail property (1)
10258 * because this would UNSET a cid which is actively used.
10259 */
10260
10261void sched_mm_cid_migrate_from(struct task_struct *t)
10262{
10263 t->migrate_from_cpu = task_cpu(t);
10264}
10265
10266static
10267int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
10268 struct task_struct *t,
10269 struct mm_cid *src_pcpu_cid)
10270{
10271 struct mm_struct *mm = t->mm;
10272 struct task_struct *src_task;
10273 int src_cid, last_mm_cid;
10274
10275 if (!mm)
10276 return -1;
10277
10278 last_mm_cid = t->last_mm_cid;
10279 /*
10280 * If the migrated task has no last cid, or if the current
10281 * task on src rq uses the cid, it means the source cid does not need
10282 * to be moved to the destination cpu.
10283 */
10284 if (last_mm_cid == -1)
10285 return -1;
10286 src_cid = READ_ONCE(src_pcpu_cid->cid);
10287 if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
10288 return -1;
10289
10290 /*
10291 * If we observe an active task using the mm on this rq, it means we
10292 * are not the last task to be migrated from this cpu for this mm, so
10293 * there is no need to move src_cid to the destination cpu.
10294 */
10295 guard(rcu)();
10296 src_task = rcu_dereference(src_rq->curr);
10297 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10298 t->last_mm_cid = -1;
10299 return -1;
10300 }
10301
10302 return src_cid;
10303}
10304
10305static
10306int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
10307 struct task_struct *t,
10308 struct mm_cid *src_pcpu_cid,
10309 int src_cid)
10310{
10311 struct task_struct *src_task;
10312 struct mm_struct *mm = t->mm;
10313 int lazy_cid;
10314
10315 if (src_cid == -1)
10316 return -1;
10317
10318 /*
10319 * Attempt to clear the source cpu cid to move it to the destination
10320 * cpu.
10321 */
10322 lazy_cid = mm_cid_set_lazy_put(src_cid);
10323 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
10324 return -1;
10325
10326 /*
10327 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10328 * rq->curr->mm matches the scheduler barrier in context_switch()
10329 * between store to rq->curr and load of prev and next task's
10330 * per-mm/cpu cid.
10331 *
10332 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10333 * rq->curr->mm_cid_active matches the barrier in
10334 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10335 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10336 * load of per-mm/cpu cid.
10337 */
10338
10339 /*
10340 * If we observe an active task using the mm on this rq after setting
10341 * the lazy-put flag, this task will be responsible for transitioning
10342 * from lazy-put flag set to MM_CID_UNSET.
10343 */
10344 scoped_guard (rcu) {
10345 src_task = rcu_dereference(src_rq->curr);
10346 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10347 /*
10348 * We observed an active task for this mm, there is therefore
10349 * no point in moving this cid to the destination cpu.
10350 */
10351 t->last_mm_cid = -1;
10352 return -1;
10353 }
10354 }
10355
10356 /*
10357 * The src_cid is unused, so it can be unset.
10358 */
10359 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10360 return -1;
10361 WRITE_ONCE(src_pcpu_cid->recent_cid, MM_CID_UNSET);
10362 return src_cid;
10363}
10364
10365/*
10366 * Migration to dst cpu. Called with dst_rq lock held.
10367 * Interrupts are disabled, which keeps the window of cid ownership without the
10368 * source rq lock held small.
10369 */
10370void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
10371{
10372 struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
10373 struct mm_struct *mm = t->mm;
10374 int src_cid, src_cpu;
10375 bool dst_cid_is_set;
10376 struct rq *src_rq;
10377
10378 lockdep_assert_rq_held(dst_rq);
10379
10380 if (!mm)
10381 return;
10382 src_cpu = t->migrate_from_cpu;
10383 if (src_cpu == -1) {
10384 t->last_mm_cid = -1;
10385 return;
10386 }
10387 /*
10388 * Move the src cid if the dst cid is unset. This keeps id
10389 * allocation closest to 0 in cases where few threads migrate around
10390 * many CPUs.
10391 *
10392 * If destination cid or recent cid is already set, we may have
10393 * to just clear the src cid to ensure compactness in frequent
10394 * migrations scenarios.
10395 *
10396 * It is not useful to clear the src cid when the number of threads is
10397 * greater or equal to the number of allowed CPUs, because user-space
10398 * can expect that the number of allowed cids can reach the number of
10399 * allowed CPUs.
10400 */
10401 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
10402 dst_cid_is_set = !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->cid)) ||
10403 !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->recent_cid));
10404 if (dst_cid_is_set && atomic_read(&mm->mm_users) >= READ_ONCE(mm->nr_cpus_allowed))
10405 return;
10406 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
10407 src_rq = cpu_rq(src_cpu);
10408 src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
10409 if (src_cid == -1)
10410 return;
10411 src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
10412 src_cid);
10413 if (src_cid == -1)
10414 return;
10415 if (dst_cid_is_set) {
10416 __mm_cid_put(mm, src_cid);
10417 return;
10418 }
10419 /* Move src_cid to dst cpu. */
10420 mm_cid_snapshot_time(dst_rq, mm);
10421 WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
10422 WRITE_ONCE(dst_pcpu_cid->recent_cid, src_cid);
10423}
10424
10425static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
10426 int cpu)
10427{
10428 struct rq *rq = cpu_rq(cpu);
10429 struct task_struct *t;
10430 int cid, lazy_cid;
10431
10432 cid = READ_ONCE(pcpu_cid->cid);
10433 if (!mm_cid_is_valid(cid))
10434 return;
10435
10436 /*
10437 * Clear the cpu cid if it is set to keep cid allocation compact. If
10438 * there happens to be other tasks left on the source cpu using this
10439 * mm, the next task using this mm will reallocate its cid on context
10440 * switch.
10441 */
10442 lazy_cid = mm_cid_set_lazy_put(cid);
10443 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
10444 return;
10445
10446 /*
10447 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10448 * rq->curr->mm matches the scheduler barrier in context_switch()
10449 * between store to rq->curr and load of prev and next task's
10450 * per-mm/cpu cid.
10451 *
10452 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10453 * rq->curr->mm_cid_active matches the barrier in
10454 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10455 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10456 * load of per-mm/cpu cid.
10457 */
10458
10459 /*
10460 * If we observe an active task using the mm on this rq after setting
10461 * the lazy-put flag, that task will be responsible for transitioning
10462 * from lazy-put flag set to MM_CID_UNSET.
10463 */
10464 scoped_guard (rcu) {
10465 t = rcu_dereference(rq->curr);
10466 if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
10467 return;
10468 }
10469
10470 /*
10471 * The cid is unused, so it can be unset.
10472 * Disable interrupts to keep the window of cid ownership without rq
10473 * lock small.
10474 */
10475 scoped_guard (irqsave) {
10476 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10477 __mm_cid_put(mm, cid);
10478 }
10479}
10480
10481static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
10482{
10483 struct rq *rq = cpu_rq(cpu);
10484 struct mm_cid *pcpu_cid;
10485 struct task_struct *curr;
10486 u64 rq_clock;
10487
10488 /*
10489 * rq->clock load is racy on 32-bit but one spurious clear once in a
10490 * while is irrelevant.
10491 */
10492 rq_clock = READ_ONCE(rq->clock);
10493 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10494
10495 /*
10496 * In order to take care of infrequently scheduled tasks, bump the time
10497 * snapshot associated with this cid if an active task using the mm is
10498 * observed on this rq.
10499 */
10500 scoped_guard (rcu) {
10501 curr = rcu_dereference(rq->curr);
10502 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
10503 WRITE_ONCE(pcpu_cid->time, rq_clock);
10504 return;
10505 }
10506 }
10507
10508 if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
10509 return;
10510 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10511}
10512
10513static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
10514 int weight)
10515{
10516 struct mm_cid *pcpu_cid;
10517 int cid;
10518
10519 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10520 cid = READ_ONCE(pcpu_cid->cid);
10521 if (!mm_cid_is_valid(cid) || cid < weight)
10522 return;
10523 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10524}
10525
10526static void task_mm_cid_work(struct callback_head *work)
10527{
10528 unsigned long now = jiffies, old_scan, next_scan;
10529 struct task_struct *t = current;
10530 struct cpumask *cidmask;
10531 struct mm_struct *mm;
10532 int weight, cpu;
10533
10534 SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
10535
10536 work->next = work; /* Prevent double-add */
10537 if (t->flags & PF_EXITING)
10538 return;
10539 mm = t->mm;
10540 if (!mm)
10541 return;
10542 old_scan = READ_ONCE(mm->mm_cid_next_scan);
10543 next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10544 if (!old_scan) {
10545 unsigned long res;
10546
10547 res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
10548 if (res != old_scan)
10549 old_scan = res;
10550 else
10551 old_scan = next_scan;
10552 }
10553 if (time_before(now, old_scan))
10554 return;
10555 if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
10556 return;
10557 cidmask = mm_cidmask(mm);
10558 /* Clear cids that were not recently used. */
10559 for_each_possible_cpu(cpu)
10560 sched_mm_cid_remote_clear_old(mm, cpu);
10561 weight = cpumask_weight(cidmask);
10562 /*
10563 * Clear cids that are greater or equal to the cidmask weight to
10564 * recompact it.
10565 */
10566 for_each_possible_cpu(cpu)
10567 sched_mm_cid_remote_clear_weight(mm, cpu, weight);
10568}
10569
10570void init_sched_mm_cid(struct task_struct *t)
10571{
10572 struct mm_struct *mm = t->mm;
10573 int mm_users = 0;
10574
10575 if (mm) {
10576 mm_users = atomic_read(&mm->mm_users);
10577 if (mm_users == 1)
10578 mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10579 }
10580 t->cid_work.next = &t->cid_work; /* Protect against double add */
10581 init_task_work(&t->cid_work, task_mm_cid_work);
10582}
10583
10584void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
10585{
10586 struct callback_head *work = &curr->cid_work;
10587 unsigned long now = jiffies;
10588
10589 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
10590 work->next != work)
10591 return;
10592 if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
10593 return;
10594
10595 /* No page allocation under rq lock */
10596 task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
10597}
10598
10599void sched_mm_cid_exit_signals(struct task_struct *t)
10600{
10601 struct mm_struct *mm = t->mm;
10602 struct rq *rq;
10603
10604 if (!mm)
10605 return;
10606
10607 preempt_disable();
10608 rq = this_rq();
10609 guard(rq_lock_irqsave)(rq);
10610 preempt_enable_no_resched(); /* holding spinlock */
10611 WRITE_ONCE(t->mm_cid_active, 0);
10612 /*
10613 * Store t->mm_cid_active before loading per-mm/cpu cid.
10614 * Matches barrier in sched_mm_cid_remote_clear_old().
10615 */
10616 smp_mb();
10617 mm_cid_put(mm);
10618 t->last_mm_cid = t->mm_cid = -1;
10619}
10620
10621void sched_mm_cid_before_execve(struct task_struct *t)
10622{
10623 struct mm_struct *mm = t->mm;
10624 struct rq *rq;
10625
10626 if (!mm)
10627 return;
10628
10629 preempt_disable();
10630 rq = this_rq();
10631 guard(rq_lock_irqsave)(rq);
10632 preempt_enable_no_resched(); /* holding spinlock */
10633 WRITE_ONCE(t->mm_cid_active, 0);
10634 /*
10635 * Store t->mm_cid_active before loading per-mm/cpu cid.
10636 * Matches barrier in sched_mm_cid_remote_clear_old().
10637 */
10638 smp_mb();
10639 mm_cid_put(mm);
10640 t->last_mm_cid = t->mm_cid = -1;
10641}
10642
10643void sched_mm_cid_after_execve(struct task_struct *t)
10644{
10645 struct mm_struct *mm = t->mm;
10646 struct rq *rq;
10647
10648 if (!mm)
10649 return;
10650
10651 preempt_disable();
10652 rq = this_rq();
10653 scoped_guard (rq_lock_irqsave, rq) {
10654 preempt_enable_no_resched(); /* holding spinlock */
10655 WRITE_ONCE(t->mm_cid_active, 1);
10656 /*
10657 * Store t->mm_cid_active before loading per-mm/cpu cid.
10658 * Matches barrier in sched_mm_cid_remote_clear_old().
10659 */
10660 smp_mb();
10661 t->last_mm_cid = t->mm_cid = mm_cid_get(rq, t, mm);
10662 }
10663 rseq_set_notify_resume(t);
10664}
10665
10666void sched_mm_cid_fork(struct task_struct *t)
10667{
10668 WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
10669 t->mm_cid_active = 1;
10670}
10671#endif
10672
10673#ifdef CONFIG_SCHED_CLASS_EXT
10674void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
10675 struct sched_enq_and_set_ctx *ctx)
10676{
10677 struct rq *rq = task_rq(p);
10678
10679 lockdep_assert_rq_held(rq);
10680
10681 *ctx = (struct sched_enq_and_set_ctx){
10682 .p = p,
10683 .queue_flags = queue_flags,
10684 .queued = task_on_rq_queued(p),
10685 .running = task_current(rq, p),
10686 };
10687
10688 update_rq_clock(rq);
10689 if (ctx->queued)
10690 dequeue_task(rq, p, queue_flags | DEQUEUE_NOCLOCK);
10691 if (ctx->running)
10692 put_prev_task(rq, p);
10693}
10694
10695void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx)
10696{
10697 struct rq *rq = task_rq(ctx->p);
10698
10699 lockdep_assert_rq_held(rq);
10700
10701 if (ctx->queued)
10702 enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK);
10703 if (ctx->running)
10704 set_next_task(rq, ctx->p);
10705}
10706#endif /* CONFIG_SCHED_CLASS_EXT */
1/*
2 * kernel/sched/core.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
19 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
25 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
27 */
28
29#include <linux/kasan.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/nmi.h>
33#include <linux/init.h>
34#include <linux/uaccess.h>
35#include <linux/highmem.h>
36#include <linux/mmu_context.h>
37#include <linux/interrupt.h>
38#include <linux/capability.h>
39#include <linux/completion.h>
40#include <linux/kernel_stat.h>
41#include <linux/debug_locks.h>
42#include <linux/perf_event.h>
43#include <linux/security.h>
44#include <linux/notifier.h>
45#include <linux/profile.h>
46#include <linux/freezer.h>
47#include <linux/vmalloc.h>
48#include <linux/blkdev.h>
49#include <linux/delay.h>
50#include <linux/pid_namespace.h>
51#include <linux/smp.h>
52#include <linux/threads.h>
53#include <linux/timer.h>
54#include <linux/rcupdate.h>
55#include <linux/cpu.h>
56#include <linux/cpuset.h>
57#include <linux/percpu.h>
58#include <linux/proc_fs.h>
59#include <linux/seq_file.h>
60#include <linux/sysctl.h>
61#include <linux/syscalls.h>
62#include <linux/times.h>
63#include <linux/tsacct_kern.h>
64#include <linux/kprobes.h>
65#include <linux/delayacct.h>
66#include <linux/unistd.h>
67#include <linux/pagemap.h>
68#include <linux/hrtimer.h>
69#include <linux/tick.h>
70#include <linux/ctype.h>
71#include <linux/ftrace.h>
72#include <linux/slab.h>
73#include <linux/init_task.h>
74#include <linux/context_tracking.h>
75#include <linux/compiler.h>
76#include <linux/frame.h>
77#include <linux/prefetch.h>
78#include <linux/mutex.h>
79
80#include <asm/switch_to.h>
81#include <asm/tlb.h>
82#include <asm/irq_regs.h>
83#ifdef CONFIG_PARAVIRT
84#include <asm/paravirt.h>
85#endif
86
87#include "sched.h"
88#include "../workqueue_internal.h"
89#include "../smpboot.h"
90
91#define CREATE_TRACE_POINTS
92#include <trace/events/sched.h>
93
94DEFINE_MUTEX(sched_domains_mutex);
95DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
96
97static void update_rq_clock_task(struct rq *rq, s64 delta);
98
99void update_rq_clock(struct rq *rq)
100{
101 s64 delta;
102
103 lockdep_assert_held(&rq->lock);
104
105 if (rq->clock_skip_update & RQCF_ACT_SKIP)
106 return;
107
108 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
109 if (delta < 0)
110 return;
111 rq->clock += delta;
112 update_rq_clock_task(rq, delta);
113}
114
115/*
116 * Debugging: various feature bits
117 */
118
119#define SCHED_FEAT(name, enabled) \
120 (1UL << __SCHED_FEAT_##name) * enabled |
121
122const_debug unsigned int sysctl_sched_features =
123#include "features.h"
124 0;
125
126#undef SCHED_FEAT
127
128/*
129 * Number of tasks to iterate in a single balance run.
130 * Limited because this is done with IRQs disabled.
131 */
132const_debug unsigned int sysctl_sched_nr_migrate = 32;
133
134/*
135 * period over which we average the RT time consumption, measured
136 * in ms.
137 *
138 * default: 1s
139 */
140const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
141
142/*
143 * period over which we measure -rt task cpu usage in us.
144 * default: 1s
145 */
146unsigned int sysctl_sched_rt_period = 1000000;
147
148__read_mostly int scheduler_running;
149
150/*
151 * part of the period that we allow rt tasks to run in us.
152 * default: 0.95s
153 */
154int sysctl_sched_rt_runtime = 950000;
155
156/* cpus with isolated domains */
157cpumask_var_t cpu_isolated_map;
158
159/*
160 * this_rq_lock - lock this runqueue and disable interrupts.
161 */
162static struct rq *this_rq_lock(void)
163 __acquires(rq->lock)
164{
165 struct rq *rq;
166
167 local_irq_disable();
168 rq = this_rq();
169 raw_spin_lock(&rq->lock);
170
171 return rq;
172}
173
174/*
175 * __task_rq_lock - lock the rq @p resides on.
176 */
177struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
178 __acquires(rq->lock)
179{
180 struct rq *rq;
181
182 lockdep_assert_held(&p->pi_lock);
183
184 for (;;) {
185 rq = task_rq(p);
186 raw_spin_lock(&rq->lock);
187 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
188 rf->cookie = lockdep_pin_lock(&rq->lock);
189 return rq;
190 }
191 raw_spin_unlock(&rq->lock);
192
193 while (unlikely(task_on_rq_migrating(p)))
194 cpu_relax();
195 }
196}
197
198/*
199 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
200 */
201struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
202 __acquires(p->pi_lock)
203 __acquires(rq->lock)
204{
205 struct rq *rq;
206
207 for (;;) {
208 raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
209 rq = task_rq(p);
210 raw_spin_lock(&rq->lock);
211 /*
212 * move_queued_task() task_rq_lock()
213 *
214 * ACQUIRE (rq->lock)
215 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
216 * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
217 * [S] ->cpu = new_cpu [L] task_rq()
218 * [L] ->on_rq
219 * RELEASE (rq->lock)
220 *
221 * If we observe the old cpu in task_rq_lock, the acquire of
222 * the old rq->lock will fully serialize against the stores.
223 *
224 * If we observe the new cpu in task_rq_lock, the acquire will
225 * pair with the WMB to ensure we must then also see migrating.
226 */
227 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
228 rf->cookie = lockdep_pin_lock(&rq->lock);
229 return rq;
230 }
231 raw_spin_unlock(&rq->lock);
232 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
233
234 while (unlikely(task_on_rq_migrating(p)))
235 cpu_relax();
236 }
237}
238
239#ifdef CONFIG_SCHED_HRTICK
240/*
241 * Use HR-timers to deliver accurate preemption points.
242 */
243
244static void hrtick_clear(struct rq *rq)
245{
246 if (hrtimer_active(&rq->hrtick_timer))
247 hrtimer_cancel(&rq->hrtick_timer);
248}
249
250/*
251 * High-resolution timer tick.
252 * Runs from hardirq context with interrupts disabled.
253 */
254static enum hrtimer_restart hrtick(struct hrtimer *timer)
255{
256 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
257
258 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
259
260 raw_spin_lock(&rq->lock);
261 update_rq_clock(rq);
262 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
263 raw_spin_unlock(&rq->lock);
264
265 return HRTIMER_NORESTART;
266}
267
268#ifdef CONFIG_SMP
269
270static void __hrtick_restart(struct rq *rq)
271{
272 struct hrtimer *timer = &rq->hrtick_timer;
273
274 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
275}
276
277/*
278 * called from hardirq (IPI) context
279 */
280static void __hrtick_start(void *arg)
281{
282 struct rq *rq = arg;
283
284 raw_spin_lock(&rq->lock);
285 __hrtick_restart(rq);
286 rq->hrtick_csd_pending = 0;
287 raw_spin_unlock(&rq->lock);
288}
289
290/*
291 * Called to set the hrtick timer state.
292 *
293 * called with rq->lock held and irqs disabled
294 */
295void hrtick_start(struct rq *rq, u64 delay)
296{
297 struct hrtimer *timer = &rq->hrtick_timer;
298 ktime_t time;
299 s64 delta;
300
301 /*
302 * Don't schedule slices shorter than 10000ns, that just
303 * doesn't make sense and can cause timer DoS.
304 */
305 delta = max_t(s64, delay, 10000LL);
306 time = ktime_add_ns(timer->base->get_time(), delta);
307
308 hrtimer_set_expires(timer, time);
309
310 if (rq == this_rq()) {
311 __hrtick_restart(rq);
312 } else if (!rq->hrtick_csd_pending) {
313 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
314 rq->hrtick_csd_pending = 1;
315 }
316}
317
318#else
319/*
320 * Called to set the hrtick timer state.
321 *
322 * called with rq->lock held and irqs disabled
323 */
324void hrtick_start(struct rq *rq, u64 delay)
325{
326 /*
327 * Don't schedule slices shorter than 10000ns, that just
328 * doesn't make sense. Rely on vruntime for fairness.
329 */
330 delay = max_t(u64, delay, 10000LL);
331 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
332 HRTIMER_MODE_REL_PINNED);
333}
334#endif /* CONFIG_SMP */
335
336static void init_rq_hrtick(struct rq *rq)
337{
338#ifdef CONFIG_SMP
339 rq->hrtick_csd_pending = 0;
340
341 rq->hrtick_csd.flags = 0;
342 rq->hrtick_csd.func = __hrtick_start;
343 rq->hrtick_csd.info = rq;
344#endif
345
346 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
347 rq->hrtick_timer.function = hrtick;
348}
349#else /* CONFIG_SCHED_HRTICK */
350static inline void hrtick_clear(struct rq *rq)
351{
352}
353
354static inline void init_rq_hrtick(struct rq *rq)
355{
356}
357#endif /* CONFIG_SCHED_HRTICK */
358
359/*
360 * cmpxchg based fetch_or, macro so it works for different integer types
361 */
362#define fetch_or(ptr, mask) \
363 ({ \
364 typeof(ptr) _ptr = (ptr); \
365 typeof(mask) _mask = (mask); \
366 typeof(*_ptr) _old, _val = *_ptr; \
367 \
368 for (;;) { \
369 _old = cmpxchg(_ptr, _val, _val | _mask); \
370 if (_old == _val) \
371 break; \
372 _val = _old; \
373 } \
374 _old; \
375})
376
377#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
378/*
379 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
380 * this avoids any races wrt polling state changes and thereby avoids
381 * spurious IPIs.
382 */
383static bool set_nr_and_not_polling(struct task_struct *p)
384{
385 struct thread_info *ti = task_thread_info(p);
386 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
387}
388
389/*
390 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
391 *
392 * If this returns true, then the idle task promises to call
393 * sched_ttwu_pending() and reschedule soon.
394 */
395static bool set_nr_if_polling(struct task_struct *p)
396{
397 struct thread_info *ti = task_thread_info(p);
398 typeof(ti->flags) old, val = READ_ONCE(ti->flags);
399
400 for (;;) {
401 if (!(val & _TIF_POLLING_NRFLAG))
402 return false;
403 if (val & _TIF_NEED_RESCHED)
404 return true;
405 old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
406 if (old == val)
407 break;
408 val = old;
409 }
410 return true;
411}
412
413#else
414static bool set_nr_and_not_polling(struct task_struct *p)
415{
416 set_tsk_need_resched(p);
417 return true;
418}
419
420#ifdef CONFIG_SMP
421static bool set_nr_if_polling(struct task_struct *p)
422{
423 return false;
424}
425#endif
426#endif
427
428void wake_q_add(struct wake_q_head *head, struct task_struct *task)
429{
430 struct wake_q_node *node = &task->wake_q;
431
432 /*
433 * Atomically grab the task, if ->wake_q is !nil already it means
434 * its already queued (either by us or someone else) and will get the
435 * wakeup due to that.
436 *
437 * This cmpxchg() implies a full barrier, which pairs with the write
438 * barrier implied by the wakeup in wake_up_q().
439 */
440 if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
441 return;
442
443 get_task_struct(task);
444
445 /*
446 * The head is context local, there can be no concurrency.
447 */
448 *head->lastp = node;
449 head->lastp = &node->next;
450}
451
452void wake_up_q(struct wake_q_head *head)
453{
454 struct wake_q_node *node = head->first;
455
456 while (node != WAKE_Q_TAIL) {
457 struct task_struct *task;
458
459 task = container_of(node, struct task_struct, wake_q);
460 BUG_ON(!task);
461 /* task can safely be re-inserted now */
462 node = node->next;
463 task->wake_q.next = NULL;
464
465 /*
466 * wake_up_process() implies a wmb() to pair with the queueing
467 * in wake_q_add() so as not to miss wakeups.
468 */
469 wake_up_process(task);
470 put_task_struct(task);
471 }
472}
473
474/*
475 * resched_curr - mark rq's current task 'to be rescheduled now'.
476 *
477 * On UP this means the setting of the need_resched flag, on SMP it
478 * might also involve a cross-CPU call to trigger the scheduler on
479 * the target CPU.
480 */
481void resched_curr(struct rq *rq)
482{
483 struct task_struct *curr = rq->curr;
484 int cpu;
485
486 lockdep_assert_held(&rq->lock);
487
488 if (test_tsk_need_resched(curr))
489 return;
490
491 cpu = cpu_of(rq);
492
493 if (cpu == smp_processor_id()) {
494 set_tsk_need_resched(curr);
495 set_preempt_need_resched();
496 return;
497 }
498
499 if (set_nr_and_not_polling(curr))
500 smp_send_reschedule(cpu);
501 else
502 trace_sched_wake_idle_without_ipi(cpu);
503}
504
505void resched_cpu(int cpu)
506{
507 struct rq *rq = cpu_rq(cpu);
508 unsigned long flags;
509
510 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
511 return;
512 resched_curr(rq);
513 raw_spin_unlock_irqrestore(&rq->lock, flags);
514}
515
516#ifdef CONFIG_SMP
517#ifdef CONFIG_NO_HZ_COMMON
518/*
519 * In the semi idle case, use the nearest busy cpu for migrating timers
520 * from an idle cpu. This is good for power-savings.
521 *
522 * We don't do similar optimization for completely idle system, as
523 * selecting an idle cpu will add more delays to the timers than intended
524 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
525 */
526int get_nohz_timer_target(void)
527{
528 int i, cpu = smp_processor_id();
529 struct sched_domain *sd;
530
531 if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
532 return cpu;
533
534 rcu_read_lock();
535 for_each_domain(cpu, sd) {
536 for_each_cpu(i, sched_domain_span(sd)) {
537 if (cpu == i)
538 continue;
539
540 if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
541 cpu = i;
542 goto unlock;
543 }
544 }
545 }
546
547 if (!is_housekeeping_cpu(cpu))
548 cpu = housekeeping_any_cpu();
549unlock:
550 rcu_read_unlock();
551 return cpu;
552}
553/*
554 * When add_timer_on() enqueues a timer into the timer wheel of an
555 * idle CPU then this timer might expire before the next timer event
556 * which is scheduled to wake up that CPU. In case of a completely
557 * idle system the next event might even be infinite time into the
558 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
559 * leaves the inner idle loop so the newly added timer is taken into
560 * account when the CPU goes back to idle and evaluates the timer
561 * wheel for the next timer event.
562 */
563static void wake_up_idle_cpu(int cpu)
564{
565 struct rq *rq = cpu_rq(cpu);
566
567 if (cpu == smp_processor_id())
568 return;
569
570 if (set_nr_and_not_polling(rq->idle))
571 smp_send_reschedule(cpu);
572 else
573 trace_sched_wake_idle_without_ipi(cpu);
574}
575
576static bool wake_up_full_nohz_cpu(int cpu)
577{
578 /*
579 * We just need the target to call irq_exit() and re-evaluate
580 * the next tick. The nohz full kick at least implies that.
581 * If needed we can still optimize that later with an
582 * empty IRQ.
583 */
584 if (cpu_is_offline(cpu))
585 return true; /* Don't try to wake offline CPUs. */
586 if (tick_nohz_full_cpu(cpu)) {
587 if (cpu != smp_processor_id() ||
588 tick_nohz_tick_stopped())
589 tick_nohz_full_kick_cpu(cpu);
590 return true;
591 }
592
593 return false;
594}
595
596/*
597 * Wake up the specified CPU. If the CPU is going offline, it is the
598 * caller's responsibility to deal with the lost wakeup, for example,
599 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
600 */
601void wake_up_nohz_cpu(int cpu)
602{
603 if (!wake_up_full_nohz_cpu(cpu))
604 wake_up_idle_cpu(cpu);
605}
606
607static inline bool got_nohz_idle_kick(void)
608{
609 int cpu = smp_processor_id();
610
611 if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
612 return false;
613
614 if (idle_cpu(cpu) && !need_resched())
615 return true;
616
617 /*
618 * We can't run Idle Load Balance on this CPU for this time so we
619 * cancel it and clear NOHZ_BALANCE_KICK
620 */
621 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
622 return false;
623}
624
625#else /* CONFIG_NO_HZ_COMMON */
626
627static inline bool got_nohz_idle_kick(void)
628{
629 return false;
630}
631
632#endif /* CONFIG_NO_HZ_COMMON */
633
634#ifdef CONFIG_NO_HZ_FULL
635bool sched_can_stop_tick(struct rq *rq)
636{
637 int fifo_nr_running;
638
639 /* Deadline tasks, even if single, need the tick */
640 if (rq->dl.dl_nr_running)
641 return false;
642
643 /*
644 * If there are more than one RR tasks, we need the tick to effect the
645 * actual RR behaviour.
646 */
647 if (rq->rt.rr_nr_running) {
648 if (rq->rt.rr_nr_running == 1)
649 return true;
650 else
651 return false;
652 }
653
654 /*
655 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
656 * forced preemption between FIFO tasks.
657 */
658 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
659 if (fifo_nr_running)
660 return true;
661
662 /*
663 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
664 * if there's more than one we need the tick for involuntary
665 * preemption.
666 */
667 if (rq->nr_running > 1)
668 return false;
669
670 return true;
671}
672#endif /* CONFIG_NO_HZ_FULL */
673
674void sched_avg_update(struct rq *rq)
675{
676 s64 period = sched_avg_period();
677
678 while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
679 /*
680 * Inline assembly required to prevent the compiler
681 * optimising this loop into a divmod call.
682 * See __iter_div_u64_rem() for another example of this.
683 */
684 asm("" : "+rm" (rq->age_stamp));
685 rq->age_stamp += period;
686 rq->rt_avg /= 2;
687 }
688}
689
690#endif /* CONFIG_SMP */
691
692#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
693 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
694/*
695 * Iterate task_group tree rooted at *from, calling @down when first entering a
696 * node and @up when leaving it for the final time.
697 *
698 * Caller must hold rcu_lock or sufficient equivalent.
699 */
700int walk_tg_tree_from(struct task_group *from,
701 tg_visitor down, tg_visitor up, void *data)
702{
703 struct task_group *parent, *child;
704 int ret;
705
706 parent = from;
707
708down:
709 ret = (*down)(parent, data);
710 if (ret)
711 goto out;
712 list_for_each_entry_rcu(child, &parent->children, siblings) {
713 parent = child;
714 goto down;
715
716up:
717 continue;
718 }
719 ret = (*up)(parent, data);
720 if (ret || parent == from)
721 goto out;
722
723 child = parent;
724 parent = parent->parent;
725 if (parent)
726 goto up;
727out:
728 return ret;
729}
730
731int tg_nop(struct task_group *tg, void *data)
732{
733 return 0;
734}
735#endif
736
737static void set_load_weight(struct task_struct *p)
738{
739 int prio = p->static_prio - MAX_RT_PRIO;
740 struct load_weight *load = &p->se.load;
741
742 /*
743 * SCHED_IDLE tasks get minimal weight:
744 */
745 if (idle_policy(p->policy)) {
746 load->weight = scale_load(WEIGHT_IDLEPRIO);
747 load->inv_weight = WMULT_IDLEPRIO;
748 return;
749 }
750
751 load->weight = scale_load(sched_prio_to_weight[prio]);
752 load->inv_weight = sched_prio_to_wmult[prio];
753}
754
755static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
756{
757 update_rq_clock(rq);
758 if (!(flags & ENQUEUE_RESTORE))
759 sched_info_queued(rq, p);
760 p->sched_class->enqueue_task(rq, p, flags);
761}
762
763static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
764{
765 update_rq_clock(rq);
766 if (!(flags & DEQUEUE_SAVE))
767 sched_info_dequeued(rq, p);
768 p->sched_class->dequeue_task(rq, p, flags);
769}
770
771void activate_task(struct rq *rq, struct task_struct *p, int flags)
772{
773 if (task_contributes_to_load(p))
774 rq->nr_uninterruptible--;
775
776 enqueue_task(rq, p, flags);
777}
778
779void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
780{
781 if (task_contributes_to_load(p))
782 rq->nr_uninterruptible++;
783
784 dequeue_task(rq, p, flags);
785}
786
787static void update_rq_clock_task(struct rq *rq, s64 delta)
788{
789/*
790 * In theory, the compile should just see 0 here, and optimize out the call
791 * to sched_rt_avg_update. But I don't trust it...
792 */
793#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
794 s64 steal = 0, irq_delta = 0;
795#endif
796#ifdef CONFIG_IRQ_TIME_ACCOUNTING
797 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
798
799 /*
800 * Since irq_time is only updated on {soft,}irq_exit, we might run into
801 * this case when a previous update_rq_clock() happened inside a
802 * {soft,}irq region.
803 *
804 * When this happens, we stop ->clock_task and only update the
805 * prev_irq_time stamp to account for the part that fit, so that a next
806 * update will consume the rest. This ensures ->clock_task is
807 * monotonic.
808 *
809 * It does however cause some slight miss-attribution of {soft,}irq
810 * time, a more accurate solution would be to update the irq_time using
811 * the current rq->clock timestamp, except that would require using
812 * atomic ops.
813 */
814 if (irq_delta > delta)
815 irq_delta = delta;
816
817 rq->prev_irq_time += irq_delta;
818 delta -= irq_delta;
819#endif
820#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
821 if (static_key_false((¶virt_steal_rq_enabled))) {
822 steal = paravirt_steal_clock(cpu_of(rq));
823 steal -= rq->prev_steal_time_rq;
824
825 if (unlikely(steal > delta))
826 steal = delta;
827
828 rq->prev_steal_time_rq += steal;
829 delta -= steal;
830 }
831#endif
832
833 rq->clock_task += delta;
834
835#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
836 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
837 sched_rt_avg_update(rq, irq_delta + steal);
838#endif
839}
840
841void sched_set_stop_task(int cpu, struct task_struct *stop)
842{
843 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
844 struct task_struct *old_stop = cpu_rq(cpu)->stop;
845
846 if (stop) {
847 /*
848 * Make it appear like a SCHED_FIFO task, its something
849 * userspace knows about and won't get confused about.
850 *
851 * Also, it will make PI more or less work without too
852 * much confusion -- but then, stop work should not
853 * rely on PI working anyway.
854 */
855 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m);
856
857 stop->sched_class = &stop_sched_class;
858 }
859
860 cpu_rq(cpu)->stop = stop;
861
862 if (old_stop) {
863 /*
864 * Reset it back to a normal scheduling class so that
865 * it can die in pieces.
866 */
867 old_stop->sched_class = &rt_sched_class;
868 }
869}
870
871/*
872 * __normal_prio - return the priority that is based on the static prio
873 */
874static inline int __normal_prio(struct task_struct *p)
875{
876 return p->static_prio;
877}
878
879/*
880 * Calculate the expected normal priority: i.e. priority
881 * without taking RT-inheritance into account. Might be
882 * boosted by interactivity modifiers. Changes upon fork,
883 * setprio syscalls, and whenever the interactivity
884 * estimator recalculates.
885 */
886static inline int normal_prio(struct task_struct *p)
887{
888 int prio;
889
890 if (task_has_dl_policy(p))
891 prio = MAX_DL_PRIO-1;
892 else if (task_has_rt_policy(p))
893 prio = MAX_RT_PRIO-1 - p->rt_priority;
894 else
895 prio = __normal_prio(p);
896 return prio;
897}
898
899/*
900 * Calculate the current priority, i.e. the priority
901 * taken into account by the scheduler. This value might
902 * be boosted by RT tasks, or might be boosted by
903 * interactivity modifiers. Will be RT if the task got
904 * RT-boosted. If not then it returns p->normal_prio.
905 */
906static int effective_prio(struct task_struct *p)
907{
908 p->normal_prio = normal_prio(p);
909 /*
910 * If we are RT tasks or we were boosted to RT priority,
911 * keep the priority unchanged. Otherwise, update priority
912 * to the normal priority:
913 */
914 if (!rt_prio(p->prio))
915 return p->normal_prio;
916 return p->prio;
917}
918
919/**
920 * task_curr - is this task currently executing on a CPU?
921 * @p: the task in question.
922 *
923 * Return: 1 if the task is currently executing. 0 otherwise.
924 */
925inline int task_curr(const struct task_struct *p)
926{
927 return cpu_curr(task_cpu(p)) == p;
928}
929
930/*
931 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
932 * use the balance_callback list if you want balancing.
933 *
934 * this means any call to check_class_changed() must be followed by a call to
935 * balance_callback().
936 */
937static inline void check_class_changed(struct rq *rq, struct task_struct *p,
938 const struct sched_class *prev_class,
939 int oldprio)
940{
941 if (prev_class != p->sched_class) {
942 if (prev_class->switched_from)
943 prev_class->switched_from(rq, p);
944
945 p->sched_class->switched_to(rq, p);
946 } else if (oldprio != p->prio || dl_task(p))
947 p->sched_class->prio_changed(rq, p, oldprio);
948}
949
950void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
951{
952 const struct sched_class *class;
953
954 if (p->sched_class == rq->curr->sched_class) {
955 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
956 } else {
957 for_each_class(class) {
958 if (class == rq->curr->sched_class)
959 break;
960 if (class == p->sched_class) {
961 resched_curr(rq);
962 break;
963 }
964 }
965 }
966
967 /*
968 * A queue event has occurred, and we're going to schedule. In
969 * this case, we can save a useless back to back clock update.
970 */
971 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
972 rq_clock_skip_update(rq, true);
973}
974
975#ifdef CONFIG_SMP
976/*
977 * This is how migration works:
978 *
979 * 1) we invoke migration_cpu_stop() on the target CPU using
980 * stop_one_cpu().
981 * 2) stopper starts to run (implicitly forcing the migrated thread
982 * off the CPU)
983 * 3) it checks whether the migrated task is still in the wrong runqueue.
984 * 4) if it's in the wrong runqueue then the migration thread removes
985 * it and puts it into the right queue.
986 * 5) stopper completes and stop_one_cpu() returns and the migration
987 * is done.
988 */
989
990/*
991 * move_queued_task - move a queued task to new rq.
992 *
993 * Returns (locked) new rq. Old rq's lock is released.
994 */
995static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
996{
997 lockdep_assert_held(&rq->lock);
998
999 p->on_rq = TASK_ON_RQ_MIGRATING;
1000 dequeue_task(rq, p, 0);
1001 set_task_cpu(p, new_cpu);
1002 raw_spin_unlock(&rq->lock);
1003
1004 rq = cpu_rq(new_cpu);
1005
1006 raw_spin_lock(&rq->lock);
1007 BUG_ON(task_cpu(p) != new_cpu);
1008 enqueue_task(rq, p, 0);
1009 p->on_rq = TASK_ON_RQ_QUEUED;
1010 check_preempt_curr(rq, p, 0);
1011
1012 return rq;
1013}
1014
1015struct migration_arg {
1016 struct task_struct *task;
1017 int dest_cpu;
1018};
1019
1020/*
1021 * Move (not current) task off this cpu, onto dest cpu. We're doing
1022 * this because either it can't run here any more (set_cpus_allowed()
1023 * away from this CPU, or CPU going down), or because we're
1024 * attempting to rebalance this task on exec (sched_exec).
1025 *
1026 * So we race with normal scheduler movements, but that's OK, as long
1027 * as the task is no longer on this CPU.
1028 */
1029static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
1030{
1031 if (unlikely(!cpu_active(dest_cpu)))
1032 return rq;
1033
1034 /* Affinity changed (again). */
1035 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
1036 return rq;
1037
1038 rq = move_queued_task(rq, p, dest_cpu);
1039
1040 return rq;
1041}
1042
1043/*
1044 * migration_cpu_stop - this will be executed by a highprio stopper thread
1045 * and performs thread migration by bumping thread off CPU then
1046 * 'pushing' onto another runqueue.
1047 */
1048static int migration_cpu_stop(void *data)
1049{
1050 struct migration_arg *arg = data;
1051 struct task_struct *p = arg->task;
1052 struct rq *rq = this_rq();
1053
1054 /*
1055 * The original target cpu might have gone down and we might
1056 * be on another cpu but it doesn't matter.
1057 */
1058 local_irq_disable();
1059 /*
1060 * We need to explicitly wake pending tasks before running
1061 * __migrate_task() such that we will not miss enforcing cpus_allowed
1062 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
1063 */
1064 sched_ttwu_pending();
1065
1066 raw_spin_lock(&p->pi_lock);
1067 raw_spin_lock(&rq->lock);
1068 /*
1069 * If task_rq(p) != rq, it cannot be migrated here, because we're
1070 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
1071 * we're holding p->pi_lock.
1072 */
1073 if (task_rq(p) == rq) {
1074 if (task_on_rq_queued(p))
1075 rq = __migrate_task(rq, p, arg->dest_cpu);
1076 else
1077 p->wake_cpu = arg->dest_cpu;
1078 }
1079 raw_spin_unlock(&rq->lock);
1080 raw_spin_unlock(&p->pi_lock);
1081
1082 local_irq_enable();
1083 return 0;
1084}
1085
1086/*
1087 * sched_class::set_cpus_allowed must do the below, but is not required to
1088 * actually call this function.
1089 */
1090void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
1091{
1092 cpumask_copy(&p->cpus_allowed, new_mask);
1093 p->nr_cpus_allowed = cpumask_weight(new_mask);
1094}
1095
1096void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1097{
1098 struct rq *rq = task_rq(p);
1099 bool queued, running;
1100
1101 lockdep_assert_held(&p->pi_lock);
1102
1103 queued = task_on_rq_queued(p);
1104 running = task_current(rq, p);
1105
1106 if (queued) {
1107 /*
1108 * Because __kthread_bind() calls this on blocked tasks without
1109 * holding rq->lock.
1110 */
1111 lockdep_assert_held(&rq->lock);
1112 dequeue_task(rq, p, DEQUEUE_SAVE);
1113 }
1114 if (running)
1115 put_prev_task(rq, p);
1116
1117 p->sched_class->set_cpus_allowed(p, new_mask);
1118
1119 if (queued)
1120 enqueue_task(rq, p, ENQUEUE_RESTORE);
1121 if (running)
1122 set_curr_task(rq, p);
1123}
1124
1125/*
1126 * Change a given task's CPU affinity. Migrate the thread to a
1127 * proper CPU and schedule it away if the CPU it's executing on
1128 * is removed from the allowed bitmask.
1129 *
1130 * NOTE: the caller must have a valid reference to the task, the
1131 * task must not exit() & deallocate itself prematurely. The
1132 * call is not atomic; no spinlocks may be held.
1133 */
1134static int __set_cpus_allowed_ptr(struct task_struct *p,
1135 const struct cpumask *new_mask, bool check)
1136{
1137 const struct cpumask *cpu_valid_mask = cpu_active_mask;
1138 unsigned int dest_cpu;
1139 struct rq_flags rf;
1140 struct rq *rq;
1141 int ret = 0;
1142
1143 rq = task_rq_lock(p, &rf);
1144
1145 if (p->flags & PF_KTHREAD) {
1146 /*
1147 * Kernel threads are allowed on online && !active CPUs
1148 */
1149 cpu_valid_mask = cpu_online_mask;
1150 }
1151
1152 /*
1153 * Must re-check here, to close a race against __kthread_bind(),
1154 * sched_setaffinity() is not guaranteed to observe the flag.
1155 */
1156 if (check && (p->flags & PF_NO_SETAFFINITY)) {
1157 ret = -EINVAL;
1158 goto out;
1159 }
1160
1161 if (cpumask_equal(&p->cpus_allowed, new_mask))
1162 goto out;
1163
1164 if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
1165 ret = -EINVAL;
1166 goto out;
1167 }
1168
1169 do_set_cpus_allowed(p, new_mask);
1170
1171 if (p->flags & PF_KTHREAD) {
1172 /*
1173 * For kernel threads that do indeed end up on online &&
1174 * !active we want to ensure they are strict per-cpu threads.
1175 */
1176 WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
1177 !cpumask_intersects(new_mask, cpu_active_mask) &&
1178 p->nr_cpus_allowed != 1);
1179 }
1180
1181 /* Can the task run on the task's current CPU? If so, we're done */
1182 if (cpumask_test_cpu(task_cpu(p), new_mask))
1183 goto out;
1184
1185 dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
1186 if (task_running(rq, p) || p->state == TASK_WAKING) {
1187 struct migration_arg arg = { p, dest_cpu };
1188 /* Need help from migration thread: drop lock and wait. */
1189 task_rq_unlock(rq, p, &rf);
1190 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
1191 tlb_migrate_finish(p->mm);
1192 return 0;
1193 } else if (task_on_rq_queued(p)) {
1194 /*
1195 * OK, since we're going to drop the lock immediately
1196 * afterwards anyway.
1197 */
1198 lockdep_unpin_lock(&rq->lock, rf.cookie);
1199 rq = move_queued_task(rq, p, dest_cpu);
1200 lockdep_repin_lock(&rq->lock, rf.cookie);
1201 }
1202out:
1203 task_rq_unlock(rq, p, &rf);
1204
1205 return ret;
1206}
1207
1208int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1209{
1210 return __set_cpus_allowed_ptr(p, new_mask, false);
1211}
1212EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
1213
1214void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1215{
1216#ifdef CONFIG_SCHED_DEBUG
1217 /*
1218 * We should never call set_task_cpu() on a blocked task,
1219 * ttwu() will sort out the placement.
1220 */
1221 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
1222 !p->on_rq);
1223
1224 /*
1225 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
1226 * because schedstat_wait_{start,end} rebase migrating task's wait_start
1227 * time relying on p->on_rq.
1228 */
1229 WARN_ON_ONCE(p->state == TASK_RUNNING &&
1230 p->sched_class == &fair_sched_class &&
1231 (p->on_rq && !task_on_rq_migrating(p)));
1232
1233#ifdef CONFIG_LOCKDEP
1234 /*
1235 * The caller should hold either p->pi_lock or rq->lock, when changing
1236 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
1237 *
1238 * sched_move_task() holds both and thus holding either pins the cgroup,
1239 * see task_group().
1240 *
1241 * Furthermore, all task_rq users should acquire both locks, see
1242 * task_rq_lock().
1243 */
1244 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
1245 lockdep_is_held(&task_rq(p)->lock)));
1246#endif
1247#endif
1248
1249 trace_sched_migrate_task(p, new_cpu);
1250
1251 if (task_cpu(p) != new_cpu) {
1252 if (p->sched_class->migrate_task_rq)
1253 p->sched_class->migrate_task_rq(p);
1254 p->se.nr_migrations++;
1255 perf_event_task_migrate(p);
1256 }
1257
1258 __set_task_cpu(p, new_cpu);
1259}
1260
1261static void __migrate_swap_task(struct task_struct *p, int cpu)
1262{
1263 if (task_on_rq_queued(p)) {
1264 struct rq *src_rq, *dst_rq;
1265
1266 src_rq = task_rq(p);
1267 dst_rq = cpu_rq(cpu);
1268
1269 p->on_rq = TASK_ON_RQ_MIGRATING;
1270 deactivate_task(src_rq, p, 0);
1271 set_task_cpu(p, cpu);
1272 activate_task(dst_rq, p, 0);
1273 p->on_rq = TASK_ON_RQ_QUEUED;
1274 check_preempt_curr(dst_rq, p, 0);
1275 } else {
1276 /*
1277 * Task isn't running anymore; make it appear like we migrated
1278 * it before it went to sleep. This means on wakeup we make the
1279 * previous cpu our target instead of where it really is.
1280 */
1281 p->wake_cpu = cpu;
1282 }
1283}
1284
1285struct migration_swap_arg {
1286 struct task_struct *src_task, *dst_task;
1287 int src_cpu, dst_cpu;
1288};
1289
1290static int migrate_swap_stop(void *data)
1291{
1292 struct migration_swap_arg *arg = data;
1293 struct rq *src_rq, *dst_rq;
1294 int ret = -EAGAIN;
1295
1296 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
1297 return -EAGAIN;
1298
1299 src_rq = cpu_rq(arg->src_cpu);
1300 dst_rq = cpu_rq(arg->dst_cpu);
1301
1302 double_raw_lock(&arg->src_task->pi_lock,
1303 &arg->dst_task->pi_lock);
1304 double_rq_lock(src_rq, dst_rq);
1305
1306 if (task_cpu(arg->dst_task) != arg->dst_cpu)
1307 goto unlock;
1308
1309 if (task_cpu(arg->src_task) != arg->src_cpu)
1310 goto unlock;
1311
1312 if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
1313 goto unlock;
1314
1315 if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
1316 goto unlock;
1317
1318 __migrate_swap_task(arg->src_task, arg->dst_cpu);
1319 __migrate_swap_task(arg->dst_task, arg->src_cpu);
1320
1321 ret = 0;
1322
1323unlock:
1324 double_rq_unlock(src_rq, dst_rq);
1325 raw_spin_unlock(&arg->dst_task->pi_lock);
1326 raw_spin_unlock(&arg->src_task->pi_lock);
1327
1328 return ret;
1329}
1330
1331/*
1332 * Cross migrate two tasks
1333 */
1334int migrate_swap(struct task_struct *cur, struct task_struct *p)
1335{
1336 struct migration_swap_arg arg;
1337 int ret = -EINVAL;
1338
1339 arg = (struct migration_swap_arg){
1340 .src_task = cur,
1341 .src_cpu = task_cpu(cur),
1342 .dst_task = p,
1343 .dst_cpu = task_cpu(p),
1344 };
1345
1346 if (arg.src_cpu == arg.dst_cpu)
1347 goto out;
1348
1349 /*
1350 * These three tests are all lockless; this is OK since all of them
1351 * will be re-checked with proper locks held further down the line.
1352 */
1353 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
1354 goto out;
1355
1356 if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
1357 goto out;
1358
1359 if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
1360 goto out;
1361
1362 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
1363 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
1364
1365out:
1366 return ret;
1367}
1368
1369/*
1370 * wait_task_inactive - wait for a thread to unschedule.
1371 *
1372 * If @match_state is nonzero, it's the @p->state value just checked and
1373 * not expected to change. If it changes, i.e. @p might have woken up,
1374 * then return zero. When we succeed in waiting for @p to be off its CPU,
1375 * we return a positive number (its total switch count). If a second call
1376 * a short while later returns the same number, the caller can be sure that
1377 * @p has remained unscheduled the whole time.
1378 *
1379 * The caller must ensure that the task *will* unschedule sometime soon,
1380 * else this function might spin for a *long* time. This function can't
1381 * be called with interrupts off, or it may introduce deadlock with
1382 * smp_call_function() if an IPI is sent by the same process we are
1383 * waiting to become inactive.
1384 */
1385unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1386{
1387 int running, queued;
1388 struct rq_flags rf;
1389 unsigned long ncsw;
1390 struct rq *rq;
1391
1392 for (;;) {
1393 /*
1394 * We do the initial early heuristics without holding
1395 * any task-queue locks at all. We'll only try to get
1396 * the runqueue lock when things look like they will
1397 * work out!
1398 */
1399 rq = task_rq(p);
1400
1401 /*
1402 * If the task is actively running on another CPU
1403 * still, just relax and busy-wait without holding
1404 * any locks.
1405 *
1406 * NOTE! Since we don't hold any locks, it's not
1407 * even sure that "rq" stays as the right runqueue!
1408 * But we don't care, since "task_running()" will
1409 * return false if the runqueue has changed and p
1410 * is actually now running somewhere else!
1411 */
1412 while (task_running(rq, p)) {
1413 if (match_state && unlikely(p->state != match_state))
1414 return 0;
1415 cpu_relax();
1416 }
1417
1418 /*
1419 * Ok, time to look more closely! We need the rq
1420 * lock now, to be *sure*. If we're wrong, we'll
1421 * just go back and repeat.
1422 */
1423 rq = task_rq_lock(p, &rf);
1424 trace_sched_wait_task(p);
1425 running = task_running(rq, p);
1426 queued = task_on_rq_queued(p);
1427 ncsw = 0;
1428 if (!match_state || p->state == match_state)
1429 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1430 task_rq_unlock(rq, p, &rf);
1431
1432 /*
1433 * If it changed from the expected state, bail out now.
1434 */
1435 if (unlikely(!ncsw))
1436 break;
1437
1438 /*
1439 * Was it really running after all now that we
1440 * checked with the proper locks actually held?
1441 *
1442 * Oops. Go back and try again..
1443 */
1444 if (unlikely(running)) {
1445 cpu_relax();
1446 continue;
1447 }
1448
1449 /*
1450 * It's not enough that it's not actively running,
1451 * it must be off the runqueue _entirely_, and not
1452 * preempted!
1453 *
1454 * So if it was still runnable (but just not actively
1455 * running right now), it's preempted, and we should
1456 * yield - it could be a while.
1457 */
1458 if (unlikely(queued)) {
1459 ktime_t to = NSEC_PER_SEC / HZ;
1460
1461 set_current_state(TASK_UNINTERRUPTIBLE);
1462 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
1463 continue;
1464 }
1465
1466 /*
1467 * Ahh, all good. It wasn't running, and it wasn't
1468 * runnable, which means that it will never become
1469 * running in the future either. We're all done!
1470 */
1471 break;
1472 }
1473
1474 return ncsw;
1475}
1476
1477/***
1478 * kick_process - kick a running thread to enter/exit the kernel
1479 * @p: the to-be-kicked thread
1480 *
1481 * Cause a process which is running on another CPU to enter
1482 * kernel-mode, without any delay. (to get signals handled.)
1483 *
1484 * NOTE: this function doesn't have to take the runqueue lock,
1485 * because all it wants to ensure is that the remote task enters
1486 * the kernel. If the IPI races and the task has been migrated
1487 * to another CPU then no harm is done and the purpose has been
1488 * achieved as well.
1489 */
1490void kick_process(struct task_struct *p)
1491{
1492 int cpu;
1493
1494 preempt_disable();
1495 cpu = task_cpu(p);
1496 if ((cpu != smp_processor_id()) && task_curr(p))
1497 smp_send_reschedule(cpu);
1498 preempt_enable();
1499}
1500EXPORT_SYMBOL_GPL(kick_process);
1501
1502/*
1503 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
1504 *
1505 * A few notes on cpu_active vs cpu_online:
1506 *
1507 * - cpu_active must be a subset of cpu_online
1508 *
1509 * - on cpu-up we allow per-cpu kthreads on the online && !active cpu,
1510 * see __set_cpus_allowed_ptr(). At this point the newly online
1511 * cpu isn't yet part of the sched domains, and balancing will not
1512 * see it.
1513 *
1514 * - on cpu-down we clear cpu_active() to mask the sched domains and
1515 * avoid the load balancer to place new tasks on the to be removed
1516 * cpu. Existing tasks will remain running there and will be taken
1517 * off.
1518 *
1519 * This means that fallback selection must not select !active CPUs.
1520 * And can assume that any active CPU must be online. Conversely
1521 * select_task_rq() below may allow selection of !active CPUs in order
1522 * to satisfy the above rules.
1523 */
1524static int select_fallback_rq(int cpu, struct task_struct *p)
1525{
1526 int nid = cpu_to_node(cpu);
1527 const struct cpumask *nodemask = NULL;
1528 enum { cpuset, possible, fail } state = cpuset;
1529 int dest_cpu;
1530
1531 /*
1532 * If the node that the cpu is on has been offlined, cpu_to_node()
1533 * will return -1. There is no cpu on the node, and we should
1534 * select the cpu on the other node.
1535 */
1536 if (nid != -1) {
1537 nodemask = cpumask_of_node(nid);
1538
1539 /* Look for allowed, online CPU in same node. */
1540 for_each_cpu(dest_cpu, nodemask) {
1541 if (!cpu_active(dest_cpu))
1542 continue;
1543 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
1544 return dest_cpu;
1545 }
1546 }
1547
1548 for (;;) {
1549 /* Any allowed, online CPU? */
1550 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
1551 if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
1552 continue;
1553 if (!cpu_online(dest_cpu))
1554 continue;
1555 goto out;
1556 }
1557
1558 /* No more Mr. Nice Guy. */
1559 switch (state) {
1560 case cpuset:
1561 if (IS_ENABLED(CONFIG_CPUSETS)) {
1562 cpuset_cpus_allowed_fallback(p);
1563 state = possible;
1564 break;
1565 }
1566 /* fall-through */
1567 case possible:
1568 do_set_cpus_allowed(p, cpu_possible_mask);
1569 state = fail;
1570 break;
1571
1572 case fail:
1573 BUG();
1574 break;
1575 }
1576 }
1577
1578out:
1579 if (state != cpuset) {
1580 /*
1581 * Don't tell them about moving exiting tasks or
1582 * kernel threads (both mm NULL), since they never
1583 * leave kernel.
1584 */
1585 if (p->mm && printk_ratelimit()) {
1586 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
1587 task_pid_nr(p), p->comm, cpu);
1588 }
1589 }
1590
1591 return dest_cpu;
1592}
1593
1594/*
1595 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
1596 */
1597static inline
1598int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
1599{
1600 lockdep_assert_held(&p->pi_lock);
1601
1602 if (tsk_nr_cpus_allowed(p) > 1)
1603 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
1604 else
1605 cpu = cpumask_any(tsk_cpus_allowed(p));
1606
1607 /*
1608 * In order not to call set_task_cpu() on a blocking task we need
1609 * to rely on ttwu() to place the task on a valid ->cpus_allowed
1610 * cpu.
1611 *
1612 * Since this is common to all placement strategies, this lives here.
1613 *
1614 * [ this allows ->select_task() to simply return task_cpu(p) and
1615 * not worry about this generic constraint ]
1616 */
1617 if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
1618 !cpu_online(cpu)))
1619 cpu = select_fallback_rq(task_cpu(p), p);
1620
1621 return cpu;
1622}
1623
1624static void update_avg(u64 *avg, u64 sample)
1625{
1626 s64 diff = sample - *avg;
1627 *avg += diff >> 3;
1628}
1629
1630#else
1631
1632static inline int __set_cpus_allowed_ptr(struct task_struct *p,
1633 const struct cpumask *new_mask, bool check)
1634{
1635 return set_cpus_allowed_ptr(p, new_mask);
1636}
1637
1638#endif /* CONFIG_SMP */
1639
1640static void
1641ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
1642{
1643 struct rq *rq;
1644
1645 if (!schedstat_enabled())
1646 return;
1647
1648 rq = this_rq();
1649
1650#ifdef CONFIG_SMP
1651 if (cpu == rq->cpu) {
1652 schedstat_inc(rq->ttwu_local);
1653 schedstat_inc(p->se.statistics.nr_wakeups_local);
1654 } else {
1655 struct sched_domain *sd;
1656
1657 schedstat_inc(p->se.statistics.nr_wakeups_remote);
1658 rcu_read_lock();
1659 for_each_domain(rq->cpu, sd) {
1660 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
1661 schedstat_inc(sd->ttwu_wake_remote);
1662 break;
1663 }
1664 }
1665 rcu_read_unlock();
1666 }
1667
1668 if (wake_flags & WF_MIGRATED)
1669 schedstat_inc(p->se.statistics.nr_wakeups_migrate);
1670#endif /* CONFIG_SMP */
1671
1672 schedstat_inc(rq->ttwu_count);
1673 schedstat_inc(p->se.statistics.nr_wakeups);
1674
1675 if (wake_flags & WF_SYNC)
1676 schedstat_inc(p->se.statistics.nr_wakeups_sync);
1677}
1678
1679static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
1680{
1681 activate_task(rq, p, en_flags);
1682 p->on_rq = TASK_ON_RQ_QUEUED;
1683
1684 /* if a worker is waking up, notify workqueue */
1685 if (p->flags & PF_WQ_WORKER)
1686 wq_worker_waking_up(p, cpu_of(rq));
1687}
1688
1689/*
1690 * Mark the task runnable and perform wakeup-preemption.
1691 */
1692static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
1693 struct pin_cookie cookie)
1694{
1695 check_preempt_curr(rq, p, wake_flags);
1696 p->state = TASK_RUNNING;
1697 trace_sched_wakeup(p);
1698
1699#ifdef CONFIG_SMP
1700 if (p->sched_class->task_woken) {
1701 /*
1702 * Our task @p is fully woken up and running; so its safe to
1703 * drop the rq->lock, hereafter rq is only used for statistics.
1704 */
1705 lockdep_unpin_lock(&rq->lock, cookie);
1706 p->sched_class->task_woken(rq, p);
1707 lockdep_repin_lock(&rq->lock, cookie);
1708 }
1709
1710 if (rq->idle_stamp) {
1711 u64 delta = rq_clock(rq) - rq->idle_stamp;
1712 u64 max = 2*rq->max_idle_balance_cost;
1713
1714 update_avg(&rq->avg_idle, delta);
1715
1716 if (rq->avg_idle > max)
1717 rq->avg_idle = max;
1718
1719 rq->idle_stamp = 0;
1720 }
1721#endif
1722}
1723
1724static void
1725ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
1726 struct pin_cookie cookie)
1727{
1728 int en_flags = ENQUEUE_WAKEUP;
1729
1730 lockdep_assert_held(&rq->lock);
1731
1732#ifdef CONFIG_SMP
1733 if (p->sched_contributes_to_load)
1734 rq->nr_uninterruptible--;
1735
1736 if (wake_flags & WF_MIGRATED)
1737 en_flags |= ENQUEUE_MIGRATED;
1738#endif
1739
1740 ttwu_activate(rq, p, en_flags);
1741 ttwu_do_wakeup(rq, p, wake_flags, cookie);
1742}
1743
1744/*
1745 * Called in case the task @p isn't fully descheduled from its runqueue,
1746 * in this case we must do a remote wakeup. Its a 'light' wakeup though,
1747 * since all we need to do is flip p->state to TASK_RUNNING, since
1748 * the task is still ->on_rq.
1749 */
1750static int ttwu_remote(struct task_struct *p, int wake_flags)
1751{
1752 struct rq_flags rf;
1753 struct rq *rq;
1754 int ret = 0;
1755
1756 rq = __task_rq_lock(p, &rf);
1757 if (task_on_rq_queued(p)) {
1758 /* check_preempt_curr() may use rq clock */
1759 update_rq_clock(rq);
1760 ttwu_do_wakeup(rq, p, wake_flags, rf.cookie);
1761 ret = 1;
1762 }
1763 __task_rq_unlock(rq, &rf);
1764
1765 return ret;
1766}
1767
1768#ifdef CONFIG_SMP
1769void sched_ttwu_pending(void)
1770{
1771 struct rq *rq = this_rq();
1772 struct llist_node *llist = llist_del_all(&rq->wake_list);
1773 struct pin_cookie cookie;
1774 struct task_struct *p;
1775 unsigned long flags;
1776
1777 if (!llist)
1778 return;
1779
1780 raw_spin_lock_irqsave(&rq->lock, flags);
1781 cookie = lockdep_pin_lock(&rq->lock);
1782
1783 while (llist) {
1784 int wake_flags = 0;
1785
1786 p = llist_entry(llist, struct task_struct, wake_entry);
1787 llist = llist_next(llist);
1788
1789 if (p->sched_remote_wakeup)
1790 wake_flags = WF_MIGRATED;
1791
1792 ttwu_do_activate(rq, p, wake_flags, cookie);
1793 }
1794
1795 lockdep_unpin_lock(&rq->lock, cookie);
1796 raw_spin_unlock_irqrestore(&rq->lock, flags);
1797}
1798
1799void scheduler_ipi(void)
1800{
1801 /*
1802 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1803 * TIF_NEED_RESCHED remotely (for the first time) will also send
1804 * this IPI.
1805 */
1806 preempt_fold_need_resched();
1807
1808 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
1809 return;
1810
1811 /*
1812 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1813 * traditionally all their work was done from the interrupt return
1814 * path. Now that we actually do some work, we need to make sure
1815 * we do call them.
1816 *
1817 * Some archs already do call them, luckily irq_enter/exit nest
1818 * properly.
1819 *
1820 * Arguably we should visit all archs and update all handlers,
1821 * however a fair share of IPIs are still resched only so this would
1822 * somewhat pessimize the simple resched case.
1823 */
1824 irq_enter();
1825 sched_ttwu_pending();
1826
1827 /*
1828 * Check if someone kicked us for doing the nohz idle load balance.
1829 */
1830 if (unlikely(got_nohz_idle_kick())) {
1831 this_rq()->idle_balance = 1;
1832 raise_softirq_irqoff(SCHED_SOFTIRQ);
1833 }
1834 irq_exit();
1835}
1836
1837static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
1838{
1839 struct rq *rq = cpu_rq(cpu);
1840
1841 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
1842
1843 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
1844 if (!set_nr_if_polling(rq->idle))
1845 smp_send_reschedule(cpu);
1846 else
1847 trace_sched_wake_idle_without_ipi(cpu);
1848 }
1849}
1850
1851void wake_up_if_idle(int cpu)
1852{
1853 struct rq *rq = cpu_rq(cpu);
1854 unsigned long flags;
1855
1856 rcu_read_lock();
1857
1858 if (!is_idle_task(rcu_dereference(rq->curr)))
1859 goto out;
1860
1861 if (set_nr_if_polling(rq->idle)) {
1862 trace_sched_wake_idle_without_ipi(cpu);
1863 } else {
1864 raw_spin_lock_irqsave(&rq->lock, flags);
1865 if (is_idle_task(rq->curr))
1866 smp_send_reschedule(cpu);
1867 /* Else cpu is not in idle, do nothing here */
1868 raw_spin_unlock_irqrestore(&rq->lock, flags);
1869 }
1870
1871out:
1872 rcu_read_unlock();
1873}
1874
1875bool cpus_share_cache(int this_cpu, int that_cpu)
1876{
1877 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1878}
1879#endif /* CONFIG_SMP */
1880
1881static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
1882{
1883 struct rq *rq = cpu_rq(cpu);
1884 struct pin_cookie cookie;
1885
1886#if defined(CONFIG_SMP)
1887 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
1888 sched_clock_cpu(cpu); /* sync clocks x-cpu */
1889 ttwu_queue_remote(p, cpu, wake_flags);
1890 return;
1891 }
1892#endif
1893
1894 raw_spin_lock(&rq->lock);
1895 cookie = lockdep_pin_lock(&rq->lock);
1896 ttwu_do_activate(rq, p, wake_flags, cookie);
1897 lockdep_unpin_lock(&rq->lock, cookie);
1898 raw_spin_unlock(&rq->lock);
1899}
1900
1901/*
1902 * Notes on Program-Order guarantees on SMP systems.
1903 *
1904 * MIGRATION
1905 *
1906 * The basic program-order guarantee on SMP systems is that when a task [t]
1907 * migrates, all its activity on its old cpu [c0] happens-before any subsequent
1908 * execution on its new cpu [c1].
1909 *
1910 * For migration (of runnable tasks) this is provided by the following means:
1911 *
1912 * A) UNLOCK of the rq(c0)->lock scheduling out task t
1913 * B) migration for t is required to synchronize *both* rq(c0)->lock and
1914 * rq(c1)->lock (if not at the same time, then in that order).
1915 * C) LOCK of the rq(c1)->lock scheduling in task
1916 *
1917 * Transitivity guarantees that B happens after A and C after B.
1918 * Note: we only require RCpc transitivity.
1919 * Note: the cpu doing B need not be c0 or c1
1920 *
1921 * Example:
1922 *
1923 * CPU0 CPU1 CPU2
1924 *
1925 * LOCK rq(0)->lock
1926 * sched-out X
1927 * sched-in Y
1928 * UNLOCK rq(0)->lock
1929 *
1930 * LOCK rq(0)->lock // orders against CPU0
1931 * dequeue X
1932 * UNLOCK rq(0)->lock
1933 *
1934 * LOCK rq(1)->lock
1935 * enqueue X
1936 * UNLOCK rq(1)->lock
1937 *
1938 * LOCK rq(1)->lock // orders against CPU2
1939 * sched-out Z
1940 * sched-in X
1941 * UNLOCK rq(1)->lock
1942 *
1943 *
1944 * BLOCKING -- aka. SLEEP + WAKEUP
1945 *
1946 * For blocking we (obviously) need to provide the same guarantee as for
1947 * migration. However the means are completely different as there is no lock
1948 * chain to provide order. Instead we do:
1949 *
1950 * 1) smp_store_release(X->on_cpu, 0)
1951 * 2) smp_cond_load_acquire(!X->on_cpu)
1952 *
1953 * Example:
1954 *
1955 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
1956 *
1957 * LOCK rq(0)->lock LOCK X->pi_lock
1958 * dequeue X
1959 * sched-out X
1960 * smp_store_release(X->on_cpu, 0);
1961 *
1962 * smp_cond_load_acquire(&X->on_cpu, !VAL);
1963 * X->state = WAKING
1964 * set_task_cpu(X,2)
1965 *
1966 * LOCK rq(2)->lock
1967 * enqueue X
1968 * X->state = RUNNING
1969 * UNLOCK rq(2)->lock
1970 *
1971 * LOCK rq(2)->lock // orders against CPU1
1972 * sched-out Z
1973 * sched-in X
1974 * UNLOCK rq(2)->lock
1975 *
1976 * UNLOCK X->pi_lock
1977 * UNLOCK rq(0)->lock
1978 *
1979 *
1980 * However; for wakeups there is a second guarantee we must provide, namely we
1981 * must observe the state that lead to our wakeup. That is, not only must our
1982 * task observe its own prior state, it must also observe the stores prior to
1983 * its wakeup.
1984 *
1985 * This means that any means of doing remote wakeups must order the CPU doing
1986 * the wakeup against the CPU the task is going to end up running on. This,
1987 * however, is already required for the regular Program-Order guarantee above,
1988 * since the waking CPU is the one issueing the ACQUIRE (smp_cond_load_acquire).
1989 *
1990 */
1991
1992/**
1993 * try_to_wake_up - wake up a thread
1994 * @p: the thread to be awakened
1995 * @state: the mask of task states that can be woken
1996 * @wake_flags: wake modifier flags (WF_*)
1997 *
1998 * If (@state & @p->state) @p->state = TASK_RUNNING.
1999 *
2000 * If the task was not queued/runnable, also place it back on a runqueue.
2001 *
2002 * Atomic against schedule() which would dequeue a task, also see
2003 * set_current_state().
2004 *
2005 * Return: %true if @p->state changes (an actual wakeup was done),
2006 * %false otherwise.
2007 */
2008static int
2009try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
2010{
2011 unsigned long flags;
2012 int cpu, success = 0;
2013
2014 /*
2015 * If we are going to wake up a thread waiting for CONDITION we
2016 * need to ensure that CONDITION=1 done by the caller can not be
2017 * reordered with p->state check below. This pairs with mb() in
2018 * set_current_state() the waiting thread does.
2019 */
2020 smp_mb__before_spinlock();
2021 raw_spin_lock_irqsave(&p->pi_lock, flags);
2022 if (!(p->state & state))
2023 goto out;
2024
2025 trace_sched_waking(p);
2026
2027 success = 1; /* we're going to change ->state */
2028 cpu = task_cpu(p);
2029
2030 /*
2031 * Ensure we load p->on_rq _after_ p->state, otherwise it would
2032 * be possible to, falsely, observe p->on_rq == 0 and get stuck
2033 * in smp_cond_load_acquire() below.
2034 *
2035 * sched_ttwu_pending() try_to_wake_up()
2036 * [S] p->on_rq = 1; [L] P->state
2037 * UNLOCK rq->lock -----.
2038 * \
2039 * +--- RMB
2040 * schedule() /
2041 * LOCK rq->lock -----'
2042 * UNLOCK rq->lock
2043 *
2044 * [task p]
2045 * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq
2046 *
2047 * Pairs with the UNLOCK+LOCK on rq->lock from the
2048 * last wakeup of our task and the schedule that got our task
2049 * current.
2050 */
2051 smp_rmb();
2052 if (p->on_rq && ttwu_remote(p, wake_flags))
2053 goto stat;
2054
2055#ifdef CONFIG_SMP
2056 /*
2057 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
2058 * possible to, falsely, observe p->on_cpu == 0.
2059 *
2060 * One must be running (->on_cpu == 1) in order to remove oneself
2061 * from the runqueue.
2062 *
2063 * [S] ->on_cpu = 1; [L] ->on_rq
2064 * UNLOCK rq->lock
2065 * RMB
2066 * LOCK rq->lock
2067 * [S] ->on_rq = 0; [L] ->on_cpu
2068 *
2069 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
2070 * from the consecutive calls to schedule(); the first switching to our
2071 * task, the second putting it to sleep.
2072 */
2073 smp_rmb();
2074
2075 /*
2076 * If the owning (remote) cpu is still in the middle of schedule() with
2077 * this task as prev, wait until its done referencing the task.
2078 *
2079 * Pairs with the smp_store_release() in finish_lock_switch().
2080 *
2081 * This ensures that tasks getting woken will be fully ordered against
2082 * their previous state and preserve Program Order.
2083 */
2084 smp_cond_load_acquire(&p->on_cpu, !VAL);
2085
2086 p->sched_contributes_to_load = !!task_contributes_to_load(p);
2087 p->state = TASK_WAKING;
2088
2089 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
2090 if (task_cpu(p) != cpu) {
2091 wake_flags |= WF_MIGRATED;
2092 set_task_cpu(p, cpu);
2093 }
2094#endif /* CONFIG_SMP */
2095
2096 ttwu_queue(p, cpu, wake_flags);
2097stat:
2098 ttwu_stat(p, cpu, wake_flags);
2099out:
2100 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2101
2102 return success;
2103}
2104
2105/**
2106 * try_to_wake_up_local - try to wake up a local task with rq lock held
2107 * @p: the thread to be awakened
2108 * @cookie: context's cookie for pinning
2109 *
2110 * Put @p on the run-queue if it's not already there. The caller must
2111 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2112 * the current task.
2113 */
2114static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
2115{
2116 struct rq *rq = task_rq(p);
2117
2118 if (WARN_ON_ONCE(rq != this_rq()) ||
2119 WARN_ON_ONCE(p == current))
2120 return;
2121
2122 lockdep_assert_held(&rq->lock);
2123
2124 if (!raw_spin_trylock(&p->pi_lock)) {
2125 /*
2126 * This is OK, because current is on_cpu, which avoids it being
2127 * picked for load-balance and preemption/IRQs are still
2128 * disabled avoiding further scheduler activity on it and we've
2129 * not yet picked a replacement task.
2130 */
2131 lockdep_unpin_lock(&rq->lock, cookie);
2132 raw_spin_unlock(&rq->lock);
2133 raw_spin_lock(&p->pi_lock);
2134 raw_spin_lock(&rq->lock);
2135 lockdep_repin_lock(&rq->lock, cookie);
2136 }
2137
2138 if (!(p->state & TASK_NORMAL))
2139 goto out;
2140
2141 trace_sched_waking(p);
2142
2143 if (!task_on_rq_queued(p))
2144 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2145
2146 ttwu_do_wakeup(rq, p, 0, cookie);
2147 ttwu_stat(p, smp_processor_id(), 0);
2148out:
2149 raw_spin_unlock(&p->pi_lock);
2150}
2151
2152/**
2153 * wake_up_process - Wake up a specific process
2154 * @p: The process to be woken up.
2155 *
2156 * Attempt to wake up the nominated process and move it to the set of runnable
2157 * processes.
2158 *
2159 * Return: 1 if the process was woken up, 0 if it was already running.
2160 *
2161 * It may be assumed that this function implies a write memory barrier before
2162 * changing the task state if and only if any tasks are woken up.
2163 */
2164int wake_up_process(struct task_struct *p)
2165{
2166 return try_to_wake_up(p, TASK_NORMAL, 0);
2167}
2168EXPORT_SYMBOL(wake_up_process);
2169
2170int wake_up_state(struct task_struct *p, unsigned int state)
2171{
2172 return try_to_wake_up(p, state, 0);
2173}
2174
2175/*
2176 * This function clears the sched_dl_entity static params.
2177 */
2178void __dl_clear_params(struct task_struct *p)
2179{
2180 struct sched_dl_entity *dl_se = &p->dl;
2181
2182 dl_se->dl_runtime = 0;
2183 dl_se->dl_deadline = 0;
2184 dl_se->dl_period = 0;
2185 dl_se->flags = 0;
2186 dl_se->dl_bw = 0;
2187
2188 dl_se->dl_throttled = 0;
2189 dl_se->dl_yielded = 0;
2190}
2191
2192/*
2193 * Perform scheduler related setup for a newly forked process p.
2194 * p is forked by current.
2195 *
2196 * __sched_fork() is basic setup used by init_idle() too:
2197 */
2198static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
2199{
2200 p->on_rq = 0;
2201
2202 p->se.on_rq = 0;
2203 p->se.exec_start = 0;
2204 p->se.sum_exec_runtime = 0;
2205 p->se.prev_sum_exec_runtime = 0;
2206 p->se.nr_migrations = 0;
2207 p->se.vruntime = 0;
2208 INIT_LIST_HEAD(&p->se.group_node);
2209
2210#ifdef CONFIG_FAIR_GROUP_SCHED
2211 p->se.cfs_rq = NULL;
2212#endif
2213
2214#ifdef CONFIG_SCHEDSTATS
2215 /* Even if schedstat is disabled, there should not be garbage */
2216 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
2217#endif
2218
2219 RB_CLEAR_NODE(&p->dl.rb_node);
2220 init_dl_task_timer(&p->dl);
2221 __dl_clear_params(p);
2222
2223 INIT_LIST_HEAD(&p->rt.run_list);
2224 p->rt.timeout = 0;
2225 p->rt.time_slice = sched_rr_timeslice;
2226 p->rt.on_rq = 0;
2227 p->rt.on_list = 0;
2228
2229#ifdef CONFIG_PREEMPT_NOTIFIERS
2230 INIT_HLIST_HEAD(&p->preempt_notifiers);
2231#endif
2232
2233#ifdef CONFIG_NUMA_BALANCING
2234 if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
2235 p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2236 p->mm->numa_scan_seq = 0;
2237 }
2238
2239 if (clone_flags & CLONE_VM)
2240 p->numa_preferred_nid = current->numa_preferred_nid;
2241 else
2242 p->numa_preferred_nid = -1;
2243
2244 p->node_stamp = 0ULL;
2245 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
2246 p->numa_scan_period = sysctl_numa_balancing_scan_delay;
2247 p->numa_work.next = &p->numa_work;
2248 p->numa_faults = NULL;
2249 p->last_task_numa_placement = 0;
2250 p->last_sum_exec_runtime = 0;
2251
2252 p->numa_group = NULL;
2253#endif /* CONFIG_NUMA_BALANCING */
2254}
2255
2256DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
2257
2258#ifdef CONFIG_NUMA_BALANCING
2259
2260void set_numabalancing_state(bool enabled)
2261{
2262 if (enabled)
2263 static_branch_enable(&sched_numa_balancing);
2264 else
2265 static_branch_disable(&sched_numa_balancing);
2266}
2267
2268#ifdef CONFIG_PROC_SYSCTL
2269int sysctl_numa_balancing(struct ctl_table *table, int write,
2270 void __user *buffer, size_t *lenp, loff_t *ppos)
2271{
2272 struct ctl_table t;
2273 int err;
2274 int state = static_branch_likely(&sched_numa_balancing);
2275
2276 if (write && !capable(CAP_SYS_ADMIN))
2277 return -EPERM;
2278
2279 t = *table;
2280 t.data = &state;
2281 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2282 if (err < 0)
2283 return err;
2284 if (write)
2285 set_numabalancing_state(state);
2286 return err;
2287}
2288#endif
2289#endif
2290
2291#ifdef CONFIG_SCHEDSTATS
2292
2293DEFINE_STATIC_KEY_FALSE(sched_schedstats);
2294static bool __initdata __sched_schedstats = false;
2295
2296static void set_schedstats(bool enabled)
2297{
2298 if (enabled)
2299 static_branch_enable(&sched_schedstats);
2300 else
2301 static_branch_disable(&sched_schedstats);
2302}
2303
2304void force_schedstat_enabled(void)
2305{
2306 if (!schedstat_enabled()) {
2307 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
2308 static_branch_enable(&sched_schedstats);
2309 }
2310}
2311
2312static int __init setup_schedstats(char *str)
2313{
2314 int ret = 0;
2315 if (!str)
2316 goto out;
2317
2318 /*
2319 * This code is called before jump labels have been set up, so we can't
2320 * change the static branch directly just yet. Instead set a temporary
2321 * variable so init_schedstats() can do it later.
2322 */
2323 if (!strcmp(str, "enable")) {
2324 __sched_schedstats = true;
2325 ret = 1;
2326 } else if (!strcmp(str, "disable")) {
2327 __sched_schedstats = false;
2328 ret = 1;
2329 }
2330out:
2331 if (!ret)
2332 pr_warn("Unable to parse schedstats=\n");
2333
2334 return ret;
2335}
2336__setup("schedstats=", setup_schedstats);
2337
2338static void __init init_schedstats(void)
2339{
2340 set_schedstats(__sched_schedstats);
2341}
2342
2343#ifdef CONFIG_PROC_SYSCTL
2344int sysctl_schedstats(struct ctl_table *table, int write,
2345 void __user *buffer, size_t *lenp, loff_t *ppos)
2346{
2347 struct ctl_table t;
2348 int err;
2349 int state = static_branch_likely(&sched_schedstats);
2350
2351 if (write && !capable(CAP_SYS_ADMIN))
2352 return -EPERM;
2353
2354 t = *table;
2355 t.data = &state;
2356 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2357 if (err < 0)
2358 return err;
2359 if (write)
2360 set_schedstats(state);
2361 return err;
2362}
2363#endif /* CONFIG_PROC_SYSCTL */
2364#else /* !CONFIG_SCHEDSTATS */
2365static inline void init_schedstats(void) {}
2366#endif /* CONFIG_SCHEDSTATS */
2367
2368/*
2369 * fork()/clone()-time setup:
2370 */
2371int sched_fork(unsigned long clone_flags, struct task_struct *p)
2372{
2373 unsigned long flags;
2374 int cpu = get_cpu();
2375
2376 __sched_fork(clone_flags, p);
2377 /*
2378 * We mark the process as NEW here. This guarantees that
2379 * nobody will actually run it, and a signal or other external
2380 * event cannot wake it up and insert it on the runqueue either.
2381 */
2382 p->state = TASK_NEW;
2383
2384 /*
2385 * Make sure we do not leak PI boosting priority to the child.
2386 */
2387 p->prio = current->normal_prio;
2388
2389 /*
2390 * Revert to default priority/policy on fork if requested.
2391 */
2392 if (unlikely(p->sched_reset_on_fork)) {
2393 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
2394 p->policy = SCHED_NORMAL;
2395 p->static_prio = NICE_TO_PRIO(0);
2396 p->rt_priority = 0;
2397 } else if (PRIO_TO_NICE(p->static_prio) < 0)
2398 p->static_prio = NICE_TO_PRIO(0);
2399
2400 p->prio = p->normal_prio = __normal_prio(p);
2401 set_load_weight(p);
2402
2403 /*
2404 * We don't need the reset flag anymore after the fork. It has
2405 * fulfilled its duty:
2406 */
2407 p->sched_reset_on_fork = 0;
2408 }
2409
2410 if (dl_prio(p->prio)) {
2411 put_cpu();
2412 return -EAGAIN;
2413 } else if (rt_prio(p->prio)) {
2414 p->sched_class = &rt_sched_class;
2415 } else {
2416 p->sched_class = &fair_sched_class;
2417 }
2418
2419 init_entity_runnable_average(&p->se);
2420
2421 /*
2422 * The child is not yet in the pid-hash so no cgroup attach races,
2423 * and the cgroup is pinned to this child due to cgroup_fork()
2424 * is ran before sched_fork().
2425 *
2426 * Silence PROVE_RCU.
2427 */
2428 raw_spin_lock_irqsave(&p->pi_lock, flags);
2429 /*
2430 * We're setting the cpu for the first time, we don't migrate,
2431 * so use __set_task_cpu().
2432 */
2433 __set_task_cpu(p, cpu);
2434 if (p->sched_class->task_fork)
2435 p->sched_class->task_fork(p);
2436 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2437
2438#ifdef CONFIG_SCHED_INFO
2439 if (likely(sched_info_on()))
2440 memset(&p->sched_info, 0, sizeof(p->sched_info));
2441#endif
2442#if defined(CONFIG_SMP)
2443 p->on_cpu = 0;
2444#endif
2445 init_task_preempt_count(p);
2446#ifdef CONFIG_SMP
2447 plist_node_init(&p->pushable_tasks, MAX_PRIO);
2448 RB_CLEAR_NODE(&p->pushable_dl_tasks);
2449#endif
2450
2451 put_cpu();
2452 return 0;
2453}
2454
2455unsigned long to_ratio(u64 period, u64 runtime)
2456{
2457 if (runtime == RUNTIME_INF)
2458 return 1ULL << 20;
2459
2460 /*
2461 * Doing this here saves a lot of checks in all
2462 * the calling paths, and returning zero seems
2463 * safe for them anyway.
2464 */
2465 if (period == 0)
2466 return 0;
2467
2468 return div64_u64(runtime << 20, period);
2469}
2470
2471#ifdef CONFIG_SMP
2472inline struct dl_bw *dl_bw_of(int i)
2473{
2474 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2475 "sched RCU must be held");
2476 return &cpu_rq(i)->rd->dl_bw;
2477}
2478
2479static inline int dl_bw_cpus(int i)
2480{
2481 struct root_domain *rd = cpu_rq(i)->rd;
2482 int cpus = 0;
2483
2484 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2485 "sched RCU must be held");
2486 for_each_cpu_and(i, rd->span, cpu_active_mask)
2487 cpus++;
2488
2489 return cpus;
2490}
2491#else
2492inline struct dl_bw *dl_bw_of(int i)
2493{
2494 return &cpu_rq(i)->dl.dl_bw;
2495}
2496
2497static inline int dl_bw_cpus(int i)
2498{
2499 return 1;
2500}
2501#endif
2502
2503/*
2504 * We must be sure that accepting a new task (or allowing changing the
2505 * parameters of an existing one) is consistent with the bandwidth
2506 * constraints. If yes, this function also accordingly updates the currently
2507 * allocated bandwidth to reflect the new situation.
2508 *
2509 * This function is called while holding p's rq->lock.
2510 *
2511 * XXX we should delay bw change until the task's 0-lag point, see
2512 * __setparam_dl().
2513 */
2514static int dl_overflow(struct task_struct *p, int policy,
2515 const struct sched_attr *attr)
2516{
2517
2518 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2519 u64 period = attr->sched_period ?: attr->sched_deadline;
2520 u64 runtime = attr->sched_runtime;
2521 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2522 int cpus, err = -1;
2523
2524 /* !deadline task may carry old deadline bandwidth */
2525 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2526 return 0;
2527
2528 /*
2529 * Either if a task, enters, leave, or stays -deadline but changes
2530 * its parameters, we may need to update accordingly the total
2531 * allocated bandwidth of the container.
2532 */
2533 raw_spin_lock(&dl_b->lock);
2534 cpus = dl_bw_cpus(task_cpu(p));
2535 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2536 !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2537 __dl_add(dl_b, new_bw);
2538 err = 0;
2539 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2540 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2541 __dl_clear(dl_b, p->dl.dl_bw);
2542 __dl_add(dl_b, new_bw);
2543 err = 0;
2544 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2545 __dl_clear(dl_b, p->dl.dl_bw);
2546 err = 0;
2547 }
2548 raw_spin_unlock(&dl_b->lock);
2549
2550 return err;
2551}
2552
2553extern void init_dl_bw(struct dl_bw *dl_b);
2554
2555/*
2556 * wake_up_new_task - wake up a newly created task for the first time.
2557 *
2558 * This function will do some initial scheduler statistics housekeeping
2559 * that must be done for every newly created context, then puts the task
2560 * on the runqueue and wakes it.
2561 */
2562void wake_up_new_task(struct task_struct *p)
2563{
2564 struct rq_flags rf;
2565 struct rq *rq;
2566
2567 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2568 p->state = TASK_RUNNING;
2569#ifdef CONFIG_SMP
2570 /*
2571 * Fork balancing, do it here and not earlier because:
2572 * - cpus_allowed can change in the fork path
2573 * - any previously selected cpu might disappear through hotplug
2574 *
2575 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
2576 * as we're not fully set-up yet.
2577 */
2578 __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
2579#endif
2580 rq = __task_rq_lock(p, &rf);
2581 post_init_entity_util_avg(&p->se);
2582
2583 activate_task(rq, p, 0);
2584 p->on_rq = TASK_ON_RQ_QUEUED;
2585 trace_sched_wakeup_new(p);
2586 check_preempt_curr(rq, p, WF_FORK);
2587#ifdef CONFIG_SMP
2588 if (p->sched_class->task_woken) {
2589 /*
2590 * Nothing relies on rq->lock after this, so its fine to
2591 * drop it.
2592 */
2593 lockdep_unpin_lock(&rq->lock, rf.cookie);
2594 p->sched_class->task_woken(rq, p);
2595 lockdep_repin_lock(&rq->lock, rf.cookie);
2596 }
2597#endif
2598 task_rq_unlock(rq, p, &rf);
2599}
2600
2601#ifdef CONFIG_PREEMPT_NOTIFIERS
2602
2603static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
2604
2605void preempt_notifier_inc(void)
2606{
2607 static_key_slow_inc(&preempt_notifier_key);
2608}
2609EXPORT_SYMBOL_GPL(preempt_notifier_inc);
2610
2611void preempt_notifier_dec(void)
2612{
2613 static_key_slow_dec(&preempt_notifier_key);
2614}
2615EXPORT_SYMBOL_GPL(preempt_notifier_dec);
2616
2617/**
2618 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2619 * @notifier: notifier struct to register
2620 */
2621void preempt_notifier_register(struct preempt_notifier *notifier)
2622{
2623 if (!static_key_false(&preempt_notifier_key))
2624 WARN(1, "registering preempt_notifier while notifiers disabled\n");
2625
2626 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers);
2627}
2628EXPORT_SYMBOL_GPL(preempt_notifier_register);
2629
2630/**
2631 * preempt_notifier_unregister - no longer interested in preemption notifications
2632 * @notifier: notifier struct to unregister
2633 *
2634 * This is *not* safe to call from within a preemption notifier.
2635 */
2636void preempt_notifier_unregister(struct preempt_notifier *notifier)
2637{
2638 hlist_del(¬ifier->link);
2639}
2640EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2641
2642static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
2643{
2644 struct preempt_notifier *notifier;
2645
2646 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
2647 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2648}
2649
2650static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2651{
2652 if (static_key_false(&preempt_notifier_key))
2653 __fire_sched_in_preempt_notifiers(curr);
2654}
2655
2656static void
2657__fire_sched_out_preempt_notifiers(struct task_struct *curr,
2658 struct task_struct *next)
2659{
2660 struct preempt_notifier *notifier;
2661
2662 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
2663 notifier->ops->sched_out(notifier, next);
2664}
2665
2666static __always_inline void
2667fire_sched_out_preempt_notifiers(struct task_struct *curr,
2668 struct task_struct *next)
2669{
2670 if (static_key_false(&preempt_notifier_key))
2671 __fire_sched_out_preempt_notifiers(curr, next);
2672}
2673
2674#else /* !CONFIG_PREEMPT_NOTIFIERS */
2675
2676static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2677{
2678}
2679
2680static inline void
2681fire_sched_out_preempt_notifiers(struct task_struct *curr,
2682 struct task_struct *next)
2683{
2684}
2685
2686#endif /* CONFIG_PREEMPT_NOTIFIERS */
2687
2688/**
2689 * prepare_task_switch - prepare to switch tasks
2690 * @rq: the runqueue preparing to switch
2691 * @prev: the current task that is being switched out
2692 * @next: the task we are going to switch to.
2693 *
2694 * This is called with the rq lock held and interrupts off. It must
2695 * be paired with a subsequent finish_task_switch after the context
2696 * switch.
2697 *
2698 * prepare_task_switch sets up locking and calls architecture specific
2699 * hooks.
2700 */
2701static inline void
2702prepare_task_switch(struct rq *rq, struct task_struct *prev,
2703 struct task_struct *next)
2704{
2705 sched_info_switch(rq, prev, next);
2706 perf_event_task_sched_out(prev, next);
2707 fire_sched_out_preempt_notifiers(prev, next);
2708 prepare_lock_switch(rq, next);
2709 prepare_arch_switch(next);
2710}
2711
2712/**
2713 * finish_task_switch - clean up after a task-switch
2714 * @prev: the thread we just switched away from.
2715 *
2716 * finish_task_switch must be called after the context switch, paired
2717 * with a prepare_task_switch call before the context switch.
2718 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2719 * and do any other architecture-specific cleanup actions.
2720 *
2721 * Note that we may have delayed dropping an mm in context_switch(). If
2722 * so, we finish that here outside of the runqueue lock. (Doing it
2723 * with the lock held can cause deadlocks; see schedule() for
2724 * details.)
2725 *
2726 * The context switch have flipped the stack from under us and restored the
2727 * local variables which were saved when this task called schedule() in the
2728 * past. prev == current is still correct but we need to recalculate this_rq
2729 * because prev may have moved to another CPU.
2730 */
2731static struct rq *finish_task_switch(struct task_struct *prev)
2732 __releases(rq->lock)
2733{
2734 struct rq *rq = this_rq();
2735 struct mm_struct *mm = rq->prev_mm;
2736 long prev_state;
2737
2738 /*
2739 * The previous task will have left us with a preempt_count of 2
2740 * because it left us after:
2741 *
2742 * schedule()
2743 * preempt_disable(); // 1
2744 * __schedule()
2745 * raw_spin_lock_irq(&rq->lock) // 2
2746 *
2747 * Also, see FORK_PREEMPT_COUNT.
2748 */
2749 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
2750 "corrupted preempt_count: %s/%d/0x%x\n",
2751 current->comm, current->pid, preempt_count()))
2752 preempt_count_set(FORK_PREEMPT_COUNT);
2753
2754 rq->prev_mm = NULL;
2755
2756 /*
2757 * A task struct has one reference for the use as "current".
2758 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
2759 * schedule one last time. The schedule call will never return, and
2760 * the scheduled task must drop that reference.
2761 *
2762 * We must observe prev->state before clearing prev->on_cpu (in
2763 * finish_lock_switch), otherwise a concurrent wakeup can get prev
2764 * running on another CPU and we could rave with its RUNNING -> DEAD
2765 * transition, resulting in a double drop.
2766 */
2767 prev_state = prev->state;
2768 vtime_task_switch(prev);
2769 perf_event_task_sched_in(prev, current);
2770 finish_lock_switch(rq, prev);
2771 finish_arch_post_lock_switch();
2772
2773 fire_sched_in_preempt_notifiers(current);
2774 if (mm)
2775 mmdrop(mm);
2776 if (unlikely(prev_state == TASK_DEAD)) {
2777 if (prev->sched_class->task_dead)
2778 prev->sched_class->task_dead(prev);
2779
2780 /*
2781 * Remove function-return probe instances associated with this
2782 * task and put them back on the free list.
2783 */
2784 kprobe_flush_task(prev);
2785
2786 /* Task is done with its stack. */
2787 put_task_stack(prev);
2788
2789 put_task_struct(prev);
2790 }
2791
2792 tick_nohz_task_switch();
2793 return rq;
2794}
2795
2796#ifdef CONFIG_SMP
2797
2798/* rq->lock is NOT held, but preemption is disabled */
2799static void __balance_callback(struct rq *rq)
2800{
2801 struct callback_head *head, *next;
2802 void (*func)(struct rq *rq);
2803 unsigned long flags;
2804
2805 raw_spin_lock_irqsave(&rq->lock, flags);
2806 head = rq->balance_callback;
2807 rq->balance_callback = NULL;
2808 while (head) {
2809 func = (void (*)(struct rq *))head->func;
2810 next = head->next;
2811 head->next = NULL;
2812 head = next;
2813
2814 func(rq);
2815 }
2816 raw_spin_unlock_irqrestore(&rq->lock, flags);
2817}
2818
2819static inline void balance_callback(struct rq *rq)
2820{
2821 if (unlikely(rq->balance_callback))
2822 __balance_callback(rq);
2823}
2824
2825#else
2826
2827static inline void balance_callback(struct rq *rq)
2828{
2829}
2830
2831#endif
2832
2833/**
2834 * schedule_tail - first thing a freshly forked thread must call.
2835 * @prev: the thread we just switched away from.
2836 */
2837asmlinkage __visible void schedule_tail(struct task_struct *prev)
2838 __releases(rq->lock)
2839{
2840 struct rq *rq;
2841
2842 /*
2843 * New tasks start with FORK_PREEMPT_COUNT, see there and
2844 * finish_task_switch() for details.
2845 *
2846 * finish_task_switch() will drop rq->lock() and lower preempt_count
2847 * and the preempt_enable() will end up enabling preemption (on
2848 * PREEMPT_COUNT kernels).
2849 */
2850
2851 rq = finish_task_switch(prev);
2852 balance_callback(rq);
2853 preempt_enable();
2854
2855 if (current->set_child_tid)
2856 put_user(task_pid_vnr(current), current->set_child_tid);
2857}
2858
2859/*
2860 * context_switch - switch to the new MM and the new thread's register state.
2861 */
2862static __always_inline struct rq *
2863context_switch(struct rq *rq, struct task_struct *prev,
2864 struct task_struct *next, struct pin_cookie cookie)
2865{
2866 struct mm_struct *mm, *oldmm;
2867
2868 prepare_task_switch(rq, prev, next);
2869
2870 mm = next->mm;
2871 oldmm = prev->active_mm;
2872 /*
2873 * For paravirt, this is coupled with an exit in switch_to to
2874 * combine the page table reload and the switch backend into
2875 * one hypercall.
2876 */
2877 arch_start_context_switch(prev);
2878
2879 if (!mm) {
2880 next->active_mm = oldmm;
2881 atomic_inc(&oldmm->mm_count);
2882 enter_lazy_tlb(oldmm, next);
2883 } else
2884 switch_mm_irqs_off(oldmm, mm, next);
2885
2886 if (!prev->mm) {
2887 prev->active_mm = NULL;
2888 rq->prev_mm = oldmm;
2889 }
2890 /*
2891 * Since the runqueue lock will be released by the next
2892 * task (which is an invalid locking op but in the case
2893 * of the scheduler it's an obvious special-case), so we
2894 * do an early lockdep release here:
2895 */
2896 lockdep_unpin_lock(&rq->lock, cookie);
2897 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2898
2899 /* Here we just switch the register state and the stack. */
2900 switch_to(prev, next, prev);
2901 barrier();
2902
2903 return finish_task_switch(prev);
2904}
2905
2906/*
2907 * nr_running and nr_context_switches:
2908 *
2909 * externally visible scheduler statistics: current number of runnable
2910 * threads, total number of context switches performed since bootup.
2911 */
2912unsigned long nr_running(void)
2913{
2914 unsigned long i, sum = 0;
2915
2916 for_each_online_cpu(i)
2917 sum += cpu_rq(i)->nr_running;
2918
2919 return sum;
2920}
2921
2922/*
2923 * Check if only the current task is running on the cpu.
2924 *
2925 * Caution: this function does not check that the caller has disabled
2926 * preemption, thus the result might have a time-of-check-to-time-of-use
2927 * race. The caller is responsible to use it correctly, for example:
2928 *
2929 * - from a non-preemptable section (of course)
2930 *
2931 * - from a thread that is bound to a single CPU
2932 *
2933 * - in a loop with very short iterations (e.g. a polling loop)
2934 */
2935bool single_task_running(void)
2936{
2937 return raw_rq()->nr_running == 1;
2938}
2939EXPORT_SYMBOL(single_task_running);
2940
2941unsigned long long nr_context_switches(void)
2942{
2943 int i;
2944 unsigned long long sum = 0;
2945
2946 for_each_possible_cpu(i)
2947 sum += cpu_rq(i)->nr_switches;
2948
2949 return sum;
2950}
2951
2952unsigned long nr_iowait(void)
2953{
2954 unsigned long i, sum = 0;
2955
2956 for_each_possible_cpu(i)
2957 sum += atomic_read(&cpu_rq(i)->nr_iowait);
2958
2959 return sum;
2960}
2961
2962unsigned long nr_iowait_cpu(int cpu)
2963{
2964 struct rq *this = cpu_rq(cpu);
2965 return atomic_read(&this->nr_iowait);
2966}
2967
2968void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
2969{
2970 struct rq *rq = this_rq();
2971 *nr_waiters = atomic_read(&rq->nr_iowait);
2972 *load = rq->load.weight;
2973}
2974
2975#ifdef CONFIG_SMP
2976
2977/*
2978 * sched_exec - execve() is a valuable balancing opportunity, because at
2979 * this point the task has the smallest effective memory and cache footprint.
2980 */
2981void sched_exec(void)
2982{
2983 struct task_struct *p = current;
2984 unsigned long flags;
2985 int dest_cpu;
2986
2987 raw_spin_lock_irqsave(&p->pi_lock, flags);
2988 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
2989 if (dest_cpu == smp_processor_id())
2990 goto unlock;
2991
2992 if (likely(cpu_active(dest_cpu))) {
2993 struct migration_arg arg = { p, dest_cpu };
2994
2995 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2996 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
2997 return;
2998 }
2999unlock:
3000 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3001}
3002
3003#endif
3004
3005DEFINE_PER_CPU(struct kernel_stat, kstat);
3006DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
3007
3008EXPORT_PER_CPU_SYMBOL(kstat);
3009EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
3010
3011/*
3012 * The function fair_sched_class.update_curr accesses the struct curr
3013 * and its field curr->exec_start; when called from task_sched_runtime(),
3014 * we observe a high rate of cache misses in practice.
3015 * Prefetching this data results in improved performance.
3016 */
3017static inline void prefetch_curr_exec_start(struct task_struct *p)
3018{
3019#ifdef CONFIG_FAIR_GROUP_SCHED
3020 struct sched_entity *curr = (&p->se)->cfs_rq->curr;
3021#else
3022 struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
3023#endif
3024 prefetch(curr);
3025 prefetch(&curr->exec_start);
3026}
3027
3028/*
3029 * Return accounted runtime for the task.
3030 * In case the task is currently running, return the runtime plus current's
3031 * pending runtime that have not been accounted yet.
3032 */
3033unsigned long long task_sched_runtime(struct task_struct *p)
3034{
3035 struct rq_flags rf;
3036 struct rq *rq;
3037 u64 ns;
3038
3039#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
3040 /*
3041 * 64-bit doesn't need locks to atomically read a 64bit value.
3042 * So we have a optimization chance when the task's delta_exec is 0.
3043 * Reading ->on_cpu is racy, but this is ok.
3044 *
3045 * If we race with it leaving cpu, we'll take a lock. So we're correct.
3046 * If we race with it entering cpu, unaccounted time is 0. This is
3047 * indistinguishable from the read occurring a few cycles earlier.
3048 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
3049 * been accounted, so we're correct here as well.
3050 */
3051 if (!p->on_cpu || !task_on_rq_queued(p))
3052 return p->se.sum_exec_runtime;
3053#endif
3054
3055 rq = task_rq_lock(p, &rf);
3056 /*
3057 * Must be ->curr _and_ ->on_rq. If dequeued, we would
3058 * project cycles that may never be accounted to this
3059 * thread, breaking clock_gettime().
3060 */
3061 if (task_current(rq, p) && task_on_rq_queued(p)) {
3062 prefetch_curr_exec_start(p);
3063 update_rq_clock(rq);
3064 p->sched_class->update_curr(rq);
3065 }
3066 ns = p->se.sum_exec_runtime;
3067 task_rq_unlock(rq, p, &rf);
3068
3069 return ns;
3070}
3071
3072/*
3073 * This function gets called by the timer code, with HZ frequency.
3074 * We call it with interrupts disabled.
3075 */
3076void scheduler_tick(void)
3077{
3078 int cpu = smp_processor_id();
3079 struct rq *rq = cpu_rq(cpu);
3080 struct task_struct *curr = rq->curr;
3081
3082 sched_clock_tick();
3083
3084 raw_spin_lock(&rq->lock);
3085 update_rq_clock(rq);
3086 curr->sched_class->task_tick(rq, curr, 0);
3087 cpu_load_update_active(rq);
3088 calc_global_load_tick(rq);
3089 raw_spin_unlock(&rq->lock);
3090
3091 perf_event_task_tick();
3092
3093#ifdef CONFIG_SMP
3094 rq->idle_balance = idle_cpu(cpu);
3095 trigger_load_balance(rq);
3096#endif
3097 rq_last_tick_reset(rq);
3098}
3099
3100#ifdef CONFIG_NO_HZ_FULL
3101/**
3102 * scheduler_tick_max_deferment
3103 *
3104 * Keep at least one tick per second when a single
3105 * active task is running because the scheduler doesn't
3106 * yet completely support full dynticks environment.
3107 *
3108 * This makes sure that uptime, CFS vruntime, load
3109 * balancing, etc... continue to move forward, even
3110 * with a very low granularity.
3111 *
3112 * Return: Maximum deferment in nanoseconds.
3113 */
3114u64 scheduler_tick_max_deferment(void)
3115{
3116 struct rq *rq = this_rq();
3117 unsigned long next, now = READ_ONCE(jiffies);
3118
3119 next = rq->last_sched_tick + HZ;
3120
3121 if (time_before_eq(next, now))
3122 return 0;
3123
3124 return jiffies_to_nsecs(next - now);
3125}
3126#endif
3127
3128#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
3129 defined(CONFIG_PREEMPT_TRACER))
3130/*
3131 * If the value passed in is equal to the current preempt count
3132 * then we just disabled preemption. Start timing the latency.
3133 */
3134static inline void preempt_latency_start(int val)
3135{
3136 if (preempt_count() == val) {
3137 unsigned long ip = get_lock_parent_ip();
3138#ifdef CONFIG_DEBUG_PREEMPT
3139 current->preempt_disable_ip = ip;
3140#endif
3141 trace_preempt_off(CALLER_ADDR0, ip);
3142 }
3143}
3144
3145void preempt_count_add(int val)
3146{
3147#ifdef CONFIG_DEBUG_PREEMPT
3148 /*
3149 * Underflow?
3150 */
3151 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3152 return;
3153#endif
3154 __preempt_count_add(val);
3155#ifdef CONFIG_DEBUG_PREEMPT
3156 /*
3157 * Spinlock count overflowing soon?
3158 */
3159 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3160 PREEMPT_MASK - 10);
3161#endif
3162 preempt_latency_start(val);
3163}
3164EXPORT_SYMBOL(preempt_count_add);
3165NOKPROBE_SYMBOL(preempt_count_add);
3166
3167/*
3168 * If the value passed in equals to the current preempt count
3169 * then we just enabled preemption. Stop timing the latency.
3170 */
3171static inline void preempt_latency_stop(int val)
3172{
3173 if (preempt_count() == val)
3174 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
3175}
3176
3177void preempt_count_sub(int val)
3178{
3179#ifdef CONFIG_DEBUG_PREEMPT
3180 /*
3181 * Underflow?
3182 */
3183 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3184 return;
3185 /*
3186 * Is the spinlock portion underflowing?
3187 */
3188 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3189 !(preempt_count() & PREEMPT_MASK)))
3190 return;
3191#endif
3192
3193 preempt_latency_stop(val);
3194 __preempt_count_sub(val);
3195}
3196EXPORT_SYMBOL(preempt_count_sub);
3197NOKPROBE_SYMBOL(preempt_count_sub);
3198
3199#else
3200static inline void preempt_latency_start(int val) { }
3201static inline void preempt_latency_stop(int val) { }
3202#endif
3203
3204/*
3205 * Print scheduling while atomic bug:
3206 */
3207static noinline void __schedule_bug(struct task_struct *prev)
3208{
3209 /* Save this before calling printk(), since that will clobber it */
3210 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
3211
3212 if (oops_in_progress)
3213 return;
3214
3215 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3216 prev->comm, prev->pid, preempt_count());
3217
3218 debug_show_held_locks(prev);
3219 print_modules();
3220 if (irqs_disabled())
3221 print_irqtrace_events(prev);
3222 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
3223 && in_atomic_preempt_off()) {
3224 pr_err("Preemption disabled at:");
3225 print_ip_sym(preempt_disable_ip);
3226 pr_cont("\n");
3227 }
3228 if (panic_on_warn)
3229 panic("scheduling while atomic\n");
3230
3231 dump_stack();
3232 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
3233}
3234
3235/*
3236 * Various schedule()-time debugging checks and statistics:
3237 */
3238static inline void schedule_debug(struct task_struct *prev)
3239{
3240#ifdef CONFIG_SCHED_STACK_END_CHECK
3241 if (task_stack_end_corrupted(prev))
3242 panic("corrupted stack end detected inside scheduler\n");
3243#endif
3244
3245 if (unlikely(in_atomic_preempt_off())) {
3246 __schedule_bug(prev);
3247 preempt_count_set(PREEMPT_DISABLED);
3248 }
3249 rcu_sleep_check();
3250
3251 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3252
3253 schedstat_inc(this_rq()->sched_count);
3254}
3255
3256/*
3257 * Pick up the highest-prio task:
3258 */
3259static inline struct task_struct *
3260pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
3261{
3262 const struct sched_class *class = &fair_sched_class;
3263 struct task_struct *p;
3264
3265 /*
3266 * Optimization: we know that if all tasks are in
3267 * the fair class we can call that function directly:
3268 */
3269 if (likely(prev->sched_class == class &&
3270 rq->nr_running == rq->cfs.h_nr_running)) {
3271 p = fair_sched_class.pick_next_task(rq, prev, cookie);
3272 if (unlikely(p == RETRY_TASK))
3273 goto again;
3274
3275 /* assumes fair_sched_class->next == idle_sched_class */
3276 if (unlikely(!p))
3277 p = idle_sched_class.pick_next_task(rq, prev, cookie);
3278
3279 return p;
3280 }
3281
3282again:
3283 for_each_class(class) {
3284 p = class->pick_next_task(rq, prev, cookie);
3285 if (p) {
3286 if (unlikely(p == RETRY_TASK))
3287 goto again;
3288 return p;
3289 }
3290 }
3291
3292 BUG(); /* the idle class will always have a runnable task */
3293}
3294
3295/*
3296 * __schedule() is the main scheduler function.
3297 *
3298 * The main means of driving the scheduler and thus entering this function are:
3299 *
3300 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
3301 *
3302 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
3303 * paths. For example, see arch/x86/entry_64.S.
3304 *
3305 * To drive preemption between tasks, the scheduler sets the flag in timer
3306 * interrupt handler scheduler_tick().
3307 *
3308 * 3. Wakeups don't really cause entry into schedule(). They add a
3309 * task to the run-queue and that's it.
3310 *
3311 * Now, if the new task added to the run-queue preempts the current
3312 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
3313 * called on the nearest possible occasion:
3314 *
3315 * - If the kernel is preemptible (CONFIG_PREEMPT=y):
3316 *
3317 * - in syscall or exception context, at the next outmost
3318 * preempt_enable(). (this might be as soon as the wake_up()'s
3319 * spin_unlock()!)
3320 *
3321 * - in IRQ context, return from interrupt-handler to
3322 * preemptible context
3323 *
3324 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
3325 * then at the next:
3326 *
3327 * - cond_resched() call
3328 * - explicit schedule() call
3329 * - return from syscall or exception to user-space
3330 * - return from interrupt-handler to user-space
3331 *
3332 * WARNING: must be called with preemption disabled!
3333 */
3334static void __sched notrace __schedule(bool preempt)
3335{
3336 struct task_struct *prev, *next;
3337 unsigned long *switch_count;
3338 struct pin_cookie cookie;
3339 struct rq *rq;
3340 int cpu;
3341
3342 cpu = smp_processor_id();
3343 rq = cpu_rq(cpu);
3344 prev = rq->curr;
3345
3346 schedule_debug(prev);
3347
3348 if (sched_feat(HRTICK))
3349 hrtick_clear(rq);
3350
3351 local_irq_disable();
3352 rcu_note_context_switch();
3353
3354 /*
3355 * Make sure that signal_pending_state()->signal_pending() below
3356 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
3357 * done by the caller to avoid the race with signal_wake_up().
3358 */
3359 smp_mb__before_spinlock();
3360 raw_spin_lock(&rq->lock);
3361 cookie = lockdep_pin_lock(&rq->lock);
3362
3363 rq->clock_skip_update <<= 1; /* promote REQ to ACT */
3364
3365 switch_count = &prev->nivcsw;
3366 if (!preempt && prev->state) {
3367 if (unlikely(signal_pending_state(prev->state, prev))) {
3368 prev->state = TASK_RUNNING;
3369 } else {
3370 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3371 prev->on_rq = 0;
3372
3373 /*
3374 * If a worker went to sleep, notify and ask workqueue
3375 * whether it wants to wake up a task to maintain
3376 * concurrency.
3377 */
3378 if (prev->flags & PF_WQ_WORKER) {
3379 struct task_struct *to_wakeup;
3380
3381 to_wakeup = wq_worker_sleeping(prev);
3382 if (to_wakeup)
3383 try_to_wake_up_local(to_wakeup, cookie);
3384 }
3385 }
3386 switch_count = &prev->nvcsw;
3387 }
3388
3389 if (task_on_rq_queued(prev))
3390 update_rq_clock(rq);
3391
3392 next = pick_next_task(rq, prev, cookie);
3393 clear_tsk_need_resched(prev);
3394 clear_preempt_need_resched();
3395 rq->clock_skip_update = 0;
3396
3397 if (likely(prev != next)) {
3398 rq->nr_switches++;
3399 rq->curr = next;
3400 ++*switch_count;
3401
3402 trace_sched_switch(preempt, prev, next);
3403 rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
3404 } else {
3405 lockdep_unpin_lock(&rq->lock, cookie);
3406 raw_spin_unlock_irq(&rq->lock);
3407 }
3408
3409 balance_callback(rq);
3410}
3411
3412void __noreturn do_task_dead(void)
3413{
3414 /*
3415 * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
3416 * when the following two conditions become true.
3417 * - There is race condition of mmap_sem (It is acquired by
3418 * exit_mm()), and
3419 * - SMI occurs before setting TASK_RUNINNG.
3420 * (or hypervisor of virtual machine switches to other guest)
3421 * As a result, we may become TASK_RUNNING after becoming TASK_DEAD
3422 *
3423 * To avoid it, we have to wait for releasing tsk->pi_lock which
3424 * is held by try_to_wake_up()
3425 */
3426 smp_mb();
3427 raw_spin_unlock_wait(¤t->pi_lock);
3428
3429 /* causes final put_task_struct in finish_task_switch(). */
3430 __set_current_state(TASK_DEAD);
3431 current->flags |= PF_NOFREEZE; /* tell freezer to ignore us */
3432 __schedule(false);
3433 BUG();
3434 /* Avoid "noreturn function does return". */
3435 for (;;)
3436 cpu_relax(); /* For when BUG is null */
3437}
3438
3439static inline void sched_submit_work(struct task_struct *tsk)
3440{
3441 if (!tsk->state || tsk_is_pi_blocked(tsk))
3442 return;
3443 /*
3444 * If we are going to sleep and we have plugged IO queued,
3445 * make sure to submit it to avoid deadlocks.
3446 */
3447 if (blk_needs_flush_plug(tsk))
3448 blk_schedule_flush_plug(tsk);
3449}
3450
3451asmlinkage __visible void __sched schedule(void)
3452{
3453 struct task_struct *tsk = current;
3454
3455 sched_submit_work(tsk);
3456 do {
3457 preempt_disable();
3458 __schedule(false);
3459 sched_preempt_enable_no_resched();
3460 } while (need_resched());
3461}
3462EXPORT_SYMBOL(schedule);
3463
3464#ifdef CONFIG_CONTEXT_TRACKING
3465asmlinkage __visible void __sched schedule_user(void)
3466{
3467 /*
3468 * If we come here after a random call to set_need_resched(),
3469 * or we have been woken up remotely but the IPI has not yet arrived,
3470 * we haven't yet exited the RCU idle mode. Do it here manually until
3471 * we find a better solution.
3472 *
3473 * NB: There are buggy callers of this function. Ideally we
3474 * should warn if prev_state != CONTEXT_USER, but that will trigger
3475 * too frequently to make sense yet.
3476 */
3477 enum ctx_state prev_state = exception_enter();
3478 schedule();
3479 exception_exit(prev_state);
3480}
3481#endif
3482
3483/**
3484 * schedule_preempt_disabled - called with preemption disabled
3485 *
3486 * Returns with preemption disabled. Note: preempt_count must be 1
3487 */
3488void __sched schedule_preempt_disabled(void)
3489{
3490 sched_preempt_enable_no_resched();
3491 schedule();
3492 preempt_disable();
3493}
3494
3495static void __sched notrace preempt_schedule_common(void)
3496{
3497 do {
3498 /*
3499 * Because the function tracer can trace preempt_count_sub()
3500 * and it also uses preempt_enable/disable_notrace(), if
3501 * NEED_RESCHED is set, the preempt_enable_notrace() called
3502 * by the function tracer will call this function again and
3503 * cause infinite recursion.
3504 *
3505 * Preemption must be disabled here before the function
3506 * tracer can trace. Break up preempt_disable() into two
3507 * calls. One to disable preemption without fear of being
3508 * traced. The other to still record the preemption latency,
3509 * which can also be traced by the function tracer.
3510 */
3511 preempt_disable_notrace();
3512 preempt_latency_start(1);
3513 __schedule(true);
3514 preempt_latency_stop(1);
3515 preempt_enable_no_resched_notrace();
3516
3517 /*
3518 * Check again in case we missed a preemption opportunity
3519 * between schedule and now.
3520 */
3521 } while (need_resched());
3522}
3523
3524#ifdef CONFIG_PREEMPT
3525/*
3526 * this is the entry point to schedule() from in-kernel preemption
3527 * off of preempt_enable. Kernel preemptions off return from interrupt
3528 * occur there and call schedule directly.
3529 */
3530asmlinkage __visible void __sched notrace preempt_schedule(void)
3531{
3532 /*
3533 * If there is a non-zero preempt_count or interrupts are disabled,
3534 * we do not want to preempt the current task. Just return..
3535 */
3536 if (likely(!preemptible()))
3537 return;
3538
3539 preempt_schedule_common();
3540}
3541NOKPROBE_SYMBOL(preempt_schedule);
3542EXPORT_SYMBOL(preempt_schedule);
3543
3544/**
3545 * preempt_schedule_notrace - preempt_schedule called by tracing
3546 *
3547 * The tracing infrastructure uses preempt_enable_notrace to prevent
3548 * recursion and tracing preempt enabling caused by the tracing
3549 * infrastructure itself. But as tracing can happen in areas coming
3550 * from userspace or just about to enter userspace, a preempt enable
3551 * can occur before user_exit() is called. This will cause the scheduler
3552 * to be called when the system is still in usermode.
3553 *
3554 * To prevent this, the preempt_enable_notrace will use this function
3555 * instead of preempt_schedule() to exit user context if needed before
3556 * calling the scheduler.
3557 */
3558asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
3559{
3560 enum ctx_state prev_ctx;
3561
3562 if (likely(!preemptible()))
3563 return;
3564
3565 do {
3566 /*
3567 * Because the function tracer can trace preempt_count_sub()
3568 * and it also uses preempt_enable/disable_notrace(), if
3569 * NEED_RESCHED is set, the preempt_enable_notrace() called
3570 * by the function tracer will call this function again and
3571 * cause infinite recursion.
3572 *
3573 * Preemption must be disabled here before the function
3574 * tracer can trace. Break up preempt_disable() into two
3575 * calls. One to disable preemption without fear of being
3576 * traced. The other to still record the preemption latency,
3577 * which can also be traced by the function tracer.
3578 */
3579 preempt_disable_notrace();
3580 preempt_latency_start(1);
3581 /*
3582 * Needs preempt disabled in case user_exit() is traced
3583 * and the tracer calls preempt_enable_notrace() causing
3584 * an infinite recursion.
3585 */
3586 prev_ctx = exception_enter();
3587 __schedule(true);
3588 exception_exit(prev_ctx);
3589
3590 preempt_latency_stop(1);
3591 preempt_enable_no_resched_notrace();
3592 } while (need_resched());
3593}
3594EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
3595
3596#endif /* CONFIG_PREEMPT */
3597
3598/*
3599 * this is the entry point to schedule() from kernel preemption
3600 * off of irq context.
3601 * Note, that this is called and return with irqs disabled. This will
3602 * protect us against recursive calling from irq.
3603 */
3604asmlinkage __visible void __sched preempt_schedule_irq(void)
3605{
3606 enum ctx_state prev_state;
3607
3608 /* Catch callers which need to be fixed */
3609 BUG_ON(preempt_count() || !irqs_disabled());
3610
3611 prev_state = exception_enter();
3612
3613 do {
3614 preempt_disable();
3615 local_irq_enable();
3616 __schedule(true);
3617 local_irq_disable();
3618 sched_preempt_enable_no_resched();
3619 } while (need_resched());
3620
3621 exception_exit(prev_state);
3622}
3623
3624int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
3625 void *key)
3626{
3627 return try_to_wake_up(curr->private, mode, wake_flags);
3628}
3629EXPORT_SYMBOL(default_wake_function);
3630
3631#ifdef CONFIG_RT_MUTEXES
3632
3633/*
3634 * rt_mutex_setprio - set the current priority of a task
3635 * @p: task
3636 * @prio: prio value (kernel-internal form)
3637 *
3638 * This function changes the 'effective' priority of a task. It does
3639 * not touch ->normal_prio like __setscheduler().
3640 *
3641 * Used by the rt_mutex code to implement priority inheritance
3642 * logic. Call site only calls if the priority of the task changed.
3643 */
3644void rt_mutex_setprio(struct task_struct *p, int prio)
3645{
3646 int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
3647 const struct sched_class *prev_class;
3648 struct rq_flags rf;
3649 struct rq *rq;
3650
3651 BUG_ON(prio > MAX_PRIO);
3652
3653 rq = __task_rq_lock(p, &rf);
3654
3655 /*
3656 * Idle task boosting is a nono in general. There is one
3657 * exception, when PREEMPT_RT and NOHZ is active:
3658 *
3659 * The idle task calls get_next_timer_interrupt() and holds
3660 * the timer wheel base->lock on the CPU and another CPU wants
3661 * to access the timer (probably to cancel it). We can safely
3662 * ignore the boosting request, as the idle CPU runs this code
3663 * with interrupts disabled and will complete the lock
3664 * protected section without being interrupted. So there is no
3665 * real need to boost.
3666 */
3667 if (unlikely(p == rq->idle)) {
3668 WARN_ON(p != rq->curr);
3669 WARN_ON(p->pi_blocked_on);
3670 goto out_unlock;
3671 }
3672
3673 trace_sched_pi_setprio(p, prio);
3674 oldprio = p->prio;
3675
3676 if (oldprio == prio)
3677 queue_flag &= ~DEQUEUE_MOVE;
3678
3679 prev_class = p->sched_class;
3680 queued = task_on_rq_queued(p);
3681 running = task_current(rq, p);
3682 if (queued)
3683 dequeue_task(rq, p, queue_flag);
3684 if (running)
3685 put_prev_task(rq, p);
3686
3687 /*
3688 * Boosting condition are:
3689 * 1. -rt task is running and holds mutex A
3690 * --> -dl task blocks on mutex A
3691 *
3692 * 2. -dl task is running and holds mutex A
3693 * --> -dl task blocks on mutex A and could preempt the
3694 * running task
3695 */
3696 if (dl_prio(prio)) {
3697 struct task_struct *pi_task = rt_mutex_get_top_task(p);
3698 if (!dl_prio(p->normal_prio) ||
3699 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
3700 p->dl.dl_boosted = 1;
3701 queue_flag |= ENQUEUE_REPLENISH;
3702 } else
3703 p->dl.dl_boosted = 0;
3704 p->sched_class = &dl_sched_class;
3705 } else if (rt_prio(prio)) {
3706 if (dl_prio(oldprio))
3707 p->dl.dl_boosted = 0;
3708 if (oldprio < prio)
3709 queue_flag |= ENQUEUE_HEAD;
3710 p->sched_class = &rt_sched_class;
3711 } else {
3712 if (dl_prio(oldprio))
3713 p->dl.dl_boosted = 0;
3714 if (rt_prio(oldprio))
3715 p->rt.timeout = 0;
3716 p->sched_class = &fair_sched_class;
3717 }
3718
3719 p->prio = prio;
3720
3721 if (queued)
3722 enqueue_task(rq, p, queue_flag);
3723 if (running)
3724 set_curr_task(rq, p);
3725
3726 check_class_changed(rq, p, prev_class, oldprio);
3727out_unlock:
3728 preempt_disable(); /* avoid rq from going away on us */
3729 __task_rq_unlock(rq, &rf);
3730
3731 balance_callback(rq);
3732 preempt_enable();
3733}
3734#endif
3735
3736void set_user_nice(struct task_struct *p, long nice)
3737{
3738 bool queued, running;
3739 int old_prio, delta;
3740 struct rq_flags rf;
3741 struct rq *rq;
3742
3743 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
3744 return;
3745 /*
3746 * We have to be careful, if called from sys_setpriority(),
3747 * the task might be in the middle of scheduling on another CPU.
3748 */
3749 rq = task_rq_lock(p, &rf);
3750 /*
3751 * The RT priorities are set via sched_setscheduler(), but we still
3752 * allow the 'normal' nice value to be set - but as expected
3753 * it wont have any effect on scheduling until the task is
3754 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
3755 */
3756 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
3757 p->static_prio = NICE_TO_PRIO(nice);
3758 goto out_unlock;
3759 }
3760 queued = task_on_rq_queued(p);
3761 running = task_current(rq, p);
3762 if (queued)
3763 dequeue_task(rq, p, DEQUEUE_SAVE);
3764 if (running)
3765 put_prev_task(rq, p);
3766
3767 p->static_prio = NICE_TO_PRIO(nice);
3768 set_load_weight(p);
3769 old_prio = p->prio;
3770 p->prio = effective_prio(p);
3771 delta = p->prio - old_prio;
3772
3773 if (queued) {
3774 enqueue_task(rq, p, ENQUEUE_RESTORE);
3775 /*
3776 * If the task increased its priority or is running and
3777 * lowered its priority, then reschedule its CPU:
3778 */
3779 if (delta < 0 || (delta > 0 && task_running(rq, p)))
3780 resched_curr(rq);
3781 }
3782 if (running)
3783 set_curr_task(rq, p);
3784out_unlock:
3785 task_rq_unlock(rq, p, &rf);
3786}
3787EXPORT_SYMBOL(set_user_nice);
3788
3789/*
3790 * can_nice - check if a task can reduce its nice value
3791 * @p: task
3792 * @nice: nice value
3793 */
3794int can_nice(const struct task_struct *p, const int nice)
3795{
3796 /* convert nice value [19,-20] to rlimit style value [1,40] */
3797 int nice_rlim = nice_to_rlimit(nice);
3798
3799 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
3800 capable(CAP_SYS_NICE));
3801}
3802
3803#ifdef __ARCH_WANT_SYS_NICE
3804
3805/*
3806 * sys_nice - change the priority of the current process.
3807 * @increment: priority increment
3808 *
3809 * sys_setpriority is a more generic, but much slower function that
3810 * does similar things.
3811 */
3812SYSCALL_DEFINE1(nice, int, increment)
3813{
3814 long nice, retval;
3815
3816 /*
3817 * Setpriority might change our priority at the same moment.
3818 * We don't have to worry. Conceptually one call occurs first
3819 * and we have a single winner.
3820 */
3821 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
3822 nice = task_nice(current) + increment;
3823
3824 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
3825 if (increment < 0 && !can_nice(current, nice))
3826 return -EPERM;
3827
3828 retval = security_task_setnice(current, nice);
3829 if (retval)
3830 return retval;
3831
3832 set_user_nice(current, nice);
3833 return 0;
3834}
3835
3836#endif
3837
3838/**
3839 * task_prio - return the priority value of a given task.
3840 * @p: the task in question.
3841 *
3842 * Return: The priority value as seen by users in /proc.
3843 * RT tasks are offset by -200. Normal tasks are centered
3844 * around 0, value goes from -16 to +15.
3845 */
3846int task_prio(const struct task_struct *p)
3847{
3848 return p->prio - MAX_RT_PRIO;
3849}
3850
3851/**
3852 * idle_cpu - is a given cpu idle currently?
3853 * @cpu: the processor in question.
3854 *
3855 * Return: 1 if the CPU is currently idle. 0 otherwise.
3856 */
3857int idle_cpu(int cpu)
3858{
3859 struct rq *rq = cpu_rq(cpu);
3860
3861 if (rq->curr != rq->idle)
3862 return 0;
3863
3864 if (rq->nr_running)
3865 return 0;
3866
3867#ifdef CONFIG_SMP
3868 if (!llist_empty(&rq->wake_list))
3869 return 0;
3870#endif
3871
3872 return 1;
3873}
3874
3875/**
3876 * idle_task - return the idle task for a given cpu.
3877 * @cpu: the processor in question.
3878 *
3879 * Return: The idle task for the cpu @cpu.
3880 */
3881struct task_struct *idle_task(int cpu)
3882{
3883 return cpu_rq(cpu)->idle;
3884}
3885
3886/**
3887 * find_process_by_pid - find a process with a matching PID value.
3888 * @pid: the pid in question.
3889 *
3890 * The task of @pid, if found. %NULL otherwise.
3891 */
3892static struct task_struct *find_process_by_pid(pid_t pid)
3893{
3894 return pid ? find_task_by_vpid(pid) : current;
3895}
3896
3897/*
3898 * This function initializes the sched_dl_entity of a newly becoming
3899 * SCHED_DEADLINE task.
3900 *
3901 * Only the static values are considered here, the actual runtime and the
3902 * absolute deadline will be properly calculated when the task is enqueued
3903 * for the first time with its new policy.
3904 */
3905static void
3906__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3907{
3908 struct sched_dl_entity *dl_se = &p->dl;
3909
3910 dl_se->dl_runtime = attr->sched_runtime;
3911 dl_se->dl_deadline = attr->sched_deadline;
3912 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
3913 dl_se->flags = attr->sched_flags;
3914 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3915
3916 /*
3917 * Changing the parameters of a task is 'tricky' and we're not doing
3918 * the correct thing -- also see task_dead_dl() and switched_from_dl().
3919 *
3920 * What we SHOULD do is delay the bandwidth release until the 0-lag
3921 * point. This would include retaining the task_struct until that time
3922 * and change dl_overflow() to not immediately decrement the current
3923 * amount.
3924 *
3925 * Instead we retain the current runtime/deadline and let the new
3926 * parameters take effect after the current reservation period lapses.
3927 * This is safe (albeit pessimistic) because the 0-lag point is always
3928 * before the current scheduling deadline.
3929 *
3930 * We can still have temporary overloads because we do not delay the
3931 * change in bandwidth until that time; so admission control is
3932 * not on the safe side. It does however guarantee tasks will never
3933 * consume more than promised.
3934 */
3935}
3936
3937/*
3938 * sched_setparam() passes in -1 for its policy, to let the functions
3939 * it calls know not to change it.
3940 */
3941#define SETPARAM_POLICY -1
3942
3943static void __setscheduler_params(struct task_struct *p,
3944 const struct sched_attr *attr)
3945{
3946 int policy = attr->sched_policy;
3947
3948 if (policy == SETPARAM_POLICY)
3949 policy = p->policy;
3950
3951 p->policy = policy;
3952
3953 if (dl_policy(policy))
3954 __setparam_dl(p, attr);
3955 else if (fair_policy(policy))
3956 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
3957
3958 /*
3959 * __sched_setscheduler() ensures attr->sched_priority == 0 when
3960 * !rt_policy. Always setting this ensures that things like
3961 * getparam()/getattr() don't report silly values for !rt tasks.
3962 */
3963 p->rt_priority = attr->sched_priority;
3964 p->normal_prio = normal_prio(p);
3965 set_load_weight(p);
3966}
3967
3968/* Actually do priority change: must hold pi & rq lock. */
3969static void __setscheduler(struct rq *rq, struct task_struct *p,
3970 const struct sched_attr *attr, bool keep_boost)
3971{
3972 __setscheduler_params(p, attr);
3973
3974 /*
3975 * Keep a potential priority boosting if called from
3976 * sched_setscheduler().
3977 */
3978 if (keep_boost)
3979 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
3980 else
3981 p->prio = normal_prio(p);
3982
3983 if (dl_prio(p->prio))
3984 p->sched_class = &dl_sched_class;
3985 else if (rt_prio(p->prio))
3986 p->sched_class = &rt_sched_class;
3987 else
3988 p->sched_class = &fair_sched_class;
3989}
3990
3991static void
3992__getparam_dl(struct task_struct *p, struct sched_attr *attr)
3993{
3994 struct sched_dl_entity *dl_se = &p->dl;
3995
3996 attr->sched_priority = p->rt_priority;
3997 attr->sched_runtime = dl_se->dl_runtime;
3998 attr->sched_deadline = dl_se->dl_deadline;
3999 attr->sched_period = dl_se->dl_period;
4000 attr->sched_flags = dl_se->flags;
4001}
4002
4003/*
4004 * This function validates the new parameters of a -deadline task.
4005 * We ask for the deadline not being zero, and greater or equal
4006 * than the runtime, as well as the period of being zero or
4007 * greater than deadline. Furthermore, we have to be sure that
4008 * user parameters are above the internal resolution of 1us (we
4009 * check sched_runtime only since it is always the smaller one) and
4010 * below 2^63 ns (we have to check both sched_deadline and
4011 * sched_period, as the latter can be zero).
4012 */
4013static bool
4014__checkparam_dl(const struct sched_attr *attr)
4015{
4016 /* deadline != 0 */
4017 if (attr->sched_deadline == 0)
4018 return false;
4019
4020 /*
4021 * Since we truncate DL_SCALE bits, make sure we're at least
4022 * that big.
4023 */
4024 if (attr->sched_runtime < (1ULL << DL_SCALE))
4025 return false;
4026
4027 /*
4028 * Since we use the MSB for wrap-around and sign issues, make
4029 * sure it's not set (mind that period can be equal to zero).
4030 */
4031 if (attr->sched_deadline & (1ULL << 63) ||
4032 attr->sched_period & (1ULL << 63))
4033 return false;
4034
4035 /* runtime <= deadline <= period (if period != 0) */
4036 if ((attr->sched_period != 0 &&
4037 attr->sched_period < attr->sched_deadline) ||
4038 attr->sched_deadline < attr->sched_runtime)
4039 return false;
4040
4041 return true;
4042}
4043
4044/*
4045 * check the target process has a UID that matches the current process's
4046 */
4047static bool check_same_owner(struct task_struct *p)
4048{
4049 const struct cred *cred = current_cred(), *pcred;
4050 bool match;
4051
4052 rcu_read_lock();
4053 pcred = __task_cred(p);
4054 match = (uid_eq(cred->euid, pcred->euid) ||
4055 uid_eq(cred->euid, pcred->uid));
4056 rcu_read_unlock();
4057 return match;
4058}
4059
4060static bool dl_param_changed(struct task_struct *p,
4061 const struct sched_attr *attr)
4062{
4063 struct sched_dl_entity *dl_se = &p->dl;
4064
4065 if (dl_se->dl_runtime != attr->sched_runtime ||
4066 dl_se->dl_deadline != attr->sched_deadline ||
4067 dl_se->dl_period != attr->sched_period ||
4068 dl_se->flags != attr->sched_flags)
4069 return true;
4070
4071 return false;
4072}
4073
4074static int __sched_setscheduler(struct task_struct *p,
4075 const struct sched_attr *attr,
4076 bool user, bool pi)
4077{
4078 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
4079 MAX_RT_PRIO - 1 - attr->sched_priority;
4080 int retval, oldprio, oldpolicy = -1, queued, running;
4081 int new_effective_prio, policy = attr->sched_policy;
4082 const struct sched_class *prev_class;
4083 struct rq_flags rf;
4084 int reset_on_fork;
4085 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
4086 struct rq *rq;
4087
4088 /* may grab non-irq protected spin_locks */
4089 BUG_ON(in_interrupt());
4090recheck:
4091 /* double check policy once rq lock held */
4092 if (policy < 0) {
4093 reset_on_fork = p->sched_reset_on_fork;
4094 policy = oldpolicy = p->policy;
4095 } else {
4096 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
4097
4098 if (!valid_policy(policy))
4099 return -EINVAL;
4100 }
4101
4102 if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
4103 return -EINVAL;
4104
4105 /*
4106 * Valid priorities for SCHED_FIFO and SCHED_RR are
4107 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4108 * SCHED_BATCH and SCHED_IDLE is 0.
4109 */
4110 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
4111 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
4112 return -EINVAL;
4113 if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
4114 (rt_policy(policy) != (attr->sched_priority != 0)))
4115 return -EINVAL;
4116
4117 /*
4118 * Allow unprivileged RT tasks to decrease priority:
4119 */
4120 if (user && !capable(CAP_SYS_NICE)) {
4121 if (fair_policy(policy)) {
4122 if (attr->sched_nice < task_nice(p) &&
4123 !can_nice(p, attr->sched_nice))
4124 return -EPERM;
4125 }
4126
4127 if (rt_policy(policy)) {
4128 unsigned long rlim_rtprio =
4129 task_rlimit(p, RLIMIT_RTPRIO);
4130
4131 /* can't set/change the rt policy */
4132 if (policy != p->policy && !rlim_rtprio)
4133 return -EPERM;
4134
4135 /* can't increase priority */
4136 if (attr->sched_priority > p->rt_priority &&
4137 attr->sched_priority > rlim_rtprio)
4138 return -EPERM;
4139 }
4140
4141 /*
4142 * Can't set/change SCHED_DEADLINE policy at all for now
4143 * (safest behavior); in the future we would like to allow
4144 * unprivileged DL tasks to increase their relative deadline
4145 * or reduce their runtime (both ways reducing utilization)
4146 */
4147 if (dl_policy(policy))
4148 return -EPERM;
4149
4150 /*
4151 * Treat SCHED_IDLE as nice 20. Only allow a switch to
4152 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
4153 */
4154 if (idle_policy(p->policy) && !idle_policy(policy)) {
4155 if (!can_nice(p, task_nice(p)))
4156 return -EPERM;
4157 }
4158
4159 /* can't change other user's priorities */
4160 if (!check_same_owner(p))
4161 return -EPERM;
4162
4163 /* Normal users shall not reset the sched_reset_on_fork flag */
4164 if (p->sched_reset_on_fork && !reset_on_fork)
4165 return -EPERM;
4166 }
4167
4168 if (user) {
4169 retval = security_task_setscheduler(p);
4170 if (retval)
4171 return retval;
4172 }
4173
4174 /*
4175 * make sure no PI-waiters arrive (or leave) while we are
4176 * changing the priority of the task:
4177 *
4178 * To be able to change p->policy safely, the appropriate
4179 * runqueue lock must be held.
4180 */
4181 rq = task_rq_lock(p, &rf);
4182
4183 /*
4184 * Changing the policy of the stop threads its a very bad idea
4185 */
4186 if (p == rq->stop) {
4187 task_rq_unlock(rq, p, &rf);
4188 return -EINVAL;
4189 }
4190
4191 /*
4192 * If not changing anything there's no need to proceed further,
4193 * but store a possible modification of reset_on_fork.
4194 */
4195 if (unlikely(policy == p->policy)) {
4196 if (fair_policy(policy) && attr->sched_nice != task_nice(p))
4197 goto change;
4198 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
4199 goto change;
4200 if (dl_policy(policy) && dl_param_changed(p, attr))
4201 goto change;
4202
4203 p->sched_reset_on_fork = reset_on_fork;
4204 task_rq_unlock(rq, p, &rf);
4205 return 0;
4206 }
4207change:
4208
4209 if (user) {
4210#ifdef CONFIG_RT_GROUP_SCHED
4211 /*
4212 * Do not allow realtime tasks into groups that have no runtime
4213 * assigned.
4214 */
4215 if (rt_bandwidth_enabled() && rt_policy(policy) &&
4216 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
4217 !task_group_is_autogroup(task_group(p))) {
4218 task_rq_unlock(rq, p, &rf);
4219 return -EPERM;
4220 }
4221#endif
4222#ifdef CONFIG_SMP
4223 if (dl_bandwidth_enabled() && dl_policy(policy)) {
4224 cpumask_t *span = rq->rd->span;
4225
4226 /*
4227 * Don't allow tasks with an affinity mask smaller than
4228 * the entire root_domain to become SCHED_DEADLINE. We
4229 * will also fail if there's no bandwidth available.
4230 */
4231 if (!cpumask_subset(span, &p->cpus_allowed) ||
4232 rq->rd->dl_bw.bw == 0) {
4233 task_rq_unlock(rq, p, &rf);
4234 return -EPERM;
4235 }
4236 }
4237#endif
4238 }
4239
4240 /* recheck policy now with rq lock held */
4241 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4242 policy = oldpolicy = -1;
4243 task_rq_unlock(rq, p, &rf);
4244 goto recheck;
4245 }
4246
4247 /*
4248 * If setscheduling to SCHED_DEADLINE (or changing the parameters
4249 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
4250 * is available.
4251 */
4252 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
4253 task_rq_unlock(rq, p, &rf);
4254 return -EBUSY;
4255 }
4256
4257 p->sched_reset_on_fork = reset_on_fork;
4258 oldprio = p->prio;
4259
4260 if (pi) {
4261 /*
4262 * Take priority boosted tasks into account. If the new
4263 * effective priority is unchanged, we just store the new
4264 * normal parameters and do not touch the scheduler class and
4265 * the runqueue. This will be done when the task deboost
4266 * itself.
4267 */
4268 new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
4269 if (new_effective_prio == oldprio)
4270 queue_flags &= ~DEQUEUE_MOVE;
4271 }
4272
4273 queued = task_on_rq_queued(p);
4274 running = task_current(rq, p);
4275 if (queued)
4276 dequeue_task(rq, p, queue_flags);
4277 if (running)
4278 put_prev_task(rq, p);
4279
4280 prev_class = p->sched_class;
4281 __setscheduler(rq, p, attr, pi);
4282
4283 if (queued) {
4284 /*
4285 * We enqueue to tail when the priority of a task is
4286 * increased (user space view).
4287 */
4288 if (oldprio < p->prio)
4289 queue_flags |= ENQUEUE_HEAD;
4290
4291 enqueue_task(rq, p, queue_flags);
4292 }
4293 if (running)
4294 set_curr_task(rq, p);
4295
4296 check_class_changed(rq, p, prev_class, oldprio);
4297 preempt_disable(); /* avoid rq from going away on us */
4298 task_rq_unlock(rq, p, &rf);
4299
4300 if (pi)
4301 rt_mutex_adjust_pi(p);
4302
4303 /*
4304 * Run balance callbacks after we've adjusted the PI chain.
4305 */
4306 balance_callback(rq);
4307 preempt_enable();
4308
4309 return 0;
4310}
4311
4312static int _sched_setscheduler(struct task_struct *p, int policy,
4313 const struct sched_param *param, bool check)
4314{
4315 struct sched_attr attr = {
4316 .sched_policy = policy,
4317 .sched_priority = param->sched_priority,
4318 .sched_nice = PRIO_TO_NICE(p->static_prio),
4319 };
4320
4321 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
4322 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
4323 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
4324 policy &= ~SCHED_RESET_ON_FORK;
4325 attr.sched_policy = policy;
4326 }
4327
4328 return __sched_setscheduler(p, &attr, check, true);
4329}
4330/**
4331 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4332 * @p: the task in question.
4333 * @policy: new policy.
4334 * @param: structure containing the new RT priority.
4335 *
4336 * Return: 0 on success. An error code otherwise.
4337 *
4338 * NOTE that the task may be already dead.
4339 */
4340int sched_setscheduler(struct task_struct *p, int policy,
4341 const struct sched_param *param)
4342{
4343 return _sched_setscheduler(p, policy, param, true);
4344}
4345EXPORT_SYMBOL_GPL(sched_setscheduler);
4346
4347int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
4348{
4349 return __sched_setscheduler(p, attr, true, true);
4350}
4351EXPORT_SYMBOL_GPL(sched_setattr);
4352
4353/**
4354 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4355 * @p: the task in question.
4356 * @policy: new policy.
4357 * @param: structure containing the new RT priority.
4358 *
4359 * Just like sched_setscheduler, only don't bother checking if the
4360 * current context has permission. For example, this is needed in
4361 * stop_machine(): we create temporary high priority worker threads,
4362 * but our caller might not have that capability.
4363 *
4364 * Return: 0 on success. An error code otherwise.
4365 */
4366int sched_setscheduler_nocheck(struct task_struct *p, int policy,
4367 const struct sched_param *param)
4368{
4369 return _sched_setscheduler(p, policy, param, false);
4370}
4371EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
4372
4373static int
4374do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4375{
4376 struct sched_param lparam;
4377 struct task_struct *p;
4378 int retval;
4379
4380 if (!param || pid < 0)
4381 return -EINVAL;
4382 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4383 return -EFAULT;
4384
4385 rcu_read_lock();
4386 retval = -ESRCH;
4387 p = find_process_by_pid(pid);
4388 if (p != NULL)
4389 retval = sched_setscheduler(p, policy, &lparam);
4390 rcu_read_unlock();
4391
4392 return retval;
4393}
4394
4395/*
4396 * Mimics kernel/events/core.c perf_copy_attr().
4397 */
4398static int sched_copy_attr(struct sched_attr __user *uattr,
4399 struct sched_attr *attr)
4400{
4401 u32 size;
4402 int ret;
4403
4404 if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
4405 return -EFAULT;
4406
4407 /*
4408 * zero the full structure, so that a short copy will be nice.
4409 */
4410 memset(attr, 0, sizeof(*attr));
4411
4412 ret = get_user(size, &uattr->size);
4413 if (ret)
4414 return ret;
4415
4416 if (size > PAGE_SIZE) /* silly large */
4417 goto err_size;
4418
4419 if (!size) /* abi compat */
4420 size = SCHED_ATTR_SIZE_VER0;
4421
4422 if (size < SCHED_ATTR_SIZE_VER0)
4423 goto err_size;
4424
4425 /*
4426 * If we're handed a bigger struct than we know of,
4427 * ensure all the unknown bits are 0 - i.e. new
4428 * user-space does not rely on any kernel feature
4429 * extensions we dont know about yet.
4430 */
4431 if (size > sizeof(*attr)) {
4432 unsigned char __user *addr;
4433 unsigned char __user *end;
4434 unsigned char val;
4435
4436 addr = (void __user *)uattr + sizeof(*attr);
4437 end = (void __user *)uattr + size;
4438
4439 for (; addr < end; addr++) {
4440 ret = get_user(val, addr);
4441 if (ret)
4442 return ret;
4443 if (val)
4444 goto err_size;
4445 }
4446 size = sizeof(*attr);
4447 }
4448
4449 ret = copy_from_user(attr, uattr, size);
4450 if (ret)
4451 return -EFAULT;
4452
4453 /*
4454 * XXX: do we want to be lenient like existing syscalls; or do we want
4455 * to be strict and return an error on out-of-bounds values?
4456 */
4457 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
4458
4459 return 0;
4460
4461err_size:
4462 put_user(sizeof(*attr), &uattr->size);
4463 return -E2BIG;
4464}
4465
4466/**
4467 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4468 * @pid: the pid in question.
4469 * @policy: new policy.
4470 * @param: structure containing the new RT priority.
4471 *
4472 * Return: 0 on success. An error code otherwise.
4473 */
4474SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4475 struct sched_param __user *, param)
4476{
4477 /* negative values for policy are not valid */
4478 if (policy < 0)
4479 return -EINVAL;
4480
4481 return do_sched_setscheduler(pid, policy, param);
4482}
4483
4484/**
4485 * sys_sched_setparam - set/change the RT priority of a thread
4486 * @pid: the pid in question.
4487 * @param: structure containing the new RT priority.
4488 *
4489 * Return: 0 on success. An error code otherwise.
4490 */
4491SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
4492{
4493 return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
4494}
4495
4496/**
4497 * sys_sched_setattr - same as above, but with extended sched_attr
4498 * @pid: the pid in question.
4499 * @uattr: structure containing the extended parameters.
4500 * @flags: for future extension.
4501 */
4502SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
4503 unsigned int, flags)
4504{
4505 struct sched_attr attr;
4506 struct task_struct *p;
4507 int retval;
4508
4509 if (!uattr || pid < 0 || flags)
4510 return -EINVAL;
4511
4512 retval = sched_copy_attr(uattr, &attr);
4513 if (retval)
4514 return retval;
4515
4516 if ((int)attr.sched_policy < 0)
4517 return -EINVAL;
4518
4519 rcu_read_lock();
4520 retval = -ESRCH;
4521 p = find_process_by_pid(pid);
4522 if (p != NULL)
4523 retval = sched_setattr(p, &attr);
4524 rcu_read_unlock();
4525
4526 return retval;
4527}
4528
4529/**
4530 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4531 * @pid: the pid in question.
4532 *
4533 * Return: On success, the policy of the thread. Otherwise, a negative error
4534 * code.
4535 */
4536SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
4537{
4538 struct task_struct *p;
4539 int retval;
4540
4541 if (pid < 0)
4542 return -EINVAL;
4543
4544 retval = -ESRCH;
4545 rcu_read_lock();
4546 p = find_process_by_pid(pid);
4547 if (p) {
4548 retval = security_task_getscheduler(p);
4549 if (!retval)
4550 retval = p->policy
4551 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
4552 }
4553 rcu_read_unlock();
4554 return retval;
4555}
4556
4557/**
4558 * sys_sched_getparam - get the RT priority of a thread
4559 * @pid: the pid in question.
4560 * @param: structure containing the RT priority.
4561 *
4562 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
4563 * code.
4564 */
4565SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
4566{
4567 struct sched_param lp = { .sched_priority = 0 };
4568 struct task_struct *p;
4569 int retval;
4570
4571 if (!param || pid < 0)
4572 return -EINVAL;
4573
4574 rcu_read_lock();
4575 p = find_process_by_pid(pid);
4576 retval = -ESRCH;
4577 if (!p)
4578 goto out_unlock;
4579
4580 retval = security_task_getscheduler(p);
4581 if (retval)
4582 goto out_unlock;
4583
4584 if (task_has_rt_policy(p))
4585 lp.sched_priority = p->rt_priority;
4586 rcu_read_unlock();
4587
4588 /*
4589 * This one might sleep, we cannot do it with a spinlock held ...
4590 */
4591 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4592
4593 return retval;
4594
4595out_unlock:
4596 rcu_read_unlock();
4597 return retval;
4598}
4599
4600static int sched_read_attr(struct sched_attr __user *uattr,
4601 struct sched_attr *attr,
4602 unsigned int usize)
4603{
4604 int ret;
4605
4606 if (!access_ok(VERIFY_WRITE, uattr, usize))
4607 return -EFAULT;
4608
4609 /*
4610 * If we're handed a smaller struct than we know of,
4611 * ensure all the unknown bits are 0 - i.e. old
4612 * user-space does not get uncomplete information.
4613 */
4614 if (usize < sizeof(*attr)) {
4615 unsigned char *addr;
4616 unsigned char *end;
4617
4618 addr = (void *)attr + usize;
4619 end = (void *)attr + sizeof(*attr);
4620
4621 for (; addr < end; addr++) {
4622 if (*addr)
4623 return -EFBIG;
4624 }
4625
4626 attr->size = usize;
4627 }
4628
4629 ret = copy_to_user(uattr, attr, attr->size);
4630 if (ret)
4631 return -EFAULT;
4632
4633 return 0;
4634}
4635
4636/**
4637 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
4638 * @pid: the pid in question.
4639 * @uattr: structure containing the extended parameters.
4640 * @size: sizeof(attr) for fwd/bwd comp.
4641 * @flags: for future extension.
4642 */
4643SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
4644 unsigned int, size, unsigned int, flags)
4645{
4646 struct sched_attr attr = {
4647 .size = sizeof(struct sched_attr),
4648 };
4649 struct task_struct *p;
4650 int retval;
4651
4652 if (!uattr || pid < 0 || size > PAGE_SIZE ||
4653 size < SCHED_ATTR_SIZE_VER0 || flags)
4654 return -EINVAL;
4655
4656 rcu_read_lock();
4657 p = find_process_by_pid(pid);
4658 retval = -ESRCH;
4659 if (!p)
4660 goto out_unlock;
4661
4662 retval = security_task_getscheduler(p);
4663 if (retval)
4664 goto out_unlock;
4665
4666 attr.sched_policy = p->policy;
4667 if (p->sched_reset_on_fork)
4668 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
4669 if (task_has_dl_policy(p))
4670 __getparam_dl(p, &attr);
4671 else if (task_has_rt_policy(p))
4672 attr.sched_priority = p->rt_priority;
4673 else
4674 attr.sched_nice = task_nice(p);
4675
4676 rcu_read_unlock();
4677
4678 retval = sched_read_attr(uattr, &attr, size);
4679 return retval;
4680
4681out_unlock:
4682 rcu_read_unlock();
4683 return retval;
4684}
4685
4686long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
4687{
4688 cpumask_var_t cpus_allowed, new_mask;
4689 struct task_struct *p;
4690 int retval;
4691
4692 rcu_read_lock();
4693
4694 p = find_process_by_pid(pid);
4695 if (!p) {
4696 rcu_read_unlock();
4697 return -ESRCH;
4698 }
4699
4700 /* Prevent p going away */
4701 get_task_struct(p);
4702 rcu_read_unlock();
4703
4704 if (p->flags & PF_NO_SETAFFINITY) {
4705 retval = -EINVAL;
4706 goto out_put_task;
4707 }
4708 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4709 retval = -ENOMEM;
4710 goto out_put_task;
4711 }
4712 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4713 retval = -ENOMEM;
4714 goto out_free_cpus_allowed;
4715 }
4716 retval = -EPERM;
4717 if (!check_same_owner(p)) {
4718 rcu_read_lock();
4719 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
4720 rcu_read_unlock();
4721 goto out_free_new_mask;
4722 }
4723 rcu_read_unlock();
4724 }
4725
4726 retval = security_task_setscheduler(p);
4727 if (retval)
4728 goto out_free_new_mask;
4729
4730
4731 cpuset_cpus_allowed(p, cpus_allowed);
4732 cpumask_and(new_mask, in_mask, cpus_allowed);
4733
4734 /*
4735 * Since bandwidth control happens on root_domain basis,
4736 * if admission test is enabled, we only admit -deadline
4737 * tasks allowed to run on all the CPUs in the task's
4738 * root_domain.
4739 */
4740#ifdef CONFIG_SMP
4741 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
4742 rcu_read_lock();
4743 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
4744 retval = -EBUSY;
4745 rcu_read_unlock();
4746 goto out_free_new_mask;
4747 }
4748 rcu_read_unlock();
4749 }
4750#endif
4751again:
4752 retval = __set_cpus_allowed_ptr(p, new_mask, true);
4753
4754 if (!retval) {
4755 cpuset_cpus_allowed(p, cpus_allowed);
4756 if (!cpumask_subset(new_mask, cpus_allowed)) {
4757 /*
4758 * We must have raced with a concurrent cpuset
4759 * update. Just reset the cpus_allowed to the
4760 * cpuset's cpus_allowed
4761 */
4762 cpumask_copy(new_mask, cpus_allowed);
4763 goto again;
4764 }
4765 }
4766out_free_new_mask:
4767 free_cpumask_var(new_mask);
4768out_free_cpus_allowed:
4769 free_cpumask_var(cpus_allowed);
4770out_put_task:
4771 put_task_struct(p);
4772 return retval;
4773}
4774
4775static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4776 struct cpumask *new_mask)
4777{
4778 if (len < cpumask_size())
4779 cpumask_clear(new_mask);
4780 else if (len > cpumask_size())
4781 len = cpumask_size();
4782
4783 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4784}
4785
4786/**
4787 * sys_sched_setaffinity - set the cpu affinity of a process
4788 * @pid: pid of the process
4789 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4790 * @user_mask_ptr: user-space pointer to the new cpu mask
4791 *
4792 * Return: 0 on success. An error code otherwise.
4793 */
4794SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4795 unsigned long __user *, user_mask_ptr)
4796{
4797 cpumask_var_t new_mask;
4798 int retval;
4799
4800 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4801 return -ENOMEM;
4802
4803 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4804 if (retval == 0)
4805 retval = sched_setaffinity(pid, new_mask);
4806 free_cpumask_var(new_mask);
4807 return retval;
4808}
4809
4810long sched_getaffinity(pid_t pid, struct cpumask *mask)
4811{
4812 struct task_struct *p;
4813 unsigned long flags;
4814 int retval;
4815
4816 rcu_read_lock();
4817
4818 retval = -ESRCH;
4819 p = find_process_by_pid(pid);
4820 if (!p)
4821 goto out_unlock;
4822
4823 retval = security_task_getscheduler(p);
4824 if (retval)
4825 goto out_unlock;
4826
4827 raw_spin_lock_irqsave(&p->pi_lock, flags);
4828 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
4829 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4830
4831out_unlock:
4832 rcu_read_unlock();
4833
4834 return retval;
4835}
4836
4837/**
4838 * sys_sched_getaffinity - get the cpu affinity of a process
4839 * @pid: pid of the process
4840 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4841 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4842 *
4843 * Return: size of CPU mask copied to user_mask_ptr on success. An
4844 * error code otherwise.
4845 */
4846SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4847 unsigned long __user *, user_mask_ptr)
4848{
4849 int ret;
4850 cpumask_var_t mask;
4851
4852 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4853 return -EINVAL;
4854 if (len & (sizeof(unsigned long)-1))
4855 return -EINVAL;
4856
4857 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4858 return -ENOMEM;
4859
4860 ret = sched_getaffinity(pid, mask);
4861 if (ret == 0) {
4862 size_t retlen = min_t(size_t, len, cpumask_size());
4863
4864 if (copy_to_user(user_mask_ptr, mask, retlen))
4865 ret = -EFAULT;
4866 else
4867 ret = retlen;
4868 }
4869 free_cpumask_var(mask);
4870
4871 return ret;
4872}
4873
4874/**
4875 * sys_sched_yield - yield the current processor to other threads.
4876 *
4877 * This function yields the current CPU to other tasks. If there are no
4878 * other threads running on this CPU then this function will return.
4879 *
4880 * Return: 0.
4881 */
4882SYSCALL_DEFINE0(sched_yield)
4883{
4884 struct rq *rq = this_rq_lock();
4885
4886 schedstat_inc(rq->yld_count);
4887 current->sched_class->yield_task(rq);
4888
4889 /*
4890 * Since we are going to call schedule() anyway, there's
4891 * no need to preempt or enable interrupts:
4892 */
4893 __release(rq->lock);
4894 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
4895 do_raw_spin_unlock(&rq->lock);
4896 sched_preempt_enable_no_resched();
4897
4898 schedule();
4899
4900 return 0;
4901}
4902
4903#ifndef CONFIG_PREEMPT
4904int __sched _cond_resched(void)
4905{
4906 if (should_resched(0)) {
4907 preempt_schedule_common();
4908 return 1;
4909 }
4910 return 0;
4911}
4912EXPORT_SYMBOL(_cond_resched);
4913#endif
4914
4915/*
4916 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
4917 * call schedule, and on return reacquire the lock.
4918 *
4919 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
4920 * operations here to prevent schedule() from being called twice (once via
4921 * spin_unlock(), once by hand).
4922 */
4923int __cond_resched_lock(spinlock_t *lock)
4924{
4925 int resched = should_resched(PREEMPT_LOCK_OFFSET);
4926 int ret = 0;
4927
4928 lockdep_assert_held(lock);
4929
4930 if (spin_needbreak(lock) || resched) {
4931 spin_unlock(lock);
4932 if (resched)
4933 preempt_schedule_common();
4934 else
4935 cpu_relax();
4936 ret = 1;
4937 spin_lock(lock);
4938 }
4939 return ret;
4940}
4941EXPORT_SYMBOL(__cond_resched_lock);
4942
4943int __sched __cond_resched_softirq(void)
4944{
4945 BUG_ON(!in_softirq());
4946
4947 if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
4948 local_bh_enable();
4949 preempt_schedule_common();
4950 local_bh_disable();
4951 return 1;
4952 }
4953 return 0;
4954}
4955EXPORT_SYMBOL(__cond_resched_softirq);
4956
4957/**
4958 * yield - yield the current processor to other threads.
4959 *
4960 * Do not ever use this function, there's a 99% chance you're doing it wrong.
4961 *
4962 * The scheduler is at all times free to pick the calling task as the most
4963 * eligible task to run, if removing the yield() call from your code breaks
4964 * it, its already broken.
4965 *
4966 * Typical broken usage is:
4967 *
4968 * while (!event)
4969 * yield();
4970 *
4971 * where one assumes that yield() will let 'the other' process run that will
4972 * make event true. If the current task is a SCHED_FIFO task that will never
4973 * happen. Never use yield() as a progress guarantee!!
4974 *
4975 * If you want to use yield() to wait for something, use wait_event().
4976 * If you want to use yield() to be 'nice' for others, use cond_resched().
4977 * If you still want to use yield(), do not!
4978 */
4979void __sched yield(void)
4980{
4981 set_current_state(TASK_RUNNING);
4982 sys_sched_yield();
4983}
4984EXPORT_SYMBOL(yield);
4985
4986/**
4987 * yield_to - yield the current processor to another thread in
4988 * your thread group, or accelerate that thread toward the
4989 * processor it's on.
4990 * @p: target task
4991 * @preempt: whether task preemption is allowed or not
4992 *
4993 * It's the caller's job to ensure that the target task struct
4994 * can't go away on us before we can do any checks.
4995 *
4996 * Return:
4997 * true (>0) if we indeed boosted the target task.
4998 * false (0) if we failed to boost the target.
4999 * -ESRCH if there's no task to yield to.
5000 */
5001int __sched yield_to(struct task_struct *p, bool preempt)
5002{
5003 struct task_struct *curr = current;
5004 struct rq *rq, *p_rq;
5005 unsigned long flags;
5006 int yielded = 0;
5007
5008 local_irq_save(flags);
5009 rq = this_rq();
5010
5011again:
5012 p_rq = task_rq(p);
5013 /*
5014 * If we're the only runnable task on the rq and target rq also
5015 * has only one task, there's absolutely no point in yielding.
5016 */
5017 if (rq->nr_running == 1 && p_rq->nr_running == 1) {
5018 yielded = -ESRCH;
5019 goto out_irq;
5020 }
5021
5022 double_rq_lock(rq, p_rq);
5023 if (task_rq(p) != p_rq) {
5024 double_rq_unlock(rq, p_rq);
5025 goto again;
5026 }
5027
5028 if (!curr->sched_class->yield_to_task)
5029 goto out_unlock;
5030
5031 if (curr->sched_class != p->sched_class)
5032 goto out_unlock;
5033
5034 if (task_running(p_rq, p) || p->state)
5035 goto out_unlock;
5036
5037 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
5038 if (yielded) {
5039 schedstat_inc(rq->yld_count);
5040 /*
5041 * Make p's CPU reschedule; pick_next_entity takes care of
5042 * fairness.
5043 */
5044 if (preempt && rq != p_rq)
5045 resched_curr(p_rq);
5046 }
5047
5048out_unlock:
5049 double_rq_unlock(rq, p_rq);
5050out_irq:
5051 local_irq_restore(flags);
5052
5053 if (yielded > 0)
5054 schedule();
5055
5056 return yielded;
5057}
5058EXPORT_SYMBOL_GPL(yield_to);
5059
5060/*
5061 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
5062 * that process accounting knows that this is a task in IO wait state.
5063 */
5064long __sched io_schedule_timeout(long timeout)
5065{
5066 int old_iowait = current->in_iowait;
5067 struct rq *rq;
5068 long ret;
5069
5070 current->in_iowait = 1;
5071 blk_schedule_flush_plug(current);
5072
5073 delayacct_blkio_start();
5074 rq = raw_rq();
5075 atomic_inc(&rq->nr_iowait);
5076 ret = schedule_timeout(timeout);
5077 current->in_iowait = old_iowait;
5078 atomic_dec(&rq->nr_iowait);
5079 delayacct_blkio_end();
5080
5081 return ret;
5082}
5083EXPORT_SYMBOL(io_schedule_timeout);
5084
5085/**
5086 * sys_sched_get_priority_max - return maximum RT priority.
5087 * @policy: scheduling class.
5088 *
5089 * Return: On success, this syscall returns the maximum
5090 * rt_priority that can be used by a given scheduling class.
5091 * On failure, a negative error code is returned.
5092 */
5093SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
5094{
5095 int ret = -EINVAL;
5096
5097 switch (policy) {
5098 case SCHED_FIFO:
5099 case SCHED_RR:
5100 ret = MAX_USER_RT_PRIO-1;
5101 break;
5102 case SCHED_DEADLINE:
5103 case SCHED_NORMAL:
5104 case SCHED_BATCH:
5105 case SCHED_IDLE:
5106 ret = 0;
5107 break;
5108 }
5109 return ret;
5110}
5111
5112/**
5113 * sys_sched_get_priority_min - return minimum RT priority.
5114 * @policy: scheduling class.
5115 *
5116 * Return: On success, this syscall returns the minimum
5117 * rt_priority that can be used by a given scheduling class.
5118 * On failure, a negative error code is returned.
5119 */
5120SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
5121{
5122 int ret = -EINVAL;
5123
5124 switch (policy) {
5125 case SCHED_FIFO:
5126 case SCHED_RR:
5127 ret = 1;
5128 break;
5129 case SCHED_DEADLINE:
5130 case SCHED_NORMAL:
5131 case SCHED_BATCH:
5132 case SCHED_IDLE:
5133 ret = 0;
5134 }
5135 return ret;
5136}
5137
5138/**
5139 * sys_sched_rr_get_interval - return the default timeslice of a process.
5140 * @pid: pid of the process.
5141 * @interval: userspace pointer to the timeslice value.
5142 *
5143 * this syscall writes the default timeslice value of a given process
5144 * into the user-space timespec buffer. A value of '0' means infinity.
5145 *
5146 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
5147 * an error code.
5148 */
5149SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
5150 struct timespec __user *, interval)
5151{
5152 struct task_struct *p;
5153 unsigned int time_slice;
5154 struct rq_flags rf;
5155 struct timespec t;
5156 struct rq *rq;
5157 int retval;
5158
5159 if (pid < 0)
5160 return -EINVAL;
5161
5162 retval = -ESRCH;
5163 rcu_read_lock();
5164 p = find_process_by_pid(pid);
5165 if (!p)
5166 goto out_unlock;
5167
5168 retval = security_task_getscheduler(p);
5169 if (retval)
5170 goto out_unlock;
5171
5172 rq = task_rq_lock(p, &rf);
5173 time_slice = 0;
5174 if (p->sched_class->get_rr_interval)
5175 time_slice = p->sched_class->get_rr_interval(rq, p);
5176 task_rq_unlock(rq, p, &rf);
5177
5178 rcu_read_unlock();
5179 jiffies_to_timespec(time_slice, &t);
5180 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
5181 return retval;
5182
5183out_unlock:
5184 rcu_read_unlock();
5185 return retval;
5186}
5187
5188static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
5189
5190void sched_show_task(struct task_struct *p)
5191{
5192 unsigned long free = 0;
5193 int ppid;
5194 unsigned long state = p->state;
5195
5196 if (!try_get_task_stack(p))
5197 return;
5198 if (state)
5199 state = __ffs(state) + 1;
5200 printk(KERN_INFO "%-15.15s %c", p->comm,
5201 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
5202 if (state == TASK_RUNNING)
5203 printk(KERN_CONT " running task ");
5204#ifdef CONFIG_DEBUG_STACK_USAGE
5205 free = stack_not_used(p);
5206#endif
5207 ppid = 0;
5208 rcu_read_lock();
5209 if (pid_alive(p))
5210 ppid = task_pid_nr(rcu_dereference(p->real_parent));
5211 rcu_read_unlock();
5212 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
5213 task_pid_nr(p), ppid,
5214 (unsigned long)task_thread_info(p)->flags);
5215
5216 print_worker_info(KERN_INFO, p);
5217 show_stack(p, NULL);
5218 put_task_stack(p);
5219}
5220
5221void show_state_filter(unsigned long state_filter)
5222{
5223 struct task_struct *g, *p;
5224
5225#if BITS_PER_LONG == 32
5226 printk(KERN_INFO
5227 " task PC stack pid father\n");
5228#else
5229 printk(KERN_INFO
5230 " task PC stack pid father\n");
5231#endif
5232 rcu_read_lock();
5233 for_each_process_thread(g, p) {
5234 /*
5235 * reset the NMI-timeout, listing all files on a slow
5236 * console might take a lot of time:
5237 * Also, reset softlockup watchdogs on all CPUs, because
5238 * another CPU might be blocked waiting for us to process
5239 * an IPI.
5240 */
5241 touch_nmi_watchdog();
5242 touch_all_softlockup_watchdogs();
5243 if (!state_filter || (p->state & state_filter))
5244 sched_show_task(p);
5245 }
5246
5247#ifdef CONFIG_SCHED_DEBUG
5248 if (!state_filter)
5249 sysrq_sched_debug_show();
5250#endif
5251 rcu_read_unlock();
5252 /*
5253 * Only show locks if all tasks are dumped:
5254 */
5255 if (!state_filter)
5256 debug_show_all_locks();
5257}
5258
5259void init_idle_bootup_task(struct task_struct *idle)
5260{
5261 idle->sched_class = &idle_sched_class;
5262}
5263
5264/**
5265 * init_idle - set up an idle thread for a given CPU
5266 * @idle: task in question
5267 * @cpu: cpu the idle task belongs to
5268 *
5269 * NOTE: this function does not set the idle thread's NEED_RESCHED
5270 * flag, to make booting more robust.
5271 */
5272void init_idle(struct task_struct *idle, int cpu)
5273{
5274 struct rq *rq = cpu_rq(cpu);
5275 unsigned long flags;
5276
5277 raw_spin_lock_irqsave(&idle->pi_lock, flags);
5278 raw_spin_lock(&rq->lock);
5279
5280 __sched_fork(0, idle);
5281 idle->state = TASK_RUNNING;
5282 idle->se.exec_start = sched_clock();
5283 idle->flags |= PF_IDLE;
5284
5285 kasan_unpoison_task_stack(idle);
5286
5287#ifdef CONFIG_SMP
5288 /*
5289 * Its possible that init_idle() gets called multiple times on a task,
5290 * in that case do_set_cpus_allowed() will not do the right thing.
5291 *
5292 * And since this is boot we can forgo the serialization.
5293 */
5294 set_cpus_allowed_common(idle, cpumask_of(cpu));
5295#endif
5296 /*
5297 * We're having a chicken and egg problem, even though we are
5298 * holding rq->lock, the cpu isn't yet set to this cpu so the
5299 * lockdep check in task_group() will fail.
5300 *
5301 * Similar case to sched_fork(). / Alternatively we could
5302 * use task_rq_lock() here and obtain the other rq->lock.
5303 *
5304 * Silence PROVE_RCU
5305 */
5306 rcu_read_lock();
5307 __set_task_cpu(idle, cpu);
5308 rcu_read_unlock();
5309
5310 rq->curr = rq->idle = idle;
5311 idle->on_rq = TASK_ON_RQ_QUEUED;
5312#ifdef CONFIG_SMP
5313 idle->on_cpu = 1;
5314#endif
5315 raw_spin_unlock(&rq->lock);
5316 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
5317
5318 /* Set the preempt count _outside_ the spinlocks! */
5319 init_idle_preempt_count(idle, cpu);
5320
5321 /*
5322 * The idle tasks have their own, simple scheduling class:
5323 */
5324 idle->sched_class = &idle_sched_class;
5325 ftrace_graph_init_idle_task(idle, cpu);
5326 vtime_init_idle(idle, cpu);
5327#ifdef CONFIG_SMP
5328 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
5329#endif
5330}
5331
5332int cpuset_cpumask_can_shrink(const struct cpumask *cur,
5333 const struct cpumask *trial)
5334{
5335 int ret = 1, trial_cpus;
5336 struct dl_bw *cur_dl_b;
5337 unsigned long flags;
5338
5339 if (!cpumask_weight(cur))
5340 return ret;
5341
5342 rcu_read_lock_sched();
5343 cur_dl_b = dl_bw_of(cpumask_any(cur));
5344 trial_cpus = cpumask_weight(trial);
5345
5346 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
5347 if (cur_dl_b->bw != -1 &&
5348 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
5349 ret = 0;
5350 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
5351 rcu_read_unlock_sched();
5352
5353 return ret;
5354}
5355
5356int task_can_attach(struct task_struct *p,
5357 const struct cpumask *cs_cpus_allowed)
5358{
5359 int ret = 0;
5360
5361 /*
5362 * Kthreads which disallow setaffinity shouldn't be moved
5363 * to a new cpuset; we don't want to change their cpu
5364 * affinity and isolating such threads by their set of
5365 * allowed nodes is unnecessary. Thus, cpusets are not
5366 * applicable for such threads. This prevents checking for
5367 * success of set_cpus_allowed_ptr() on all attached tasks
5368 * before cpus_allowed may be changed.
5369 */
5370 if (p->flags & PF_NO_SETAFFINITY) {
5371 ret = -EINVAL;
5372 goto out;
5373 }
5374
5375#ifdef CONFIG_SMP
5376 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
5377 cs_cpus_allowed)) {
5378 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
5379 cs_cpus_allowed);
5380 struct dl_bw *dl_b;
5381 bool overflow;
5382 int cpus;
5383 unsigned long flags;
5384
5385 rcu_read_lock_sched();
5386 dl_b = dl_bw_of(dest_cpu);
5387 raw_spin_lock_irqsave(&dl_b->lock, flags);
5388 cpus = dl_bw_cpus(dest_cpu);
5389 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
5390 if (overflow)
5391 ret = -EBUSY;
5392 else {
5393 /*
5394 * We reserve space for this task in the destination
5395 * root_domain, as we can't fail after this point.
5396 * We will free resources in the source root_domain
5397 * later on (see set_cpus_allowed_dl()).
5398 */
5399 __dl_add(dl_b, p->dl.dl_bw);
5400 }
5401 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
5402 rcu_read_unlock_sched();
5403
5404 }
5405#endif
5406out:
5407 return ret;
5408}
5409
5410#ifdef CONFIG_SMP
5411
5412static bool sched_smp_initialized __read_mostly;
5413
5414#ifdef CONFIG_NUMA_BALANCING
5415/* Migrate current task p to target_cpu */
5416int migrate_task_to(struct task_struct *p, int target_cpu)
5417{
5418 struct migration_arg arg = { p, target_cpu };
5419 int curr_cpu = task_cpu(p);
5420
5421 if (curr_cpu == target_cpu)
5422 return 0;
5423
5424 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
5425 return -EINVAL;
5426
5427 /* TODO: This is not properly updating schedstats */
5428
5429 trace_sched_move_numa(p, curr_cpu, target_cpu);
5430 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
5431}
5432
5433/*
5434 * Requeue a task on a given node and accurately track the number of NUMA
5435 * tasks on the runqueues
5436 */
5437void sched_setnuma(struct task_struct *p, int nid)
5438{
5439 bool queued, running;
5440 struct rq_flags rf;
5441 struct rq *rq;
5442
5443 rq = task_rq_lock(p, &rf);
5444 queued = task_on_rq_queued(p);
5445 running = task_current(rq, p);
5446
5447 if (queued)
5448 dequeue_task(rq, p, DEQUEUE_SAVE);
5449 if (running)
5450 put_prev_task(rq, p);
5451
5452 p->numa_preferred_nid = nid;
5453
5454 if (queued)
5455 enqueue_task(rq, p, ENQUEUE_RESTORE);
5456 if (running)
5457 set_curr_task(rq, p);
5458 task_rq_unlock(rq, p, &rf);
5459}
5460#endif /* CONFIG_NUMA_BALANCING */
5461
5462#ifdef CONFIG_HOTPLUG_CPU
5463/*
5464 * Ensures that the idle task is using init_mm right before its cpu goes
5465 * offline.
5466 */
5467void idle_task_exit(void)
5468{
5469 struct mm_struct *mm = current->active_mm;
5470
5471 BUG_ON(cpu_online(smp_processor_id()));
5472
5473 if (mm != &init_mm) {
5474 switch_mm_irqs_off(mm, &init_mm, current);
5475 finish_arch_post_lock_switch();
5476 }
5477 mmdrop(mm);
5478}
5479
5480/*
5481 * Since this CPU is going 'away' for a while, fold any nr_active delta
5482 * we might have. Assumes we're called after migrate_tasks() so that the
5483 * nr_active count is stable. We need to take the teardown thread which
5484 * is calling this into account, so we hand in adjust = 1 to the load
5485 * calculation.
5486 *
5487 * Also see the comment "Global load-average calculations".
5488 */
5489static void calc_load_migrate(struct rq *rq)
5490{
5491 long delta = calc_load_fold_active(rq, 1);
5492 if (delta)
5493 atomic_long_add(delta, &calc_load_tasks);
5494}
5495
5496static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
5497{
5498}
5499
5500static const struct sched_class fake_sched_class = {
5501 .put_prev_task = put_prev_task_fake,
5502};
5503
5504static struct task_struct fake_task = {
5505 /*
5506 * Avoid pull_{rt,dl}_task()
5507 */
5508 .prio = MAX_PRIO + 1,
5509 .sched_class = &fake_sched_class,
5510};
5511
5512/*
5513 * Migrate all tasks from the rq, sleeping tasks will be migrated by
5514 * try_to_wake_up()->select_task_rq().
5515 *
5516 * Called with rq->lock held even though we'er in stop_machine() and
5517 * there's no concurrency possible, we hold the required locks anyway
5518 * because of lock validation efforts.
5519 */
5520static void migrate_tasks(struct rq *dead_rq)
5521{
5522 struct rq *rq = dead_rq;
5523 struct task_struct *next, *stop = rq->stop;
5524 struct pin_cookie cookie;
5525 int dest_cpu;
5526
5527 /*
5528 * Fudge the rq selection such that the below task selection loop
5529 * doesn't get stuck on the currently eligible stop task.
5530 *
5531 * We're currently inside stop_machine() and the rq is either stuck
5532 * in the stop_machine_cpu_stop() loop, or we're executing this code,
5533 * either way we should never end up calling schedule() until we're
5534 * done here.
5535 */
5536 rq->stop = NULL;
5537
5538 /*
5539 * put_prev_task() and pick_next_task() sched
5540 * class method both need to have an up-to-date
5541 * value of rq->clock[_task]
5542 */
5543 update_rq_clock(rq);
5544
5545 for (;;) {
5546 /*
5547 * There's this thread running, bail when that's the only
5548 * remaining thread.
5549 */
5550 if (rq->nr_running == 1)
5551 break;
5552
5553 /*
5554 * pick_next_task assumes pinned rq->lock.
5555 */
5556 cookie = lockdep_pin_lock(&rq->lock);
5557 next = pick_next_task(rq, &fake_task, cookie);
5558 BUG_ON(!next);
5559 next->sched_class->put_prev_task(rq, next);
5560
5561 /*
5562 * Rules for changing task_struct::cpus_allowed are holding
5563 * both pi_lock and rq->lock, such that holding either
5564 * stabilizes the mask.
5565 *
5566 * Drop rq->lock is not quite as disastrous as it usually is
5567 * because !cpu_active at this point, which means load-balance
5568 * will not interfere. Also, stop-machine.
5569 */
5570 lockdep_unpin_lock(&rq->lock, cookie);
5571 raw_spin_unlock(&rq->lock);
5572 raw_spin_lock(&next->pi_lock);
5573 raw_spin_lock(&rq->lock);
5574
5575 /*
5576 * Since we're inside stop-machine, _nothing_ should have
5577 * changed the task, WARN if weird stuff happened, because in
5578 * that case the above rq->lock drop is a fail too.
5579 */
5580 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
5581 raw_spin_unlock(&next->pi_lock);
5582 continue;
5583 }
5584
5585 /* Find suitable destination for @next, with force if needed. */
5586 dest_cpu = select_fallback_rq(dead_rq->cpu, next);
5587
5588 rq = __migrate_task(rq, next, dest_cpu);
5589 if (rq != dead_rq) {
5590 raw_spin_unlock(&rq->lock);
5591 rq = dead_rq;
5592 raw_spin_lock(&rq->lock);
5593 }
5594 raw_spin_unlock(&next->pi_lock);
5595 }
5596
5597 rq->stop = stop;
5598}
5599#endif /* CONFIG_HOTPLUG_CPU */
5600
5601static void set_rq_online(struct rq *rq)
5602{
5603 if (!rq->online) {
5604 const struct sched_class *class;
5605
5606 cpumask_set_cpu(rq->cpu, rq->rd->online);
5607 rq->online = 1;
5608
5609 for_each_class(class) {
5610 if (class->rq_online)
5611 class->rq_online(rq);
5612 }
5613 }
5614}
5615
5616static void set_rq_offline(struct rq *rq)
5617{
5618 if (rq->online) {
5619 const struct sched_class *class;
5620
5621 for_each_class(class) {
5622 if (class->rq_offline)
5623 class->rq_offline(rq);
5624 }
5625
5626 cpumask_clear_cpu(rq->cpu, rq->rd->online);
5627 rq->online = 0;
5628 }
5629}
5630
5631static void set_cpu_rq_start_time(unsigned int cpu)
5632{
5633 struct rq *rq = cpu_rq(cpu);
5634
5635 rq->age_stamp = sched_clock_cpu(cpu);
5636}
5637
5638static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5639
5640#ifdef CONFIG_SCHED_DEBUG
5641
5642static __read_mostly int sched_debug_enabled;
5643
5644static int __init sched_debug_setup(char *str)
5645{
5646 sched_debug_enabled = 1;
5647
5648 return 0;
5649}
5650early_param("sched_debug", sched_debug_setup);
5651
5652static inline bool sched_debug(void)
5653{
5654 return sched_debug_enabled;
5655}
5656
5657static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
5658 struct cpumask *groupmask)
5659{
5660 struct sched_group *group = sd->groups;
5661
5662 cpumask_clear(groupmask);
5663
5664 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5665
5666 if (!(sd->flags & SD_LOAD_BALANCE)) {
5667 printk("does not load-balance\n");
5668 if (sd->parent)
5669 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5670 " has parent");
5671 return -1;
5672 }
5673
5674 printk(KERN_CONT "span %*pbl level %s\n",
5675 cpumask_pr_args(sched_domain_span(sd)), sd->name);
5676
5677 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
5678 printk(KERN_ERR "ERROR: domain->span does not contain "
5679 "CPU%d\n", cpu);
5680 }
5681 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
5682 printk(KERN_ERR "ERROR: domain->groups does not contain"
5683 " CPU%d\n", cpu);
5684 }
5685
5686 printk(KERN_DEBUG "%*s groups:", level + 1, "");
5687 do {
5688 if (!group) {
5689 printk("\n");
5690 printk(KERN_ERR "ERROR: group is NULL\n");
5691 break;
5692 }
5693
5694 if (!cpumask_weight(sched_group_cpus(group))) {
5695 printk(KERN_CONT "\n");
5696 printk(KERN_ERR "ERROR: empty group\n");
5697 break;
5698 }
5699
5700 if (!(sd->flags & SD_OVERLAP) &&
5701 cpumask_intersects(groupmask, sched_group_cpus(group))) {
5702 printk(KERN_CONT "\n");
5703 printk(KERN_ERR "ERROR: repeated CPUs\n");
5704 break;
5705 }
5706
5707 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
5708
5709 printk(KERN_CONT " %*pbl",
5710 cpumask_pr_args(sched_group_cpus(group)));
5711 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
5712 printk(KERN_CONT " (cpu_capacity = %lu)",
5713 group->sgc->capacity);
5714 }
5715
5716 group = group->next;
5717 } while (group != sd->groups);
5718 printk(KERN_CONT "\n");
5719
5720 if (!cpumask_equal(sched_domain_span(sd), groupmask))
5721 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
5722
5723 if (sd->parent &&
5724 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
5725 printk(KERN_ERR "ERROR: parent span is not a superset "
5726 "of domain->span\n");
5727 return 0;
5728}
5729
5730static void sched_domain_debug(struct sched_domain *sd, int cpu)
5731{
5732 int level = 0;
5733
5734 if (!sched_debug_enabled)
5735 return;
5736
5737 if (!sd) {
5738 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5739 return;
5740 }
5741
5742 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5743
5744 for (;;) {
5745 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
5746 break;
5747 level++;
5748 sd = sd->parent;
5749 if (!sd)
5750 break;
5751 }
5752}
5753#else /* !CONFIG_SCHED_DEBUG */
5754
5755# define sched_debug_enabled 0
5756# define sched_domain_debug(sd, cpu) do { } while (0)
5757static inline bool sched_debug(void)
5758{
5759 return false;
5760}
5761#endif /* CONFIG_SCHED_DEBUG */
5762
5763static int sd_degenerate(struct sched_domain *sd)
5764{
5765 if (cpumask_weight(sched_domain_span(sd)) == 1)
5766 return 1;
5767
5768 /* Following flags need at least 2 groups */
5769 if (sd->flags & (SD_LOAD_BALANCE |
5770 SD_BALANCE_NEWIDLE |
5771 SD_BALANCE_FORK |
5772 SD_BALANCE_EXEC |
5773 SD_SHARE_CPUCAPACITY |
5774 SD_ASYM_CPUCAPACITY |
5775 SD_SHARE_PKG_RESOURCES |
5776 SD_SHARE_POWERDOMAIN)) {
5777 if (sd->groups != sd->groups->next)
5778 return 0;
5779 }
5780
5781 /* Following flags don't use groups */
5782 if (sd->flags & (SD_WAKE_AFFINE))
5783 return 0;
5784
5785 return 1;
5786}
5787
5788static int
5789sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
5790{
5791 unsigned long cflags = sd->flags, pflags = parent->flags;
5792
5793 if (sd_degenerate(parent))
5794 return 1;
5795
5796 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
5797 return 0;
5798
5799 /* Flags needing groups don't count if only 1 group in parent */
5800 if (parent->groups == parent->groups->next) {
5801 pflags &= ~(SD_LOAD_BALANCE |
5802 SD_BALANCE_NEWIDLE |
5803 SD_BALANCE_FORK |
5804 SD_BALANCE_EXEC |
5805 SD_ASYM_CPUCAPACITY |
5806 SD_SHARE_CPUCAPACITY |
5807 SD_SHARE_PKG_RESOURCES |
5808 SD_PREFER_SIBLING |
5809 SD_SHARE_POWERDOMAIN);
5810 if (nr_node_ids == 1)
5811 pflags &= ~SD_SERIALIZE;
5812 }
5813 if (~cflags & pflags)
5814 return 0;
5815
5816 return 1;
5817}
5818
5819static void free_rootdomain(struct rcu_head *rcu)
5820{
5821 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
5822
5823 cpupri_cleanup(&rd->cpupri);
5824 cpudl_cleanup(&rd->cpudl);
5825 free_cpumask_var(rd->dlo_mask);
5826 free_cpumask_var(rd->rto_mask);
5827 free_cpumask_var(rd->online);
5828 free_cpumask_var(rd->span);
5829 kfree(rd);
5830}
5831
5832static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5833{
5834 struct root_domain *old_rd = NULL;
5835 unsigned long flags;
5836
5837 raw_spin_lock_irqsave(&rq->lock, flags);
5838
5839 if (rq->rd) {
5840 old_rd = rq->rd;
5841
5842 if (cpumask_test_cpu(rq->cpu, old_rd->online))
5843 set_rq_offline(rq);
5844
5845 cpumask_clear_cpu(rq->cpu, old_rd->span);
5846
5847 /*
5848 * If we dont want to free the old_rd yet then
5849 * set old_rd to NULL to skip the freeing later
5850 * in this function:
5851 */
5852 if (!atomic_dec_and_test(&old_rd->refcount))
5853 old_rd = NULL;
5854 }
5855
5856 atomic_inc(&rd->refcount);
5857 rq->rd = rd;
5858
5859 cpumask_set_cpu(rq->cpu, rd->span);
5860 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
5861 set_rq_online(rq);
5862
5863 raw_spin_unlock_irqrestore(&rq->lock, flags);
5864
5865 if (old_rd)
5866 call_rcu_sched(&old_rd->rcu, free_rootdomain);
5867}
5868
5869static int init_rootdomain(struct root_domain *rd)
5870{
5871 memset(rd, 0, sizeof(*rd));
5872
5873 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
5874 goto out;
5875 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
5876 goto free_span;
5877 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
5878 goto free_online;
5879 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5880 goto free_dlo_mask;
5881
5882 init_dl_bw(&rd->dl_bw);
5883 if (cpudl_init(&rd->cpudl) != 0)
5884 goto free_dlo_mask;
5885
5886 if (cpupri_init(&rd->cpupri) != 0)
5887 goto free_rto_mask;
5888 return 0;
5889
5890free_rto_mask:
5891 free_cpumask_var(rd->rto_mask);
5892free_dlo_mask:
5893 free_cpumask_var(rd->dlo_mask);
5894free_online:
5895 free_cpumask_var(rd->online);
5896free_span:
5897 free_cpumask_var(rd->span);
5898out:
5899 return -ENOMEM;
5900}
5901
5902/*
5903 * By default the system creates a single root-domain with all cpus as
5904 * members (mimicking the global state we have today).
5905 */
5906struct root_domain def_root_domain;
5907
5908static void init_defrootdomain(void)
5909{
5910 init_rootdomain(&def_root_domain);
5911
5912 atomic_set(&def_root_domain.refcount, 1);
5913}
5914
5915static struct root_domain *alloc_rootdomain(void)
5916{
5917 struct root_domain *rd;
5918
5919 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5920 if (!rd)
5921 return NULL;
5922
5923 if (init_rootdomain(rd) != 0) {
5924 kfree(rd);
5925 return NULL;
5926 }
5927
5928 return rd;
5929}
5930
5931static void free_sched_groups(struct sched_group *sg, int free_sgc)
5932{
5933 struct sched_group *tmp, *first;
5934
5935 if (!sg)
5936 return;
5937
5938 first = sg;
5939 do {
5940 tmp = sg->next;
5941
5942 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
5943 kfree(sg->sgc);
5944
5945 kfree(sg);
5946 sg = tmp;
5947 } while (sg != first);
5948}
5949
5950static void destroy_sched_domain(struct sched_domain *sd)
5951{
5952 /*
5953 * If its an overlapping domain it has private groups, iterate and
5954 * nuke them all.
5955 */
5956 if (sd->flags & SD_OVERLAP) {
5957 free_sched_groups(sd->groups, 1);
5958 } else if (atomic_dec_and_test(&sd->groups->ref)) {
5959 kfree(sd->groups->sgc);
5960 kfree(sd->groups);
5961 }
5962 if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
5963 kfree(sd->shared);
5964 kfree(sd);
5965}
5966
5967static void destroy_sched_domains_rcu(struct rcu_head *rcu)
5968{
5969 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
5970
5971 while (sd) {
5972 struct sched_domain *parent = sd->parent;
5973 destroy_sched_domain(sd);
5974 sd = parent;
5975 }
5976}
5977
5978static void destroy_sched_domains(struct sched_domain *sd)
5979{
5980 if (sd)
5981 call_rcu(&sd->rcu, destroy_sched_domains_rcu);
5982}
5983
5984/*
5985 * Keep a special pointer to the highest sched_domain that has
5986 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
5987 * allows us to avoid some pointer chasing select_idle_sibling().
5988 *
5989 * Also keep a unique ID per domain (we use the first cpu number in
5990 * the cpumask of the domain), this allows us to quickly tell if
5991 * two cpus are in the same cache domain, see cpus_share_cache().
5992 */
5993DEFINE_PER_CPU(struct sched_domain *, sd_llc);
5994DEFINE_PER_CPU(int, sd_llc_size);
5995DEFINE_PER_CPU(int, sd_llc_id);
5996DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
5997DEFINE_PER_CPU(struct sched_domain *, sd_numa);
5998DEFINE_PER_CPU(struct sched_domain *, sd_asym);
5999
6000static void update_top_cache_domain(int cpu)
6001{
6002 struct sched_domain_shared *sds = NULL;
6003 struct sched_domain *sd;
6004 int id = cpu;
6005 int size = 1;
6006
6007 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
6008 if (sd) {
6009 id = cpumask_first(sched_domain_span(sd));
6010 size = cpumask_weight(sched_domain_span(sd));
6011 sds = sd->shared;
6012 }
6013
6014 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
6015 per_cpu(sd_llc_size, cpu) = size;
6016 per_cpu(sd_llc_id, cpu) = id;
6017 rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
6018
6019 sd = lowest_flag_domain(cpu, SD_NUMA);
6020 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
6021
6022 sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
6023 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
6024}
6025
6026/*
6027 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
6028 * hold the hotplug lock.
6029 */
6030static void
6031cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
6032{
6033 struct rq *rq = cpu_rq(cpu);
6034 struct sched_domain *tmp;
6035
6036 /* Remove the sched domains which do not contribute to scheduling. */
6037 for (tmp = sd; tmp; ) {
6038 struct sched_domain *parent = tmp->parent;
6039 if (!parent)
6040 break;
6041
6042 if (sd_parent_degenerate(tmp, parent)) {
6043 tmp->parent = parent->parent;
6044 if (parent->parent)
6045 parent->parent->child = tmp;
6046 /*
6047 * Transfer SD_PREFER_SIBLING down in case of a
6048 * degenerate parent; the spans match for this
6049 * so the property transfers.
6050 */
6051 if (parent->flags & SD_PREFER_SIBLING)
6052 tmp->flags |= SD_PREFER_SIBLING;
6053 destroy_sched_domain(parent);
6054 } else
6055 tmp = tmp->parent;
6056 }
6057
6058 if (sd && sd_degenerate(sd)) {
6059 tmp = sd;
6060 sd = sd->parent;
6061 destroy_sched_domain(tmp);
6062 if (sd)
6063 sd->child = NULL;
6064 }
6065
6066 sched_domain_debug(sd, cpu);
6067
6068 rq_attach_root(rq, rd);
6069 tmp = rq->sd;
6070 rcu_assign_pointer(rq->sd, sd);
6071 destroy_sched_domains(tmp);
6072
6073 update_top_cache_domain(cpu);
6074}
6075
6076/* Setup the mask of cpus configured for isolated domains */
6077static int __init isolated_cpu_setup(char *str)
6078{
6079 int ret;
6080
6081 alloc_bootmem_cpumask_var(&cpu_isolated_map);
6082 ret = cpulist_parse(str, cpu_isolated_map);
6083 if (ret) {
6084 pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids);
6085 return 0;
6086 }
6087 return 1;
6088}
6089__setup("isolcpus=", isolated_cpu_setup);
6090
6091struct s_data {
6092 struct sched_domain ** __percpu sd;
6093 struct root_domain *rd;
6094};
6095
6096enum s_alloc {
6097 sa_rootdomain,
6098 sa_sd,
6099 sa_sd_storage,
6100 sa_none,
6101};
6102
6103/*
6104 * Build an iteration mask that can exclude certain CPUs from the upwards
6105 * domain traversal.
6106 *
6107 * Asymmetric node setups can result in situations where the domain tree is of
6108 * unequal depth, make sure to skip domains that already cover the entire
6109 * range.
6110 *
6111 * In that case build_sched_domains() will have terminated the iteration early
6112 * and our sibling sd spans will be empty. Domains should always include the
6113 * cpu they're built on, so check that.
6114 *
6115 */
6116static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
6117{
6118 const struct cpumask *span = sched_domain_span(sd);
6119 struct sd_data *sdd = sd->private;
6120 struct sched_domain *sibling;
6121 int i;
6122
6123 for_each_cpu(i, span) {
6124 sibling = *per_cpu_ptr(sdd->sd, i);
6125 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
6126 continue;
6127
6128 cpumask_set_cpu(i, sched_group_mask(sg));
6129 }
6130}
6131
6132/*
6133 * Return the canonical balance cpu for this group, this is the first cpu
6134 * of this group that's also in the iteration mask.
6135 */
6136int group_balance_cpu(struct sched_group *sg)
6137{
6138 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
6139}
6140
6141static int
6142build_overlap_sched_groups(struct sched_domain *sd, int cpu)
6143{
6144 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
6145 const struct cpumask *span = sched_domain_span(sd);
6146 struct cpumask *covered = sched_domains_tmpmask;
6147 struct sd_data *sdd = sd->private;
6148 struct sched_domain *sibling;
6149 int i;
6150
6151 cpumask_clear(covered);
6152
6153 for_each_cpu(i, span) {
6154 struct cpumask *sg_span;
6155
6156 if (cpumask_test_cpu(i, covered))
6157 continue;
6158
6159 sibling = *per_cpu_ptr(sdd->sd, i);
6160
6161 /* See the comment near build_group_mask(). */
6162 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
6163 continue;
6164
6165 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6166 GFP_KERNEL, cpu_to_node(cpu));
6167
6168 if (!sg)
6169 goto fail;
6170
6171 sg_span = sched_group_cpus(sg);
6172 if (sibling->child)
6173 cpumask_copy(sg_span, sched_domain_span(sibling->child));
6174 else
6175 cpumask_set_cpu(i, sg_span);
6176
6177 cpumask_or(covered, covered, sg_span);
6178
6179 sg->sgc = *per_cpu_ptr(sdd->sgc, i);
6180 if (atomic_inc_return(&sg->sgc->ref) == 1)
6181 build_group_mask(sd, sg);
6182
6183 /*
6184 * Initialize sgc->capacity such that even if we mess up the
6185 * domains and no possible iteration will get us here, we won't
6186 * die on a /0 trap.
6187 */
6188 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
6189 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
6190
6191 /*
6192 * Make sure the first group of this domain contains the
6193 * canonical balance cpu. Otherwise the sched_domain iteration
6194 * breaks. See update_sg_lb_stats().
6195 */
6196 if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
6197 group_balance_cpu(sg) == cpu)
6198 groups = sg;
6199
6200 if (!first)
6201 first = sg;
6202 if (last)
6203 last->next = sg;
6204 last = sg;
6205 last->next = first;
6206 }
6207 sd->groups = groups;
6208
6209 return 0;
6210
6211fail:
6212 free_sched_groups(first, 0);
6213
6214 return -ENOMEM;
6215}
6216
6217static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
6218{
6219 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
6220 struct sched_domain *child = sd->child;
6221
6222 if (child)
6223 cpu = cpumask_first(sched_domain_span(child));
6224
6225 if (sg) {
6226 *sg = *per_cpu_ptr(sdd->sg, cpu);
6227 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
6228 atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
6229 }
6230
6231 return cpu;
6232}
6233
6234/*
6235 * build_sched_groups will build a circular linked list of the groups
6236 * covered by the given span, and will set each group's ->cpumask correctly,
6237 * and ->cpu_capacity to 0.
6238 *
6239 * Assumes the sched_domain tree is fully constructed
6240 */
6241static int
6242build_sched_groups(struct sched_domain *sd, int cpu)
6243{
6244 struct sched_group *first = NULL, *last = NULL;
6245 struct sd_data *sdd = sd->private;
6246 const struct cpumask *span = sched_domain_span(sd);
6247 struct cpumask *covered;
6248 int i;
6249
6250 get_group(cpu, sdd, &sd->groups);
6251 atomic_inc(&sd->groups->ref);
6252
6253 if (cpu != cpumask_first(span))
6254 return 0;
6255
6256 lockdep_assert_held(&sched_domains_mutex);
6257 covered = sched_domains_tmpmask;
6258
6259 cpumask_clear(covered);
6260
6261 for_each_cpu(i, span) {
6262 struct sched_group *sg;
6263 int group, j;
6264
6265 if (cpumask_test_cpu(i, covered))
6266 continue;
6267
6268 group = get_group(i, sdd, &sg);
6269 cpumask_setall(sched_group_mask(sg));
6270
6271 for_each_cpu(j, span) {
6272 if (get_group(j, sdd, NULL) != group)
6273 continue;
6274
6275 cpumask_set_cpu(j, covered);
6276 cpumask_set_cpu(j, sched_group_cpus(sg));
6277 }
6278
6279 if (!first)
6280 first = sg;
6281 if (last)
6282 last->next = sg;
6283 last = sg;
6284 }
6285 last->next = first;
6286
6287 return 0;
6288}
6289
6290/*
6291 * Initialize sched groups cpu_capacity.
6292 *
6293 * cpu_capacity indicates the capacity of sched group, which is used while
6294 * distributing the load between different sched groups in a sched domain.
6295 * Typically cpu_capacity for all the groups in a sched domain will be same
6296 * unless there are asymmetries in the topology. If there are asymmetries,
6297 * group having more cpu_capacity will pickup more load compared to the
6298 * group having less cpu_capacity.
6299 */
6300static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
6301{
6302 struct sched_group *sg = sd->groups;
6303
6304 WARN_ON(!sg);
6305
6306 do {
6307 int cpu, max_cpu = -1;
6308
6309 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
6310
6311 if (!(sd->flags & SD_ASYM_PACKING))
6312 goto next;
6313
6314 for_each_cpu(cpu, sched_group_cpus(sg)) {
6315 if (max_cpu < 0)
6316 max_cpu = cpu;
6317 else if (sched_asym_prefer(cpu, max_cpu))
6318 max_cpu = cpu;
6319 }
6320 sg->asym_prefer_cpu = max_cpu;
6321
6322next:
6323 sg = sg->next;
6324 } while (sg != sd->groups);
6325
6326 if (cpu != group_balance_cpu(sg))
6327 return;
6328
6329 update_group_capacity(sd, cpu);
6330}
6331
6332/*
6333 * Initializers for schedule domains
6334 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6335 */
6336
6337static int default_relax_domain_level = -1;
6338int sched_domain_level_max;
6339
6340static int __init setup_relax_domain_level(char *str)
6341{
6342 if (kstrtoint(str, 0, &default_relax_domain_level))
6343 pr_warn("Unable to set relax_domain_level\n");
6344
6345 return 1;
6346}
6347__setup("relax_domain_level=", setup_relax_domain_level);
6348
6349static void set_domain_attribute(struct sched_domain *sd,
6350 struct sched_domain_attr *attr)
6351{
6352 int request;
6353
6354 if (!attr || attr->relax_domain_level < 0) {
6355 if (default_relax_domain_level < 0)
6356 return;
6357 else
6358 request = default_relax_domain_level;
6359 } else
6360 request = attr->relax_domain_level;
6361 if (request < sd->level) {
6362 /* turn off idle balance on this domain */
6363 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6364 } else {
6365 /* turn on idle balance on this domain */
6366 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6367 }
6368}
6369
6370static void __sdt_free(const struct cpumask *cpu_map);
6371static int __sdt_alloc(const struct cpumask *cpu_map);
6372
6373static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6374 const struct cpumask *cpu_map)
6375{
6376 switch (what) {
6377 case sa_rootdomain:
6378 if (!atomic_read(&d->rd->refcount))
6379 free_rootdomain(&d->rd->rcu); /* fall through */
6380 case sa_sd:
6381 free_percpu(d->sd); /* fall through */
6382 case sa_sd_storage:
6383 __sdt_free(cpu_map); /* fall through */
6384 case sa_none:
6385 break;
6386 }
6387}
6388
6389static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6390 const struct cpumask *cpu_map)
6391{
6392 memset(d, 0, sizeof(*d));
6393
6394 if (__sdt_alloc(cpu_map))
6395 return sa_sd_storage;
6396 d->sd = alloc_percpu(struct sched_domain *);
6397 if (!d->sd)
6398 return sa_sd_storage;
6399 d->rd = alloc_rootdomain();
6400 if (!d->rd)
6401 return sa_sd;
6402 return sa_rootdomain;
6403}
6404
6405/*
6406 * NULL the sd_data elements we've used to build the sched_domain and
6407 * sched_group structure so that the subsequent __free_domain_allocs()
6408 * will not free the data we're using.
6409 */
6410static void claim_allocations(int cpu, struct sched_domain *sd)
6411{
6412 struct sd_data *sdd = sd->private;
6413
6414 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6415 *per_cpu_ptr(sdd->sd, cpu) = NULL;
6416
6417 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
6418 *per_cpu_ptr(sdd->sds, cpu) = NULL;
6419
6420 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
6421 *per_cpu_ptr(sdd->sg, cpu) = NULL;
6422
6423 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
6424 *per_cpu_ptr(sdd->sgc, cpu) = NULL;
6425}
6426
6427#ifdef CONFIG_NUMA
6428static int sched_domains_numa_levels;
6429enum numa_topology_type sched_numa_topology_type;
6430static int *sched_domains_numa_distance;
6431int sched_max_numa_distance;
6432static struct cpumask ***sched_domains_numa_masks;
6433static int sched_domains_curr_level;
6434#endif
6435
6436/*
6437 * SD_flags allowed in topology descriptions.
6438 *
6439 * These flags are purely descriptive of the topology and do not prescribe
6440 * behaviour. Behaviour is artificial and mapped in the below sd_init()
6441 * function:
6442 *
6443 * SD_SHARE_CPUCAPACITY - describes SMT topologies
6444 * SD_SHARE_PKG_RESOURCES - describes shared caches
6445 * SD_NUMA - describes NUMA topologies
6446 * SD_SHARE_POWERDOMAIN - describes shared power domain
6447 * SD_ASYM_CPUCAPACITY - describes mixed capacity topologies
6448 *
6449 * Odd one out, which beside describing the topology has a quirk also
6450 * prescribes the desired behaviour that goes along with it:
6451 *
6452 * SD_ASYM_PACKING - describes SMT quirks
6453 */
6454#define TOPOLOGY_SD_FLAGS \
6455 (SD_SHARE_CPUCAPACITY | \
6456 SD_SHARE_PKG_RESOURCES | \
6457 SD_NUMA | \
6458 SD_ASYM_PACKING | \
6459 SD_ASYM_CPUCAPACITY | \
6460 SD_SHARE_POWERDOMAIN)
6461
6462static struct sched_domain *
6463sd_init(struct sched_domain_topology_level *tl,
6464 const struct cpumask *cpu_map,
6465 struct sched_domain *child, int cpu)
6466{
6467 struct sd_data *sdd = &tl->data;
6468 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
6469 int sd_id, sd_weight, sd_flags = 0;
6470
6471#ifdef CONFIG_NUMA
6472 /*
6473 * Ugly hack to pass state to sd_numa_mask()...
6474 */
6475 sched_domains_curr_level = tl->numa_level;
6476#endif
6477
6478 sd_weight = cpumask_weight(tl->mask(cpu));
6479
6480 if (tl->sd_flags)
6481 sd_flags = (*tl->sd_flags)();
6482 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
6483 "wrong sd_flags in topology description\n"))
6484 sd_flags &= ~TOPOLOGY_SD_FLAGS;
6485
6486 *sd = (struct sched_domain){
6487 .min_interval = sd_weight,
6488 .max_interval = 2*sd_weight,
6489 .busy_factor = 32,
6490 .imbalance_pct = 125,
6491
6492 .cache_nice_tries = 0,
6493 .busy_idx = 0,
6494 .idle_idx = 0,
6495 .newidle_idx = 0,
6496 .wake_idx = 0,
6497 .forkexec_idx = 0,
6498
6499 .flags = 1*SD_LOAD_BALANCE
6500 | 1*SD_BALANCE_NEWIDLE
6501 | 1*SD_BALANCE_EXEC
6502 | 1*SD_BALANCE_FORK
6503 | 0*SD_BALANCE_WAKE
6504 | 1*SD_WAKE_AFFINE
6505 | 0*SD_SHARE_CPUCAPACITY
6506 | 0*SD_SHARE_PKG_RESOURCES
6507 | 0*SD_SERIALIZE
6508 | 0*SD_PREFER_SIBLING
6509 | 0*SD_NUMA
6510 | sd_flags
6511 ,
6512
6513 .last_balance = jiffies,
6514 .balance_interval = sd_weight,
6515 .smt_gain = 0,
6516 .max_newidle_lb_cost = 0,
6517 .next_decay_max_lb_cost = jiffies,
6518 .child = child,
6519#ifdef CONFIG_SCHED_DEBUG
6520 .name = tl->name,
6521#endif
6522 };
6523
6524 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
6525 sd_id = cpumask_first(sched_domain_span(sd));
6526
6527 /*
6528 * Convert topological properties into behaviour.
6529 */
6530
6531 if (sd->flags & SD_ASYM_CPUCAPACITY) {
6532 struct sched_domain *t = sd;
6533
6534 for_each_lower_domain(t)
6535 t->flags |= SD_BALANCE_WAKE;
6536 }
6537
6538 if (sd->flags & SD_SHARE_CPUCAPACITY) {
6539 sd->flags |= SD_PREFER_SIBLING;
6540 sd->imbalance_pct = 110;
6541 sd->smt_gain = 1178; /* ~15% */
6542
6543 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
6544 sd->imbalance_pct = 117;
6545 sd->cache_nice_tries = 1;
6546 sd->busy_idx = 2;
6547
6548#ifdef CONFIG_NUMA
6549 } else if (sd->flags & SD_NUMA) {
6550 sd->cache_nice_tries = 2;
6551 sd->busy_idx = 3;
6552 sd->idle_idx = 2;
6553
6554 sd->flags |= SD_SERIALIZE;
6555 if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
6556 sd->flags &= ~(SD_BALANCE_EXEC |
6557 SD_BALANCE_FORK |
6558 SD_WAKE_AFFINE);
6559 }
6560
6561#endif
6562 } else {
6563 sd->flags |= SD_PREFER_SIBLING;
6564 sd->cache_nice_tries = 1;
6565 sd->busy_idx = 2;
6566 sd->idle_idx = 1;
6567 }
6568
6569 /*
6570 * For all levels sharing cache; connect a sched_domain_shared
6571 * instance.
6572 */
6573 if (sd->flags & SD_SHARE_PKG_RESOURCES) {
6574 sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
6575 atomic_inc(&sd->shared->ref);
6576 atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
6577 }
6578
6579 sd->private = sdd;
6580
6581 return sd;
6582}
6583
6584/*
6585 * Topology list, bottom-up.
6586 */
6587static struct sched_domain_topology_level default_topology[] = {
6588#ifdef CONFIG_SCHED_SMT
6589 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
6590#endif
6591#ifdef CONFIG_SCHED_MC
6592 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
6593#endif
6594 { cpu_cpu_mask, SD_INIT_NAME(DIE) },
6595 { NULL, },
6596};
6597
6598static struct sched_domain_topology_level *sched_domain_topology =
6599 default_topology;
6600
6601#define for_each_sd_topology(tl) \
6602 for (tl = sched_domain_topology; tl->mask; tl++)
6603
6604void set_sched_topology(struct sched_domain_topology_level *tl)
6605{
6606 if (WARN_ON_ONCE(sched_smp_initialized))
6607 return;
6608
6609 sched_domain_topology = tl;
6610}
6611
6612#ifdef CONFIG_NUMA
6613
6614static const struct cpumask *sd_numa_mask(int cpu)
6615{
6616 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6617}
6618
6619static void sched_numa_warn(const char *str)
6620{
6621 static int done = false;
6622 int i,j;
6623
6624 if (done)
6625 return;
6626
6627 done = true;
6628
6629 printk(KERN_WARNING "ERROR: %s\n\n", str);
6630
6631 for (i = 0; i < nr_node_ids; i++) {
6632 printk(KERN_WARNING " ");
6633 for (j = 0; j < nr_node_ids; j++)
6634 printk(KERN_CONT "%02d ", node_distance(i,j));
6635 printk(KERN_CONT "\n");
6636 }
6637 printk(KERN_WARNING "\n");
6638}
6639
6640bool find_numa_distance(int distance)
6641{
6642 int i;
6643
6644 if (distance == node_distance(0, 0))
6645 return true;
6646
6647 for (i = 0; i < sched_domains_numa_levels; i++) {
6648 if (sched_domains_numa_distance[i] == distance)
6649 return true;
6650 }
6651
6652 return false;
6653}
6654
6655/*
6656 * A system can have three types of NUMA topology:
6657 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
6658 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
6659 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
6660 *
6661 * The difference between a glueless mesh topology and a backplane
6662 * topology lies in whether communication between not directly
6663 * connected nodes goes through intermediary nodes (where programs
6664 * could run), or through backplane controllers. This affects
6665 * placement of programs.
6666 *
6667 * The type of topology can be discerned with the following tests:
6668 * - If the maximum distance between any nodes is 1 hop, the system
6669 * is directly connected.
6670 * - If for two nodes A and B, located N > 1 hops away from each other,
6671 * there is an intermediary node C, which is < N hops away from both
6672 * nodes A and B, the system is a glueless mesh.
6673 */
6674static void init_numa_topology_type(void)
6675{
6676 int a, b, c, n;
6677
6678 n = sched_max_numa_distance;
6679
6680 if (sched_domains_numa_levels <= 1) {
6681 sched_numa_topology_type = NUMA_DIRECT;
6682 return;
6683 }
6684
6685 for_each_online_node(a) {
6686 for_each_online_node(b) {
6687 /* Find two nodes furthest removed from each other. */
6688 if (node_distance(a, b) < n)
6689 continue;
6690
6691 /* Is there an intermediary node between a and b? */
6692 for_each_online_node(c) {
6693 if (node_distance(a, c) < n &&
6694 node_distance(b, c) < n) {
6695 sched_numa_topology_type =
6696 NUMA_GLUELESS_MESH;
6697 return;
6698 }
6699 }
6700
6701 sched_numa_topology_type = NUMA_BACKPLANE;
6702 return;
6703 }
6704 }
6705}
6706
6707static void sched_init_numa(void)
6708{
6709 int next_distance, curr_distance = node_distance(0, 0);
6710 struct sched_domain_topology_level *tl;
6711 int level = 0;
6712 int i, j, k;
6713
6714 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6715 if (!sched_domains_numa_distance)
6716 return;
6717
6718 /*
6719 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
6720 * unique distances in the node_distance() table.
6721 *
6722 * Assumes node_distance(0,j) includes all distances in
6723 * node_distance(i,j) in order to avoid cubic time.
6724 */
6725 next_distance = curr_distance;
6726 for (i = 0; i < nr_node_ids; i++) {
6727 for (j = 0; j < nr_node_ids; j++) {
6728 for (k = 0; k < nr_node_ids; k++) {
6729 int distance = node_distance(i, k);
6730
6731 if (distance > curr_distance &&
6732 (distance < next_distance ||
6733 next_distance == curr_distance))
6734 next_distance = distance;
6735
6736 /*
6737 * While not a strong assumption it would be nice to know
6738 * about cases where if node A is connected to B, B is not
6739 * equally connected to A.
6740 */
6741 if (sched_debug() && node_distance(k, i) != distance)
6742 sched_numa_warn("Node-distance not symmetric");
6743
6744 if (sched_debug() && i && !find_numa_distance(distance))
6745 sched_numa_warn("Node-0 not representative");
6746 }
6747 if (next_distance != curr_distance) {
6748 sched_domains_numa_distance[level++] = next_distance;
6749 sched_domains_numa_levels = level;
6750 curr_distance = next_distance;
6751 } else break;
6752 }
6753
6754 /*
6755 * In case of sched_debug() we verify the above assumption.
6756 */
6757 if (!sched_debug())
6758 break;
6759 }
6760
6761 if (!level)
6762 return;
6763
6764 /*
6765 * 'level' contains the number of unique distances, excluding the
6766 * identity distance node_distance(i,i).
6767 *
6768 * The sched_domains_numa_distance[] array includes the actual distance
6769 * numbers.
6770 */
6771
6772 /*
6773 * Here, we should temporarily reset sched_domains_numa_levels to 0.
6774 * If it fails to allocate memory for array sched_domains_numa_masks[][],
6775 * the array will contain less then 'level' members. This could be
6776 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
6777 * in other functions.
6778 *
6779 * We reset it to 'level' at the end of this function.
6780 */
6781 sched_domains_numa_levels = 0;
6782
6783 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
6784 if (!sched_domains_numa_masks)
6785 return;
6786
6787 /*
6788 * Now for each level, construct a mask per node which contains all
6789 * cpus of nodes that are that many hops away from us.
6790 */
6791 for (i = 0; i < level; i++) {
6792 sched_domains_numa_masks[i] =
6793 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
6794 if (!sched_domains_numa_masks[i])
6795 return;
6796
6797 for (j = 0; j < nr_node_ids; j++) {
6798 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
6799 if (!mask)
6800 return;
6801
6802 sched_domains_numa_masks[i][j] = mask;
6803
6804 for_each_node(k) {
6805 if (node_distance(j, k) > sched_domains_numa_distance[i])
6806 continue;
6807
6808 cpumask_or(mask, mask, cpumask_of_node(k));
6809 }
6810 }
6811 }
6812
6813 /* Compute default topology size */
6814 for (i = 0; sched_domain_topology[i].mask; i++);
6815
6816 tl = kzalloc((i + level + 1) *
6817 sizeof(struct sched_domain_topology_level), GFP_KERNEL);
6818 if (!tl)
6819 return;
6820
6821 /*
6822 * Copy the default topology bits..
6823 */
6824 for (i = 0; sched_domain_topology[i].mask; i++)
6825 tl[i] = sched_domain_topology[i];
6826
6827 /*
6828 * .. and append 'j' levels of NUMA goodness.
6829 */
6830 for (j = 0; j < level; i++, j++) {
6831 tl[i] = (struct sched_domain_topology_level){
6832 .mask = sd_numa_mask,
6833 .sd_flags = cpu_numa_flags,
6834 .flags = SDTL_OVERLAP,
6835 .numa_level = j,
6836 SD_INIT_NAME(NUMA)
6837 };
6838 }
6839
6840 sched_domain_topology = tl;
6841
6842 sched_domains_numa_levels = level;
6843 sched_max_numa_distance = sched_domains_numa_distance[level - 1];
6844
6845 init_numa_topology_type();
6846}
6847
6848static void sched_domains_numa_masks_set(unsigned int cpu)
6849{
6850 int node = cpu_to_node(cpu);
6851 int i, j;
6852
6853 for (i = 0; i < sched_domains_numa_levels; i++) {
6854 for (j = 0; j < nr_node_ids; j++) {
6855 if (node_distance(j, node) <= sched_domains_numa_distance[i])
6856 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
6857 }
6858 }
6859}
6860
6861static void sched_domains_numa_masks_clear(unsigned int cpu)
6862{
6863 int i, j;
6864
6865 for (i = 0; i < sched_domains_numa_levels; i++) {
6866 for (j = 0; j < nr_node_ids; j++)
6867 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
6868 }
6869}
6870
6871#else
6872static inline void sched_init_numa(void) { }
6873static void sched_domains_numa_masks_set(unsigned int cpu) { }
6874static void sched_domains_numa_masks_clear(unsigned int cpu) { }
6875#endif /* CONFIG_NUMA */
6876
6877static int __sdt_alloc(const struct cpumask *cpu_map)
6878{
6879 struct sched_domain_topology_level *tl;
6880 int j;
6881
6882 for_each_sd_topology(tl) {
6883 struct sd_data *sdd = &tl->data;
6884
6885 sdd->sd = alloc_percpu(struct sched_domain *);
6886 if (!sdd->sd)
6887 return -ENOMEM;
6888
6889 sdd->sds = alloc_percpu(struct sched_domain_shared *);
6890 if (!sdd->sds)
6891 return -ENOMEM;
6892
6893 sdd->sg = alloc_percpu(struct sched_group *);
6894 if (!sdd->sg)
6895 return -ENOMEM;
6896
6897 sdd->sgc = alloc_percpu(struct sched_group_capacity *);
6898 if (!sdd->sgc)
6899 return -ENOMEM;
6900
6901 for_each_cpu(j, cpu_map) {
6902 struct sched_domain *sd;
6903 struct sched_domain_shared *sds;
6904 struct sched_group *sg;
6905 struct sched_group_capacity *sgc;
6906
6907 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
6908 GFP_KERNEL, cpu_to_node(j));
6909 if (!sd)
6910 return -ENOMEM;
6911
6912 *per_cpu_ptr(sdd->sd, j) = sd;
6913
6914 sds = kzalloc_node(sizeof(struct sched_domain_shared),
6915 GFP_KERNEL, cpu_to_node(j));
6916 if (!sds)
6917 return -ENOMEM;
6918
6919 *per_cpu_ptr(sdd->sds, j) = sds;
6920
6921 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6922 GFP_KERNEL, cpu_to_node(j));
6923 if (!sg)
6924 return -ENOMEM;
6925
6926 sg->next = sg;
6927
6928 *per_cpu_ptr(sdd->sg, j) = sg;
6929
6930 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
6931 GFP_KERNEL, cpu_to_node(j));
6932 if (!sgc)
6933 return -ENOMEM;
6934
6935 *per_cpu_ptr(sdd->sgc, j) = sgc;
6936 }
6937 }
6938
6939 return 0;
6940}
6941
6942static void __sdt_free(const struct cpumask *cpu_map)
6943{
6944 struct sched_domain_topology_level *tl;
6945 int j;
6946
6947 for_each_sd_topology(tl) {
6948 struct sd_data *sdd = &tl->data;
6949
6950 for_each_cpu(j, cpu_map) {
6951 struct sched_domain *sd;
6952
6953 if (sdd->sd) {
6954 sd = *per_cpu_ptr(sdd->sd, j);
6955 if (sd && (sd->flags & SD_OVERLAP))
6956 free_sched_groups(sd->groups, 0);
6957 kfree(*per_cpu_ptr(sdd->sd, j));
6958 }
6959
6960 if (sdd->sds)
6961 kfree(*per_cpu_ptr(sdd->sds, j));
6962 if (sdd->sg)
6963 kfree(*per_cpu_ptr(sdd->sg, j));
6964 if (sdd->sgc)
6965 kfree(*per_cpu_ptr(sdd->sgc, j));
6966 }
6967 free_percpu(sdd->sd);
6968 sdd->sd = NULL;
6969 free_percpu(sdd->sds);
6970 sdd->sds = NULL;
6971 free_percpu(sdd->sg);
6972 sdd->sg = NULL;
6973 free_percpu(sdd->sgc);
6974 sdd->sgc = NULL;
6975 }
6976}
6977
6978struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6979 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
6980 struct sched_domain *child, int cpu)
6981{
6982 struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
6983
6984 if (child) {
6985 sd->level = child->level + 1;
6986 sched_domain_level_max = max(sched_domain_level_max, sd->level);
6987 child->parent = sd;
6988
6989 if (!cpumask_subset(sched_domain_span(child),
6990 sched_domain_span(sd))) {
6991 pr_err("BUG: arch topology borken\n");
6992#ifdef CONFIG_SCHED_DEBUG
6993 pr_err(" the %s domain not a subset of the %s domain\n",
6994 child->name, sd->name);
6995#endif
6996 /* Fixup, ensure @sd has at least @child cpus. */
6997 cpumask_or(sched_domain_span(sd),
6998 sched_domain_span(sd),
6999 sched_domain_span(child));
7000 }
7001
7002 }
7003 set_domain_attribute(sd, attr);
7004
7005 return sd;
7006}
7007
7008/*
7009 * Build sched domains for a given set of cpus and attach the sched domains
7010 * to the individual cpus
7011 */
7012static int build_sched_domains(const struct cpumask *cpu_map,
7013 struct sched_domain_attr *attr)
7014{
7015 enum s_alloc alloc_state;
7016 struct sched_domain *sd;
7017 struct s_data d;
7018 struct rq *rq = NULL;
7019 int i, ret = -ENOMEM;
7020
7021 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
7022 if (alloc_state != sa_rootdomain)
7023 goto error;
7024
7025 /* Set up domains for cpus specified by the cpu_map. */
7026 for_each_cpu(i, cpu_map) {
7027 struct sched_domain_topology_level *tl;
7028
7029 sd = NULL;
7030 for_each_sd_topology(tl) {
7031 sd = build_sched_domain(tl, cpu_map, attr, sd, i);
7032 if (tl == sched_domain_topology)
7033 *per_cpu_ptr(d.sd, i) = sd;
7034 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
7035 sd->flags |= SD_OVERLAP;
7036 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
7037 break;
7038 }
7039 }
7040
7041 /* Build the groups for the domains */
7042 for_each_cpu(i, cpu_map) {
7043 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
7044 sd->span_weight = cpumask_weight(sched_domain_span(sd));
7045 if (sd->flags & SD_OVERLAP) {
7046 if (build_overlap_sched_groups(sd, i))
7047 goto error;
7048 } else {
7049 if (build_sched_groups(sd, i))
7050 goto error;
7051 }
7052 }
7053 }
7054
7055 /* Calculate CPU capacity for physical packages and nodes */
7056 for (i = nr_cpumask_bits-1; i >= 0; i--) {
7057 if (!cpumask_test_cpu(i, cpu_map))
7058 continue;
7059
7060 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
7061 claim_allocations(i, sd);
7062 init_sched_groups_capacity(i, sd);
7063 }
7064 }
7065
7066 /* Attach the domains */
7067 rcu_read_lock();
7068 for_each_cpu(i, cpu_map) {
7069 rq = cpu_rq(i);
7070 sd = *per_cpu_ptr(d.sd, i);
7071
7072 /* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
7073 if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
7074 WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
7075
7076 cpu_attach_domain(sd, d.rd, i);
7077 }
7078 rcu_read_unlock();
7079
7080 if (rq && sched_debug_enabled) {
7081 pr_info("span: %*pbl (max cpu_capacity = %lu)\n",
7082 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
7083 }
7084
7085 ret = 0;
7086error:
7087 __free_domain_allocs(&d, alloc_state, cpu_map);
7088 return ret;
7089}
7090
7091static cpumask_var_t *doms_cur; /* current sched domains */
7092static int ndoms_cur; /* number of sched domains in 'doms_cur' */
7093static struct sched_domain_attr *dattr_cur;
7094 /* attribues of custom domains in 'doms_cur' */
7095
7096/*
7097 * Special case: If a kmalloc of a doms_cur partition (array of
7098 * cpumask) fails, then fallback to a single sched domain,
7099 * as determined by the single cpumask fallback_doms.
7100 */
7101static cpumask_var_t fallback_doms;
7102
7103/*
7104 * arch_update_cpu_topology lets virtualized architectures update the
7105 * cpu core maps. It is supposed to return 1 if the topology changed
7106 * or 0 if it stayed the same.
7107 */
7108int __weak arch_update_cpu_topology(void)
7109{
7110 return 0;
7111}
7112
7113cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7114{
7115 int i;
7116 cpumask_var_t *doms;
7117
7118 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7119 if (!doms)
7120 return NULL;
7121 for (i = 0; i < ndoms; i++) {
7122 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7123 free_sched_domains(doms, i);
7124 return NULL;
7125 }
7126 }
7127 return doms;
7128}
7129
7130void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7131{
7132 unsigned int i;
7133 for (i = 0; i < ndoms; i++)
7134 free_cpumask_var(doms[i]);
7135 kfree(doms);
7136}
7137
7138/*
7139 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
7140 * For now this just excludes isolated cpus, but could be used to
7141 * exclude other special cases in the future.
7142 */
7143static int init_sched_domains(const struct cpumask *cpu_map)
7144{
7145 int err;
7146
7147 arch_update_cpu_topology();
7148 ndoms_cur = 1;
7149 doms_cur = alloc_sched_domains(ndoms_cur);
7150 if (!doms_cur)
7151 doms_cur = &fallback_doms;
7152 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
7153 err = build_sched_domains(doms_cur[0], NULL);
7154 register_sched_domain_sysctl();
7155
7156 return err;
7157}
7158
7159/*
7160 * Detach sched domains from a group of cpus specified in cpu_map
7161 * These cpus will now be attached to the NULL domain
7162 */
7163static void detach_destroy_domains(const struct cpumask *cpu_map)
7164{
7165 int i;
7166
7167 rcu_read_lock();
7168 for_each_cpu(i, cpu_map)
7169 cpu_attach_domain(NULL, &def_root_domain, i);
7170 rcu_read_unlock();
7171}
7172
7173/* handle null as "default" */
7174static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7175 struct sched_domain_attr *new, int idx_new)
7176{
7177 struct sched_domain_attr tmp;
7178
7179 /* fast path */
7180 if (!new && !cur)
7181 return 1;
7182
7183 tmp = SD_ATTR_INIT;
7184 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7185 new ? (new + idx_new) : &tmp,
7186 sizeof(struct sched_domain_attr));
7187}
7188
7189/*
7190 * Partition sched domains as specified by the 'ndoms_new'
7191 * cpumasks in the array doms_new[] of cpumasks. This compares
7192 * doms_new[] to the current sched domain partitioning, doms_cur[].
7193 * It destroys each deleted domain and builds each new domain.
7194 *
7195 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
7196 * The masks don't intersect (don't overlap.) We should setup one
7197 * sched domain for each mask. CPUs not in any of the cpumasks will
7198 * not be load balanced. If the same cpumask appears both in the
7199 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7200 * it as it is.
7201 *
7202 * The passed in 'doms_new' should be allocated using
7203 * alloc_sched_domains. This routine takes ownership of it and will
7204 * free_sched_domains it when done with it. If the caller failed the
7205 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
7206 * and partition_sched_domains() will fallback to the single partition
7207 * 'fallback_doms', it also forces the domains to be rebuilt.
7208 *
7209 * If doms_new == NULL it will be replaced with cpu_online_mask.
7210 * ndoms_new == 0 is a special case for destroying existing domains,
7211 * and it will not create the default domain.
7212 *
7213 * Call with hotplug lock held
7214 */
7215void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
7216 struct sched_domain_attr *dattr_new)
7217{
7218 int i, j, n;
7219 int new_topology;
7220
7221 mutex_lock(&sched_domains_mutex);
7222
7223 /* always unregister in case we don't destroy any domains */
7224 unregister_sched_domain_sysctl();
7225
7226 /* Let architecture update cpu core mappings. */
7227 new_topology = arch_update_cpu_topology();
7228
7229 n = doms_new ? ndoms_new : 0;
7230
7231 /* Destroy deleted domains */
7232 for (i = 0; i < ndoms_cur; i++) {
7233 for (j = 0; j < n && !new_topology; j++) {
7234 if (cpumask_equal(doms_cur[i], doms_new[j])
7235 && dattrs_equal(dattr_cur, i, dattr_new, j))
7236 goto match1;
7237 }
7238 /* no match - a current sched domain not in new doms_new[] */
7239 detach_destroy_domains(doms_cur[i]);
7240match1:
7241 ;
7242 }
7243
7244 n = ndoms_cur;
7245 if (doms_new == NULL) {
7246 n = 0;
7247 doms_new = &fallback_doms;
7248 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
7249 WARN_ON_ONCE(dattr_new);
7250 }
7251
7252 /* Build new domains */
7253 for (i = 0; i < ndoms_new; i++) {
7254 for (j = 0; j < n && !new_topology; j++) {
7255 if (cpumask_equal(doms_new[i], doms_cur[j])
7256 && dattrs_equal(dattr_new, i, dattr_cur, j))
7257 goto match2;
7258 }
7259 /* no match - add a new doms_new */
7260 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
7261match2:
7262 ;
7263 }
7264
7265 /* Remember the new sched domains */
7266 if (doms_cur != &fallback_doms)
7267 free_sched_domains(doms_cur, ndoms_cur);
7268 kfree(dattr_cur); /* kfree(NULL) is safe */
7269 doms_cur = doms_new;
7270 dattr_cur = dattr_new;
7271 ndoms_cur = ndoms_new;
7272
7273 register_sched_domain_sysctl();
7274
7275 mutex_unlock(&sched_domains_mutex);
7276}
7277
7278static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
7279
7280/*
7281 * Update cpusets according to cpu_active mask. If cpusets are
7282 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7283 * around partition_sched_domains().
7284 *
7285 * If we come here as part of a suspend/resume, don't touch cpusets because we
7286 * want to restore it back to its original state upon resume anyway.
7287 */
7288static void cpuset_cpu_active(void)
7289{
7290 if (cpuhp_tasks_frozen) {
7291 /*
7292 * num_cpus_frozen tracks how many CPUs are involved in suspend
7293 * resume sequence. As long as this is not the last online
7294 * operation in the resume sequence, just build a single sched
7295 * domain, ignoring cpusets.
7296 */
7297 num_cpus_frozen--;
7298 if (likely(num_cpus_frozen)) {
7299 partition_sched_domains(1, NULL, NULL);
7300 return;
7301 }
7302 /*
7303 * This is the last CPU online operation. So fall through and
7304 * restore the original sched domains by considering the
7305 * cpuset configurations.
7306 */
7307 }
7308 cpuset_update_active_cpus(true);
7309}
7310
7311static int cpuset_cpu_inactive(unsigned int cpu)
7312{
7313 unsigned long flags;
7314 struct dl_bw *dl_b;
7315 bool overflow;
7316 int cpus;
7317
7318 if (!cpuhp_tasks_frozen) {
7319 rcu_read_lock_sched();
7320 dl_b = dl_bw_of(cpu);
7321
7322 raw_spin_lock_irqsave(&dl_b->lock, flags);
7323 cpus = dl_bw_cpus(cpu);
7324 overflow = __dl_overflow(dl_b, cpus, 0, 0);
7325 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7326
7327 rcu_read_unlock_sched();
7328
7329 if (overflow)
7330 return -EBUSY;
7331 cpuset_update_active_cpus(false);
7332 } else {
7333 num_cpus_frozen++;
7334 partition_sched_domains(1, NULL, NULL);
7335 }
7336 return 0;
7337}
7338
7339int sched_cpu_activate(unsigned int cpu)
7340{
7341 struct rq *rq = cpu_rq(cpu);
7342 unsigned long flags;
7343
7344 set_cpu_active(cpu, true);
7345
7346 if (sched_smp_initialized) {
7347 sched_domains_numa_masks_set(cpu);
7348 cpuset_cpu_active();
7349 }
7350
7351 /*
7352 * Put the rq online, if not already. This happens:
7353 *
7354 * 1) In the early boot process, because we build the real domains
7355 * after all cpus have been brought up.
7356 *
7357 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
7358 * domains.
7359 */
7360 raw_spin_lock_irqsave(&rq->lock, flags);
7361 if (rq->rd) {
7362 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7363 set_rq_online(rq);
7364 }
7365 raw_spin_unlock_irqrestore(&rq->lock, flags);
7366
7367 update_max_interval();
7368
7369 return 0;
7370}
7371
7372int sched_cpu_deactivate(unsigned int cpu)
7373{
7374 int ret;
7375
7376 set_cpu_active(cpu, false);
7377 /*
7378 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
7379 * users of this state to go away such that all new such users will
7380 * observe it.
7381 *
7382 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
7383 * not imply sync_sched(), so wait for both.
7384 *
7385 * Do sync before park smpboot threads to take care the rcu boost case.
7386 */
7387 if (IS_ENABLED(CONFIG_PREEMPT))
7388 synchronize_rcu_mult(call_rcu, call_rcu_sched);
7389 else
7390 synchronize_rcu();
7391
7392 if (!sched_smp_initialized)
7393 return 0;
7394
7395 ret = cpuset_cpu_inactive(cpu);
7396 if (ret) {
7397 set_cpu_active(cpu, true);
7398 return ret;
7399 }
7400 sched_domains_numa_masks_clear(cpu);
7401 return 0;
7402}
7403
7404static void sched_rq_cpu_starting(unsigned int cpu)
7405{
7406 struct rq *rq = cpu_rq(cpu);
7407
7408 rq->calc_load_update = calc_load_update;
7409 update_max_interval();
7410}
7411
7412int sched_cpu_starting(unsigned int cpu)
7413{
7414 set_cpu_rq_start_time(cpu);
7415 sched_rq_cpu_starting(cpu);
7416 return 0;
7417}
7418
7419#ifdef CONFIG_HOTPLUG_CPU
7420int sched_cpu_dying(unsigned int cpu)
7421{
7422 struct rq *rq = cpu_rq(cpu);
7423 unsigned long flags;
7424
7425 /* Handle pending wakeups and then migrate everything off */
7426 sched_ttwu_pending();
7427 raw_spin_lock_irqsave(&rq->lock, flags);
7428 if (rq->rd) {
7429 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7430 set_rq_offline(rq);
7431 }
7432 migrate_tasks(rq);
7433 BUG_ON(rq->nr_running != 1);
7434 raw_spin_unlock_irqrestore(&rq->lock, flags);
7435 calc_load_migrate(rq);
7436 update_max_interval();
7437 nohz_balance_exit_idle(cpu);
7438 hrtick_clear(rq);
7439 return 0;
7440}
7441#endif
7442
7443#ifdef CONFIG_SCHED_SMT
7444DEFINE_STATIC_KEY_FALSE(sched_smt_present);
7445
7446static void sched_init_smt(void)
7447{
7448 /*
7449 * We've enumerated all CPUs and will assume that if any CPU
7450 * has SMT siblings, CPU0 will too.
7451 */
7452 if (cpumask_weight(cpu_smt_mask(0)) > 1)
7453 static_branch_enable(&sched_smt_present);
7454}
7455#else
7456static inline void sched_init_smt(void) { }
7457#endif
7458
7459void __init sched_init_smp(void)
7460{
7461 cpumask_var_t non_isolated_cpus;
7462
7463 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
7464 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
7465
7466 sched_init_numa();
7467
7468 /*
7469 * There's no userspace yet to cause hotplug operations; hence all the
7470 * cpu masks are stable and all blatant races in the below code cannot
7471 * happen.
7472 */
7473 mutex_lock(&sched_domains_mutex);
7474 init_sched_domains(cpu_active_mask);
7475 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7476 if (cpumask_empty(non_isolated_cpus))
7477 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
7478 mutex_unlock(&sched_domains_mutex);
7479
7480 /* Move init over to a non-isolated CPU */
7481 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
7482 BUG();
7483 sched_init_granularity();
7484 free_cpumask_var(non_isolated_cpus);
7485
7486 init_sched_rt_class();
7487 init_sched_dl_class();
7488
7489 sched_init_smt();
7490
7491 sched_smp_initialized = true;
7492}
7493
7494static int __init migration_init(void)
7495{
7496 sched_rq_cpu_starting(smp_processor_id());
7497 return 0;
7498}
7499early_initcall(migration_init);
7500
7501#else
7502void __init sched_init_smp(void)
7503{
7504 sched_init_granularity();
7505}
7506#endif /* CONFIG_SMP */
7507
7508int in_sched_functions(unsigned long addr)
7509{
7510 return in_lock_functions(addr) ||
7511 (addr >= (unsigned long)__sched_text_start
7512 && addr < (unsigned long)__sched_text_end);
7513}
7514
7515#ifdef CONFIG_CGROUP_SCHED
7516/*
7517 * Default task group.
7518 * Every task in system belongs to this group at bootup.
7519 */
7520struct task_group root_task_group;
7521LIST_HEAD(task_groups);
7522
7523/* Cacheline aligned slab cache for task_group */
7524static struct kmem_cache *task_group_cache __read_mostly;
7525#endif
7526
7527DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
7528DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
7529
7530#define WAIT_TABLE_BITS 8
7531#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
7532static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;
7533
7534wait_queue_head_t *bit_waitqueue(void *word, int bit)
7535{
7536 const int shift = BITS_PER_LONG == 32 ? 5 : 6;
7537 unsigned long val = (unsigned long)word << shift | bit;
7538
7539 return bit_wait_table + hash_long(val, WAIT_TABLE_BITS);
7540}
7541EXPORT_SYMBOL(bit_waitqueue);
7542
7543void __init sched_init(void)
7544{
7545 int i, j;
7546 unsigned long alloc_size = 0, ptr;
7547
7548 for (i = 0; i < WAIT_TABLE_SIZE; i++)
7549 init_waitqueue_head(bit_wait_table + i);
7550
7551#ifdef CONFIG_FAIR_GROUP_SCHED
7552 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7553#endif
7554#ifdef CONFIG_RT_GROUP_SCHED
7555 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7556#endif
7557 if (alloc_size) {
7558 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
7559
7560#ifdef CONFIG_FAIR_GROUP_SCHED
7561 root_task_group.se = (struct sched_entity **)ptr;
7562 ptr += nr_cpu_ids * sizeof(void **);
7563
7564 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
7565 ptr += nr_cpu_ids * sizeof(void **);
7566
7567#endif /* CONFIG_FAIR_GROUP_SCHED */
7568#ifdef CONFIG_RT_GROUP_SCHED
7569 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
7570 ptr += nr_cpu_ids * sizeof(void **);
7571
7572 root_task_group.rt_rq = (struct rt_rq **)ptr;
7573 ptr += nr_cpu_ids * sizeof(void **);
7574
7575#endif /* CONFIG_RT_GROUP_SCHED */
7576 }
7577#ifdef CONFIG_CPUMASK_OFFSTACK
7578 for_each_possible_cpu(i) {
7579 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
7580 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
7581 per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
7582 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
7583 }
7584#endif /* CONFIG_CPUMASK_OFFSTACK */
7585
7586 init_rt_bandwidth(&def_rt_bandwidth,
7587 global_rt_period(), global_rt_runtime());
7588 init_dl_bandwidth(&def_dl_bandwidth,
7589 global_rt_period(), global_rt_runtime());
7590
7591#ifdef CONFIG_SMP
7592 init_defrootdomain();
7593#endif
7594
7595#ifdef CONFIG_RT_GROUP_SCHED
7596 init_rt_bandwidth(&root_task_group.rt_bandwidth,
7597 global_rt_period(), global_rt_runtime());
7598#endif /* CONFIG_RT_GROUP_SCHED */
7599
7600#ifdef CONFIG_CGROUP_SCHED
7601 task_group_cache = KMEM_CACHE(task_group, 0);
7602
7603 list_add(&root_task_group.list, &task_groups);
7604 INIT_LIST_HEAD(&root_task_group.children);
7605 INIT_LIST_HEAD(&root_task_group.siblings);
7606 autogroup_init(&init_task);
7607#endif /* CONFIG_CGROUP_SCHED */
7608
7609 for_each_possible_cpu(i) {
7610 struct rq *rq;
7611
7612 rq = cpu_rq(i);
7613 raw_spin_lock_init(&rq->lock);
7614 rq->nr_running = 0;
7615 rq->calc_load_active = 0;
7616 rq->calc_load_update = jiffies + LOAD_FREQ;
7617 init_cfs_rq(&rq->cfs);
7618 init_rt_rq(&rq->rt);
7619 init_dl_rq(&rq->dl);
7620#ifdef CONFIG_FAIR_GROUP_SCHED
7621 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
7622 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
7623 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
7624 /*
7625 * How much cpu bandwidth does root_task_group get?
7626 *
7627 * In case of task-groups formed thr' the cgroup filesystem, it
7628 * gets 100% of the cpu resources in the system. This overall
7629 * system cpu resource is divided among the tasks of
7630 * root_task_group and its child task-groups in a fair manner,
7631 * based on each entity's (task or task-group's) weight
7632 * (se->load.weight).
7633 *
7634 * In other words, if root_task_group has 10 tasks of weight
7635 * 1024) and two child groups A0 and A1 (of weight 1024 each),
7636 * then A0's share of the cpu resource is:
7637 *
7638 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
7639 *
7640 * We achieve this by letting root_task_group's tasks sit
7641 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
7642 */
7643 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
7644 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
7645#endif /* CONFIG_FAIR_GROUP_SCHED */
7646
7647 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
7648#ifdef CONFIG_RT_GROUP_SCHED
7649 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
7650#endif
7651
7652 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7653 rq->cpu_load[j] = 0;
7654
7655#ifdef CONFIG_SMP
7656 rq->sd = NULL;
7657 rq->rd = NULL;
7658 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
7659 rq->balance_callback = NULL;
7660 rq->active_balance = 0;
7661 rq->next_balance = jiffies;
7662 rq->push_cpu = 0;
7663 rq->cpu = i;
7664 rq->online = 0;
7665 rq->idle_stamp = 0;
7666 rq->avg_idle = 2*sysctl_sched_migration_cost;
7667 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
7668
7669 INIT_LIST_HEAD(&rq->cfs_tasks);
7670
7671 rq_attach_root(rq, &def_root_domain);
7672#ifdef CONFIG_NO_HZ_COMMON
7673 rq->last_load_update_tick = jiffies;
7674 rq->nohz_flags = 0;
7675#endif
7676#ifdef CONFIG_NO_HZ_FULL
7677 rq->last_sched_tick = 0;
7678#endif
7679#endif /* CONFIG_SMP */
7680 init_rq_hrtick(rq);
7681 atomic_set(&rq->nr_iowait, 0);
7682 }
7683
7684 set_load_weight(&init_task);
7685
7686 /*
7687 * The boot idle thread does lazy MMU switching as well:
7688 */
7689 atomic_inc(&init_mm.mm_count);
7690 enter_lazy_tlb(&init_mm, current);
7691
7692 /*
7693 * Make us the idle thread. Technically, schedule() should not be
7694 * called from this thread, however somewhere below it might be,
7695 * but because we are the idle thread, we just pick up running again
7696 * when this runqueue becomes "idle".
7697 */
7698 init_idle(current, smp_processor_id());
7699
7700 calc_load_update = jiffies + LOAD_FREQ;
7701
7702#ifdef CONFIG_SMP
7703 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
7704 /* May be allocated at isolcpus cmdline parse time */
7705 if (cpu_isolated_map == NULL)
7706 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
7707 idle_thread_set_boot_cpu();
7708 set_cpu_rq_start_time(smp_processor_id());
7709#endif
7710 init_sched_fair_class();
7711
7712 init_schedstats();
7713
7714 scheduler_running = 1;
7715}
7716
7717#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
7718static inline int preempt_count_equals(int preempt_offset)
7719{
7720 int nested = preempt_count() + rcu_preempt_depth();
7721
7722 return (nested == preempt_offset);
7723}
7724
7725void __might_sleep(const char *file, int line, int preempt_offset)
7726{
7727 /*
7728 * Blocking primitives will set (and therefore destroy) current->state,
7729 * since we will exit with TASK_RUNNING make sure we enter with it,
7730 * otherwise we will destroy state.
7731 */
7732 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
7733 "do not call blocking ops when !TASK_RUNNING; "
7734 "state=%lx set at [<%p>] %pS\n",
7735 current->state,
7736 (void *)current->task_state_change,
7737 (void *)current->task_state_change);
7738
7739 ___might_sleep(file, line, preempt_offset);
7740}
7741EXPORT_SYMBOL(__might_sleep);
7742
7743void ___might_sleep(const char *file, int line, int preempt_offset)
7744{
7745 static unsigned long prev_jiffy; /* ratelimiting */
7746 unsigned long preempt_disable_ip;
7747
7748 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
7749 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
7750 !is_idle_task(current)) ||
7751 system_state != SYSTEM_RUNNING || oops_in_progress)
7752 return;
7753 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7754 return;
7755 prev_jiffy = jiffies;
7756
7757 /* Save this before calling printk(), since that will clobber it */
7758 preempt_disable_ip = get_preempt_disable_ip(current);
7759
7760 printk(KERN_ERR
7761 "BUG: sleeping function called from invalid context at %s:%d\n",
7762 file, line);
7763 printk(KERN_ERR
7764 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7765 in_atomic(), irqs_disabled(),
7766 current->pid, current->comm);
7767
7768 if (task_stack_end_corrupted(current))
7769 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
7770
7771 debug_show_held_locks(current);
7772 if (irqs_disabled())
7773 print_irqtrace_events(current);
7774 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
7775 && !preempt_count_equals(preempt_offset)) {
7776 pr_err("Preemption disabled at:");
7777 print_ip_sym(preempt_disable_ip);
7778 pr_cont("\n");
7779 }
7780 dump_stack();
7781 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
7782}
7783EXPORT_SYMBOL(___might_sleep);
7784#endif
7785
7786#ifdef CONFIG_MAGIC_SYSRQ
7787void normalize_rt_tasks(void)
7788{
7789 struct task_struct *g, *p;
7790 struct sched_attr attr = {
7791 .sched_policy = SCHED_NORMAL,
7792 };
7793
7794 read_lock(&tasklist_lock);
7795 for_each_process_thread(g, p) {
7796 /*
7797 * Only normalize user tasks:
7798 */
7799 if (p->flags & PF_KTHREAD)
7800 continue;
7801
7802 p->se.exec_start = 0;
7803 schedstat_set(p->se.statistics.wait_start, 0);
7804 schedstat_set(p->se.statistics.sleep_start, 0);
7805 schedstat_set(p->se.statistics.block_start, 0);
7806
7807 if (!dl_task(p) && !rt_task(p)) {
7808 /*
7809 * Renice negative nice level userspace
7810 * tasks back to 0:
7811 */
7812 if (task_nice(p) < 0)
7813 set_user_nice(p, 0);
7814 continue;
7815 }
7816
7817 __sched_setscheduler(p, &attr, false, false);
7818 }
7819 read_unlock(&tasklist_lock);
7820}
7821
7822#endif /* CONFIG_MAGIC_SYSRQ */
7823
7824#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
7825/*
7826 * These functions are only useful for the IA64 MCA handling, or kdb.
7827 *
7828 * They can only be called when the whole system has been
7829 * stopped - every CPU needs to be quiescent, and no scheduling
7830 * activity can take place. Using them for anything else would
7831 * be a serious bug, and as a result, they aren't even visible
7832 * under any other configuration.
7833 */
7834
7835/**
7836 * curr_task - return the current task for a given cpu.
7837 * @cpu: the processor in question.
7838 *
7839 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7840 *
7841 * Return: The current task for @cpu.
7842 */
7843struct task_struct *curr_task(int cpu)
7844{
7845 return cpu_curr(cpu);
7846}
7847
7848#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7849
7850#ifdef CONFIG_IA64
7851/**
7852 * set_curr_task - set the current task for a given cpu.
7853 * @cpu: the processor in question.
7854 * @p: the task pointer to set.
7855 *
7856 * Description: This function must only be used when non-maskable interrupts
7857 * are serviced on a separate stack. It allows the architecture to switch the
7858 * notion of the current task on a cpu in a non-blocking manner. This function
7859 * must be called with all CPU's synchronized, and interrupts disabled, the
7860 * and caller must save the original value of the current task (see
7861 * curr_task() above) and restore that value before reenabling interrupts and
7862 * re-starting the system.
7863 *
7864 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7865 */
7866void ia64_set_curr_task(int cpu, struct task_struct *p)
7867{
7868 cpu_curr(cpu) = p;
7869}
7870
7871#endif
7872
7873#ifdef CONFIG_CGROUP_SCHED
7874/* task_group_lock serializes the addition/removal of task groups */
7875static DEFINE_SPINLOCK(task_group_lock);
7876
7877static void sched_free_group(struct task_group *tg)
7878{
7879 free_fair_sched_group(tg);
7880 free_rt_sched_group(tg);
7881 autogroup_free(tg);
7882 kmem_cache_free(task_group_cache, tg);
7883}
7884
7885/* allocate runqueue etc for a new task group */
7886struct task_group *sched_create_group(struct task_group *parent)
7887{
7888 struct task_group *tg;
7889
7890 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
7891 if (!tg)
7892 return ERR_PTR(-ENOMEM);
7893
7894 if (!alloc_fair_sched_group(tg, parent))
7895 goto err;
7896
7897 if (!alloc_rt_sched_group(tg, parent))
7898 goto err;
7899
7900 return tg;
7901
7902err:
7903 sched_free_group(tg);
7904 return ERR_PTR(-ENOMEM);
7905}
7906
7907void sched_online_group(struct task_group *tg, struct task_group *parent)
7908{
7909 unsigned long flags;
7910
7911 spin_lock_irqsave(&task_group_lock, flags);
7912 list_add_rcu(&tg->list, &task_groups);
7913
7914 WARN_ON(!parent); /* root should already exist */
7915
7916 tg->parent = parent;
7917 INIT_LIST_HEAD(&tg->children);
7918 list_add_rcu(&tg->siblings, &parent->children);
7919 spin_unlock_irqrestore(&task_group_lock, flags);
7920
7921 online_fair_sched_group(tg);
7922}
7923
7924/* rcu callback to free various structures associated with a task group */
7925static void sched_free_group_rcu(struct rcu_head *rhp)
7926{
7927 /* now it should be safe to free those cfs_rqs */
7928 sched_free_group(container_of(rhp, struct task_group, rcu));
7929}
7930
7931void sched_destroy_group(struct task_group *tg)
7932{
7933 /* wait for possible concurrent references to cfs_rqs complete */
7934 call_rcu(&tg->rcu, sched_free_group_rcu);
7935}
7936
7937void sched_offline_group(struct task_group *tg)
7938{
7939 unsigned long flags;
7940
7941 /* end participation in shares distribution */
7942 unregister_fair_sched_group(tg);
7943
7944 spin_lock_irqsave(&task_group_lock, flags);
7945 list_del_rcu(&tg->list);
7946 list_del_rcu(&tg->siblings);
7947 spin_unlock_irqrestore(&task_group_lock, flags);
7948}
7949
7950static void sched_change_group(struct task_struct *tsk, int type)
7951{
7952 struct task_group *tg;
7953
7954 /*
7955 * All callers are synchronized by task_rq_lock(); we do not use RCU
7956 * which is pointless here. Thus, we pass "true" to task_css_check()
7957 * to prevent lockdep warnings.
7958 */
7959 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
7960 struct task_group, css);
7961 tg = autogroup_task_group(tsk, tg);
7962 tsk->sched_task_group = tg;
7963
7964#ifdef CONFIG_FAIR_GROUP_SCHED
7965 if (tsk->sched_class->task_change_group)
7966 tsk->sched_class->task_change_group(tsk, type);
7967 else
7968#endif
7969 set_task_rq(tsk, task_cpu(tsk));
7970}
7971
7972/*
7973 * Change task's runqueue when it moves between groups.
7974 *
7975 * The caller of this function should have put the task in its new group by
7976 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
7977 * its new group.
7978 */
7979void sched_move_task(struct task_struct *tsk)
7980{
7981 int queued, running;
7982 struct rq_flags rf;
7983 struct rq *rq;
7984
7985 rq = task_rq_lock(tsk, &rf);
7986
7987 running = task_current(rq, tsk);
7988 queued = task_on_rq_queued(tsk);
7989
7990 if (queued)
7991 dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);
7992 if (unlikely(running))
7993 put_prev_task(rq, tsk);
7994
7995 sched_change_group(tsk, TASK_MOVE_GROUP);
7996
7997 if (queued)
7998 enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
7999 if (unlikely(running))
8000 set_curr_task(rq, tsk);
8001
8002 task_rq_unlock(rq, tsk, &rf);
8003}
8004#endif /* CONFIG_CGROUP_SCHED */
8005
8006#ifdef CONFIG_RT_GROUP_SCHED
8007/*
8008 * Ensure that the real time constraints are schedulable.
8009 */
8010static DEFINE_MUTEX(rt_constraints_mutex);
8011
8012/* Must be called with tasklist_lock held */
8013static inline int tg_has_rt_tasks(struct task_group *tg)
8014{
8015 struct task_struct *g, *p;
8016
8017 /*
8018 * Autogroups do not have RT tasks; see autogroup_create().
8019 */
8020 if (task_group_is_autogroup(tg))
8021 return 0;
8022
8023 for_each_process_thread(g, p) {
8024 if (rt_task(p) && task_group(p) == tg)
8025 return 1;
8026 }
8027
8028 return 0;
8029}
8030
8031struct rt_schedulable_data {
8032 struct task_group *tg;
8033 u64 rt_period;
8034 u64 rt_runtime;
8035};
8036
8037static int tg_rt_schedulable(struct task_group *tg, void *data)
8038{
8039 struct rt_schedulable_data *d = data;
8040 struct task_group *child;
8041 unsigned long total, sum = 0;
8042 u64 period, runtime;
8043
8044 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8045 runtime = tg->rt_bandwidth.rt_runtime;
8046
8047 if (tg == d->tg) {
8048 period = d->rt_period;
8049 runtime = d->rt_runtime;
8050 }
8051
8052 /*
8053 * Cannot have more runtime than the period.
8054 */
8055 if (runtime > period && runtime != RUNTIME_INF)
8056 return -EINVAL;
8057
8058 /*
8059 * Ensure we don't starve existing RT tasks.
8060 */
8061 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
8062 return -EBUSY;
8063
8064 total = to_ratio(period, runtime);
8065
8066 /*
8067 * Nobody can have more than the global setting allows.
8068 */
8069 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
8070 return -EINVAL;
8071
8072 /*
8073 * The sum of our children's runtime should not exceed our own.
8074 */
8075 list_for_each_entry_rcu(child, &tg->children, siblings) {
8076 period = ktime_to_ns(child->rt_bandwidth.rt_period);
8077 runtime = child->rt_bandwidth.rt_runtime;
8078
8079 if (child == d->tg) {
8080 period = d->rt_period;
8081 runtime = d->rt_runtime;
8082 }
8083
8084 sum += to_ratio(period, runtime);
8085 }
8086
8087 if (sum > total)
8088 return -EINVAL;
8089
8090 return 0;
8091}
8092
8093static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
8094{
8095 int ret;
8096
8097 struct rt_schedulable_data data = {
8098 .tg = tg,
8099 .rt_period = period,
8100 .rt_runtime = runtime,
8101 };
8102
8103 rcu_read_lock();
8104 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
8105 rcu_read_unlock();
8106
8107 return ret;
8108}
8109
8110static int tg_set_rt_bandwidth(struct task_group *tg,
8111 u64 rt_period, u64 rt_runtime)
8112{
8113 int i, err = 0;
8114
8115 /*
8116 * Disallowing the root group RT runtime is BAD, it would disallow the
8117 * kernel creating (and or operating) RT threads.
8118 */
8119 if (tg == &root_task_group && rt_runtime == 0)
8120 return -EINVAL;
8121
8122 /* No period doesn't make any sense. */
8123 if (rt_period == 0)
8124 return -EINVAL;
8125
8126 mutex_lock(&rt_constraints_mutex);
8127 read_lock(&tasklist_lock);
8128 err = __rt_schedulable(tg, rt_period, rt_runtime);
8129 if (err)
8130 goto unlock;
8131
8132 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
8133 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
8134 tg->rt_bandwidth.rt_runtime = rt_runtime;
8135
8136 for_each_possible_cpu(i) {
8137 struct rt_rq *rt_rq = tg->rt_rq[i];
8138
8139 raw_spin_lock(&rt_rq->rt_runtime_lock);
8140 rt_rq->rt_runtime = rt_runtime;
8141 raw_spin_unlock(&rt_rq->rt_runtime_lock);
8142 }
8143 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
8144unlock:
8145 read_unlock(&tasklist_lock);
8146 mutex_unlock(&rt_constraints_mutex);
8147
8148 return err;
8149}
8150
8151static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
8152{
8153 u64 rt_runtime, rt_period;
8154
8155 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8156 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
8157 if (rt_runtime_us < 0)
8158 rt_runtime = RUNTIME_INF;
8159
8160 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
8161}
8162
8163static long sched_group_rt_runtime(struct task_group *tg)
8164{
8165 u64 rt_runtime_us;
8166
8167 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
8168 return -1;
8169
8170 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
8171 do_div(rt_runtime_us, NSEC_PER_USEC);
8172 return rt_runtime_us;
8173}
8174
8175static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
8176{
8177 u64 rt_runtime, rt_period;
8178
8179 rt_period = rt_period_us * NSEC_PER_USEC;
8180 rt_runtime = tg->rt_bandwidth.rt_runtime;
8181
8182 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
8183}
8184
8185static long sched_group_rt_period(struct task_group *tg)
8186{
8187 u64 rt_period_us;
8188
8189 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
8190 do_div(rt_period_us, NSEC_PER_USEC);
8191 return rt_period_us;
8192}
8193#endif /* CONFIG_RT_GROUP_SCHED */
8194
8195#ifdef CONFIG_RT_GROUP_SCHED
8196static int sched_rt_global_constraints(void)
8197{
8198 int ret = 0;
8199
8200 mutex_lock(&rt_constraints_mutex);
8201 read_lock(&tasklist_lock);
8202 ret = __rt_schedulable(NULL, 0, 0);
8203 read_unlock(&tasklist_lock);
8204 mutex_unlock(&rt_constraints_mutex);
8205
8206 return ret;
8207}
8208
8209static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
8210{
8211 /* Don't accept realtime tasks when there is no way for them to run */
8212 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
8213 return 0;
8214
8215 return 1;
8216}
8217
8218#else /* !CONFIG_RT_GROUP_SCHED */
8219static int sched_rt_global_constraints(void)
8220{
8221 unsigned long flags;
8222 int i;
8223
8224 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
8225 for_each_possible_cpu(i) {
8226 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
8227
8228 raw_spin_lock(&rt_rq->rt_runtime_lock);
8229 rt_rq->rt_runtime = global_rt_runtime();
8230 raw_spin_unlock(&rt_rq->rt_runtime_lock);
8231 }
8232 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
8233
8234 return 0;
8235}
8236#endif /* CONFIG_RT_GROUP_SCHED */
8237
8238static int sched_dl_global_validate(void)
8239{
8240 u64 runtime = global_rt_runtime();
8241 u64 period = global_rt_period();
8242 u64 new_bw = to_ratio(period, runtime);
8243 struct dl_bw *dl_b;
8244 int cpu, ret = 0;
8245 unsigned long flags;
8246
8247 /*
8248 * Here we want to check the bandwidth not being set to some
8249 * value smaller than the currently allocated bandwidth in
8250 * any of the root_domains.
8251 *
8252 * FIXME: Cycling on all the CPUs is overdoing, but simpler than
8253 * cycling on root_domains... Discussion on different/better
8254 * solutions is welcome!
8255 */
8256 for_each_possible_cpu(cpu) {
8257 rcu_read_lock_sched();
8258 dl_b = dl_bw_of(cpu);
8259
8260 raw_spin_lock_irqsave(&dl_b->lock, flags);
8261 if (new_bw < dl_b->total_bw)
8262 ret = -EBUSY;
8263 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
8264
8265 rcu_read_unlock_sched();
8266
8267 if (ret)
8268 break;
8269 }
8270
8271 return ret;
8272}
8273
8274static void sched_dl_do_global(void)
8275{
8276 u64 new_bw = -1;
8277 struct dl_bw *dl_b;
8278 int cpu;
8279 unsigned long flags;
8280
8281 def_dl_bandwidth.dl_period = global_rt_period();
8282 def_dl_bandwidth.dl_runtime = global_rt_runtime();
8283
8284 if (global_rt_runtime() != RUNTIME_INF)
8285 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
8286
8287 /*
8288 * FIXME: As above...
8289 */
8290 for_each_possible_cpu(cpu) {
8291 rcu_read_lock_sched();
8292 dl_b = dl_bw_of(cpu);
8293
8294 raw_spin_lock_irqsave(&dl_b->lock, flags);
8295 dl_b->bw = new_bw;
8296 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
8297
8298 rcu_read_unlock_sched();
8299 }
8300}
8301
8302static int sched_rt_global_validate(void)
8303{
8304 if (sysctl_sched_rt_period <= 0)
8305 return -EINVAL;
8306
8307 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
8308 (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
8309 return -EINVAL;
8310
8311 return 0;
8312}
8313
8314static void sched_rt_do_global(void)
8315{
8316 def_rt_bandwidth.rt_runtime = global_rt_runtime();
8317 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
8318}
8319
8320int sched_rt_handler(struct ctl_table *table, int write,
8321 void __user *buffer, size_t *lenp,
8322 loff_t *ppos)
8323{
8324 int old_period, old_runtime;
8325 static DEFINE_MUTEX(mutex);
8326 int ret;
8327
8328 mutex_lock(&mutex);
8329 old_period = sysctl_sched_rt_period;
8330 old_runtime = sysctl_sched_rt_runtime;
8331
8332 ret = proc_dointvec(table, write, buffer, lenp, ppos);
8333
8334 if (!ret && write) {
8335 ret = sched_rt_global_validate();
8336 if (ret)
8337 goto undo;
8338
8339 ret = sched_dl_global_validate();
8340 if (ret)
8341 goto undo;
8342
8343 ret = sched_rt_global_constraints();
8344 if (ret)
8345 goto undo;
8346
8347 sched_rt_do_global();
8348 sched_dl_do_global();
8349 }
8350 if (0) {
8351undo:
8352 sysctl_sched_rt_period = old_period;
8353 sysctl_sched_rt_runtime = old_runtime;
8354 }
8355 mutex_unlock(&mutex);
8356
8357 return ret;
8358}
8359
8360int sched_rr_handler(struct ctl_table *table, int write,
8361 void __user *buffer, size_t *lenp,
8362 loff_t *ppos)
8363{
8364 int ret;
8365 static DEFINE_MUTEX(mutex);
8366
8367 mutex_lock(&mutex);
8368 ret = proc_dointvec(table, write, buffer, lenp, ppos);
8369 /* make sure that internally we keep jiffies */
8370 /* also, writing zero resets timeslice to default */
8371 if (!ret && write) {
8372 sched_rr_timeslice = sched_rr_timeslice <= 0 ?
8373 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
8374 }
8375 mutex_unlock(&mutex);
8376 return ret;
8377}
8378
8379#ifdef CONFIG_CGROUP_SCHED
8380
8381static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
8382{
8383 return css ? container_of(css, struct task_group, css) : NULL;
8384}
8385
8386static struct cgroup_subsys_state *
8387cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
8388{
8389 struct task_group *parent = css_tg(parent_css);
8390 struct task_group *tg;
8391
8392 if (!parent) {
8393 /* This is early initialization for the top cgroup */
8394 return &root_task_group.css;
8395 }
8396
8397 tg = sched_create_group(parent);
8398 if (IS_ERR(tg))
8399 return ERR_PTR(-ENOMEM);
8400
8401 sched_online_group(tg, parent);
8402
8403 return &tg->css;
8404}
8405
8406static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
8407{
8408 struct task_group *tg = css_tg(css);
8409
8410 sched_offline_group(tg);
8411}
8412
8413static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
8414{
8415 struct task_group *tg = css_tg(css);
8416
8417 /*
8418 * Relies on the RCU grace period between css_released() and this.
8419 */
8420 sched_free_group(tg);
8421}
8422
8423/*
8424 * This is called before wake_up_new_task(), therefore we really only
8425 * have to set its group bits, all the other stuff does not apply.
8426 */
8427static void cpu_cgroup_fork(struct task_struct *task)
8428{
8429 struct rq_flags rf;
8430 struct rq *rq;
8431
8432 rq = task_rq_lock(task, &rf);
8433
8434 sched_change_group(task, TASK_SET_GROUP);
8435
8436 task_rq_unlock(rq, task, &rf);
8437}
8438
8439static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
8440{
8441 struct task_struct *task;
8442 struct cgroup_subsys_state *css;
8443 int ret = 0;
8444
8445 cgroup_taskset_for_each(task, css, tset) {
8446#ifdef CONFIG_RT_GROUP_SCHED
8447 if (!sched_rt_can_attach(css_tg(css), task))
8448 return -EINVAL;
8449#else
8450 /* We don't support RT-tasks being in separate groups */
8451 if (task->sched_class != &fair_sched_class)
8452 return -EINVAL;
8453#endif
8454 /*
8455 * Serialize against wake_up_new_task() such that if its
8456 * running, we're sure to observe its full state.
8457 */
8458 raw_spin_lock_irq(&task->pi_lock);
8459 /*
8460 * Avoid calling sched_move_task() before wake_up_new_task()
8461 * has happened. This would lead to problems with PELT, due to
8462 * move wanting to detach+attach while we're not attached yet.
8463 */
8464 if (task->state == TASK_NEW)
8465 ret = -EINVAL;
8466 raw_spin_unlock_irq(&task->pi_lock);
8467
8468 if (ret)
8469 break;
8470 }
8471 return ret;
8472}
8473
8474static void cpu_cgroup_attach(struct cgroup_taskset *tset)
8475{
8476 struct task_struct *task;
8477 struct cgroup_subsys_state *css;
8478
8479 cgroup_taskset_for_each(task, css, tset)
8480 sched_move_task(task);
8481}
8482
8483#ifdef CONFIG_FAIR_GROUP_SCHED
8484static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
8485 struct cftype *cftype, u64 shareval)
8486{
8487 return sched_group_set_shares(css_tg(css), scale_load(shareval));
8488}
8489
8490static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
8491 struct cftype *cft)
8492{
8493 struct task_group *tg = css_tg(css);
8494
8495 return (u64) scale_load_down(tg->shares);
8496}
8497
8498#ifdef CONFIG_CFS_BANDWIDTH
8499static DEFINE_MUTEX(cfs_constraints_mutex);
8500
8501const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
8502const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
8503
8504static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8505
8506static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8507{
8508 int i, ret = 0, runtime_enabled, runtime_was_enabled;
8509 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8510
8511 if (tg == &root_task_group)
8512 return -EINVAL;
8513
8514 /*
8515 * Ensure we have at some amount of bandwidth every period. This is
8516 * to prevent reaching a state of large arrears when throttled via
8517 * entity_tick() resulting in prolonged exit starvation.
8518 */
8519 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
8520 return -EINVAL;
8521
8522 /*
8523 * Likewise, bound things on the otherside by preventing insane quota
8524 * periods. This also allows us to normalize in computing quota
8525 * feasibility.
8526 */
8527 if (period > max_cfs_quota_period)
8528 return -EINVAL;
8529
8530 /*
8531 * Prevent race between setting of cfs_rq->runtime_enabled and
8532 * unthrottle_offline_cfs_rqs().
8533 */
8534 get_online_cpus();
8535 mutex_lock(&cfs_constraints_mutex);
8536 ret = __cfs_schedulable(tg, period, quota);
8537 if (ret)
8538 goto out_unlock;
8539
8540 runtime_enabled = quota != RUNTIME_INF;
8541 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
8542 /*
8543 * If we need to toggle cfs_bandwidth_used, off->on must occur
8544 * before making related changes, and on->off must occur afterwards
8545 */
8546 if (runtime_enabled && !runtime_was_enabled)
8547 cfs_bandwidth_usage_inc();
8548 raw_spin_lock_irq(&cfs_b->lock);
8549 cfs_b->period = ns_to_ktime(period);
8550 cfs_b->quota = quota;
8551
8552 __refill_cfs_bandwidth_runtime(cfs_b);
8553 /* restart the period timer (if active) to handle new period expiry */
8554 if (runtime_enabled)
8555 start_cfs_bandwidth(cfs_b);
8556 raw_spin_unlock_irq(&cfs_b->lock);
8557
8558 for_each_online_cpu(i) {
8559 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
8560 struct rq *rq = cfs_rq->rq;
8561
8562 raw_spin_lock_irq(&rq->lock);
8563 cfs_rq->runtime_enabled = runtime_enabled;
8564 cfs_rq->runtime_remaining = 0;
8565
8566 if (cfs_rq->throttled)
8567 unthrottle_cfs_rq(cfs_rq);
8568 raw_spin_unlock_irq(&rq->lock);
8569 }
8570 if (runtime_was_enabled && !runtime_enabled)
8571 cfs_bandwidth_usage_dec();
8572out_unlock:
8573 mutex_unlock(&cfs_constraints_mutex);
8574 put_online_cpus();
8575
8576 return ret;
8577}
8578
8579int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
8580{
8581 u64 quota, period;
8582
8583 period = ktime_to_ns(tg->cfs_bandwidth.period);
8584 if (cfs_quota_us < 0)
8585 quota = RUNTIME_INF;
8586 else
8587 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
8588
8589 return tg_set_cfs_bandwidth(tg, period, quota);
8590}
8591
8592long tg_get_cfs_quota(struct task_group *tg)
8593{
8594 u64 quota_us;
8595
8596 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
8597 return -1;
8598
8599 quota_us = tg->cfs_bandwidth.quota;
8600 do_div(quota_us, NSEC_PER_USEC);
8601
8602 return quota_us;
8603}
8604
8605int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
8606{
8607 u64 quota, period;
8608
8609 period = (u64)cfs_period_us * NSEC_PER_USEC;
8610 quota = tg->cfs_bandwidth.quota;
8611
8612 return tg_set_cfs_bandwidth(tg, period, quota);
8613}
8614
8615long tg_get_cfs_period(struct task_group *tg)
8616{
8617 u64 cfs_period_us;
8618
8619 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
8620 do_div(cfs_period_us, NSEC_PER_USEC);
8621
8622 return cfs_period_us;
8623}
8624
8625static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
8626 struct cftype *cft)
8627{
8628 return tg_get_cfs_quota(css_tg(css));
8629}
8630
8631static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
8632 struct cftype *cftype, s64 cfs_quota_us)
8633{
8634 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
8635}
8636
8637static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
8638 struct cftype *cft)
8639{
8640 return tg_get_cfs_period(css_tg(css));
8641}
8642
8643static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
8644 struct cftype *cftype, u64 cfs_period_us)
8645{
8646 return tg_set_cfs_period(css_tg(css), cfs_period_us);
8647}
8648
8649struct cfs_schedulable_data {
8650 struct task_group *tg;
8651 u64 period, quota;
8652};
8653
8654/*
8655 * normalize group quota/period to be quota/max_period
8656 * note: units are usecs
8657 */
8658static u64 normalize_cfs_quota(struct task_group *tg,
8659 struct cfs_schedulable_data *d)
8660{
8661 u64 quota, period;
8662
8663 if (tg == d->tg) {
8664 period = d->period;
8665 quota = d->quota;
8666 } else {
8667 period = tg_get_cfs_period(tg);
8668 quota = tg_get_cfs_quota(tg);
8669 }
8670
8671 /* note: these should typically be equivalent */
8672 if (quota == RUNTIME_INF || quota == -1)
8673 return RUNTIME_INF;
8674
8675 return to_ratio(period, quota);
8676}
8677
8678static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8679{
8680 struct cfs_schedulable_data *d = data;
8681 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8682 s64 quota = 0, parent_quota = -1;
8683
8684 if (!tg->parent) {
8685 quota = RUNTIME_INF;
8686 } else {
8687 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
8688
8689 quota = normalize_cfs_quota(tg, d);
8690 parent_quota = parent_b->hierarchical_quota;
8691
8692 /*
8693 * ensure max(child_quota) <= parent_quota, inherit when no
8694 * limit is set
8695 */
8696 if (quota == RUNTIME_INF)
8697 quota = parent_quota;
8698 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
8699 return -EINVAL;
8700 }
8701 cfs_b->hierarchical_quota = quota;
8702
8703 return 0;
8704}
8705
8706static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
8707{
8708 int ret;
8709 struct cfs_schedulable_data data = {
8710 .tg = tg,
8711 .period = period,
8712 .quota = quota,
8713 };
8714
8715 if (quota != RUNTIME_INF) {
8716 do_div(data.period, NSEC_PER_USEC);
8717 do_div(data.quota, NSEC_PER_USEC);
8718 }
8719
8720 rcu_read_lock();
8721 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
8722 rcu_read_unlock();
8723
8724 return ret;
8725}
8726
8727static int cpu_stats_show(struct seq_file *sf, void *v)
8728{
8729 struct task_group *tg = css_tg(seq_css(sf));
8730 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8731
8732 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
8733 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
8734 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
8735
8736 return 0;
8737}
8738#endif /* CONFIG_CFS_BANDWIDTH */
8739#endif /* CONFIG_FAIR_GROUP_SCHED */
8740
8741#ifdef CONFIG_RT_GROUP_SCHED
8742static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
8743 struct cftype *cft, s64 val)
8744{
8745 return sched_group_set_rt_runtime(css_tg(css), val);
8746}
8747
8748static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
8749 struct cftype *cft)
8750{
8751 return sched_group_rt_runtime(css_tg(css));
8752}
8753
8754static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
8755 struct cftype *cftype, u64 rt_period_us)
8756{
8757 return sched_group_set_rt_period(css_tg(css), rt_period_us);
8758}
8759
8760static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
8761 struct cftype *cft)
8762{
8763 return sched_group_rt_period(css_tg(css));
8764}
8765#endif /* CONFIG_RT_GROUP_SCHED */
8766
8767static struct cftype cpu_files[] = {
8768#ifdef CONFIG_FAIR_GROUP_SCHED
8769 {
8770 .name = "shares",
8771 .read_u64 = cpu_shares_read_u64,
8772 .write_u64 = cpu_shares_write_u64,
8773 },
8774#endif
8775#ifdef CONFIG_CFS_BANDWIDTH
8776 {
8777 .name = "cfs_quota_us",
8778 .read_s64 = cpu_cfs_quota_read_s64,
8779 .write_s64 = cpu_cfs_quota_write_s64,
8780 },
8781 {
8782 .name = "cfs_period_us",
8783 .read_u64 = cpu_cfs_period_read_u64,
8784 .write_u64 = cpu_cfs_period_write_u64,
8785 },
8786 {
8787 .name = "stat",
8788 .seq_show = cpu_stats_show,
8789 },
8790#endif
8791#ifdef CONFIG_RT_GROUP_SCHED
8792 {
8793 .name = "rt_runtime_us",
8794 .read_s64 = cpu_rt_runtime_read,
8795 .write_s64 = cpu_rt_runtime_write,
8796 },
8797 {
8798 .name = "rt_period_us",
8799 .read_u64 = cpu_rt_period_read_uint,
8800 .write_u64 = cpu_rt_period_write_uint,
8801 },
8802#endif
8803 { } /* terminate */
8804};
8805
8806struct cgroup_subsys cpu_cgrp_subsys = {
8807 .css_alloc = cpu_cgroup_css_alloc,
8808 .css_released = cpu_cgroup_css_released,
8809 .css_free = cpu_cgroup_css_free,
8810 .fork = cpu_cgroup_fork,
8811 .can_attach = cpu_cgroup_can_attach,
8812 .attach = cpu_cgroup_attach,
8813 .legacy_cftypes = cpu_files,
8814 .early_init = true,
8815};
8816
8817#endif /* CONFIG_CGROUP_SCHED */
8818
8819void dump_cpu_task(int cpu)
8820{
8821 pr_info("Task dump for CPU %d:\n", cpu);
8822 sched_show_task(cpu_curr(cpu));
8823}
8824
8825/*
8826 * Nice levels are multiplicative, with a gentle 10% change for every
8827 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
8828 * nice 1, it will get ~10% less CPU time than another CPU-bound task
8829 * that remained on nice 0.
8830 *
8831 * The "10% effect" is relative and cumulative: from _any_ nice level,
8832 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
8833 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
8834 * If a task goes up by ~10% and another task goes down by ~10% then
8835 * the relative distance between them is ~25%.)
8836 */
8837const int sched_prio_to_weight[40] = {
8838 /* -20 */ 88761, 71755, 56483, 46273, 36291,
8839 /* -15 */ 29154, 23254, 18705, 14949, 11916,
8840 /* -10 */ 9548, 7620, 6100, 4904, 3906,
8841 /* -5 */ 3121, 2501, 1991, 1586, 1277,
8842 /* 0 */ 1024, 820, 655, 526, 423,
8843 /* 5 */ 335, 272, 215, 172, 137,
8844 /* 10 */ 110, 87, 70, 56, 45,
8845 /* 15 */ 36, 29, 23, 18, 15,
8846};
8847
8848/*
8849 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
8850 *
8851 * In cases where the weight does not change often, we can use the
8852 * precalculated inverse to speed up arithmetics by turning divisions
8853 * into multiplications:
8854 */
8855const u32 sched_prio_to_wmult[40] = {
8856 /* -20 */ 48388, 59856, 76040, 92818, 118348,
8857 /* -15 */ 147320, 184698, 229616, 287308, 360437,
8858 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
8859 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
8860 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
8861 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
8862 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
8863 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
8864};