// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

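/*
 * Return the per-CPU tick_sched structure of @cpu. Purely an accessor,
 * no serialization of any kind is provided here.
 */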
struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
 * The time, when the last jiffy update happened. Protected by jiffies_lock.
 */
static ktime_t last_jiffies_update;

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding jiffies_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta < tick_period)
		return;

	/* Reevaluate with jiffies_lock held */
	write_seqlock(&jiffies_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta >= tick_period) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta >= tick_period)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
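		/*
		 * The pre-increment accounts for the single tick_period
		 * which was subtracted from delta above; ticks itself only
		 * covers the additional whole periods from the slow path.
		 */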
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	} else {
		write_sequnlock(&jiffies_lock);
		return;
	}
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}

/*
 * Initialize and retrieve the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&jiffies_lock);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&jiffies_lock);
	return period;
}

static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the CPU in charge went
	 * into a long sleep. If two CPUs happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 *
	 * If nohz_full is enabled, this should not happen because the
	 * tick_do_timer_cpu never relinquishes.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
#ifdef CONFIG_NO_HZ_FULL
		WARN_ON(tick_nohz_full_running);
#endif
		tick_do_timer_cpu = cpu;
	}
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	if (ts->inidle)
		ts->got_idle_tick = 1;
}

static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog_sched();
		if (is_idle_task(current))
			ts->idle_jiffies++;
		/*
		 * In case the current tick fired too early past its expected
		 * expiration, make sure we don't bypass the next clock reprogramming
		 * to the same deadline.
		 */
		ts->next_tick = 0;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}
#endif

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
bool tick_nohz_full_running;
static atomic_t tick_dep_mask;

static bool check_tick_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	if (val & TICK_DEP_MASK_POSIX_TIMER) {
		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
		return true;
	}

	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
		return true;
	}

	if (val & TICK_DEP_MASK_SCHED) {
		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
		return true;
	}

	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
		return true;
	}

	return false;
}

static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
	lockdep_assert_irqs_disabled();

	if (unlikely(!cpu_online(cpu)))
		return false;

	if (check_tick_dependency(&tick_dep_mask))
		return false;

	if (check_tick_dependency(&ts->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->signal->tick_dep_mask))
		return false;

	return true;
}

static void nohz_full_kick_func(struct irq_work *work)
{
	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
	.func = nohz_full_kick_func,
};

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
	int cpu;

	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

static void tick_nohz_dep_set_all(atomic_t *dep,
				  enum tick_dep_bits bit)
{
	int prev;

	prev = atomic_fetch_or(BIT(bit), dep);
	if (!prev)
		tick_nohz_full_kick_all();
}

/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * by unstable clock.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&tick_dep_mask, bit);
}

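/*
 * Clear a global tick dependency, the counterpart of tick_nohz_dep_set().
 * No kick is needed here: the tick re-evaluates its dependencies on its
 * own the next time it fires or an interrupt exits.
 */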
void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tick_dep_mask);
}

/*
 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 * manage events throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	int prev;
	struct tick_sched *ts;

	ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
	if (!prev) {
		preempt_disable();
		/* Perf needs local kick that is NMI safe */
		if (cpu == smp_processor_id()) {
			tick_nohz_full_kick();
		} else {
			/* Remote irq work not NMI-safe */
			if (!WARN_ON_ONCE(in_nmi()))
				tick_nohz_full_kick_cpu(cpu);
		}
		preempt_enable();
	}
}

void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}

/*
 * Set a per-task tick dependency. Posix CPU timers need this in order to elapse
 * per task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	/*
	 * We could optimize this with just kicking the target running the task
	 * if that noise matters for nohz full users.
	 */
	tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
}

void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}

/*
 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
 * per process timers.
 */
void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&sig->tick_dep_mask, bit);
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}

/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
	unsigned long flags;
	struct tick_sched *ts;

	local_irq_save(flags);

	if (!tick_nohz_full_cpu(smp_processor_id()))
		goto out;

	ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		if (atomic_read(&current->tick_dep_mask) ||
		    atomic_read(&current->signal->tick_dep_mask))
			tick_nohz_full_kick();
	}
out:
	local_irq_restore(flags);
}

/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	cpumask_copy(tick_nohz_full_mask, cpumask);
	tick_nohz_full_running = true;
}

static int tick_nohz_cpu_down(unsigned int cpu)
{
	/*
	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
	 * CPUs. It must remain online when nohz full is enabled.
	 */
	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return -EBUSY;
	return 0;
}

void __init tick_nohz_init(void)
{
	int cpu, ret;

	if (!tick_nohz_full_running)
		return;

	/*
	 * Full dynticks uses irq work to drive the tick rescheduling on safe
	 * locking contexts. But then we need irq work to raise its own
	 * interrupts to avoid circular dependency on the tick
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
	    !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
		cpu = smp_processor_id();

		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
			pr_warn("NO_HZ: Clearing %d from nohz_full range "
				"for timekeeping\n", cpu);
			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
		}
	}

	for_each_cpu(cpu, tick_nohz_full_mask)
		context_tracking_cpu_set(cpu);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"kernel/nohz:predown", NULL,
					tick_nohz_cpu_down);
	WARN_ON(ret < 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));
}
#endif

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled ?
 */
bool tick_nohz_enabled __read_mostly = true;
unsigned long tick_nohz_active __read_mostly;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);

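/**
 * tick_nohz_tick_stopped - check whether the tick is stopped on the current CPU
 */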
bool tick_nohz_tick_stopped(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->tick_stopped;
}

bool tick_nohz_tick_stopped_cpu(int cpu)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	return ts->tick_stopped;
}

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog_sched();
}

/*
 * Updates the per-CPU time idle statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);
}

static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event();
}

static void tick_nohz_start_idle(struct tick_sched *ts)
{
	ts->idle_entrytime = ktime_get();
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
}

/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
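 *
 * A rough usage sketch; the caller and do_something() are hypothetical,
 * only get_cpu_idle_time_us() itself is real:
 *
 *	u64 last_update, idle_us;
 *
 *	idle_us = get_cpu_idle_time_us(cpu, &last_update);
 *	if (idle_us != (u64)-1)
 *		do_something(idle_us, last_update);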
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

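/*
 * Reprogram the next tick from the saved last_tick value: in highres mode
 * via the sched hrtimer, in lowres mode by programming the clockevent
 * device directly.
 */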
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, tick_period);

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	}

	/*
	 * Reset to make sure next tick stop doesn't get fooled by past
	 * cached clock deadline.
	 */
	ts->next_tick = 0;
}

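/* Check whether the timer softirq is pending on this CPU */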
static inline bool local_timer_softirq_pending(void)
{
	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}

static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
	u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
	unsigned long basejiff;
	unsigned int seq;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&jiffies_lock);
		basemono = last_jiffies_update;
		basejiff = jiffies;
	} while (read_seqretry(&jiffies_lock, seq));
	ts->last_jiffies = basejiff;
	ts->timer_expires_base = basemono;

	/*
	 * Keep the periodic tick, when RCU, architecture or irq_work
	 * requests it.
	 * Aside of that check whether the local timer softirq is
	 * pending. If so it's a bad idea to call get_next_timer_interrupt()
	 * because there is an already expired timer, so it will request
	 * immediate expiry, which rearms the hardware timer with a
	 * minimal delta which brings us back to this place
	 * immediately. Lather, rinse and repeat...
	 */
	if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
		next_tick = basemono + TICK_NSEC;
	} else {
		/*
		 * Get the next pending timer. If high resolution
		 * timers are enabled this only takes the timer wheel
		 * timers into account. If high resolution timers are
		 * disabled this also looks at the next expiring
		 * hrtimer.
		 */
		next_tmr = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tmr;
		/* Take the next rcu event into account */
		next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
	}

	/*
	 * If the tick is due in the next period, keep it ticking or
	 * force prod the timer.
	 */
	delta = next_tick - basemono;
	if (delta <= (u64)TICK_NSEC) {
		/*
		 * Tell the timer code that the base is not idle, i.e. undo
		 * the effect of get_next_timer_interrupt():
		 */
		timer_clear_idle();
		/*
		 * We've not stopped the tick yet, and there's a timer in the
		 * next period, so no point in stopping it either, bail.
		 */
		if (!ts->tick_stopped) {
			ts->timer_expires = 0;
			goto out;
		}
	}

	/*
	 * If this CPU is the one which had the do_timer() duty last, we limit
	 * the sleep time to the timekeeping max_deferment value.
	 * Otherwise we can sleep as long as we want.
	 */
	delta = timekeeping_max_deferment();
	if (cpu != tick_do_timer_cpu &&
	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
		delta = KTIME_MAX;

	/* Calculate the next expiry time */
	if (delta < (KTIME_MAX - basemono))
		expires = basemono + delta;
	else
		expires = KTIME_MAX;

	ts->timer_expires = min_t(u64, expires, next_tick);

out:
	return ts->timer_expires;
}

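/*
 * Stop the tick. Expects ts->timer_expires and ts->timer_expires_base to
 * have been set up by a preceding call to tick_nohz_next_event().
 */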
static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	u64 basemono = ts->timer_expires_base;
	u64 expires = ts->timer_expires;
	ktime_t tick = expires;

	/* Make sure we won't be trying to stop it twice in a row. */
	ts->timer_expires_base = 0;

	/*
	 * If this CPU is the one which updates jiffies, then give up
	 * the assignment and let it be taken by the CPU which runs
	 * the tick timer next, which might be this CPU as well. If we
	 * don't drop this here the jiffies might be stale and
	 * do_timer() never invoked. Keep track of the fact that it
	 * was the one which had the do_timer() duty last.
	 */
	if (cpu == tick_do_timer_cpu) {
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		ts->do_timer_last = 1;
	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
		ts->do_timer_last = 0;
	}

	/* Skip reprogramming the event if it hasn't changed */
	if (ts->tick_stopped && (expires == ts->next_tick)) {
		/* Sanity check: make sure clockevent is actually programmed */
		if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
			return;

		WARN_ON_ONCE(1);
		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
			    basemono, ts->next_tick, dev->next_event,
			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
	}

	/*
	 * nohz_stop_sched_tick can be called several times before
	 * the nohz_restart_sched_tick is called. This happens when
	 * interrupts arrive which do not cause a reschedule. In the
	 * first call we save the current tick time, so we can restart
	 * the scheduler tick in nohz_restart_sched_tick.
	 */
	if (!ts->tick_stopped) {
		calc_load_nohz_start();
		quiet_vmstat();

		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
		ts->tick_stopped = 1;
		trace_tick_stop(1, TICK_DEP_MASK_NONE);
	}

	ts->next_tick = tick;

	/*
	 * If the expiration time == KTIME_MAX, then we simply stop
	 * the tick timer.
	 */
	if (unlikely(expires == KTIME_MAX)) {
		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
			hrtimer_cancel(&ts->sched_timer);
		return;
	}

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start(&ts->sched_timer, tick,
			      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		hrtimer_set_expires(&ts->sched_timer, tick);
		tick_program_event(tick, 1);
	}
}

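/*
 * Keep the tick running and undo the expiry bookkeeping that
 * tick_nohz_next_event() set up.
 */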
static void tick_nohz_retain_tick(struct tick_sched *ts)
{
	ts->timer_expires_base = 0;
}

#ifdef CONFIG_NO_HZ_FULL
static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
{
	if (tick_nohz_next_event(ts, cpu))
		tick_nohz_stop_tick(ts, cpu);
	else
		tick_nohz_retain_tick(ts);
}
#endif /* CONFIG_NO_HZ_FULL */

static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);

	/*
	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
	 * the clock forward checks in the enqueue path:
	 */
	timer_clear_idle();

	calc_load_nohz_stop();
	touch_softlockup_watchdog_sched();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);
}

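/*
 * Re-evaluate the tick state of a full dynticks CPU: stop the tick if all
 * dependencies allow it, restart it if a dependency reappeared while it
 * was stopped.
 */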
static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
#ifdef CONFIG_NO_HZ_FULL
	int cpu = smp_processor_id();

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
		return;

	if (can_stop_full_tick(cpu, ts))
		tick_nohz_stop_sched_tick(ts, cpu);
	else if (ts->tick_stopped)
		tick_nohz_restart_sched_tick(ts, ktime_get());
#endif
}

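/*
 * Check everything that prevents stopping the tick on idle entry: offline
 * CPU, inactive nohz mode, pending reschedule, pending softirqs and the
 * timekeeping constraints of nohz_full.
 */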
static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this CPU is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the CPU which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		/*
		 * Make sure the CPU doesn't get fooled by obsolete tick
		 * deadline if it comes back online later.
		 */
		ts->next_tick = 0;
		return false;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return false;

	if (need_resched())
		return false;

	if (unlikely(local_softirq_pending())) {
		static int ratelimit;

		if (ratelimit < 10 &&
		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
			pr_warn("NOHZ: local_softirq_pending %02x\n",
				(unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return false;
	}

	if (tick_nohz_full_enabled()) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around
		 */
		if (tick_do_timer_cpu == cpu)
			return false;
		/*
		 * Boot safety: make sure the timekeeping duty has been
		 * assigned before entering dyntick-idle mode,
		 * tick_do_timer_cpu is TICK_DO_TIMER_BOOT
		 */
		if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT))
			return false;

		/* Should not happen for nohz-full */
		if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
			return false;
	}

	return true;
}

static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
{
	ktime_t expires;
	int cpu = smp_processor_id();

	/*
	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
	 * tick timer expiration time is known already.
	 */
	if (ts->timer_expires_base)
		expires = ts->timer_expires;
	else if (can_stop_idle_tick(cpu, ts))
		expires = tick_nohz_next_event(ts, cpu);
	else
		return;

	ts->idle_calls++;

	if (expires > 0LL) {
		int was_stopped = ts->tick_stopped;

		tick_nohz_stop_tick(ts, cpu);

		ts->idle_sleeps++;
		ts->idle_expires = expires;

		if (!was_stopped && ts->tick_stopped) {
			ts->idle_jiffies = ts->last_jiffies;
			nohz_balance_enter_idle(cpu);
		}
	} else {
		tick_nohz_retain_tick(ts);
	}
}

/**
 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 */
void tick_nohz_idle_stop_tick(void)
{
	__tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched));
}

void tick_nohz_idle_retain_tick(void)
{
	tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
	/*
	 * Undo the effect of get_next_timer_interrupt() called from
	 * tick_nohz_next_event().
	 */
	timer_clear_idle();
}

/**
 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
 *
 * Called when we start the idle loop.
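 *
 * A simplified sketch of the call sequence expected from the idle loop
 * (cf. do_idle() in kernel/sched/idle.c; error handling and cpuidle
 * details elided):
 *
 *	tick_nohz_idle_enter();
 *	while (!need_resched()) {
 *		tick_nohz_idle_stop_tick();
 *		... enter a low power state ...
 *	}
 *	tick_nohz_idle_exit();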
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	lockdep_assert_irqs_enabled();

	local_irq_disable();

	ts = this_cpu_ptr(&tick_cpu_sched);

	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 1;
	tick_nohz_start_idle(ts);

	local_irq_enable();
}

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->inidle)
		tick_nohz_start_idle(ts);
	else
		tick_nohz_full_update_tick(ts);
}

/**
 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
 */
bool tick_nohz_idle_got_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->got_idle_tick) {
		ts->got_idle_tick = 0;
		return true;
	}
	return false;
}

/**
 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
 * or the tick, whichever expires first. Note that, if the tick has been
 * stopped, it returns the next hrtimer.
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_next_hrtimer(void)
{
	return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
}

/**
 * tick_nohz_get_sleep_length - return the expected length of the current sleep
 * @delta_next: duration until the next event if the tick cannot be stopped
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	int cpu = smp_processor_id();
	/*
	 * The idle entry time is expected to be a sufficient approximation of
	 * the current time at this point.
	 */
	ktime_t now = ts->idle_entrytime;
	ktime_t next_event;

	WARN_ON_ONCE(!ts->inidle);

	*delta_next = ktime_sub(dev->next_event, now);

	if (!can_stop_idle_tick(cpu, ts))
		return *delta_next;

	next_event = tick_nohz_next_event(ts, cpu);
	if (!next_event)
		return *delta_next;

	/*
	 * If the next highres timer to expire is earlier than next_event, the
	 * idle governor needs to know that.
	 */
	next_event = min_t(u64, next_event,
			   hrtimer_next_event_without(&ts->sched_timer));

	return ktime_sub(next_event, now);
}

/**
 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
 * for a particular CPU.
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
{
	struct tick_sched *ts = tick_get_tick_sched(cpu);

	return ts->idle_calls;
}

/**
 * tick_nohz_get_idle_calls - return the current idle calls counter value
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->idle_calls;
}

static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	unsigned long ticks;

	if (vtime_accounting_cpu_enabled())
		return;
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it does only a single tick of accounting.
	 * Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif
}

static void __tick_nohz_idle_restart_tick(struct tick_sched *ts, ktime_t now)
{
	tick_nohz_restart_sched_tick(ts, now);
	tick_nohz_account_idle_ticks(ts);
}

void tick_nohz_idle_restart_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped)
		__tick_nohz_idle_restart_tick(ts, ktime_get());
}

/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle.
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	bool idle_active, tick_stopped;
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);
	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 0;
	idle_active = ts->idle_active;
	tick_stopped = ts->tick_stopped;

	if (idle_active || tick_stopped)
		now = ktime_get();

	if (idle_active)
		tick_nohz_stop_idle(ts, now);

	if (tick_stopped)
		__tick_nohz_idle_restart_tick(ts, now);

	local_irq_enable();
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	dev->next_event = KTIME_MAX;

	tick_sched_do_timer(ts, now);
	tick_sched_handle(ts, regs);

	/* No need to reprogram if we are running tickless */
	if (unlikely(ts->tick_stopped))
		return;

	hrtimer_forward(&ts->sched_timer, now, tick_period);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
{
	if (!tick_nohz_enabled)
		return;
	ts->nohz_mode = mode;
	/* One update is enough */
	if (!test_and_set_bit(0, &tick_nohz_active))
		timers_update_nohz();
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	if (tick_switch_to_oneshot(tick_nohz_handler))
		return;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	/* Get the next period */
	next = tick_init_jiffy_update();

	hrtimer_set_expires(&ts->sched_timer, next);
	hrtimer_forward_now(&ts->sched_timer, tick_period);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}

static inline void tick_nohz_irq_enter(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);
	if (ts->tick_stopped)
		tick_nohz_update_jiffies(now);
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }
static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_irq_enter(void)
{
	tick_check_oneshot_broadcast_this_cpu();
	tick_nohz_irq_enter();
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(ts, now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs)
		tick_sched_handle(ts, regs);
	else
		ts->next_tick = 0;

	/* No need to reprogram if we are in idle or full dynticks mode */
	if (unlikely(ts->tick_stopped))
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

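/*
 * Per-boot tick skew, set via the "skew_tick=" early parameter below.
 * Skewing the per-CPU ticks spreads them out in time and avoids all CPUs
 * contending on jiffies_lock at the same instant (see the offset
 * computation in tick_setup_sched_timer()).
 */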
static int sched_skew_tick;

static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per-CPU) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
	if (sched_skew_tick) {
		u64 offset = ktime_to_ns(tick_period) >> 1;
		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	hrtimer_forward(&ts->sched_timer, now, tick_period);
	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	memset(ts, 0, sizeof(*ts));
}
#endif

/*
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/*
 * Check if a change happened which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime). Called with interrupts disabled.
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}
1/*
2 * linux/kernel/time/tick-sched.c
3 *
4 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
5 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
6 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
7 *
8 * No idle tick implementation for low and high resolution timers
9 *
10 * Started by: Thomas Gleixner and Ingo Molnar
11 *
12 * Distribute under GPLv2.
13 */
14#include <linux/cpu.h>
15#include <linux/err.h>
16#include <linux/hrtimer.h>
17#include <linux/interrupt.h>
18#include <linux/kernel_stat.h>
19#include <linux/percpu.h>
20#include <linux/nmi.h>
21#include <linux/profile.h>
22#include <linux/sched/signal.h>
23#include <linux/sched/clock.h>
24#include <linux/sched/stat.h>
25#include <linux/sched/nohz.h>
26#include <linux/module.h>
27#include <linux/irq_work.h>
28#include <linux/posix-timers.h>
29#include <linux/context_tracking.h>
30#include <linux/mm.h>
31
32#include <asm/irq_regs.h>
33
34#include "tick-internal.h"
35
36#include <trace/events/timer.h>
37
38/*
39 * Per-CPU nohz control structure
40 */
41static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
42
43struct tick_sched *tick_get_tick_sched(int cpu)
44{
45 return &per_cpu(tick_cpu_sched, cpu);
46}
47
48#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
49/*
50 * The time, when the last jiffy update happened. Protected by jiffies_lock.
51 */
52static ktime_t last_jiffies_update;
53
54/*
55 * Must be called with interrupts disabled !
56 */
57static void tick_do_update_jiffies64(ktime_t now)
58{
59 unsigned long ticks = 0;
60 ktime_t delta;
61
62 /*
63 * Do a quick check without holding jiffies_lock:
64 */
65 delta = ktime_sub(now, last_jiffies_update);
66 if (delta < tick_period)
67 return;
68
69 /* Reevaluate with jiffies_lock held */
70 write_seqlock(&jiffies_lock);
71
72 delta = ktime_sub(now, last_jiffies_update);
73 if (delta >= tick_period) {
74
75 delta = ktime_sub(delta, tick_period);
76 last_jiffies_update = ktime_add(last_jiffies_update,
77 tick_period);
78
79 /* Slow path for long timeouts */
80 if (unlikely(delta >= tick_period)) {
81 s64 incr = ktime_to_ns(tick_period);
82
83 ticks = ktime_divns(delta, incr);
84
85 last_jiffies_update = ktime_add_ns(last_jiffies_update,
86 incr * ticks);
87 }
88 do_timer(++ticks);
89
90 /* Keep the tick_next_period variable up to date */
91 tick_next_period = ktime_add(last_jiffies_update, tick_period);
92 } else {
93 write_sequnlock(&jiffies_lock);
94 return;
95 }
96 write_sequnlock(&jiffies_lock);
97 update_wall_time();
98}
99
100/*
101 * Initialize and return retrieve the jiffies update.
102 */
103static ktime_t tick_init_jiffy_update(void)
104{
105 ktime_t period;
106
107 write_seqlock(&jiffies_lock);
108 /* Did we start the jiffies update yet ? */
109 if (last_jiffies_update == 0)
110 last_jiffies_update = tick_next_period;
111 period = last_jiffies_update;
112 write_sequnlock(&jiffies_lock);
113 return period;
114}
115
116static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
117{
118 int cpu = smp_processor_id();
119
120#ifdef CONFIG_NO_HZ_COMMON
121 /*
122 * Check if the do_timer duty was dropped. We don't care about
123 * concurrency: This happens only when the CPU in charge went
124 * into a long sleep. If two CPUs happen to assign themselves to
125 * this duty, then the jiffies update is still serialized by
126 * jiffies_lock.
127 */
128 if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
129 && !tick_nohz_full_cpu(cpu))
130 tick_do_timer_cpu = cpu;
131#endif
132
133 /* Check, if the jiffies need an update */
134 if (tick_do_timer_cpu == cpu)
135 tick_do_update_jiffies64(now);
136
137 if (ts->inidle)
138 ts->got_idle_tick = 1;
139}
140
141static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
142{
143#ifdef CONFIG_NO_HZ_COMMON
144 /*
145 * When we are idle and the tick is stopped, we have to touch
146 * the watchdog as we might not schedule for a really long
147 * time. This happens on complete idle SMP systems while
148 * waiting on the login prompt. We also increment the "start of
149 * idle" jiffy stamp so the idle accounting adjustment we do
150 * when we go busy again does not account too much ticks.
151 */
152 if (ts->tick_stopped) {
153 touch_softlockup_watchdog_sched();
154 if (is_idle_task(current))
155 ts->idle_jiffies++;
156 /*
157 * In case the current tick fired too early past its expected
158 * expiration, make sure we don't bypass the next clock reprogramming
159 * to the same deadline.
160 */
161 ts->next_tick = 0;
162 }
163#endif
164 update_process_times(user_mode(regs));
165 profile_tick(CPU_PROFILING);
166}
167#endif
168
169#ifdef CONFIG_NO_HZ_FULL
170cpumask_var_t tick_nohz_full_mask;
171bool tick_nohz_full_running;
172static atomic_t tick_dep_mask;
173
174static bool check_tick_dependency(atomic_t *dep)
175{
176 int val = atomic_read(dep);
177
178 if (val & TICK_DEP_MASK_POSIX_TIMER) {
179 trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
180 return true;
181 }
182
183 if (val & TICK_DEP_MASK_PERF_EVENTS) {
184 trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
185 return true;
186 }
187
188 if (val & TICK_DEP_MASK_SCHED) {
189 trace_tick_stop(0, TICK_DEP_MASK_SCHED);
190 return true;
191 }
192
193 if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
194 trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
195 return true;
196 }
197
198 return false;
199}
200
201static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
202{
203 lockdep_assert_irqs_disabled();
204
205 if (unlikely(!cpu_online(cpu)))
206 return false;
207
208 if (check_tick_dependency(&tick_dep_mask))
209 return false;
210
211 if (check_tick_dependency(&ts->tick_dep_mask))
212 return false;
213
214 if (check_tick_dependency(¤t->tick_dep_mask))
215 return false;
216
217 if (check_tick_dependency(¤t->signal->tick_dep_mask))
218 return false;
219
220 return true;
221}
222
223static void nohz_full_kick_func(struct irq_work *work)
224{
225 /* Empty, the tick restart happens on tick_nohz_irq_exit() */
226}
227
228static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
229 .func = nohz_full_kick_func,
230};
231
232/*
233 * Kick this CPU if it's full dynticks in order to force it to
234 * re-evaluate its dependency on the tick and restart it if necessary.
235 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
236 * is NMI safe.
237 */
238static void tick_nohz_full_kick(void)
239{
240 if (!tick_nohz_full_cpu(smp_processor_id()))
241 return;
242
243 irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
244}
245
246/*
247 * Kick the CPU if it's full dynticks in order to force it to
248 * re-evaluate its dependency on the tick and restart it if necessary.
249 */
250void tick_nohz_full_kick_cpu(int cpu)
251{
252 if (!tick_nohz_full_cpu(cpu))
253 return;
254
255 irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
256}
257
258/*
259 * Kick all full dynticks CPUs in order to force these to re-evaluate
260 * their dependency on the tick and restart it if necessary.
261 */
262static void tick_nohz_full_kick_all(void)
263{
264 int cpu;
265
266 if (!tick_nohz_full_running)
267 return;
268
269 preempt_disable();
270 for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
271 tick_nohz_full_kick_cpu(cpu);
272 preempt_enable();
273}
274
275static void tick_nohz_dep_set_all(atomic_t *dep,
276 enum tick_dep_bits bit)
277{
278 int prev;
279
280 prev = atomic_fetch_or(BIT(bit), dep);
281 if (!prev)
282 tick_nohz_full_kick_all();
283}
284
285/*
286 * Set a global tick dependency. Used by perf events that rely on freq and
287 * by unstable clock.
288 */
289void tick_nohz_dep_set(enum tick_dep_bits bit)
290{
291 tick_nohz_dep_set_all(&tick_dep_mask, bit);
292}
293
294void tick_nohz_dep_clear(enum tick_dep_bits bit)
295{
296 atomic_andnot(BIT(bit), &tick_dep_mask);
297}
298
299/*
300 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
301 * manage events throttling.
302 */
303void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
304{
305 int prev;
306 struct tick_sched *ts;
307
308 ts = per_cpu_ptr(&tick_cpu_sched, cpu);
309
310 prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
311 if (!prev) {
312 preempt_disable();
313 /* Perf needs local kick that is NMI safe */
314 if (cpu == smp_processor_id()) {
315 tick_nohz_full_kick();
316 } else {
317 /* Remote irq work not NMI-safe */
318 if (!WARN_ON_ONCE(in_nmi()))
319 tick_nohz_full_kick_cpu(cpu);
320 }
321 preempt_enable();
322 }
323}
324
325void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
326{
327 struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
328
329 atomic_andnot(BIT(bit), &ts->tick_dep_mask);
330}
331
332/*
333 * Set a per-task tick dependency. Posix CPU timers need this in order to elapse
334 * per task timers.
335 */
336void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
337{
338 /*
339 * We could optimize this with just kicking the target running the task
340 * if that noise matters for nohz full users.
341 */
342 tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
343}
344
345void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
346{
347 atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
348}
349
350/*
351 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
352 * per process timers.
353 */
354void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
355{
356 tick_nohz_dep_set_all(&sig->tick_dep_mask, bit);
357}
358
359void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
360{
361 atomic_andnot(BIT(bit), &sig->tick_dep_mask);
362}
363
364/*
365 * Re-evaluate the need for the tick as we switch the current task.
366 * It might need the tick due to per task/process properties:
367 * perf events, posix CPU timers, ...
368 */
369void __tick_nohz_task_switch(void)
370{
371 unsigned long flags;
372 struct tick_sched *ts;
373
374 local_irq_save(flags);
375
376 if (!tick_nohz_full_cpu(smp_processor_id()))
377 goto out;
378
379 ts = this_cpu_ptr(&tick_cpu_sched);
380
381 if (ts->tick_stopped) {
382 if (atomic_read(¤t->tick_dep_mask) ||
383 atomic_read(¤t->signal->tick_dep_mask))
384 tick_nohz_full_kick();
385 }
386out:
387 local_irq_restore(flags);
388}
389
390/* Get the boot-time nohz CPU list from the kernel parameters. */
391void __init tick_nohz_full_setup(cpumask_var_t cpumask)
392{
393 alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
394 cpumask_copy(tick_nohz_full_mask, cpumask);
395 tick_nohz_full_running = true;
396}
397
398static int tick_nohz_cpu_down(unsigned int cpu)
399{
400 /*
401 * The boot CPU handles housekeeping duty (unbound timers,
402 * workqueues, timekeeping, ...) on behalf of full dynticks
403 * CPUs. It must remain online when nohz full is enabled.
404 */
405 if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
406 return -EBUSY;
407 return 0;
408}
409
410void __init tick_nohz_init(void)
411{
412 int cpu, ret;
413
414 if (!tick_nohz_full_running)
415 return;
416
417 /*
418 * Full dynticks uses irq work to drive the tick rescheduling on safe
419 * locking contexts. But then we need irq work to raise its own
420 * interrupts to avoid circular dependency on the tick
421 */
422 if (!arch_irq_work_has_interrupt()) {
423 pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
424 cpumask_clear(tick_nohz_full_mask);
425 tick_nohz_full_running = false;
426 return;
427 }
428
429 cpu = smp_processor_id();
430
431 if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
432 pr_warn("NO_HZ: Clearing %d from nohz_full range for timekeeping\n",
433 cpu);
434 cpumask_clear_cpu(cpu, tick_nohz_full_mask);
435 }
436
437 for_each_cpu(cpu, tick_nohz_full_mask)
438 context_tracking_cpu_set(cpu);
439
440 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
441 "kernel/nohz:predown", NULL,
442 tick_nohz_cpu_down);
443 WARN_ON(ret < 0);
444 pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
445 cpumask_pr_args(tick_nohz_full_mask));
446}
447#endif
448
449/*
450 * NOHZ - aka dynamic tick functionality
451 */
452#ifdef CONFIG_NO_HZ_COMMON
453/*
454 * NO HZ enabled ?
455 */
456bool tick_nohz_enabled __read_mostly = true;
457unsigned long tick_nohz_active __read_mostly;
458/*
459 * Enable / Disable tickless mode
460 */
461static int __init setup_tick_nohz(char *str)
462{
463 return (kstrtobool(str, &tick_nohz_enabled) == 0);
464}
465
466__setup("nohz=", setup_tick_nohz);
467
468bool tick_nohz_tick_stopped(void)
469{
470 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
471
472 return ts->tick_stopped;
473}
474
475bool tick_nohz_tick_stopped_cpu(int cpu)
476{
477 struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
478
479 return ts->tick_stopped;
480}
481
482/**
483 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
484 *
485 * Called from interrupt entry when the CPU was idle
486 *
487 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
488 * must be updated. Otherwise an interrupt handler could use a stale jiffy
489 * value. We do this unconditionally on any CPU, as we don't know whether the
490 * CPU, which has the update task assigned is in a long sleep.
491 */
492static void tick_nohz_update_jiffies(ktime_t now)
493{
494 unsigned long flags;
495
496 __this_cpu_write(tick_cpu_sched.idle_waketime, now);
497
498 local_irq_save(flags);
499 tick_do_update_jiffies64(now);
500 local_irq_restore(flags);
501
502 touch_softlockup_watchdog_sched();
503}
504
505/*
506 * Updates the per-CPU time idle statistics counters
507 */
508static void
509update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
510{
511 ktime_t delta;
512
513 if (ts->idle_active) {
514 delta = ktime_sub(now, ts->idle_entrytime);
515 if (nr_iowait_cpu(cpu) > 0)
516 ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
517 else
518 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
519 ts->idle_entrytime = now;
520 }
521
522 if (last_update_time)
523 *last_update_time = ktime_to_us(now);
524
525}
526
527static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
528{
529 update_ts_time_stats(smp_processor_id(), ts, now, NULL);
530 ts->idle_active = 0;
531
532 sched_clock_idle_wakeup_event();
533}
534
535static void tick_nohz_start_idle(struct tick_sched *ts)
536{
537 ts->idle_entrytime = ktime_get();
538 ts->idle_active = 1;
539 sched_clock_idle_sleep_event();
540}
541
542/**
543 * get_cpu_idle_time_us - get the total idle time of a CPU
544 * @cpu: CPU number to query
545 * @last_update_time: variable to store update time in. Do not update
546 * counters if NULL.
547 *
548 * Return the cumulative idle time (since boot) for a given
549 * CPU, in microseconds.
550 *
551 * This time is measured via accounting rather than sampling,
552 * and is as accurate as ktime_get() is.
553 *
554 * This function returns -1 if NOHZ is not enabled.
555 */
556u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
557{
558 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
559 ktime_t now, idle;
560
561 if (!tick_nohz_active)
562 return -1;
563
564 now = ktime_get();
565 if (last_update_time) {
566 update_ts_time_stats(cpu, ts, now, last_update_time);
567 idle = ts->idle_sleeptime;
568 } else {
569 if (ts->idle_active && !nr_iowait_cpu(cpu)) {
570 ktime_t delta = ktime_sub(now, ts->idle_entrytime);
571
572 idle = ktime_add(ts->idle_sleeptime, delta);
573 } else {
574 idle = ts->idle_sleeptime;
575 }
576 }
577
578 return ktime_to_us(idle);
579
580}
581EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
582
583/**
584 * get_cpu_iowait_time_us - get the total iowait time of a CPU
585 * @cpu: CPU number to query
586 * @last_update_time: variable to store update time in. Do not update
587 * counters if NULL.
588 *
589 * Return the cumulative iowait time (since boot) for a given
590 * CPU, in microseconds.
591 *
592 * This time is measured via accounting rather than sampling,
593 * and is as accurate as ktime_get() is.
594 *
595 * This function returns -1 if NOHZ is not enabled.
596 */
597u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
598{
599 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
600 ktime_t now, iowait;
601
602 if (!tick_nohz_active)
603 return -1;
604
605 now = ktime_get();
606 if (last_update_time) {
607 update_ts_time_stats(cpu, ts, now, last_update_time);
608 iowait = ts->iowait_sleeptime;
609 } else {
610 if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
611 ktime_t delta = ktime_sub(now, ts->idle_entrytime);
612
613 iowait = ktime_add(ts->iowait_sleeptime, delta);
614 } else {
615 iowait = ts->iowait_sleeptime;
616 }
617 }
618
619 return ktime_to_us(iowait);
620}
621EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
622
623static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
624{
625 hrtimer_cancel(&ts->sched_timer);
626 hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
627
628 /* Forward the time to expire in the future */
629 hrtimer_forward(&ts->sched_timer, now, tick_period);
630
631 if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
632 hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
633 else
634 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
635
636 /*
637 * Reset to make sure next tick stop doesn't get fooled by past
638 * cached clock deadline.
639 */
640 ts->next_tick = 0;
641}
642
643static inline bool local_timer_softirq_pending(void)
644{
645	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
646}
647
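/*
 * Compute the next tick event for this CPU. The result is stored in
 * ts->timer_expires and ts->timer_expires_base is left set for a
 * subsequent tick_nohz_stop_tick(). Returns 0 when the tick has not
 * been stopped yet and the next timer fires within this period, i.e.
 * the tick should be kept running.
 */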
648static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
649{
650 u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
651 unsigned long seq, basejiff;
652
653 /* Read jiffies and the time when jiffies were updated last */
654 do {
655 seq = read_seqbegin(&jiffies_lock);
656 basemono = last_jiffies_update;
657 basejiff = jiffies;
658 } while (read_seqretry(&jiffies_lock, seq));
659 ts->last_jiffies = basejiff;
660 ts->timer_expires_base = basemono;
661
662	/*
663	 * Keep the periodic tick when RCU, the architecture or irq_work
664	 * requests it.
665	 * Aside from that, check whether the local timer softirq is
666	 * pending. If so, it's a bad idea to call get_next_timer_interrupt(),
667	 * because there is an already expired timer, so it will request
668	 * immediate expiry, which rearms the hardware timer with a
669	 * minimal delta and brings us right back to this place. Lather,
670	 * rinse and repeat...
671	 */
672 if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
673 irq_work_needs_cpu() || local_timer_softirq_pending()) {
674 next_tick = basemono + TICK_NSEC;
675 } else {
676 /*
677 * Get the next pending timer. If high resolution
678 * timers are enabled this only takes the timer wheel
679 * timers into account. If high resolution timers are
680 * disabled this also looks at the next expiring
681 * hrtimer.
682 */
683 next_tmr = get_next_timer_interrupt(basejiff, basemono);
684 ts->next_timer = next_tmr;
685 /* Take the next rcu event into account */
686 next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
687 }
688
689 /*
690 * If the tick is due in the next period, keep it ticking or
691	 * force a reprogramming of the timer.
692 */
693 delta = next_tick - basemono;
694 if (delta <= (u64)TICK_NSEC) {
695 /*
696 * Tell the timer code that the base is not idle, i.e. undo
697 * the effect of get_next_timer_interrupt():
698 */
699 timer_clear_idle();
700 /*
701 * We've not stopped the tick yet, and there's a timer in the
702 * next period, so no point in stopping it either, bail.
703 */
704 if (!ts->tick_stopped) {
705 ts->timer_expires = 0;
706 goto out;
707 }
708 }
709
710 /*
711 * If this CPU is the one which had the do_timer() duty last, we limit
712 * the sleep time to the timekeeping max_deferment value.
713 * Otherwise we can sleep as long as we want.
714 */
715 delta = timekeeping_max_deferment();
716 if (cpu != tick_do_timer_cpu &&
717 (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
718 delta = KTIME_MAX;
719
720 /* Calculate the next expiry time */
721 if (delta < (KTIME_MAX - basemono))
722 expires = basemono + delta;
723 else
724 expires = KTIME_MAX;
725
726 ts->timer_expires = min_t(u64, expires, next_tick);
727
728out:
729 return ts->timer_expires;
730}
731
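/*
 * Stop the tick: hand off the do_timer() duty if this CPU owns it,
 * record the stopped state and program the expiry computed by
 * tick_nohz_next_event().
 */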
732static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
733{
734 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
735 u64 basemono = ts->timer_expires_base;
736 u64 expires = ts->timer_expires;
737 ktime_t tick = expires;
738
739 /* Make sure we won't be trying to stop it twice in a row. */
740 ts->timer_expires_base = 0;
741
742 /*
743 * If this CPU is the one which updates jiffies, then give up
744 * the assignment and let it be taken by the CPU which runs
745	 * the tick timer next, which might be this CPU as well. If we
746	 * don't drop it here, jiffies might become stale and do_timer()
747	 * might never be invoked. Keep track of the fact that this CPU
748	 * had the do_timer() duty last.
749 */
750 if (cpu == tick_do_timer_cpu) {
751 tick_do_timer_cpu = TICK_DO_TIMER_NONE;
752 ts->do_timer_last = 1;
753 } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
754 ts->do_timer_last = 0;
755 }
756
757	/* Skip reprogramming the clock event if it hasn't changed */
758 if (ts->tick_stopped && (expires == ts->next_tick)) {
759 /* Sanity check: make sure clockevent is actually programmed */
760 if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
761 return;
762
763 WARN_ON_ONCE(1);
764 printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
765 basemono, ts->next_tick, dev->next_event,
766 hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
767 }
768
769 /*
770	 * tick_nohz_stop_tick() can be called several times before
771	 * tick_nohz_restart_sched_tick() is called. This happens when
772	 * interrupts arrive which do not cause a reschedule. On the
773	 * first call we save the current tick time, so we can restart
774	 * the scheduler tick in tick_nohz_restart_sched_tick().
775 */
776 if (!ts->tick_stopped) {
777 calc_load_nohz_start();
778 cpu_load_update_nohz_start();
779 quiet_vmstat();
780
781 ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
782 ts->tick_stopped = 1;
783 trace_tick_stop(1, TICK_DEP_MASK_NONE);
784 }
785
786 ts->next_tick = tick;
787
788 /*
789 * If the expiration time == KTIME_MAX, then we simply stop
790 * the tick timer.
791 */
792 if (unlikely(expires == KTIME_MAX)) {
793 if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
794 hrtimer_cancel(&ts->sched_timer);
795 return;
796 }
797
798 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
799 hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
800 } else {
801 hrtimer_set_expires(&ts->sched_timer, tick);
802 tick_program_event(tick, 1);
803 }
804}
805
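/* Keep the tick running; just invalidate the cached expiry base. */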
806static void tick_nohz_retain_tick(struct tick_sched *ts)
807{
808 ts->timer_expires_base = 0;
809}
810
811#ifdef CONFIG_NO_HZ_FULL
812static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
813{
814 if (tick_nohz_next_event(ts, cpu))
815 tick_nohz_stop_tick(ts, cpu);
816 else
817 tick_nohz_retain_tick(ts);
818}
819#endif /* CONFIG_NO_HZ_FULL */
820
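/*
 * Restart the tick after an idle period: catch up jiffies, clear the
 * timer base idle flag, restore load accounting and rearm the tick.
 */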
821static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
822{
823 /* Update jiffies first */
824 tick_do_update_jiffies64(now);
825 cpu_load_update_nohz_stop();
826
827 /*
828 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
829 * the clock forward checks in the enqueue path:
830 */
831 timer_clear_idle();
832
833 calc_load_nohz_stop();
834 touch_softlockup_watchdog_sched();
835 /*
836 * Cancel the scheduled timer and restore the tick
837 */
838 ts->tick_stopped = 0;
839 ts->idle_exittime = now;
840
841 tick_nohz_restart(ts, now);
842}
843
844static void tick_nohz_full_update_tick(struct tick_sched *ts)
845{
846#ifdef CONFIG_NO_HZ_FULL
847 int cpu = smp_processor_id();
848
849 if (!tick_nohz_full_cpu(cpu))
850 return;
851
852 if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
853 return;
854
855 if (can_stop_full_tick(cpu, ts))
856 tick_nohz_stop_sched_tick(ts, cpu);
857 else if (ts->tick_stopped)
858 tick_nohz_restart_sched_tick(ts, ktime_get());
859#endif
860}
861
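/*
 * Check whether it is safe to stop the periodic tick on this CPU
 * before entering dyntick-idle mode.
 */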
862static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
863{
864 /*
865 * If this CPU is offline and it is the one which updates
866 * jiffies, then give up the assignment and let it be taken by
867	 * the CPU which runs the tick timer next. If we don't drop it
868	 * here, jiffies might become stale and do_timer() might never
869	 * be invoked.
870 */
871 if (unlikely(!cpu_online(cpu))) {
872 if (cpu == tick_do_timer_cpu)
873 tick_do_timer_cpu = TICK_DO_TIMER_NONE;
874 /*
875		 * Make sure the CPU doesn't get fooled by an obsolete tick
876		 * deadline if it comes back online later.
877 */
878 ts->next_tick = 0;
879 return false;
880 }
881
882 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
883 return false;
884
885 if (need_resched())
886 return false;
887
888 if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
889 static int ratelimit;
890
891 if (ratelimit < 10 &&
892 (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
893 pr_warn("NOHZ: local_softirq_pending %02x\n",
894 (unsigned int) local_softirq_pending());
895 ratelimit++;
896 }
897 return false;
898 }
899
900 if (tick_nohz_full_enabled()) {
901 /*
902 * Keep the tick alive to guarantee timekeeping progression
903 * if there are full dynticks CPUs around
904 */
905 if (tick_do_timer_cpu == cpu)
906 return false;
907 /*
908 * Boot safety: make sure the timekeeping duty has been
909		 * assigned before entering dyntick-idle mode.
910 */
911 if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
912 return false;
913 }
914
915 return true;
916}
917
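/*
 * Stop the idle tick if possible. Reuses an expiry already computed by
 * tick_nohz_get_sleep_length(); otherwise it is computed here.
 */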
918static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
919{
920 ktime_t expires;
921 int cpu = smp_processor_id();
922
923 /*
924 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
925 * tick timer expiration time is known already.
926 */
927 if (ts->timer_expires_base)
928 expires = ts->timer_expires;
929 else if (can_stop_idle_tick(cpu, ts))
930 expires = tick_nohz_next_event(ts, cpu);
931 else
932 return;
933
934 ts->idle_calls++;
935
936 if (expires > 0LL) {
937 int was_stopped = ts->tick_stopped;
938
939 tick_nohz_stop_tick(ts, cpu);
940
941 ts->idle_sleeps++;
942 ts->idle_expires = expires;
943
944 if (!was_stopped && ts->tick_stopped) {
945 ts->idle_jiffies = ts->last_jiffies;
946 nohz_balance_enter_idle(cpu);
947 }
948 } else {
949 tick_nohz_retain_tick(ts);
950 }
951}
952
953/**
954 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
955 *
956 * When the next event is more than a tick into the future, stop the idle tick
957 */
958void tick_nohz_idle_stop_tick(void)
959{
960 __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched));
961}
962
963void tick_nohz_idle_retain_tick(void)
964{
965 tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
966 /*
967 * Undo the effect of get_next_timer_interrupt() called from
968 * tick_nohz_next_event().
969 */
970 timer_clear_idle();
971}
972
973/**
974 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
975 *
976 * Called when we start the idle loop.
977 */
978void tick_nohz_idle_enter(void)
979{
980 struct tick_sched *ts;
981
982 lockdep_assert_irqs_enabled();
983
984 local_irq_disable();
985
986 ts = this_cpu_ptr(&tick_cpu_sched);
987
988 WARN_ON_ONCE(ts->timer_expires_base);
989
990 ts->inidle = 1;
991 tick_nohz_start_idle(ts);
992
993 local_irq_enable();
994}
995
996/**
997 * tick_nohz_irq_exit - update next tick event from interrupt exit
998 *
999 * When an interrupt fires while we are idle and it doesn't cause
1000 * a reschedule, it may still add, modify or delete a timer, enqueue
1001 * an RCU callback, etc...
1002 * So we need to re-calculate and reprogram the next tick event.
1003 */
1004void tick_nohz_irq_exit(void)
1005{
1006 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1007
1008 if (ts->inidle)
1009 tick_nohz_start_idle(ts);
1010 else
1011 tick_nohz_full_update_tick(ts);
1012}
1013
1014/**
1015 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
1016 */
1017bool tick_nohz_idle_got_tick(void)
1018{
1019 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1020
1021 if (ts->got_idle_tick) {
1022 ts->got_idle_tick = 0;
1023 return true;
1024 }
1025 return false;
1026}
1027
1028/**
1029 * tick_nohz_get_sleep_length - return the expected length of the current sleep
1030 * @delta_next: duration until the next event if the tick cannot be stopped
1031 *
1032 * Called from power state control code with interrupts disabled
1033 */
1034ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
1035{
1036 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
1037 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1038 int cpu = smp_processor_id();
1039 /*
1040 * The idle entry time is expected to be a sufficient approximation of
1041 * the current time at this point.
1042 */
1043 ktime_t now = ts->idle_entrytime;
1044 ktime_t next_event;
1045
1046 WARN_ON_ONCE(!ts->inidle);
1047
1048 *delta_next = ktime_sub(dev->next_event, now);
1049
1050 if (!can_stop_idle_tick(cpu, ts))
1051 return *delta_next;
1052
1053 next_event = tick_nohz_next_event(ts, cpu);
1054 if (!next_event)
1055 return *delta_next;
1056
1057 /*
1058 * If the next highres timer to expire is earlier than next_event, the
1059 * idle governor needs to know that.
1060 */
1061 next_event = min_t(u64, next_event,
1062 hrtimer_next_event_without(&ts->sched_timer));
1063
1064 return ktime_sub(next_event, now);
1065}
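
/*
 * Illustrative sketch (not part of this file): an idle governor would
 * typically consume the above as follows:
 *
 *	ktime_t delta_next;
 *	ktime_t sleep = tick_nohz_get_sleep_length(&delta_next);
 *
 * where 'sleep' bounds the residency it may plan for if the tick gets
 * stopped and 'delta_next' bounds it if the tick is retained.
 */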
1066
1067/**
1068 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
1069 * for a particular CPU.
1070 *
1071 * Called from the schedutil frequency scaling governor in scheduler context.
1072 */
1073unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
1074{
1075 struct tick_sched *ts = tick_get_tick_sched(cpu);
1076
1077 return ts->idle_calls;
1078}
1079
1080/**
1081 * tick_nohz_get_idle_calls - return the current idle calls counter value
1082 *
1083 * Called from the schedutil frequency scaling governor in scheduler context.
1084 */
1085unsigned long tick_nohz_get_idle_calls(void)
1086{
1087 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1088
1089 return ts->idle_calls;
1090}
1091
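/*
 * Account the ticks missed while the tick was stopped in idle, unless
 * a vtime based accounting scheme already covers that time.
 */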
1092static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
1093{
1094#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1095 unsigned long ticks;
1096
1097 if (vtime_accounting_cpu_enabled())
1098 return;
1099	/*
1100	 * We stopped the tick in idle. update_process_times() would miss
1101	 * the time we slept, as it only accounts a single tick per call.
1102	 * Make sure the whole sleep period is accounted as idle!
1103	 */
1104 ticks = jiffies - ts->idle_jiffies;
1105 /*
1106	 * We might be off by one. Do not randomly account a huge number of ticks!
1107 */
1108 if (ticks && ticks < LONG_MAX)
1109 account_idle_ticks(ticks);
1110#endif
1111}
1112
1113static void __tick_nohz_idle_restart_tick(struct tick_sched *ts, ktime_t now)
1114{
1115 tick_nohz_restart_sched_tick(ts, now);
1116 tick_nohz_account_idle_ticks(ts);
1117}
1118
1119void tick_nohz_idle_restart_tick(void)
1120{
1121 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1122
1123 if (ts->tick_stopped)
1124 __tick_nohz_idle_restart_tick(ts, ktime_get());
1125}
1126
1127/**
1128 * tick_nohz_idle_exit - restart the idle tick from the idle task
1129 *
1130 * Restart the idle tick when the CPU is woken up from idle.
1131 * This also exits the RCU extended quiescent state. The CPU
1132 * can use RCU again after this function is called.
1133 */
1134void tick_nohz_idle_exit(void)
1135{
1136 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1137 bool idle_active, tick_stopped;
1138 ktime_t now;
1139
1140 local_irq_disable();
1141
1142 WARN_ON_ONCE(!ts->inidle);
1143 WARN_ON_ONCE(ts->timer_expires_base);
1144
1145 ts->inidle = 0;
1146 idle_active = ts->idle_active;
1147 tick_stopped = ts->tick_stopped;
1148
1149 if (idle_active || tick_stopped)
1150 now = ktime_get();
1151
1152 if (idle_active)
1153 tick_nohz_stop_idle(ts, now);
1154
1155 if (tick_stopped)
1156 __tick_nohz_idle_restart_tick(ts, now);
1157
1158 local_irq_enable();
1159}
1160
1161/*
1162 * The nohz low res interrupt handler
1163 */
1164static void tick_nohz_handler(struct clock_event_device *dev)
1165{
1166 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1167 struct pt_regs *regs = get_irq_regs();
1168 ktime_t now = ktime_get();
1169
1170 dev->next_event = KTIME_MAX;
1171
1172 tick_sched_do_timer(ts, now);
1173 tick_sched_handle(ts, regs);
1174
1175 /* No need to reprogram if we are running tickless */
1176 if (unlikely(ts->tick_stopped))
1177 return;
1178
1179 hrtimer_forward(&ts->sched_timer, now, tick_period);
1180 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1181}
1182
1183static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
1184{
1185 if (!tick_nohz_enabled)
1186 return;
1187 ts->nohz_mode = mode;
1188 /* One update is enough */
1189 if (!test_and_set_bit(0, &tick_nohz_active))
1190 timers_update_nohz();
1191}
1192
1193/**
1194 * tick_nohz_switch_to_nohz - switch to nohz mode
1195 */
1196static void tick_nohz_switch_to_nohz(void)
1197{
1198 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1199 ktime_t next;
1200
1201 if (!tick_nohz_enabled)
1202 return;
1203
1204 if (tick_switch_to_oneshot(tick_nohz_handler))
1205 return;
1206
1207 /*
1208 * Recycle the hrtimer in ts, so we can share the
1209	 * hrtimer_forward() code with the highres path.
1210 */
1211 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1212 /* Get the next period */
1213 next = tick_init_jiffy_update();
1214
1215 hrtimer_set_expires(&ts->sched_timer, next);
1216 hrtimer_forward_now(&ts->sched_timer, tick_period);
1217 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1218 tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
1219}
1220
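/*
 * Called from irq_enter(): update the idle time accounting and jiffies
 * when an interrupt hits an idle or tickless CPU.
 */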
1221static inline void tick_nohz_irq_enter(void)
1222{
1223 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1224 ktime_t now;
1225
1226 if (!ts->idle_active && !ts->tick_stopped)
1227 return;
1228 now = ktime_get();
1229 if (ts->idle_active)
1230 tick_nohz_stop_idle(ts, now);
1231 if (ts->tick_stopped)
1232 tick_nohz_update_jiffies(now);
1233}
1234
1235#else
1236
1237static inline void tick_nohz_switch_to_nohz(void) { }
1238static inline void tick_nohz_irq_enter(void) { }
1239static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }
1240
1241#endif /* CONFIG_NO_HZ_COMMON */
1242
1243/*
1244 * Called from irq_enter to notify about the possible interruption of idle()
1245 */
1246void tick_irq_enter(void)
1247{
1248 tick_check_oneshot_broadcast_this_cpu();
1249 tick_nohz_irq_enter();
1250}
1251
1252/*
1253 * High resolution timer specific code
1254 */
1255#ifdef CONFIG_HIGH_RES_TIMERS
1256/*
1257 * We rearm the timer until we get disabled by the idle code.
1258 * Called with interrupts disabled.
1259 */
1260static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
1261{
1262 struct tick_sched *ts =
1263 container_of(timer, struct tick_sched, sched_timer);
1264 struct pt_regs *regs = get_irq_regs();
1265 ktime_t now = ktime_get();
1266
1267 tick_sched_do_timer(ts, now);
1268
1269 /*
1270	 * Do not call the tick handler when we are not in IRQ context
1271	 * and have no valid regs pointer.
1272 */
1273 if (regs)
1274 tick_sched_handle(ts, regs);
1275 else
1276 ts->next_tick = 0;
1277
1278 /* No need to reprogram if we are in idle or full dynticks mode */
1279 if (unlikely(ts->tick_stopped))
1280 return HRTIMER_NORESTART;
1281
1282 hrtimer_forward(timer, now, tick_period);
1283
1284 return HRTIMER_RESTART;
1285}
1286
1287static int sched_skew_tick;
1288
1289static int __init skew_tick(char *str)
1290{
1291 get_option(&str, &sched_skew_tick);
1292
1293 return 0;
1294}
1295early_param("skew_tick", skew_tick);
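
/*
 * Example: booting with "skew_tick=1" enables the per-CPU tick offset
 * applied in tick_setup_sched_timer() below, which spreads the ticks
 * out and reduces jiffies_lock contention on machines with many CPUs.
 */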
1296
1297/**
1298 * tick_setup_sched_timer - setup the tick emulation timer
1299 */
1300void tick_setup_sched_timer(void)
1301{
1302 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1303 ktime_t now = ktime_get();
1304
1305 /*
1306 * Emulate tick processing via per-CPU hrtimers:
1307 */
1308 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1309 ts->sched_timer.function = tick_sched_timer;
1310
1311 /* Get the next period (per-CPU) */
1312 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
1313
1314 /* Offset the tick to avert jiffies_lock contention. */
1315 if (sched_skew_tick) {
1316 u64 offset = ktime_to_ns(tick_period) >> 1;
1317 do_div(offset, num_possible_cpus());
1318 offset *= smp_processor_id();
1319 hrtimer_add_expires_ns(&ts->sched_timer, offset);
1320 }
1321
1322 hrtimer_forward(&ts->sched_timer, now, tick_period);
1323 hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
1324 tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
1325}
1326#endif /* CONFIG_HIGH_RES_TIMERS */
1327
1328#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
1329void tick_cancel_sched_timer(int cpu)
1330{
1331 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
1332
1333# ifdef CONFIG_HIGH_RES_TIMERS
1334 if (ts->sched_timer.base)
1335 hrtimer_cancel(&ts->sched_timer);
1336# endif
1337
1338 memset(ts, 0, sizeof(*ts));
1339}
1340#endif
1341
1342/*
1343 * Async notification about clocksource changes
1344 */
1345void tick_clock_notify(void)
1346{
1347 int cpu;
1348
1349 for_each_possible_cpu(cpu)
1350 set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
1351}
1352
1353/*
1354 * Async notification about clock event changes
1355 */
1356void tick_oneshot_notify(void)
1357{
1358 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1359
1360 set_bit(0, &ts->check_clocks);
1361}
1362
1363/**
1364 * Check if a change happened which makes oneshot mode possible.
1365 *
1366 * Called cyclically from the hrtimer softirq (driven by the timer
1367 * softirq). allow_nohz signals that we can switch into low-res nohz
1368 * mode, because high resolution timers are disabled (either at
1369 * compile time or at runtime). Called with interrupts disabled.
1370 */
1371int tick_check_oneshot_change(int allow_nohz)
1372{
1373 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1374
1375 if (!test_and_clear_bit(0, &ts->check_clocks))
1376 return 0;
1377
1378 if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
1379 return 0;
1380
1381 if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
1382 return 0;
1383
1384 if (!allow_nohz)
1385 return 1;
1386
1387 tick_nohz_switch_to_nohz();
1388 return 0;
1389}