1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Kernel internal timers
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
8 *
9 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
10 * "A Kernel Model for Precision Timekeeping" by Dave Mills
11 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
12 * serialize accesses to xtime/lost_ticks).
13 * Copyright (C) 1998 Andrea Arcangeli
14 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
15 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
16 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
17 * Copyright (C) 2000, 2001, 2002 Ingo Molnar
18 * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
19 */
20
21#include <linux/kernel_stat.h>
22#include <linux/export.h>
23#include <linux/interrupt.h>
24#include <linux/percpu.h>
25#include <linux/init.h>
26#include <linux/mm.h>
27#include <linux/swap.h>
28#include <linux/pid_namespace.h>
29#include <linux/notifier.h>
30#include <linux/thread_info.h>
31#include <linux/time.h>
32#include <linux/jiffies.h>
33#include <linux/posix-timers.h>
34#include <linux/cpu.h>
35#include <linux/syscalls.h>
36#include <linux/delay.h>
37#include <linux/tick.h>
38#include <linux/kallsyms.h>
39#include <linux/irq_work.h>
40#include <linux/sched/signal.h>
41#include <linux/sched/sysctl.h>
42#include <linux/sched/nohz.h>
43#include <linux/sched/debug.h>
44#include <linux/slab.h>
45#include <linux/compat.h>
46#include <linux/random.h>
47
48#include <linux/uaccess.h>
49#include <asm/unistd.h>
50#include <asm/div64.h>
51#include <asm/timex.h>
52#include <asm/io.h>
53
54#include "tick-internal.h"
55
56#define CREATE_TRACE_POINTS
57#include <trace/events/timer.h>
58
59__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
60
61EXPORT_SYMBOL(jiffies_64);
62
63/*
64 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
65 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
66 * level has a different granularity.
67 *
68 * The level granularity is: LVL_CLK_DIV ^ lvl
69 * The level clock frequency is: HZ / (LVL_CLK_DIV ^ level)
70 *
71 * The array level of a newly armed timer depends on the relative expiry
72 * time. The farther away the expiry time is, the higher the array level
73 * and therefore the coarser the granularity becomes.
74 *
75 * Contrary to the original timer wheel implementation, which aims for 'exact'
76 * expiry of the timers, this implementation removes the need for recascading
77 * the timers into the lower array levels. The previous 'classic' timer wheel
78 * implementation of the kernel already violated the 'exact' expiry by adding
79 * slack to the expiry time to provide batched expiration. The granularity
80 * levels provide implicit batching.
81 *
82 * This is an optimization of the original timer wheel implementation for the
83 * majority of the timer wheel use cases: timeouts. The vast majority of
84 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
85 * the timeout expires it indicates that normal operation is disturbed, so it
86 * does not matter much whether the timeout comes with a slight delay.
87 *
88 * The only exception to this is networking timers with a small expiry
89 * time. They rely on the granularity. Those fit into the first wheel level,
90 * which has HZ granularity.
91 *
92 * We don't have cascading anymore. Timers with an expiry time above the
93 * capacity of the last wheel level are force expired at the maximum timeout
94 * value of the last wheel level. From data sampling we know that the maximum
95 * value observed is 5 days (network connection tracking), so this should not
96 * be an issue.
97 *
98 * The currently chosen array constants are a good compromise between
99 * array size and granularity.
100 *
101 * This results in the following granularity and range levels:
102 *
103 * HZ 1000 steps
104 * Level Offset Granularity Range
105 * 0 0 1 ms 0 ms - 63 ms
106 * 1 64 8 ms 64 ms - 511 ms
107 * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s)
108 * 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s)
109 * 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m)
110 * 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m)
111 * 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h)
112 * 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d)
113 * 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d)
114 *
115 * HZ 300
116 * Level Offset Granularity Range
117 * 0 0 3 ms 0 ms - 210 ms
118 * 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s)
119 * 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s)
120 * 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m)
121 * 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m)
122 * 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h)
123 * 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h)
124 * 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d)
125 * 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
126 *
127 * HZ 250
128 * Level Offset Granularity Range
129 * 0 0 4 ms 0 ms - 255 ms
130 * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s)
131 * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s)
132 * 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m)
133 * 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m)
134 * 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h)
135 * 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h)
136 * 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d)
137 * 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
138 *
139 * HZ 100
140 * Level Offset Granularity Range
141 * 0 0 10 ms 0 ms - 630 ms
142 * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s)
143 * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s)
144 * 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m)
145 * 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m)
146 * 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h)
147 * 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d)
148 * 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
149 */
150
151/* Clock divisor for the next level */
152#define LVL_CLK_SHIFT 3
153#define LVL_CLK_DIV (1UL << LVL_CLK_SHIFT)
154#define LVL_CLK_MASK (LVL_CLK_DIV - 1)
155#define LVL_SHIFT(n) ((n) * LVL_CLK_SHIFT)
156#define LVL_GRAN(n) (1UL << LVL_SHIFT(n))
157
158/*
159 * The time start value for each level to select the bucket at enqueue
160 * time. We start from the last possible delta of the previous level
161 * so that we can later add an extra LVL_GRAN(n) to n (see calc_index()).
162 */
163#define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
164
165/* Size of each clock level */
166#define LVL_BITS 6
167#define LVL_SIZE (1UL << LVL_BITS)
168#define LVL_MASK (LVL_SIZE - 1)
169#define LVL_OFFS(n) ((n) * LVL_SIZE)
170
171/* Level depth */
172#if HZ > 100
173# define LVL_DEPTH 9
174# else
175# define LVL_DEPTH 8
176#endif
177
178/* The cutoff (max. capacity of the wheel) */
179#define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH))
180#define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
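/*
 * Worked example with HZ=1000 (one tick == 1 ms): LVL_GRAN(2) = 1 << (2 * 3)
 * = 64 ticks, the 64 ms granularity shown for level 2 in the table above.
 * LVL_START(3) = 63 << (2 * 3) = 4032 ticks, so deltas below ~4s still land
 * in level 2. With LVL_DEPTH = 9, WHEEL_TIMEOUT_CUTOFF = LVL_START(9) =
 * 63 << 24 = 1056964608 ticks, i.e. roughly 12 days, matching the upper
 * bound of level 8 in the table.
 */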
181
182/*
183 * The resulting wheel size. If NOHZ is configured we allocate two
184 * wheels so we have separate storage for the deferrable timers.
185 */
186#define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH)
187
188#ifdef CONFIG_NO_HZ_COMMON
189# define NR_BASES 2
190# define BASE_STD 0
191# define BASE_DEF 1
192#else
193# define NR_BASES 1
194# define BASE_STD 0
195# define BASE_DEF 0
196#endif
197
198struct timer_base {
199 raw_spinlock_t lock;
200 struct timer_list *running_timer;
201#ifdef CONFIG_PREEMPT_RT
202 spinlock_t expiry_lock;
203 atomic_t timer_waiters;
204#endif
205 unsigned long clk;
206 unsigned long next_expiry;
207 unsigned int cpu;
208 bool next_expiry_recalc;
209 bool is_idle;
210 bool timers_pending;
211 DECLARE_BITMAP(pending_map, WHEEL_SIZE);
212 struct hlist_head vectors[WHEEL_SIZE];
213} ____cacheline_aligned;
214
215static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
216
217#ifdef CONFIG_NO_HZ_COMMON
218
219static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
220static DEFINE_MUTEX(timer_keys_mutex);
221
222static void timer_update_keys(struct work_struct *work);
223static DECLARE_WORK(timer_update_work, timer_update_keys);
224
225#ifdef CONFIG_SMP
226unsigned int sysctl_timer_migration = 1;
227
228DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
229
230static void timers_update_migration(void)
231{
232 if (sysctl_timer_migration && tick_nohz_active)
233 static_branch_enable(&timers_migration_enabled);
234 else
235 static_branch_disable(&timers_migration_enabled);
236}
237#else
238static inline void timers_update_migration(void) { }
239#endif /* !CONFIG_SMP */
240
241static void timer_update_keys(struct work_struct *work)
242{
243 mutex_lock(&timer_keys_mutex);
244 timers_update_migration();
245 static_branch_enable(&timers_nohz_active);
246 mutex_unlock(&timer_keys_mutex);
247}
248
249void timers_update_nohz(void)
250{
251 schedule_work(&timer_update_work);
252}
253
254int timer_migration_handler(struct ctl_table *table, int write,
255 void *buffer, size_t *lenp, loff_t *ppos)
256{
257 int ret;
258
259 mutex_lock(&timer_keys_mutex);
260 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
261 if (!ret && write)
262 timers_update_migration();
263 mutex_unlock(&timer_keys_mutex);
264 return ret;
265}
266
267static inline bool is_timers_nohz_active(void)
268{
269 return static_branch_unlikely(&timers_nohz_active);
270}
271#else
272static inline bool is_timers_nohz_active(void) { return false; }
273#endif /* NO_HZ_COMMON */
274
275static unsigned long round_jiffies_common(unsigned long j, int cpu,
276 bool force_up)
277{
278 int rem;
279 unsigned long original = j;
280
281 /*
282 * We don't want all cpus firing their timers at once hitting the
283 * same lock or cachelines, so we skew each extra CPU by an extra
284 * 3 jiffies. These 3 jiffies came originally from the mm/ code which
285 * already did this.
286 * The skew is done by adding 3*cpunr, then rounding, then subtracting this
287 * extra offset again.
288 */
289 j += cpu * 3;
290
291 rem = j % HZ;
292
293 /*
294 * If the target jiffie is just after a whole second (which can happen
295 * due to delays of the timer irq, long irq-off times, etc.) then
296 * we should round down to the whole second, not up. Use 1/4th second
297 * as the cutoff for this rounding, as an extreme upper bound.
298 * But never round down if @force_up is set.
299 */
300 if (rem < HZ/4 && !force_up) /* round down */
301 j = j - rem;
302 else /* round up */
303 j = j - rem + HZ;
304
305 /* now that we have rounded, subtract the extra skew again */
306 j -= cpu * 3;
307
308 /*
309 * Make sure j is still in the future. Otherwise return the
310 * unmodified value.
311 */
312 return time_is_after_jiffies(j) ? j : original;
313}
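/*
 * For illustration, with HZ=1000 and cpu=1: a request for j = 5030 is first
 * skewed to 5033; rem = 5033 % 1000 = 33, which is below HZ/4, so it is
 * rounded down to 5000 and the skew is removed again, yielding 4997. The
 * timer fires 33 jiffies early, but on a boundary shared (modulo the
 * per-CPU skew) with all other rounded timers on this CPU. Had rem been
 * HZ/4 or more, the value would have been rounded up to the next full
 * second instead.
 */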
314
315/**
316 * __round_jiffies - function to round jiffies to a full second
317 * @j: the time in (absolute) jiffies that should be rounded
318 * @cpu: the processor number on which the timeout will happen
319 *
320 * __round_jiffies() rounds an absolute time in the future (in jiffies)
321 * up or down to (approximately) full seconds. This is useful for timers
322 * for which the exact time they fire does not matter too much, as long as
323 * they fire approximately every X seconds.
324 *
325 * By rounding these timers to whole seconds, all such timers will fire
326 * at the same time, rather than at various times spread out. The goal
327 * of this is to have the CPU wake up less, which saves power.
328 *
329 * The exact rounding is skewed for each processor to avoid all
330 * processors firing at the exact same time, which could lead
331 * to lock contention or spurious cache line bouncing.
332 *
333 * The return value is the rounded version of the @j parameter.
334 */
335unsigned long __round_jiffies(unsigned long j, int cpu)
336{
337 return round_jiffies_common(j, cpu, false);
338}
339EXPORT_SYMBOL_GPL(__round_jiffies);
340
341/**
342 * __round_jiffies_relative - function to round jiffies to a full second
343 * @j: the time in (relative) jiffies that should be rounded
344 * @cpu: the processor number on which the timeout will happen
345 *
346 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
347 * up or down to (approximately) full seconds. This is useful for timers
348 * for which the exact time they fire does not matter too much, as long as
349 * they fire approximately every X seconds.
350 *
351 * By rounding these timers to whole seconds, all such timers will fire
352 * at the same time, rather than at various times spread out. The goal
353 * of this is to have the CPU wake up less, which saves power.
354 *
355 * The exact rounding is skewed for each processor to avoid all
356 * processors firing at the exact same time, which could lead
357 * to lock contention or spurious cache line bouncing.
358 *
359 * The return value is the rounded version of the @j parameter.
360 */
361unsigned long __round_jiffies_relative(unsigned long j, int cpu)
362{
363 unsigned long j0 = jiffies;
364
365 /* Use j0 because jiffies might change while we run */
366 return round_jiffies_common(j + j0, cpu, false) - j0;
367}
368EXPORT_SYMBOL_GPL(__round_jiffies_relative);
369
370/**
371 * round_jiffies - function to round jiffies to a full second
372 * @j: the time in (absolute) jiffies that should be rounded
373 *
374 * round_jiffies() rounds an absolute time in the future (in jiffies)
375 * up or down to (approximately) full seconds. This is useful for timers
376 * for which the exact time they fire does not matter too much, as long as
377 * they fire approximately every X seconds.
378 *
379 * By rounding these timers to whole seconds, all such timers will fire
380 * at the same time, rather than at various times spread out. The goal
381 * of this is to have the CPU wake up less, which saves power.
382 *
383 * The return value is the rounded version of the @j parameter.
384 */
385unsigned long round_jiffies(unsigned long j)
386{
387 return round_jiffies_common(j, raw_smp_processor_id(), false);
388}
389EXPORT_SYMBOL_GPL(round_jiffies);
390
391/**
392 * round_jiffies_relative - function to round jiffies to a full second
393 * @j: the time in (relative) jiffies that should be rounded
394 *
395 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
396 * up or down to (approximately) full seconds. This is useful for timers
397 * for which the exact time they fire does not matter too much, as long as
398 * they fire approximately every X seconds.
399 *
400 * By rounding these timers to whole seconds, all such timers will fire
401 * at the same time, rather than at various times spread out. The goal
402 * of this is to have the CPU wake up less, which saves power.
403 *
404 * The return value is the rounded version of the @j parameter.
405 */
406unsigned long round_jiffies_relative(unsigned long j)
407{
408 return __round_jiffies_relative(j, raw_smp_processor_id());
409}
410EXPORT_SYMBOL_GPL(round_jiffies_relative);
411
412/**
413 * __round_jiffies_up - function to round jiffies up to a full second
414 * @j: the time in (absolute) jiffies that should be rounded
415 * @cpu: the processor number on which the timeout will happen
416 *
417 * This is the same as __round_jiffies() except that it will never
418 * round down. This is useful for timeouts for which the exact time
419 * of firing does not matter too much, as long as they don't fire too
420 * early.
421 */
422unsigned long __round_jiffies_up(unsigned long j, int cpu)
423{
424 return round_jiffies_common(j, cpu, true);
425}
426EXPORT_SYMBOL_GPL(__round_jiffies_up);
427
428/**
429 * __round_jiffies_up_relative - function to round jiffies up to a full second
430 * @j: the time in (relative) jiffies that should be rounded
431 * @cpu: the processor number on which the timeout will happen
432 *
433 * This is the same as __round_jiffies_relative() except that it will never
434 * round down. This is useful for timeouts for which the exact time
435 * of firing does not matter too much, as long as they don't fire too
436 * early.
437 */
438unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
439{
440 unsigned long j0 = jiffies;
441
442 /* Use j0 because jiffies might change while we run */
443 return round_jiffies_common(j + j0, cpu, true) - j0;
444}
445EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
446
447/**
448 * round_jiffies_up - function to round jiffies up to a full second
449 * @j: the time in (absolute) jiffies that should be rounded
450 *
451 * This is the same as round_jiffies() except that it will never
452 * round down. This is useful for timeouts for which the exact time
453 * of firing does not matter too much, as long as they don't fire too
454 * early.
455 */
456unsigned long round_jiffies_up(unsigned long j)
457{
458 return round_jiffies_common(j, raw_smp_processor_id(), true);
459}
460EXPORT_SYMBOL_GPL(round_jiffies_up);
461
462/**
463 * round_jiffies_up_relative - function to round jiffies up to a full second
464 * @j: the time in (relative) jiffies that should be rounded
465 *
466 * This is the same as round_jiffies_relative() except that it will never
467 * round down. This is useful for timeouts for which the exact time
468 * of firing does not matter too much, as long as they don't fire too
469 * early.
470 */
471unsigned long round_jiffies_up_relative(unsigned long j)
472{
473 return __round_jiffies_up_relative(j, raw_smp_processor_id());
474}
475EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
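/*
 * Usage sketch (the stats_timer below is hypothetical, not part of this
 * file): a driver that only needs roughly periodic work can batch its
 * wakeups with other timers by rounding the expiry, e.g.:
 *
 *        mod_timer(&stats_timer, round_jiffies(jiffies + 10 * HZ));
 *
 * or, for a relative delay that must not fire early:
 *
 *        mod_timer(&stats_timer, jiffies + round_jiffies_up_relative(HZ));
 */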
476
477
478static inline unsigned int timer_get_idx(struct timer_list *timer)
479{
480 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
481}
482
483static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
484{
485 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
486 idx << TIMER_ARRAYSHIFT;
487}
488
489/*
490 * Helper function to calculate the array index for a given expiry
491 * time.
492 */
493static inline unsigned calc_index(unsigned long expires, unsigned lvl,
494 unsigned long *bucket_expiry)
495{
496
497 /*
498 * The timer wheel has to guarantee that a timer does not fire
499 * early. Early expiry can happen due to:
500 * - Timer is armed at the edge of a tick
501 * - Truncation of the expiry time in the outer wheel levels
502 *
503 * Round up with level granularity to prevent this.
504 */
505 expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
506 *bucket_expiry = expires << LVL_SHIFT(lvl);
507 return LVL_OFFS(lvl) + (expires & LVL_MASK);
508}
509
510static int calc_wheel_index(unsigned long expires, unsigned long clk,
511 unsigned long *bucket_expiry)
512{
513 unsigned long delta = expires - clk;
514 unsigned int idx;
515
516 if (delta < LVL_START(1)) {
517 idx = calc_index(expires, 0, bucket_expiry);
518 } else if (delta < LVL_START(2)) {
519 idx = calc_index(expires, 1, bucket_expiry);
520 } else if (delta < LVL_START(3)) {
521 idx = calc_index(expires, 2, bucket_expiry);
522 } else if (delta < LVL_START(4)) {
523 idx = calc_index(expires, 3, bucket_expiry);
524 } else if (delta < LVL_START(5)) {
525 idx = calc_index(expires, 4, bucket_expiry);
526 } else if (delta < LVL_START(6)) {
527 idx = calc_index(expires, 5, bucket_expiry);
528 } else if (delta < LVL_START(7)) {
529 idx = calc_index(expires, 6, bucket_expiry);
530 } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
531 idx = calc_index(expires, 7, bucket_expiry);
532 } else if ((long) delta < 0) {
533 idx = clk & LVL_MASK;
534 *bucket_expiry = clk;
535 } else {
536 /*
537 * Force obscenely large timeouts to expire at the
538 * capacity limit of the wheel.
539 */
540 if (delta >= WHEEL_TIMEOUT_CUTOFF)
541 expires = clk + WHEEL_TIMEOUT_MAX;
542
543 idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry);
544 }
545 return idx;
546}
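/*
 * Worked example at HZ=1000: with base->clk = 1000 and expires = 1250 the
 * delta is 250, which is >= LVL_START(1) (63) and < LVL_START(2) (504), so
 * the timer goes to level 1. calc_index() computes (1250 + 8) >> 3 = 157,
 * giving bucket_expiry = 157 << 3 = 1256 and idx = 64 + (157 & 63) = 93.
 * The timer may thus fire up to 6 jiffies after its requested expiry, but
 * never before it.
 */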
547
548static void
549trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
550{
551 if (!is_timers_nohz_active())
552 return;
553
554 /*
555 * TODO: This wants some optimizing similar to the code below, but we
556 * will do that when we switch from push to pull for deferrable timers.
557 */
558 if (timer->flags & TIMER_DEFERRABLE) {
559 if (tick_nohz_full_cpu(base->cpu))
560 wake_up_nohz_cpu(base->cpu);
561 return;
562 }
563
564 /*
565 * We might have to IPI the remote CPU if the base is idle and the
566 * timer is not deferrable. If the other CPU is on the way to idle
567 * then it can't set base->is_idle as we hold the base lock:
568 */
569 if (base->is_idle)
570 wake_up_nohz_cpu(base->cpu);
571}
572
573/*
574 * Enqueue the timer into the hash bucket, mark it pending in
575 * the bitmap, store the index in the timer flags then wake up
576 * the target CPU if needed.
577 */
578static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
579 unsigned int idx, unsigned long bucket_expiry)
580{
581
582 hlist_add_head(&timer->entry, base->vectors + idx);
583 __set_bit(idx, base->pending_map);
584 timer_set_idx(timer, idx);
585
586 trace_timer_start(timer, timer->expires, timer->flags);
587
588 /*
589 * Check whether this is the new first expiring timer. The
590 * effective expiry time of the timer is required here
591 * (bucket_expiry) instead of timer->expires.
592 */
593 if (time_before(bucket_expiry, base->next_expiry)) {
594 /*
595 * Set the next expiry time and kick the CPU so it
596 * can reevaluate the wheel:
597 */
598 base->next_expiry = bucket_expiry;
599 base->timers_pending = true;
600 base->next_expiry_recalc = false;
601 trigger_dyntick_cpu(base, timer);
602 }
603}
604
605static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
606{
607 unsigned long bucket_expiry;
608 unsigned int idx;
609
610 idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
611 enqueue_timer(base, timer, idx, bucket_expiry);
612}
613
614#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
615
616static const struct debug_obj_descr timer_debug_descr;
617
618static void *timer_debug_hint(void *addr)
619{
620 return ((struct timer_list *) addr)->function;
621}
622
623static bool timer_is_static_object(void *addr)
624{
625 struct timer_list *timer = addr;
626
627 return (timer->entry.pprev == NULL &&
628 timer->entry.next == TIMER_ENTRY_STATIC);
629}
630
631/*
632 * fixup_init is called when:
633 * - an active object is initialized
634 */
635static bool timer_fixup_init(void *addr, enum debug_obj_state state)
636{
637 struct timer_list *timer = addr;
638
639 switch (state) {
640 case ODEBUG_STATE_ACTIVE:
641 del_timer_sync(timer);
642 debug_object_init(timer, &timer_debug_descr);
643 return true;
644 default:
645 return false;
646 }
647}
648
649/* Stub timer callback for improperly used timers. */
650static void stub_timer(struct timer_list *unused)
651{
652 WARN_ON(1);
653}
654
655/*
656 * fixup_activate is called when:
657 * - an active object is activated
658 * - an unknown non-static object is activated
659 */
660static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
661{
662 struct timer_list *timer = addr;
663
664 switch (state) {
665 case ODEBUG_STATE_NOTAVAILABLE:
666 timer_setup(timer, stub_timer, 0);
667 return true;
668
669 case ODEBUG_STATE_ACTIVE:
670 WARN_ON(1);
671 fallthrough;
672 default:
673 return false;
674 }
675}
676
677/*
678 * fixup_free is called when:
679 * - an active object is freed
680 */
681static bool timer_fixup_free(void *addr, enum debug_obj_state state)
682{
683 struct timer_list *timer = addr;
684
685 switch (state) {
686 case ODEBUG_STATE_ACTIVE:
687 del_timer_sync(timer);
688 debug_object_free(timer, &timer_debug_descr);
689 return true;
690 default:
691 return false;
692 }
693}
694
695/*
696 * fixup_assert_init is called when:
697 * - an untracked/uninit-ed object is found
698 */
699static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
700{
701 struct timer_list *timer = addr;
702
703 switch (state) {
704 case ODEBUG_STATE_NOTAVAILABLE:
705 timer_setup(timer, stub_timer, 0);
706 return true;
707 default:
708 return false;
709 }
710}
711
712static const struct debug_obj_descr timer_debug_descr = {
713 .name = "timer_list",
714 .debug_hint = timer_debug_hint,
715 .is_static_object = timer_is_static_object,
716 .fixup_init = timer_fixup_init,
717 .fixup_activate = timer_fixup_activate,
718 .fixup_free = timer_fixup_free,
719 .fixup_assert_init = timer_fixup_assert_init,
720};
721
722static inline void debug_timer_init(struct timer_list *timer)
723{
724 debug_object_init(timer, &timer_debug_descr);
725}
726
727static inline void debug_timer_activate(struct timer_list *timer)
728{
729 debug_object_activate(timer, &timer_debug_descr);
730}
731
732static inline void debug_timer_deactivate(struct timer_list *timer)
733{
734 debug_object_deactivate(timer, &timer_debug_descr);
735}
736
737static inline void debug_timer_assert_init(struct timer_list *timer)
738{
739 debug_object_assert_init(timer, &timer_debug_descr);
740}
741
742static void do_init_timer(struct timer_list *timer,
743 void (*func)(struct timer_list *),
744 unsigned int flags,
745 const char *name, struct lock_class_key *key);
746
747void init_timer_on_stack_key(struct timer_list *timer,
748 void (*func)(struct timer_list *),
749 unsigned int flags,
750 const char *name, struct lock_class_key *key)
751{
752 debug_object_init_on_stack(timer, &timer_debug_descr);
753 do_init_timer(timer, func, flags, name, key);
754}
755EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
756
757void destroy_timer_on_stack(struct timer_list *timer)
758{
759 debug_object_free(timer, &timer_debug_descr);
760}
761EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
762
763#else
764static inline void debug_timer_init(struct timer_list *timer) { }
765static inline void debug_timer_activate(struct timer_list *timer) { }
766static inline void debug_timer_deactivate(struct timer_list *timer) { }
767static inline void debug_timer_assert_init(struct timer_list *timer) { }
768#endif
769
770static inline void debug_init(struct timer_list *timer)
771{
772 debug_timer_init(timer);
773 trace_timer_init(timer);
774}
775
776static inline void debug_deactivate(struct timer_list *timer)
777{
778 debug_timer_deactivate(timer);
779 trace_timer_cancel(timer);
780}
781
782static inline void debug_assert_init(struct timer_list *timer)
783{
784 debug_timer_assert_init(timer);
785}
786
787static void do_init_timer(struct timer_list *timer,
788 void (*func)(struct timer_list *),
789 unsigned int flags,
790 const char *name, struct lock_class_key *key)
791{
792 timer->entry.pprev = NULL;
793 timer->function = func;
794 if (WARN_ON_ONCE(flags & ~TIMER_INIT_FLAGS))
795 flags &= TIMER_INIT_FLAGS;
796 timer->flags = flags | raw_smp_processor_id();
797 lockdep_init_map(&timer->lockdep_map, name, key, 0);
798}
799
800/**
801 * init_timer_key - initialize a timer
802 * @timer: the timer to be initialized
803 * @func: timer callback function
804 * @flags: timer flags
805 * @name: name of the timer
806 * @key: lockdep class key of the fake lock used for tracking timer
807 * sync lock dependencies
808 *
809 * init_timer_key() must be done to a timer prior to calling *any* of the
810 * other timer functions.
811 */
812void init_timer_key(struct timer_list *timer,
813 void (*func)(struct timer_list *), unsigned int flags,
814 const char *name, struct lock_class_key *key)
815{
816 debug_init(timer);
817 do_init_timer(timer, func, flags, name, key);
818}
819EXPORT_SYMBOL(init_timer_key);
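/*
 * Illustrative sketch (struct mydev and mydev_timeout are made up for this
 * example): users normally go through the timer_setup() wrapper, which ends
 * up in init_timer_key(), and recover their containing object in the
 * callback with from_timer():
 *
 *        struct mydev {
 *                struct timer_list timeout;
 *        };
 *
 *        static void mydev_timeout(struct timer_list *t)
 *        {
 *                struct mydev *dev = from_timer(dev, t, timeout);
 *
 *                ... handle the expired timeout for dev ...
 *        }
 *
 *        timer_setup(&dev->timeout, mydev_timeout, 0);
 */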
820
821static inline void detach_timer(struct timer_list *timer, bool clear_pending)
822{
823 struct hlist_node *entry = &timer->entry;
824
825 debug_deactivate(timer);
826
827 __hlist_del(entry);
828 if (clear_pending)
829 entry->pprev = NULL;
830 entry->next = LIST_POISON2;
831}
832
833static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
834 bool clear_pending)
835{
836 unsigned idx = timer_get_idx(timer);
837
838 if (!timer_pending(timer))
839 return 0;
840
841 if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) {
842 __clear_bit(idx, base->pending_map);
843 base->next_expiry_recalc = true;
844 }
845
846 detach_timer(timer, clear_pending);
847 return 1;
848}
849
850static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
851{
852 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
853
854 /*
855 * If the timer is deferrable and NO_HZ_COMMON is set then we need
856 * to use the deferrable base.
857 */
858 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
859 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
860 return base;
861}
862
863static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
864{
865 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
866
867 /*
868 * If the timer is deferrable and NO_HZ_COMMON is set then we need
869 * to use the deferrable base.
870 */
871 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
872 base = this_cpu_ptr(&timer_bases[BASE_DEF]);
873 return base;
874}
875
876static inline struct timer_base *get_timer_base(u32 tflags)
877{
878 return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
879}
880
881static inline struct timer_base *
882get_target_base(struct timer_base *base, unsigned tflags)
883{
884#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
885 if (static_branch_likely(&timers_migration_enabled) &&
886 !(tflags & TIMER_PINNED))
887 return get_timer_cpu_base(tflags, get_nohz_timer_target());
888#endif
889 return get_timer_this_cpu_base(tflags);
890}
891
892static inline void forward_timer_base(struct timer_base *base)
893{
894 unsigned long jnow = READ_ONCE(jiffies);
895
896 /*
897 * No need to forward if we are close enough below jiffies.
898 * Also while executing timers, base->clk is 1 offset ahead
899 * of jiffies to avoid endless requeuing to current jiffies.
900 */
901 if ((long)(jnow - base->clk) < 1)
902 return;
903
904 /*
905 * If the next expiry value is > jiffies, then we fast forward to
906 * jiffies otherwise we forward to the next expiry value.
907 */
908 if (time_after(base->next_expiry, jnow)) {
909 base->clk = jnow;
910 } else {
911 if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
912 return;
913 base->clk = base->next_expiry;
914 }
915}
916
917
918/*
919 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
920 * that all timers which are tied to this base are locked, and the base itself
921 * is locked too.
922 *
923 * So __run_timers/migrate_timers can safely modify all timers which could
924 * be found in the base->vectors array.
925 *
926 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
927 * to wait until the migration is done.
928 */
929static struct timer_base *lock_timer_base(struct timer_list *timer,
930 unsigned long *flags)
931 __acquires(timer->base->lock)
932{
933 for (;;) {
934 struct timer_base *base;
935 u32 tf;
936
937 /*
938 * We need to use READ_ONCE() here, otherwise the compiler
939 * might re-read @tf between the check for TIMER_MIGRATING
940 * and spin_lock().
941 */
942 tf = READ_ONCE(timer->flags);
943
944 if (!(tf & TIMER_MIGRATING)) {
945 base = get_timer_base(tf);
946 raw_spin_lock_irqsave(&base->lock, *flags);
947 if (timer->flags == tf)
948 return base;
949 raw_spin_unlock_irqrestore(&base->lock, *flags);
950 }
951 cpu_relax();
952 }
953}
954
955#define MOD_TIMER_PENDING_ONLY 0x01
956#define MOD_TIMER_REDUCE 0x02
957#define MOD_TIMER_NOTPENDING 0x04
958
959static inline int
960__mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
961{
962 unsigned long clk = 0, flags, bucket_expiry;
963 struct timer_base *base, *new_base;
964 unsigned int idx = UINT_MAX;
965 int ret = 0;
966
967 BUG_ON(!timer->function);
968
969 /*
970 * This is a common optimization triggered by the networking code - if
971 * the timer is re-modified to have the same timeout or ends up in the
972 * same array bucket then just return:
973 */
974 if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) {
975 /*
976 * The downside of this optimization is that it can result in
977 * larger granularity than you would get from adding a new
978 * timer with this expiry.
979 */
980 long diff = timer->expires - expires;
981
982 if (!diff)
983 return 1;
984 if (options & MOD_TIMER_REDUCE && diff <= 0)
985 return 1;
986
987 /*
988 * We lock timer base and calculate the bucket index right
989 * here. If the timer ends up in the same bucket, then we
990 * just update the expiry time and avoid the whole
991 * dequeue/enqueue dance.
992 */
993 base = lock_timer_base(timer, &flags);
994 forward_timer_base(base);
995
996 if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
997 time_before_eq(timer->expires, expires)) {
998 ret = 1;
999 goto out_unlock;
1000 }
1001
1002 clk = base->clk;
1003 idx = calc_wheel_index(expires, clk, &bucket_expiry);
1004
1005 /*
1006 * Retrieve and compare the array index of the pending
1007 * timer. If it matches set the expiry to the new value so a
1008 * subsequent call will exit in the expires check above.
1009 */
1010 if (idx == timer_get_idx(timer)) {
1011 if (!(options & MOD_TIMER_REDUCE))
1012 timer->expires = expires;
1013 else if (time_after(timer->expires, expires))
1014 timer->expires = expires;
1015 ret = 1;
1016 goto out_unlock;
1017 }
1018 } else {
1019 base = lock_timer_base(timer, &flags);
1020 forward_timer_base(base);
1021 }
1022
1023 ret = detach_if_pending(timer, base, false);
1024 if (!ret && (options & MOD_TIMER_PENDING_ONLY))
1025 goto out_unlock;
1026
1027 new_base = get_target_base(base, timer->flags);
1028
1029 if (base != new_base) {
1030 /*
1031 * We are trying to schedule the timer on the new base.
1032 * However we can't change the timer's base while it is running,
1033 * otherwise del_timer_sync() can't detect that the timer's
1034 * handler has not yet finished. This also guarantees that the
1035 * timer is serialized wrt itself.
1036 */
1037 if (likely(base->running_timer != timer)) {
1038 /* See the comment in lock_timer_base() */
1039 timer->flags |= TIMER_MIGRATING;
1040
1041 raw_spin_unlock(&base->lock);
1042 base = new_base;
1043 raw_spin_lock(&base->lock);
1044 WRITE_ONCE(timer->flags,
1045 (timer->flags & ~TIMER_BASEMASK) | base->cpu);
1046 forward_timer_base(base);
1047 }
1048 }
1049
1050 debug_timer_activate(timer);
1051
1052 timer->expires = expires;
1053 /*
1054 * If 'idx' was calculated above and the base time did not advance
1055 * between calculating 'idx' and possibly switching the base, only
1056 * enqueue_timer() is required. Otherwise we need to (re)calculate
1057 * the wheel index via internal_add_timer().
1058 */
1059 if (idx != UINT_MAX && clk == base->clk)
1060 enqueue_timer(base, timer, idx, bucket_expiry);
1061 else
1062 internal_add_timer(base, timer);
1063
1064out_unlock:
1065 raw_spin_unlock_irqrestore(&base->lock, flags);
1066
1067 return ret;
1068}
1069
1070/**
1071 * mod_timer_pending - modify a pending timer's timeout
1072 * @timer: the pending timer to be modified
1073 * @expires: new timeout in jiffies
1074 *
1075 * mod_timer_pending() is the same for pending timers as mod_timer(),
1076 * but will not re-activate and modify already deleted timers.
1077 *
1078 * It is useful for unserialized use of timers.
1079 */
1080int mod_timer_pending(struct timer_list *timer, unsigned long expires)
1081{
1082 return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
1083}
1084EXPORT_SYMBOL(mod_timer_pending);
1085
1086/**
1087 * mod_timer - modify a timer's timeout
1088 * @timer: the timer to be modified
1089 * @expires: new timeout in jiffies
1090 *
1091 * mod_timer() is a more efficient way to update the expires field of an
1092 * active timer (if the timer is inactive it will be activated).
1093 *
1094 * mod_timer(timer, expires) is equivalent to:
1095 *
1096 * del_timer(timer); timer->expires = expires; add_timer(timer);
1097 *
1098 * Note that if there are multiple unserialized concurrent users of the
1099 * same timer, then mod_timer() is the only safe way to modify the timeout,
1100 * since add_timer() cannot modify an already running timer.
1101 *
1102 * The function returns whether it has modified a pending timer or not.
1103 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
1104 * active timer returns 1.)
1105 */
1106int mod_timer(struct timer_list *timer, unsigned long expires)
1107{
1108 return __mod_timer(timer, expires, 0);
1109}
1110EXPORT_SYMBOL(mod_timer);
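/*
 * Usage sketch (hypothetical names): mod_timer() is the usual way to (re)arm
 * a timeout from hot paths, e.g. pushing a transmit watchdog forward on
 * every completed packet:
 *
 *        mod_timer(&dev->tx_watchdog, jiffies + msecs_to_jiffies(500));
 *
 * The return value only tells whether a pending timer was modified; either
 * way the timer is armed afterwards.
 */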
1111
1112/**
1113 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
1114 * @timer: The timer to be modified
1115 * @expires: New timeout in jiffies
1116 *
1117 * timer_reduce() is very similar to mod_timer(), except that it will only
1118 * modify a pending timer if that would reduce the expiration time (it will
1119 * start a timer that isn't pending regardless).
1120 */
1121int timer_reduce(struct timer_list *timer, unsigned long expires)
1122{
1123 return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
1124}
1125EXPORT_SYMBOL(timer_reduce);
1126
1127/**
1128 * add_timer - start a timer
1129 * @timer: the timer to be added
1130 *
1131 * The kernel will do a ->function(@timer) callback from the
1132 * timer interrupt at the ->expires point in the future. The
1133 * current time is 'jiffies'.
1134 *
1135 * The timer's ->expires and ->function fields must be set prior to calling this
1136 * function.
1137 *
1138 * Timers with an ->expires field in the past will be executed in the next
1139 * timer tick.
1140 */
1141void add_timer(struct timer_list *timer)
1142{
1143 BUG_ON(timer_pending(timer));
1144 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
1145}
1146EXPORT_SYMBOL(add_timer);
1147
1148/**
1149 * add_timer_on - start a timer on a particular CPU
1150 * @timer: the timer to be added
1151 * @cpu: the CPU to start it on
1152 *
1153 * This is not very scalable on SMP. Double adds are not possible.
1154 */
1155void add_timer_on(struct timer_list *timer, int cpu)
1156{
1157 struct timer_base *new_base, *base;
1158 unsigned long flags;
1159
1160 BUG_ON(timer_pending(timer) || !timer->function);
1161
1162 new_base = get_timer_cpu_base(timer->flags, cpu);
1163
1164 /*
1165 * If @timer was on a different CPU, it should be migrated with the
1166 * old base locked to prevent other operations proceeding with the
1167 * wrong base locked. See lock_timer_base().
1168 */
1169 base = lock_timer_base(timer, &flags);
1170 if (base != new_base) {
1171 timer->flags |= TIMER_MIGRATING;
1172
1173 raw_spin_unlock(&base->lock);
1174 base = new_base;
1175 raw_spin_lock(&base->lock);
1176 WRITE_ONCE(timer->flags,
1177 (timer->flags & ~TIMER_BASEMASK) | cpu);
1178 }
1179 forward_timer_base(base);
1180
1181 debug_timer_activate(timer);
1182 internal_add_timer(base, timer);
1183 raw_spin_unlock_irqrestore(&base->lock, flags);
1184}
1185EXPORT_SYMBOL_GPL(add_timer_on);
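/*
 * Usage sketch (hypothetical names): add_timer_on() is for work that has to
 * run on a specific CPU, e.g. per-CPU housekeeping:
 *
 *        timer->expires = jiffies + HZ;
 *        add_timer_on(timer, cpu);
 *
 * As with add_timer(), the timer must not already be pending and ->expires
 * must be set up front.
 */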
1186
1187/**
1188 * del_timer - deactivate a timer.
1189 * @timer: the timer to be deactivated
1190 *
1191 * del_timer() deactivates a timer - this works on both active and inactive
1192 * timers.
1193 *
1194 * The function returns whether it has deactivated a pending timer or not.
1195 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
1196 * active timer returns 1.)
1197 */
1198int del_timer(struct timer_list *timer)
1199{
1200 struct timer_base *base;
1201 unsigned long flags;
1202 int ret = 0;
1203
1204 debug_assert_init(timer);
1205
1206 if (timer_pending(timer)) {
1207 base = lock_timer_base(timer, &flags);
1208 ret = detach_if_pending(timer, base, true);
1209 raw_spin_unlock_irqrestore(&base->lock, flags);
1210 }
1211
1212 return ret;
1213}
1214EXPORT_SYMBOL(del_timer);
1215
1216/**
1217 * try_to_del_timer_sync - Try to deactivate a timer
1218 * @timer: timer to delete
1219 *
1220 * This function tries to deactivate a timer. Upon successful (ret >= 0)
1221 * exit the timer is not queued and the handler is not running on any CPU.
1222 */
1223int try_to_del_timer_sync(struct timer_list *timer)
1224{
1225 struct timer_base *base;
1226 unsigned long flags;
1227 int ret = -1;
1228
1229 debug_assert_init(timer);
1230
1231 base = lock_timer_base(timer, &flags);
1232
1233 if (base->running_timer != timer)
1234 ret = detach_if_pending(timer, base, true);
1235
1236 raw_spin_unlock_irqrestore(&base->lock, flags);
1237
1238 return ret;
1239}
1240EXPORT_SYMBOL(try_to_del_timer_sync);
1241
1242#ifdef CONFIG_PREEMPT_RT
1243static __init void timer_base_init_expiry_lock(struct timer_base *base)
1244{
1245 spin_lock_init(&base->expiry_lock);
1246}
1247
1248static inline void timer_base_lock_expiry(struct timer_base *base)
1249{
1250 spin_lock(&base->expiry_lock);
1251}
1252
1253static inline void timer_base_unlock_expiry(struct timer_base *base)
1254{
1255 spin_unlock(&base->expiry_lock);
1256}
1257
1258/*
1259 * The counterpart to del_timer_wait_running().
1260 *
1261 * If there is a waiter for base->expiry_lock, then it was waiting for the
1262 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
1263 * the waiter to acquire the lock and make progress.
1264 */
1265static void timer_sync_wait_running(struct timer_base *base)
1266{
1267 if (atomic_read(&base->timer_waiters)) {
1268 raw_spin_unlock_irq(&base->lock);
1269 spin_unlock(&base->expiry_lock);
1270 spin_lock(&base->expiry_lock);
1271 raw_spin_lock_irq(&base->lock);
1272 }
1273}
1274
1275/*
1276 * This function is called on PREEMPT_RT kernels when the fast path
1277 * deletion of a timer failed because the timer callback function was
1278 * running.
1279 *
1280 * This prevents priority inversion, if the softirq thread on a remote CPU
1281 * got preempted, and it prevents a livelock when the task which tries to
1282 * delete a timer preempted the softirq thread running the timer callback
1283 * function.
1284 */
1285static void del_timer_wait_running(struct timer_list *timer)
1286{
1287 u32 tf;
1288
1289 tf = READ_ONCE(timer->flags);
1290 if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
1291 struct timer_base *base = get_timer_base(tf);
1292
1293 /*
1294 * Mark the base as contended and grab the expiry lock,
1295 * which is held by the softirq across the timer
1296 * callback. Drop the lock immediately so the softirq can
1297 * expire the next timer. In theory the timer could already
1298 * be running again, but that's more than unlikely and just
1299 * causes another wait loop.
1300 */
1301 atomic_inc(&base->timer_waiters);
1302 spin_lock_bh(&base->expiry_lock);
1303 atomic_dec(&base->timer_waiters);
1304 spin_unlock_bh(&base->expiry_lock);
1305 }
1306}
1307#else
1308static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
1309static inline void timer_base_lock_expiry(struct timer_base *base) { }
1310static inline void timer_base_unlock_expiry(struct timer_base *base) { }
1311static inline void timer_sync_wait_running(struct timer_base *base) { }
1312static inline void del_timer_wait_running(struct timer_list *timer) { }
1313#endif
1314
1315#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
1316/**
1317 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1318 * @timer: the timer to be deactivated
1319 *
1320 * This function only differs from del_timer() on SMP: besides deactivating
1321 * the timer it also makes sure the handler has finished executing on other
1322 * CPUs.
1323 *
1324 * Synchronization rules: Callers must prevent restarting of the timer,
1325 * otherwise this function is meaningless. It must not be called from
1326 * interrupt contexts unless the timer is an irqsafe one. The caller must
1327 * not hold locks which would prevent completion of the timer's
1328 * handler. The timer's handler must not call add_timer_on(). Upon exit the
1329 * timer is not queued and the handler is not running on any CPU.
1330 *
1331 * Note: For !irqsafe timers, you must not hold locks that are held in
1332 * interrupt context while calling this function. Even if the lock has
1333 * nothing to do with the timer in question. Here's why::
1334 *
1335 * CPU0 CPU1
1336 * ---- ----
1337 * <SOFTIRQ>
1338 * call_timer_fn();
1339 * base->running_timer = mytimer;
1340 * spin_lock_irq(somelock);
1341 * <IRQ>
1342 * spin_lock(somelock);
1343 * del_timer_sync(mytimer);
1344 * while (base->running_timer == mytimer);
1345 *
1346 * Now del_timer_sync() will never return and never release somelock.
1347 * The interrupt on the other CPU is waiting to grab somelock but
1348 * it has interrupted the softirq that CPU0 is waiting to finish.
1349 *
1350 * The function returns whether it has deactivated a pending timer or not.
1351 */
1352int del_timer_sync(struct timer_list *timer)
1353{
1354 int ret;
1355
1356#ifdef CONFIG_LOCKDEP
1357 unsigned long flags;
1358
1359 /*
1360 * If lockdep gives a backtrace here, please reference
1361 * the synchronization rules above.
1362 */
1363 local_irq_save(flags);
1364 lock_map_acquire(&timer->lockdep_map);
1365 lock_map_release(&timer->lockdep_map);
1366 local_irq_restore(flags);
1367#endif
1368 /*
1369 * Don't use it in hardirq context, because it
1370 * could lead to deadlock.
1371 */
1372 WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
1373
1374 /*
1375 * Must be able to sleep on PREEMPT_RT because of the slowpath in
1376 * del_timer_wait_running().
1377 */
1378 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
1379 lockdep_assert_preemption_enabled();
1380
1381 do {
1382 ret = try_to_del_timer_sync(timer);
1383
1384 if (unlikely(ret < 0)) {
1385 del_timer_wait_running(timer);
1386 cpu_relax();
1387 }
1388 } while (ret < 0);
1389
1390 return ret;
1391}
1392EXPORT_SYMBOL(del_timer_sync);
1393#endif
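/*
 * Teardown sketch (hypothetical names): before freeing an object that embeds
 * a timer, stop everything that could re-arm it and then wait for a possibly
 * running callback:
 *
 *        dev->shutting_down = true;
 *        del_timer_sync(&dev->timeout);
 *        kfree(dev);
 *
 * Plain del_timer() is only sufficient when the caller can otherwise
 * guarantee that the callback is not running concurrently.
 */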
1394
1395static void call_timer_fn(struct timer_list *timer,
1396 void (*fn)(struct timer_list *),
1397 unsigned long baseclk)
1398{
1399 int count = preempt_count();
1400
1401#ifdef CONFIG_LOCKDEP
1402 /*
1403 * It is permissible to free the timer from inside the
1404 * function that is called from it, this we need to take into
1405 * account for lockdep too. To avoid bogus "held lock freed"
1406 * warnings as well as problems when looking into
1407 * timer->lockdep_map, make a copy and use that here.
1408 */
1409 struct lockdep_map lockdep_map;
1410
1411 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1412#endif
1413 /*
1414 * Couple the lock chain with the lock chain at
1415 * del_timer_sync() by acquiring the lock_map around the fn()
1416 * call here and in del_timer_sync().
1417 */
1418 lock_map_acquire(&lockdep_map);
1419
1420 trace_timer_expire_entry(timer, baseclk);
1421 fn(timer);
1422 trace_timer_expire_exit(timer);
1423
1424 lock_map_release(&lockdep_map);
1425
1426 if (count != preempt_count()) {
1427 WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n",
1428 fn, count, preempt_count());
1429 /*
1430 * Restore the preempt count. That gives us a decent
1431 * chance to survive and extract information. If the
1432 * callback kept a lock held, bad luck, but not worse
1433 * than the BUG() we had.
1434 */
1435 preempt_count_set(count);
1436 }
1437}
1438
1439static void expire_timers(struct timer_base *base, struct hlist_head *head)
1440{
1441 /*
1442 * This value is required only for tracing. base->clk was
1443 * incremented directly before expire_timers was called. But expiry
1444 * is related to the old base->clk value.
1445 */
1446 unsigned long baseclk = base->clk - 1;
1447
1448 while (!hlist_empty(head)) {
1449 struct timer_list *timer;
1450 void (*fn)(struct timer_list *);
1451
1452 timer = hlist_entry(head->first, struct timer_list, entry);
1453
1454 base->running_timer = timer;
1455 detach_timer(timer, true);
1456
1457 fn = timer->function;
1458
1459 if (timer->flags & TIMER_IRQSAFE) {
1460 raw_spin_unlock(&base->lock);
1461 call_timer_fn(timer, fn, baseclk);
1462 raw_spin_lock(&base->lock);
1463 base->running_timer = NULL;
1464 } else {
1465 raw_spin_unlock_irq(&base->lock);
1466 call_timer_fn(timer, fn, baseclk);
1467 raw_spin_lock_irq(&base->lock);
1468 base->running_timer = NULL;
1469 timer_sync_wait_running(base);
1470 }
1471 }
1472}
1473
1474static int collect_expired_timers(struct timer_base *base,
1475 struct hlist_head *heads)
1476{
1477 unsigned long clk = base->clk = base->next_expiry;
1478 struct hlist_head *vec;
1479 int i, levels = 0;
1480 unsigned int idx;
1481
1482 for (i = 0; i < LVL_DEPTH; i++) {
1483 idx = (clk & LVL_MASK) + i * LVL_SIZE;
1484
1485 if (__test_and_clear_bit(idx, base->pending_map)) {
1486 vec = base->vectors + idx;
1487 hlist_move_list(vec, heads++);
1488 levels++;
1489 }
1490 /* Is it time to look at the next level? */
1491 if (clk & LVL_CLK_MASK)
1492 break;
1493 /* Shift clock for the next level granularity */
1494 clk >>= LVL_CLK_SHIFT;
1495 }
1496 return levels;
1497}
1498
1499/*
1500 * Find the next pending bucket of a level. Search from level start (@offset)
1501 * + @clk upwards and if nothing there, search from start of the level
1502 * (@offset) up to @offset + clk.
1503 */
1504static int next_pending_bucket(struct timer_base *base, unsigned offset,
1505 unsigned clk)
1506{
1507 unsigned pos, start = offset + clk;
1508 unsigned end = offset + LVL_SIZE;
1509
1510 pos = find_next_bit(base->pending_map, end, start);
1511 if (pos < end)
1512 return pos - start;
1513
1514 pos = find_next_bit(base->pending_map, start, offset);
1515 return pos < start ? pos + LVL_SIZE - start : -1;
1516}
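/*
 * Worked example: for level 1 (offset = 64) with clk = 29, the first scan
 * covers bits [93, 128). If nothing is pending there but bucket 70 is set,
 * the wrapped scan over [64, 93) finds it and the function returns
 * 70 + 64 - 93 = 41, i.e. the pending bucket is 41 level-1 slots ahead of
 * clk.
 */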
1517
1518/*
1519 * Search the first expiring timer in the various clock levels. Caller must
1520 * hold base->lock.
1521 */
1522static unsigned long __next_timer_interrupt(struct timer_base *base)
1523{
1524 unsigned long clk, next, adj;
1525 unsigned lvl, offset = 0;
1526
1527 next = base->clk + NEXT_TIMER_MAX_DELTA;
1528 clk = base->clk;
1529 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
1530 int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
1531 unsigned long lvl_clk = clk & LVL_CLK_MASK;
1532
1533 if (pos >= 0) {
1534 unsigned long tmp = clk + (unsigned long) pos;
1535
1536 tmp <<= LVL_SHIFT(lvl);
1537 if (time_before(tmp, next))
1538 next = tmp;
1539
1540 /*
1541 * If the next expiration happens before we reach
1542 * the next level, no need to check further.
1543 */
1544 if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK))
1545 break;
1546 }
1547 /*
1548 * Clock for the next level. If the current level clock lower
1549 * bits are zero, we look at the next level as is. If not we
1550 * need to advance it by one because that's going to be the
1551 * next expiring bucket in that level. base->clk is the next
1552 * expiring jiffie. So in case of:
1553 *
1554 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1555 * 0 0 0 0 0 0
1556 *
1557 * we have to look at all levels @index 0. With
1558 *
1559 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1560 * 0 0 0 0 0 2
1561 *
1562 * LVL0 has the next expiring bucket @index 2. The upper
1563 * levels have the next expiring bucket @index 1.
1564 *
1565 * In case that the propagation wraps the next level the same
1566 * rules apply:
1567 *
1568 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1569 * 0 0 0 0 F 2
1570 *
1571 * So after looking at LVL0 we get:
1572 *
1573 * LVL5 LVL4 LVL3 LVL2 LVL1
1574 * 0 0 0 1 0
1575 *
1576 * So no propagation from LVL1 to LVL2 because that happened
1577 * with the add already, but then we need to propagate further
1578 * from LVL2 to LVL3.
1579 *
1580 * So the simple check whether the lower bits of the current
1581 * level are 0 or not is sufficient for all cases.
1582 */
1583 adj = lvl_clk ? 1 : 0;
1584 clk >>= LVL_CLK_SHIFT;
1585 clk += adj;
1586 }
1587
1588 base->next_expiry_recalc = false;
1589 base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);
1590
1591 return next;
1592}
1593
1594#ifdef CONFIG_NO_HZ_COMMON
1595/*
1596 * Check if the next hrtimer event is before the next timer wheel
1597 * event:
1598 */
1599static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
1600{
1601 u64 nextevt = hrtimer_get_next_event();
1602
1603 /*
1604 * If high resolution timers are enabled
1605 * hrtimer_get_next_event() returns KTIME_MAX.
1606 */
1607 if (expires <= nextevt)
1608 return expires;
1609
1610 /*
1611 * If the next timer is already expired, return the tick base
1612 * time so the tick is fired immediately.
1613 */
1614 if (nextevt <= basem)
1615 return basem;
1616
1617 /*
1618 * Round up to the next jiffie. High resolution timers are
1619 * off, so the hrtimers are expired in the tick and we need to
1620 * make sure that this tick really expires the timer to avoid
1621 * a ping pong of the nohz stop code.
1622 *
1623 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
1624 */
1625 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
1626}
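/*
 * Rounding example for the !highres case: with HZ=1000 (TICK_NSEC ==
 * 1000000) a next event at 2500000 ns becomes
 * DIV_ROUND_UP_ULL(2500000, 1000000) * 1000000 = 3000000 ns, so the tick
 * that is kept running really does expire the hrtimer instead of
 * ping-ponging with the nohz stop code.
 */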
1627
1628/**
1629 * get_next_timer_interrupt - return the time (clock mono) of the next timer
1630 * @basej: base time jiffies
1631 * @basem: base time clock monotonic
1632 *
1633 * Returns the tick aligned clock monotonic time of the next pending
1634 * timer or KTIME_MAX if no timer is pending.
1635 */
1636u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1637{
1638 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1639 u64 expires = KTIME_MAX;
1640 unsigned long nextevt;
1641
1642 /*
1643 * Pretend that there is no timer pending if the cpu is offline.
1644 * Possible pending timers will be migrated later to an active cpu.
1645 */
1646 if (cpu_is_offline(smp_processor_id()))
1647 return expires;
1648
1649 raw_spin_lock(&base->lock);
1650 if (base->next_expiry_recalc)
1651 base->next_expiry = __next_timer_interrupt(base);
1652 nextevt = base->next_expiry;
1653
1654 /*
1655 * We have a fresh next event. Check whether we can forward the
1656 * base. We can only do that when @basej is past base->clk
1657 * otherwise we might rewind base->clk.
1658 */
1659 if (time_after(basej, base->clk)) {
1660 if (time_after(nextevt, basej))
1661 base->clk = basej;
1662 else if (time_after(nextevt, base->clk))
1663 base->clk = nextevt;
1664 }
1665
1666 if (time_before_eq(nextevt, basej)) {
1667 expires = basem;
1668 base->is_idle = false;
1669 } else {
1670 if (base->timers_pending)
1671 expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
1672 /*
1673 * If we expect to sleep more than a tick, mark the base idle.
1674 * Also the tick is stopped so any added timer must forward
1675 * the base clk itself to keep granularity small. This idle
1676 * logic is only maintained for the BASE_STD base, deferrable
1677 * timers may still see large granularity skew (by design).
1678 */
1679 if ((expires - basem) > TICK_NSEC)
1680 base->is_idle = true;
1681 }
1682 raw_spin_unlock(&base->lock);
1683
1684 return cmp_next_hrtimer_event(basem, expires);
1685}
1686
1687/**
1688 * timer_clear_idle - Clear the idle state of the timer base
1689 *
1690 * Called with interrupts disabled
1691 */
1692void timer_clear_idle(void)
1693{
1694 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1695
1696 /*
1697 * We do this unlocked. The worst outcome is a remote enqueue sending
1698 * a pointless IPI, but taking the lock would just make the window for
1699 * sending the IPI a few instructions smaller for the cost of taking
1700 * the lock in the exit from idle path.
1701 */
1702 base->is_idle = false;
1703}
1704#endif
1705
1706/**
1707 * __run_timers - run all expired timers (if any) on this CPU.
1708 * @base: the timer vector to be processed.
1709 */
1710static inline void __run_timers(struct timer_base *base)
1711{
1712 struct hlist_head heads[LVL_DEPTH];
1713 int levels;
1714
1715 if (time_before(jiffies, base->next_expiry))
1716 return;
1717
1718 timer_base_lock_expiry(base);
1719 raw_spin_lock_irq(&base->lock);
1720
1721 while (time_after_eq(jiffies, base->clk) &&
1722 time_after_eq(jiffies, base->next_expiry)) {
1723 levels = collect_expired_timers(base, heads);
1724 /*
1725 * The only possible reason for not finding any expired
1726 * timer at this clk is that all matching timers have been
1727 * dequeued.
1728 */
1729 WARN_ON_ONCE(!levels && !base->next_expiry_recalc);
1730 base->clk++;
1731 base->next_expiry = __next_timer_interrupt(base);
1732
1733 while (levels--)
1734 expire_timers(base, heads + levels);
1735 }
1736 raw_spin_unlock_irq(&base->lock);
1737 timer_base_unlock_expiry(base);
1738}
1739
1740/*
1741 * This function runs timers and the timer-tq in bottom half context.
1742 */
1743static __latent_entropy void run_timer_softirq(struct softirq_action *h)
1744{
1745 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1746
1747 __run_timers(base);
1748 if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
1749 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
1750}
1751
1752/*
1753 * Called by the local, per-CPU timer interrupt on SMP.
1754 */
1755static void run_local_timers(void)
1756{
1757 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1758
1759 hrtimer_run_queues();
1760 /* Raise the softirq only if required. */
1761 if (time_before(jiffies, base->next_expiry)) {
1762 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
1763 return;
1764 /* CPU is awake, so check the deferrable base. */
1765 base++;
1766 if (time_before(jiffies, base->next_expiry))
1767 return;
1768 }
1769 raise_softirq(TIMER_SOFTIRQ);
1770}
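
/*
 * Illustrative sketch (not part of the original file): a deferrable timer is
 * created by passing TIMER_DEFERRABLE to timer_setup(); it is queued on the
 * BASE_DEF wheel and will not wake an idle CPU on its own. All names and the
 * one-second period below are made up for illustration.
 */
static struct timer_list example_housekeeping_timer;

static void example_housekeeping(struct timer_list *t)
{
	/* Re-arm roughly one second out; exact expiry time is not critical. */
	mod_timer(t, jiffies + HZ);
}

static __maybe_unused void example_start_housekeeping(void)
{
	timer_setup(&example_housekeeping_timer, example_housekeeping,
		    TIMER_DEFERRABLE);
	mod_timer(&example_housekeeping_timer, jiffies + HZ);
}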
1771
1772/*
1773 * Called from the timer interrupt handler to charge one tick to the current
1774 * process. user_tick is 1 if the tick is user time, 0 for system.
1775 */
1776void update_process_times(int user_tick)
1777{
1778 struct task_struct *p = current;
1779
1780 PRANDOM_ADD_NOISE(jiffies, user_tick, p, 0);
1781
1782 /* Note: this timer irq context must be accounted for as well. */
1783 account_process_tick(p, user_tick);
1784 run_local_timers();
1785 rcu_sched_clock_irq(user_tick);
1786#ifdef CONFIG_IRQ_WORK
1787 if (in_irq())
1788 irq_work_tick();
1789#endif
1790 scheduler_tick();
1791 if (IS_ENABLED(CONFIG_POSIX_TIMERS))
1792 run_posix_cpu_timers();
1793}
1794
1795/*
1796 * Since schedule_timeout()'s timer is defined on the stack, it must store
1797 * the target task on the stack as well.
1798 */
1799struct process_timer {
1800 struct timer_list timer;
1801 struct task_struct *task;
1802};
1803
1804static void process_timeout(struct timer_list *t)
1805{
1806 struct process_timer *timeout = from_timer(timeout, t, timer);
1807
1808 wake_up_process(timeout->task);
1809}
1810
1811/**
1812 * schedule_timeout - sleep until timeout
1813 * @timeout: timeout value in jiffies
1814 *
1815 * Make the current task sleep until @timeout jiffies have elapsed.
1816 * The function behavior depends on the current task state
1817 * (see also set_current_state() description):
1818 *
1819 * %TASK_RUNNING - the scheduler is called, but the task does not sleep
1820 * at all. That happens because sched_submit_work() does nothing for
1821 * tasks in %TASK_RUNNING state.
1822 *
1823 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1824 * pass before the routine returns unless the current task is explicitly
1825 * woken up (e.g. by wake_up_process()).
1826 *
1827 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1828 * delivered to the current task or the current task is explicitly woken
1829 * up.
1830 *
1831 * The current task state is guaranteed to be %TASK_RUNNING when this
1832 * routine returns.
1833 *
1834 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1835 * the CPU away without a bound on the timeout. In this case the return
1836 * value will be %MAX_SCHEDULE_TIMEOUT.
1837 *
1838 * Returns 0 when the timer has expired, otherwise the remaining time in
1839 * jiffies will be returned. In all cases the return value is guaranteed
1840 * to be non-negative.
1841 */
1842signed long __sched schedule_timeout(signed long timeout)
1843{
1844 struct process_timer timer;
1845 unsigned long expire;
1846
1847 switch (timeout)
1848 {
1849 case MAX_SCHEDULE_TIMEOUT:
1850 /*
1851 * This special case exists purely for the caller's
1852 * convenience. Nothing more. We could have picked one of the
1853 * negative values for MAX_SCHEDULE_TIMEOUT, but returning a
1854 * valid offset (>= 0) lets the caller do whatever it wants
1855 * with the retval.
1856 */
1857 schedule();
1858 goto out;
1859 default:
1860 /*
1861 * Another bit of paranoia. Note that the retval will be
1862 * 0, since no piece of the kernel is supposed to check for
1863 * a negative retval from schedule_timeout() (it should
1864 * never happen anyway). The printk() below just tells you
1865 * that something went wrong and where.
1866 */
1867 if (timeout < 0) {
1868 printk(KERN_ERR
1869 "schedule_timeout: wrong timeout value %lx\n", timeout);
1870 dump_stack();
1871 __set_current_state(TASK_RUNNING);
1872 goto out;
1873 }
1874 }
1875
1876 expire = timeout + jiffies;
1877
1878 timer.task = current;
1879 timer_setup_on_stack(&timer.timer, process_timeout, 0);
1880 __mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
1881 schedule();
1882 del_singleshot_timer_sync(&timer.timer);
1883
1884 /* Remove the timer from the object tracker */
1885 destroy_timer_on_stack(&timer.timer);
1886
1887 timeout = expire - jiffies;
1888
1889 out:
1890 return timeout < 0 ? 0 : timeout;
1891}
1892EXPORT_SYMBOL(schedule_timeout);
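
/*
 * Illustrative sketch (not part of the original file) of the pattern the
 * kernel-doc above describes: set the task state first, then call
 * schedule_timeout(). The function name and the 500ms value are made up.
 */
static __maybe_unused long example_wait_for_event(void)
{
	/* Sleep for up to 500ms unless woken earlier by wake_up_process(). */
	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(msecs_to_jiffies(500));
}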
1893
1894/*
1895 * We can use __set_current_state() here because schedule_timeout() calls
1896 * schedule() unconditionally.
1897 */
1898signed long __sched schedule_timeout_interruptible(signed long timeout)
1899{
1900 __set_current_state(TASK_INTERRUPTIBLE);
1901 return schedule_timeout(timeout);
1902}
1903EXPORT_SYMBOL(schedule_timeout_interruptible);
1904
1905signed long __sched schedule_timeout_killable(signed long timeout)
1906{
1907 __set_current_state(TASK_KILLABLE);
1908 return schedule_timeout(timeout);
1909}
1910EXPORT_SYMBOL(schedule_timeout_killable);
1911
1912signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1913{
1914 __set_current_state(TASK_UNINTERRUPTIBLE);
1915 return schedule_timeout(timeout);
1916}
1917EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1918
1919/*
1920 * Like schedule_timeout_uninterruptible(), except this task will not
1921 * contribute to the load average.
1922 */
1923signed long __sched schedule_timeout_idle(signed long timeout)
1924{
1925 __set_current_state(TASK_IDLE);
1926 return schedule_timeout(timeout);
1927}
1928EXPORT_SYMBOL(schedule_timeout_idle);
1929
1930#ifdef CONFIG_HOTPLUG_CPU
1931static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
1932{
1933 struct timer_list *timer;
1934 int cpu = new_base->cpu;
1935
1936 while (!hlist_empty(head)) {
1937 timer = hlist_entry(head->first, struct timer_list, entry);
1938 detach_timer(timer, false);
1939 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
1940 internal_add_timer(new_base, timer);
1941 }
1942}
1943
1944int timers_prepare_cpu(unsigned int cpu)
1945{
1946 struct timer_base *base;
1947 int b;
1948
1949 for (b = 0; b < NR_BASES; b++) {
1950 base = per_cpu_ptr(&timer_bases[b], cpu);
1951 base->clk = jiffies;
1952 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
1953 base->timers_pending = false;
1954 base->is_idle = false;
1955 }
1956 return 0;
1957}
1958
1959int timers_dead_cpu(unsigned int cpu)
1960{
1961 struct timer_base *old_base;
1962 struct timer_base *new_base;
1963 int b, i;
1964
1965 BUG_ON(cpu_online(cpu));
1966
1967 for (b = 0; b < NR_BASES; b++) {
1968 old_base = per_cpu_ptr(&timer_bases[b], cpu);
1969 new_base = get_cpu_ptr(&timer_bases[b]);
1970 /*
1971 * The caller is globally serialized and nobody else
1972 * takes two locks at once, so deadlock is not possible.
1973 */
1974 raw_spin_lock_irq(&new_base->lock);
1975 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1976
1977 /*
1978 * The current CPU's base clock might be stale. Update it
1979 * before moving the timers over.
1980 */
1981 forward_timer_base(new_base);
1982
1983 BUG_ON(old_base->running_timer);
1984
1985 for (i = 0; i < WHEEL_SIZE; i++)
1986 migrate_timer_list(new_base, old_base->vectors + i);
1987
1988 raw_spin_unlock(&old_base->lock);
1989 raw_spin_unlock_irq(&new_base->lock);
1990 put_cpu_ptr(&timer_bases);
1991 }
1992 return 0;
1993}
1994
1995#endif /* CONFIG_HOTPLUG_CPU */
1996
1997static void __init init_timer_cpu(int cpu)
1998{
1999 struct timer_base *base;
2000 int i;
2001
2002 for (i = 0; i < NR_BASES; i++) {
2003 base = per_cpu_ptr(&timer_bases[i], cpu);
2004 base->cpu = cpu;
2005 raw_spin_lock_init(&base->lock);
2006 base->clk = jiffies;
2007 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
2008 timer_base_init_expiry_lock(base);
2009 }
2010}
2011
2012static void __init init_timer_cpus(void)
2013{
2014 int cpu;
2015
2016 for_each_possible_cpu(cpu)
2017 init_timer_cpu(cpu);
2018}
2019
2020void __init init_timers(void)
2021{
2022 init_timer_cpus();
2023 posix_cputimers_init_work();
2024 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
2025}
2026
2027/**
2028 * msleep - sleep safely even with waitqueue interruptions
2029 * @msecs: Time in milliseconds to sleep for
2030 */
2031void msleep(unsigned int msecs)
2032{
2033 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
2034
2035 while (timeout)
2036 timeout = schedule_timeout_uninterruptible(timeout);
2037}
2038
2039EXPORT_SYMBOL(msleep);
2040
2041/**
2042 * msleep_interruptible - sleep waiting for signals
2043 * @msecs: Time in milliseconds to sleep for
2044 */
2045unsigned long msleep_interruptible(unsigned int msecs)
2046{
2047 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
2048
2049 while (timeout && !signal_pending(current))
2050 timeout = schedule_timeout_interruptible(timeout);
2051 return jiffies_to_msecs(timeout);
2052}
2053
2054EXPORT_SYMBOL(msleep_interruptible);
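
/*
 * Illustrative sketch (not part of the original file): a non-zero return
 * value from msleep_interruptible() means a signal ended the sleep early,
 * which callers commonly translate into -EINTR. The helper name is made up.
 */
static __maybe_unused int example_interruptible_pause(unsigned int msecs)
{
	return msleep_interruptible(msecs) ? -EINTR : 0;
}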
2055
2056/**
2057 * usleep_range - Sleep for an approximate time
2058 * @min: Minimum time in usecs to sleep
2059 * @max: Maximum time in usecs to sleep
2060 *
2061 * In non-atomic context where the exact wakeup time is flexible, use
2062 * usleep_range() instead of udelay(). The sleep improves responsiveness
2063 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
2064 * power usage by allowing hrtimers to take advantage of an already-
2065 * scheduled interrupt instead of scheduling a new one just for this sleep.
2066 */
2067void __sched usleep_range(unsigned long min, unsigned long max)
2068{
2069 ktime_t exp = ktime_add_us(ktime_get(), min);
2070 u64 delta = (u64)(max - min) * NSEC_PER_USEC;
2071
2072 for (;;) {
2073 __set_current_state(TASK_UNINTERRUPTIBLE);
2074 /* Do not return before the requested sleep time has elapsed */
2075 if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
2076 break;
2077 }
2078}
2079EXPORT_SYMBOL(usleep_range);
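
/*
 * Illustrative sketch (not part of the original file): a driver that has to
 * wait roughly 100us for hardware in sleepable context can give the
 * scheduler a range instead of busy-waiting in udelay(). The bounds are
 * made up for illustration.
 */
static __maybe_unused void example_settle_hardware(void)
{
	/* Any wakeup between 100us and 200us from now is acceptable. */
	usleep_range(100, 200);
}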
1/*
2 * linux/kernel/timer.c
3 *
4 * Kernel internal timers
5 *
6 * Copyright (C) 1991, 1992 Linus Torvalds
7 *
8 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
9 *
10 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
11 * "A Kernel Model for Precision Timekeeping" by Dave Mills
12 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
13 * serialize accesses to xtime/lost_ticks).
14 * Copyright (C) 1998 Andrea Arcangeli
15 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
16 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
17 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
18 * Copyright (C) 2000, 2001, 2002 Ingo Molnar
19 * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
20 */
21
22#include <linux/kernel_stat.h>
23#include <linux/export.h>
24#include <linux/interrupt.h>
25#include <linux/percpu.h>
26#include <linux/init.h>
27#include <linux/mm.h>
28#include <linux/swap.h>
29#include <linux/pid_namespace.h>
30#include <linux/notifier.h>
31#include <linux/thread_info.h>
32#include <linux/time.h>
33#include <linux/jiffies.h>
34#include <linux/posix-timers.h>
35#include <linux/cpu.h>
36#include <linux/syscalls.h>
37#include <linux/delay.h>
38#include <linux/tick.h>
39#include <linux/kallsyms.h>
40#include <linux/irq_work.h>
41#include <linux/sched/signal.h>
42#include <linux/sched/sysctl.h>
43#include <linux/sched/nohz.h>
44#include <linux/sched/debug.h>
45#include <linux/slab.h>
46#include <linux/compat.h>
47
48#include <linux/uaccess.h>
49#include <asm/unistd.h>
50#include <asm/div64.h>
51#include <asm/timex.h>
52#include <asm/io.h>
53
54#include "tick-internal.h"
55
56#define CREATE_TRACE_POINTS
57#include <trace/events/timer.h>
58
59__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
60
61EXPORT_SYMBOL(jiffies_64);
62
63/*
64 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
65 * LVL_SIZE buckets. Each level is driven by its own clock and therefor each
66 * level has a different granularity.
67 *
68 * The level granularity is: LVL_CLK_DIV ^ lvl
69 * The level clock frequency is: HZ / (LVL_CLK_DIV ^ level)
70 *
71 * The array level of a newly armed timer depends on the relative expiry
72 * time. The farther the expiry time is away the higher the array level and
73 * therefor the granularity becomes.
74 *
75 * Contrary to the original timer wheel implementation, which aims for 'exact'
76 * expiry of the timers, this implementation removes the need for recascading
77 * the timers into the lower array levels. The previous 'classic' timer wheel
78 * implementation of the kernel already violated the 'exact' expiry by adding
79 * slack to the expiry time to provide batched expiration. The granularity
80 * levels provide implicit batching.
81 *
82 * This is an optimization of the original timer wheel implementation for the
83 * majority of the timer wheel use cases: timeouts. The vast majority of
84 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
85 * the timeout expires it indicates that normal operation is disturbed, so it
86 * does not matter much whether the timeout comes with a slight delay.
87 *
88 * The only exception to this are networking timers with a small expiry
89 * time. They rely on the granularity. Those fit into the first wheel level,
90 * which has HZ granularity.
91 *
92 * We don't have cascading anymore. timers with a expiry time above the
93 * capacity of the last wheel level are force expired at the maximum timeout
94 * value of the last wheel level. From data sampling we know that the maximum
95 * value observed is 5 days (network connection tracking), so this should not
96 * be an issue.
97 *
98 * The currently chosen array constants values are a good compromise between
99 * array size and granularity.
100 *
101 * This results in the following granularity and range levels:
102 *
103 * HZ 1000 steps
104 * Level Offset Granularity Range
105 * 0 0 1 ms 0 ms - 63 ms
106 * 1 64 8 ms 64 ms - 511 ms
107 * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s)
108 * 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s)
109 * 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m)
110 * 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m)
111 * 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h)
112 * 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d)
113 * 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d)
114 *
115 * HZ 300
116 * Level Offset Granularity Range
117 * 0 0 3 ms 0 ms - 210 ms
118 * 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s)
119 * 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s)
120 * 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m)
121 * 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m)
122 * 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h)
123 * 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h)
124 * 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d)
125 * 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
126 *
127 * HZ 250
128 * Level Offset Granularity Range
129 * 0 0 4 ms 0 ms - 255 ms
130 * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s)
131 * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s)
132 * 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m)
133 * 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m)
134 * 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h)
135 * 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h)
136 * 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d)
137 * 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
138 *
139 * HZ 100
140 * Level Offset Granularity Range
141 * 0 0 10 ms 0 ms - 630 ms
142 * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s)
143 * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s)
144 * 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m)
145 * 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m)
146 * 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h)
147 * 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d)
148 * 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
149 */
150
151/* Clock divisor for the next level */
152#define LVL_CLK_SHIFT 3
153#define LVL_CLK_DIV (1UL << LVL_CLK_SHIFT)
154#define LVL_CLK_MASK (LVL_CLK_DIV - 1)
155#define LVL_SHIFT(n) ((n) * LVL_CLK_SHIFT)
156#define LVL_GRAN(n) (1UL << LVL_SHIFT(n))
157
158/*
159 * The time start value for each level to select the bucket at enqueue
160 * time.
161 */
162#define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
163
164/* Size of each clock level */
165#define LVL_BITS 6
166#define LVL_SIZE (1UL << LVL_BITS)
167#define LVL_MASK (LVL_SIZE - 1)
168#define LVL_OFFS(n) ((n) * LVL_SIZE)
169
170/* Level depth */
171#if HZ > 100
172# define LVL_DEPTH 9
173# else
174# define LVL_DEPTH 8
175#endif
176
177/* The cutoff (max. capacity of the wheel) */
178#define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH))
179#define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
180
181/*
182 * The resulting wheel size. If NOHZ is configured we allocate two
183 * wheels so we have a separate storage for the deferrable timers.
184 */
185#define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH)
186
187#ifdef CONFIG_NO_HZ_COMMON
188# define NR_BASES 2
189# define BASE_STD 0
190# define BASE_DEF 1
191#else
192# define NR_BASES 1
193# define BASE_STD 0
194# define BASE_DEF 0
195#endif
196
197struct timer_base {
198 raw_spinlock_t lock;
199 struct timer_list *running_timer;
200 unsigned long clk;
201 unsigned long next_expiry;
202 unsigned int cpu;
203 bool is_idle;
204 bool must_forward_clk;
205 DECLARE_BITMAP(pending_map, WHEEL_SIZE);
206 struct hlist_head vectors[WHEEL_SIZE];
207} ____cacheline_aligned;
208
209static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
210
211#ifdef CONFIG_NO_HZ_COMMON
212
213static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
214static DEFINE_MUTEX(timer_keys_mutex);
215
216static void timer_update_keys(struct work_struct *work);
217static DECLARE_WORK(timer_update_work, timer_update_keys);
218
219#ifdef CONFIG_SMP
220unsigned int sysctl_timer_migration = 1;
221
222DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
223
224static void timers_update_migration(void)
225{
226 if (sysctl_timer_migration && tick_nohz_active)
227 static_branch_enable(&timers_migration_enabled);
228 else
229 static_branch_disable(&timers_migration_enabled);
230}
231#else
232static inline void timers_update_migration(void) { }
233#endif /* !CONFIG_SMP */
234
235static void timer_update_keys(struct work_struct *work)
236{
237 mutex_lock(&timer_keys_mutex);
238 timers_update_migration();
239 static_branch_enable(&timers_nohz_active);
240 mutex_unlock(&timer_keys_mutex);
241}
242
243void timers_update_nohz(void)
244{
245 schedule_work(&timer_update_work);
246}
247
248int timer_migration_handler(struct ctl_table *table, int write,
249 void __user *buffer, size_t *lenp,
250 loff_t *ppos)
251{
252 int ret;
253
254 mutex_lock(&timer_keys_mutex);
255 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
256 if (!ret && write)
257 timers_update_migration();
258 mutex_unlock(&timer_keys_mutex);
259 return ret;
260}
261
262static inline bool is_timers_nohz_active(void)
263{
264 return static_branch_unlikely(&timers_nohz_active);
265}
266#else
267static inline bool is_timers_nohz_active(void) { return false; }
268#endif /* NO_HZ_COMMON */
269
270static unsigned long round_jiffies_common(unsigned long j, int cpu,
271 bool force_up)
272{
273 int rem;
274 unsigned long original = j;
275
276 /*
277 * We don't want all cpus firing their timers at once hitting the
278 * same lock or cachelines, so we skew each extra cpu with an extra
279 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
280 * already did this.
281 * The skew is done by adding 3*cpunr, then round, then subtract this
282 * extra offset again.
283 */
284 j += cpu * 3;
285
286 rem = j % HZ;
287
288 /*
289 * If the target jiffie is just after a whole second (which can happen
290 * due to delays of the timer irq, long irq off times etc etc) then
291 * we should round down to the whole second, not up. Use 1/4th second
292 * as cutoff for this rounding as an extreme upper bound for this.
293 * But never round down if @force_up is set.
294 */
295 if (rem < HZ/4 && !force_up) /* round down */
296 j = j - rem;
297 else /* round up */
298 j = j - rem + HZ;
299
300 /* now that we have rounded, subtract the extra skew again */
301 j -= cpu * 3;
302
303 /*
304 * Make sure j is still in the future. Otherwise return the
305 * unmodified value.
306 */
307 return time_is_after_jiffies(j) ? j : original;
308}
309
310/**
311 * __round_jiffies - function to round jiffies to a full second
312 * @j: the time in (absolute) jiffies that should be rounded
313 * @cpu: the processor number on which the timeout will happen
314 *
315 * __round_jiffies() rounds an absolute time in the future (in jiffies)
316 * up or down to (approximately) full seconds. This is useful for timers
317 * for which the exact time they fire does not matter too much, as long as
318 * they fire approximately every X seconds.
319 *
320 * By rounding these timers to whole seconds, all such timers will fire
321 * at the same time, rather than at various times spread out. The goal
322 * of this is to have the CPU wake up less, which saves power.
323 *
324 * The exact rounding is skewed for each processor to avoid all
325 * processors firing at the exact same time, which could lead
326 * to lock contention or spurious cache line bouncing.
327 *
328 * The return value is the rounded version of the @j parameter.
329 */
330unsigned long __round_jiffies(unsigned long j, int cpu)
331{
332 return round_jiffies_common(j, cpu, false);
333}
334EXPORT_SYMBOL_GPL(__round_jiffies);
335
336/**
337 * __round_jiffies_relative - function to round jiffies to a full second
338 * @j: the time in (relative) jiffies that should be rounded
339 * @cpu: the processor number on which the timeout will happen
340 *
341 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
342 * up or down to (approximately) full seconds. This is useful for timers
343 * for which the exact time they fire does not matter too much, as long as
344 * they fire approximately every X seconds.
345 *
346 * By rounding these timers to whole seconds, all such timers will fire
347 * at the same time, rather than at various times spread out. The goal
348 * of this is to have the CPU wake up less, which saves power.
349 *
350 * The exact rounding is skewed for each processor to avoid all
351 * processors firing at the exact same time, which could lead
352 * to lock contention or spurious cache line bouncing.
353 *
354 * The return value is the rounded version of the @j parameter.
355 */
356unsigned long __round_jiffies_relative(unsigned long j, int cpu)
357{
358 unsigned long j0 = jiffies;
359
360 /* Use j0 because jiffies might change while we run */
361 return round_jiffies_common(j + j0, cpu, false) - j0;
362}
363EXPORT_SYMBOL_GPL(__round_jiffies_relative);
364
365/**
366 * round_jiffies - function to round jiffies to a full second
367 * @j: the time in (absolute) jiffies that should be rounded
368 *
369 * round_jiffies() rounds an absolute time in the future (in jiffies)
370 * up or down to (approximately) full seconds. This is useful for timers
371 * for which the exact time they fire does not matter too much, as long as
372 * they fire approximately every X seconds.
373 *
374 * By rounding these timers to whole seconds, all such timers will fire
375 * at the same time, rather than at various times spread out. The goal
376 * of this is to have the CPU wake up less, which saves power.
377 *
378 * The return value is the rounded version of the @j parameter.
379 */
380unsigned long round_jiffies(unsigned long j)
381{
382 return round_jiffies_common(j, raw_smp_processor_id(), false);
383}
384EXPORT_SYMBOL_GPL(round_jiffies);
385
386/**
387 * round_jiffies_relative - function to round jiffies to a full second
388 * @j: the time in (relative) jiffies that should be rounded
389 *
390 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
391 * up or down to (approximately) full seconds. This is useful for timers
392 * for which the exact time they fire does not matter too much, as long as
393 * they fire approximately every X seconds.
394 *
395 * By rounding these timers to whole seconds, all such timers will fire
396 * at the same time, rather than at various times spread out. The goal
397 * of this is to have the CPU wake up less, which saves power.
398 *
399 * The return value is the rounded version of the @j parameter.
400 */
401unsigned long round_jiffies_relative(unsigned long j)
402{
403 return __round_jiffies_relative(j, raw_smp_processor_id());
404}
405EXPORT_SYMBOL_GPL(round_jiffies_relative);
406
407/**
408 * __round_jiffies_up - function to round jiffies up to a full second
409 * @j: the time in (absolute) jiffies that should be rounded
410 * @cpu: the processor number on which the timeout will happen
411 *
412 * This is the same as __round_jiffies() except that it will never
413 * round down. This is useful for timeouts for which the exact time
414 * of firing does not matter too much, as long as they don't fire too
415 * early.
416 */
417unsigned long __round_jiffies_up(unsigned long j, int cpu)
418{
419 return round_jiffies_common(j, cpu, true);
420}
421EXPORT_SYMBOL_GPL(__round_jiffies_up);
422
423/**
424 * __round_jiffies_up_relative - function to round jiffies up to a full second
425 * @j: the time in (relative) jiffies that should be rounded
426 * @cpu: the processor number on which the timeout will happen
427 *
428 * This is the same as __round_jiffies_relative() except that it will never
429 * round down. This is useful for timeouts for which the exact time
430 * of firing does not matter too much, as long as they don't fire too
431 * early.
432 */
433unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
434{
435 unsigned long j0 = jiffies;
436
437 /* Use j0 because jiffies might change while we run */
438 return round_jiffies_common(j + j0, cpu, true) - j0;
439}
440EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
441
442/**
443 * round_jiffies_up - function to round jiffies up to a full second
444 * @j: the time in (absolute) jiffies that should be rounded
445 *
446 * This is the same as round_jiffies() except that it will never
447 * round down. This is useful for timeouts for which the exact time
448 * of firing does not matter too much, as long as they don't fire too
449 * early.
450 */
451unsigned long round_jiffies_up(unsigned long j)
452{
453 return round_jiffies_common(j, raw_smp_processor_id(), true);
454}
455EXPORT_SYMBOL_GPL(round_jiffies_up);
456
457/**
458 * round_jiffies_up_relative - function to round jiffies up to a full second
459 * @j: the time in (relative) jiffies that should be rounded
460 *
461 * This is the same as round_jiffies_relative() except that it will never
462 * round down. This is useful for timeouts for which the exact time
463 * of firing does not matter too much, as long as they don't fire too
464 * early.
465 */
466unsigned long round_jiffies_up_relative(unsigned long j)
467{
468 return __round_jiffies_up_relative(j, raw_smp_processor_id());
469}
470EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
471
472
473static inline unsigned int timer_get_idx(struct timer_list *timer)
474{
475 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
476}
477
478static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
479{
480 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
481 idx << TIMER_ARRAYSHIFT;
482}
483
484/*
485 * Helper function to calculate the array index for a given expiry
486 * time.
487 */
488static inline unsigned calc_index(unsigned expires, unsigned lvl)
489{
490 expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
491 return LVL_OFFS(lvl) + (expires & LVL_MASK);
492}
493
494static int calc_wheel_index(unsigned long expires, unsigned long clk)
495{
496 unsigned long delta = expires - clk;
497 unsigned int idx;
498
499 if (delta < LVL_START(1)) {
500 idx = calc_index(expires, 0);
501 } else if (delta < LVL_START(2)) {
502 idx = calc_index(expires, 1);
503 } else if (delta < LVL_START(3)) {
504 idx = calc_index(expires, 2);
505 } else if (delta < LVL_START(4)) {
506 idx = calc_index(expires, 3);
507 } else if (delta < LVL_START(5)) {
508 idx = calc_index(expires, 4);
509 } else if (delta < LVL_START(6)) {
510 idx = calc_index(expires, 5);
511 } else if (delta < LVL_START(7)) {
512 idx = calc_index(expires, 6);
513 } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
514 idx = calc_index(expires, 7);
515 } else if ((long) delta < 0) {
516 idx = clk & LVL_MASK;
517 } else {
518 /*
519 * Force expire obscene large timeouts to expire at the
520 * capacity limit of the wheel.
521 */
522 if (expires >= WHEEL_TIMEOUT_CUTOFF)
523 expires = WHEEL_TIMEOUT_MAX;
524
525 idx = calc_index(expires, LVL_DEPTH - 1);
526 }
527 return idx;
528}
529
530/*
531 * Enqueue the timer into the hash bucket, mark it pending in
532 * the bitmap and store the index in the timer flags.
533 */
534static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
535 unsigned int idx)
536{
537 hlist_add_head(&timer->entry, base->vectors + idx);
538 __set_bit(idx, base->pending_map);
539 timer_set_idx(timer, idx);
540}
541
542static void
543__internal_add_timer(struct timer_base *base, struct timer_list *timer)
544{
545 unsigned int idx;
546
547 idx = calc_wheel_index(timer->expires, base->clk);
548 enqueue_timer(base, timer, idx);
549}
550
551static void
552trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
553{
554 if (!is_timers_nohz_active())
555 return;
556
557 /*
558 * TODO: This wants some optimizing similar to the code below, but we
559 * will do that when we switch from push to pull for deferrable timers.
560 */
561 if (timer->flags & TIMER_DEFERRABLE) {
562 if (tick_nohz_full_cpu(base->cpu))
563 wake_up_nohz_cpu(base->cpu);
564 return;
565 }
566
567 /*
568 * We might have to IPI the remote CPU if the base is idle and the
569 * timer is not deferrable. If the other CPU is on the way to idle
570 * then it can't set base->is_idle as we hold the base lock:
571 */
572 if (!base->is_idle)
573 return;
574
575 /* Check whether this is the new first expiring timer: */
576 if (time_after_eq(timer->expires, base->next_expiry))
577 return;
578
579 /*
580 * Set the next expiry time and kick the CPU so it can reevaluate the
581 * wheel:
582 */
583 base->next_expiry = timer->expires;
584 wake_up_nohz_cpu(base->cpu);
585}
586
587static void
588internal_add_timer(struct timer_base *base, struct timer_list *timer)
589{
590 __internal_add_timer(base, timer);
591 trigger_dyntick_cpu(base, timer);
592}
593
594#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
595
596static struct debug_obj_descr timer_debug_descr;
597
598static void *timer_debug_hint(void *addr)
599{
600 return ((struct timer_list *) addr)->function;
601}
602
603static bool timer_is_static_object(void *addr)
604{
605 struct timer_list *timer = addr;
606
607 return (timer->entry.pprev == NULL &&
608 timer->entry.next == TIMER_ENTRY_STATIC);
609}
610
611/*
612 * fixup_init is called when:
613 * - an active object is initialized
614 */
615static bool timer_fixup_init(void *addr, enum debug_obj_state state)
616{
617 struct timer_list *timer = addr;
618
619 switch (state) {
620 case ODEBUG_STATE_ACTIVE:
621 del_timer_sync(timer);
622 debug_object_init(timer, &timer_debug_descr);
623 return true;
624 default:
625 return false;
626 }
627}
628
629/* Stub timer callback for improperly used timers. */
630static void stub_timer(struct timer_list *unused)
631{
632 WARN_ON(1);
633}
634
635/*
636 * fixup_activate is called when:
637 * - an active object is activated
638 * - an unknown non-static object is activated
639 */
640static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
641{
642 struct timer_list *timer = addr;
643
644 switch (state) {
645 case ODEBUG_STATE_NOTAVAILABLE:
646 timer_setup(timer, stub_timer, 0);
647 return true;
648
649 case ODEBUG_STATE_ACTIVE:
650 WARN_ON(1);
651
652 default:
653 return false;
654 }
655}
656
657/*
658 * fixup_free is called when:
659 * - an active object is freed
660 */
661static bool timer_fixup_free(void *addr, enum debug_obj_state state)
662{
663 struct timer_list *timer = addr;
664
665 switch (state) {
666 case ODEBUG_STATE_ACTIVE:
667 del_timer_sync(timer);
668 debug_object_free(timer, &timer_debug_descr);
669 return true;
670 default:
671 return false;
672 }
673}
674
675/*
676 * fixup_assert_init is called when:
677 * - an untracked/uninit-ed object is found
678 */
679static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
680{
681 struct timer_list *timer = addr;
682
683 switch (state) {
684 case ODEBUG_STATE_NOTAVAILABLE:
685 timer_setup(timer, stub_timer, 0);
686 return true;
687 default:
688 return false;
689 }
690}
691
692static struct debug_obj_descr timer_debug_descr = {
693 .name = "timer_list",
694 .debug_hint = timer_debug_hint,
695 .is_static_object = timer_is_static_object,
696 .fixup_init = timer_fixup_init,
697 .fixup_activate = timer_fixup_activate,
698 .fixup_free = timer_fixup_free,
699 .fixup_assert_init = timer_fixup_assert_init,
700};
701
702static inline void debug_timer_init(struct timer_list *timer)
703{
704 debug_object_init(timer, &timer_debug_descr);
705}
706
707static inline void debug_timer_activate(struct timer_list *timer)
708{
709 debug_object_activate(timer, &timer_debug_descr);
710}
711
712static inline void debug_timer_deactivate(struct timer_list *timer)
713{
714 debug_object_deactivate(timer, &timer_debug_descr);
715}
716
717static inline void debug_timer_free(struct timer_list *timer)
718{
719 debug_object_free(timer, &timer_debug_descr);
720}
721
722static inline void debug_timer_assert_init(struct timer_list *timer)
723{
724 debug_object_assert_init(timer, &timer_debug_descr);
725}
726
727static void do_init_timer(struct timer_list *timer,
728 void (*func)(struct timer_list *),
729 unsigned int flags,
730 const char *name, struct lock_class_key *key);
731
732void init_timer_on_stack_key(struct timer_list *timer,
733 void (*func)(struct timer_list *),
734 unsigned int flags,
735 const char *name, struct lock_class_key *key)
736{
737 debug_object_init_on_stack(timer, &timer_debug_descr);
738 do_init_timer(timer, func, flags, name, key);
739}
740EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
741
742void destroy_timer_on_stack(struct timer_list *timer)
743{
744 debug_object_free(timer, &timer_debug_descr);
745}
746EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
747
748#else
749static inline void debug_timer_init(struct timer_list *timer) { }
750static inline void debug_timer_activate(struct timer_list *timer) { }
751static inline void debug_timer_deactivate(struct timer_list *timer) { }
752static inline void debug_timer_assert_init(struct timer_list *timer) { }
753#endif
754
755static inline void debug_init(struct timer_list *timer)
756{
757 debug_timer_init(timer);
758 trace_timer_init(timer);
759}
760
761static inline void
762debug_activate(struct timer_list *timer, unsigned long expires)
763{
764 debug_timer_activate(timer);
765 trace_timer_start(timer, expires, timer->flags);
766}
767
768static inline void debug_deactivate(struct timer_list *timer)
769{
770 debug_timer_deactivate(timer);
771 trace_timer_cancel(timer);
772}
773
774static inline void debug_assert_init(struct timer_list *timer)
775{
776 debug_timer_assert_init(timer);
777}
778
779static void do_init_timer(struct timer_list *timer,
780 void (*func)(struct timer_list *),
781 unsigned int flags,
782 const char *name, struct lock_class_key *key)
783{
784 timer->entry.pprev = NULL;
785 timer->function = func;
786 timer->flags = flags | raw_smp_processor_id();
787 lockdep_init_map(&timer->lockdep_map, name, key, 0);
788}
789
790/**
791 * init_timer_key - initialize a timer
792 * @timer: the timer to be initialized
793 * @func: timer callback function
794 * @flags: timer flags
795 * @name: name of the timer
796 * @key: lockdep class key of the fake lock used for tracking timer
797 * sync lock dependencies
798 *
799 * init_timer_key() must be done to a timer prior calling *any* of the
800 * other timer functions.
801 */
802void init_timer_key(struct timer_list *timer,
803 void (*func)(struct timer_list *), unsigned int flags,
804 const char *name, struct lock_class_key *key)
805{
806 debug_init(timer);
807 do_init_timer(timer, func, flags, name, key);
808}
809EXPORT_SYMBOL(init_timer_key);
810
811static inline void detach_timer(struct timer_list *timer, bool clear_pending)
812{
813 struct hlist_node *entry = &timer->entry;
814
815 debug_deactivate(timer);
816
817 __hlist_del(entry);
818 if (clear_pending)
819 entry->pprev = NULL;
820 entry->next = LIST_POISON2;
821}
822
823static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
824 bool clear_pending)
825{
826 unsigned idx = timer_get_idx(timer);
827
828 if (!timer_pending(timer))
829 return 0;
830
831 if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
832 __clear_bit(idx, base->pending_map);
833
834 detach_timer(timer, clear_pending);
835 return 1;
836}
837
838static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
839{
840 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
841
842 /*
843 * If the timer is deferrable and NO_HZ_COMMON is set then we need
844 * to use the deferrable base.
845 */
846 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
847 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
848 return base;
849}
850
851static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
852{
853 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
854
855 /*
856 * If the timer is deferrable and NO_HZ_COMMON is set then we need
857 * to use the deferrable base.
858 */
859 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
860 base = this_cpu_ptr(&timer_bases[BASE_DEF]);
861 return base;
862}
863
864static inline struct timer_base *get_timer_base(u32 tflags)
865{
866 return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
867}
868
869static inline struct timer_base *
870get_target_base(struct timer_base *base, unsigned tflags)
871{
872#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
873 if (static_branch_likely(&timers_migration_enabled) &&
874 !(tflags & TIMER_PINNED))
875 return get_timer_cpu_base(tflags, get_nohz_timer_target());
876#endif
877 return get_timer_this_cpu_base(tflags);
878}
879
880static inline void forward_timer_base(struct timer_base *base)
881{
882#ifdef CONFIG_NO_HZ_COMMON
883 unsigned long jnow;
884
885 /*
886 * We only forward the base when we are idle or have just come out of
887 * idle (must_forward_clk logic), and have a delta between base clock
888 * and jiffies. In the common case, run_timers will take care of it.
889 */
890 if (likely(!base->must_forward_clk))
891 return;
892
893 jnow = READ_ONCE(jiffies);
894 base->must_forward_clk = base->is_idle;
895 if ((long)(jnow - base->clk) < 2)
896 return;
897
898 /*
899 * If the next expiry value is > jiffies, then we fast forward to
900 * jiffies otherwise we forward to the next expiry value.
901 */
902 if (time_after(base->next_expiry, jnow))
903 base->clk = jnow;
904 else
905 base->clk = base->next_expiry;
906#endif
907}
908
909
910/*
911 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
912 * that all timers which are tied to this base are locked, and the base itself
913 * is locked too.
914 *
915 * So __run_timers/migrate_timers can safely modify all timers which could
916 * be found in the base->vectors array.
917 *
918 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
919 * to wait until the migration is done.
920 */
921static struct timer_base *lock_timer_base(struct timer_list *timer,
922 unsigned long *flags)
923 __acquires(timer->base->lock)
924{
925 for (;;) {
926 struct timer_base *base;
927 u32 tf;
928
929 /*
930 * We need to use READ_ONCE() here, otherwise the compiler
931 * might re-read @tf between the check for TIMER_MIGRATING
932 * and spin_lock().
933 */
934 tf = READ_ONCE(timer->flags);
935
936 if (!(tf & TIMER_MIGRATING)) {
937 base = get_timer_base(tf);
938 raw_spin_lock_irqsave(&base->lock, *flags);
939 if (timer->flags == tf)
940 return base;
941 raw_spin_unlock_irqrestore(&base->lock, *flags);
942 }
943 cpu_relax();
944 }
945}
946
947#define MOD_TIMER_PENDING_ONLY 0x01
948#define MOD_TIMER_REDUCE 0x02
949
950static inline int
951__mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
952{
953 struct timer_base *base, *new_base;
954 unsigned int idx = UINT_MAX;
955 unsigned long clk = 0, flags;
956 int ret = 0;
957
958 BUG_ON(!timer->function);
959
960 /*
961 * This is a common optimization triggered by the networking code - if
962 * the timer is re-modified to have the same timeout or ends up in the
963 * same array bucket then just return:
964 */
965 if (timer_pending(timer)) {
966 /*
967 * The downside of this optimization is that it can result in
968 * larger granularity than you would get from adding a new
969 * timer with this expiry.
970 */
971 long diff = timer->expires - expires;
972
973 if (!diff)
974 return 1;
975 if (options & MOD_TIMER_REDUCE && diff <= 0)
976 return 1;
977
978 /*
979 * We lock timer base and calculate the bucket index right
980 * here. If the timer ends up in the same bucket, then we
981 * just update the expiry time and avoid the whole
982 * dequeue/enqueue dance.
983 */
984 base = lock_timer_base(timer, &flags);
985 forward_timer_base(base);
986
987 if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
988 time_before_eq(timer->expires, expires)) {
989 ret = 1;
990 goto out_unlock;
991 }
992
993 clk = base->clk;
994 idx = calc_wheel_index(expires, clk);
995
996 /*
997 * Retrieve and compare the array index of the pending
998 * timer. If it matches set the expiry to the new value so a
999 * subsequent call will exit in the expires check above.
1000 */
1001 if (idx == timer_get_idx(timer)) {
1002 if (!(options & MOD_TIMER_REDUCE))
1003 timer->expires = expires;
1004 else if (time_after(timer->expires, expires))
1005 timer->expires = expires;
1006 ret = 1;
1007 goto out_unlock;
1008 }
1009 } else {
1010 base = lock_timer_base(timer, &flags);
1011 forward_timer_base(base);
1012 }
1013
1014 ret = detach_if_pending(timer, base, false);
1015 if (!ret && (options & MOD_TIMER_PENDING_ONLY))
1016 goto out_unlock;
1017
1018 new_base = get_target_base(base, timer->flags);
1019
1020 if (base != new_base) {
1021 /*
1022 * We are trying to schedule the timer on the new base.
1023 * However we can't change timer's base while it is running,
1024 * otherwise del_timer_sync() can't detect that the timer's
1025 * handler yet has not finished. This also guarantees that the
1026 * timer is serialized wrt itself.
1027 */
1028 if (likely(base->running_timer != timer)) {
1029 /* See the comment in lock_timer_base() */
1030 timer->flags |= TIMER_MIGRATING;
1031
1032 raw_spin_unlock(&base->lock);
1033 base = new_base;
1034 raw_spin_lock(&base->lock);
1035 WRITE_ONCE(timer->flags,
1036 (timer->flags & ~TIMER_BASEMASK) | base->cpu);
1037 forward_timer_base(base);
1038 }
1039 }
1040
1041 debug_activate(timer, expires);
1042
1043 timer->expires = expires;
1044 /*
1045 * If 'idx' was calculated above and the base time did not advance
1046 * between calculating 'idx' and possibly switching the base, only
1047 * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise
1048 * we need to (re)calculate the wheel index via
1049 * internal_add_timer().
1050 */
1051 if (idx != UINT_MAX && clk == base->clk) {
1052 enqueue_timer(base, timer, idx);
1053 trigger_dyntick_cpu(base, timer);
1054 } else {
1055 internal_add_timer(base, timer);
1056 }
1057
1058out_unlock:
1059 raw_spin_unlock_irqrestore(&base->lock, flags);
1060
1061 return ret;
1062}
1063
1064/**
1065 * mod_timer_pending - modify a pending timer's timeout
1066 * @timer: the pending timer to be modified
1067 * @expires: new timeout in jiffies
1068 *
1069 * mod_timer_pending() is the same for pending timers as mod_timer(),
1070 * but will not re-activate and modify already deleted timers.
1071 *
1072 * It is useful for unserialized use of timers.
1073 */
1074int mod_timer_pending(struct timer_list *timer, unsigned long expires)
1075{
1076 return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
1077}
1078EXPORT_SYMBOL(mod_timer_pending);
1079
1080/**
1081 * mod_timer - modify a timer's timeout
1082 * @timer: the timer to be modified
1083 * @expires: new timeout in jiffies
1084 *
1085 * mod_timer() is a more efficient way to update the expire field of an
1086 * active timer (if the timer is inactive it will be activated)
1087 *
1088 * mod_timer(timer, expires) is equivalent to:
1089 *
1090 * del_timer(timer); timer->expires = expires; add_timer(timer);
1091 *
1092 * Note that if there are multiple unserialized concurrent users of the
1093 * same timer, then mod_timer() is the only safe way to modify the timeout,
1094 * since add_timer() cannot modify an already running timer.
1095 *
1096 * The function returns whether it has modified a pending timer or not.
1097 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
1098 * active timer returns 1.)
1099 */
1100int mod_timer(struct timer_list *timer, unsigned long expires)
1101{
1102 return __mod_timer(timer, expires, 0);
1103}
1104EXPORT_SYMBOL(mod_timer);
1105
1106/**
1107 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
1108 * @timer: The timer to be modified
1109 * @expires: New timeout in jiffies
1110 *
1111 * timer_reduce() is very similar to mod_timer(), except that it will only
1112 * modify a running timer if that would reduce the expiration time (it will
1113 * start a timer that isn't running).
1114 */
1115int timer_reduce(struct timer_list *timer, unsigned long expires)
1116{
1117 return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
1118}
1119EXPORT_SYMBOL(timer_reduce);
1120
1121/**
1122 * add_timer - start a timer
1123 * @timer: the timer to be added
1124 *
1125 * The kernel will do a ->function(@timer) callback from the
1126 * timer interrupt at the ->expires point in the future. The
1127 * current time is 'jiffies'.
1128 *
1129 * The timer's ->expires, ->function fields must be set prior calling this
1130 * function.
1131 *
1132 * Timers with an ->expires field in the past will be executed in the next
1133 * timer tick.
1134 */
1135void add_timer(struct timer_list *timer)
1136{
1137 BUG_ON(timer_pending(timer));
1138 mod_timer(timer, timer->expires);
1139}
1140EXPORT_SYMBOL(add_timer);
1141
1142/**
1143 * add_timer_on - start a timer on a particular CPU
1144 * @timer: the timer to be added
1145 * @cpu: the CPU to start it on
1146 *
1147 * This is not very scalable on SMP. Double adds are not possible.
1148 */
1149void add_timer_on(struct timer_list *timer, int cpu)
1150{
1151 struct timer_base *new_base, *base;
1152 unsigned long flags;
1153
1154 BUG_ON(timer_pending(timer) || !timer->function);
1155
1156 new_base = get_timer_cpu_base(timer->flags, cpu);
1157
1158 /*
1159 * If @timer was on a different CPU, it should be migrated with the
1160 * old base locked to prevent other operations proceeding with the
1161 * wrong base locked. See lock_timer_base().
1162 */
1163 base = lock_timer_base(timer, &flags);
1164 if (base != new_base) {
1165 timer->flags |= TIMER_MIGRATING;
1166
1167 raw_spin_unlock(&base->lock);
1168 base = new_base;
1169 raw_spin_lock(&base->lock);
1170 WRITE_ONCE(timer->flags,
1171 (timer->flags & ~TIMER_BASEMASK) | cpu);
1172 }
1173 forward_timer_base(base);
1174
1175 debug_activate(timer, timer->expires);
1176 internal_add_timer(base, timer);
1177 raw_spin_unlock_irqrestore(&base->lock, flags);
1178}
1179EXPORT_SYMBOL_GPL(add_timer_on);
1180
1181/**
1182 * del_timer - deactivate a timer.
1183 * @timer: the timer to be deactivated
1184 *
1185 * del_timer() deactivates a timer - this works on both active and inactive
1186 * timers.
1187 *
1188 * The function returns whether it has deactivated a pending timer or not.
1189 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
1190 * active timer returns 1.)
1191 */
1192int del_timer(struct timer_list *timer)
1193{
1194 struct timer_base *base;
1195 unsigned long flags;
1196 int ret = 0;
1197
1198 debug_assert_init(timer);
1199
1200 if (timer_pending(timer)) {
1201 base = lock_timer_base(timer, &flags);
1202 ret = detach_if_pending(timer, base, true);
1203 raw_spin_unlock_irqrestore(&base->lock, flags);
1204 }
1205
1206 return ret;
1207}
1208EXPORT_SYMBOL(del_timer);
1209
1210/**
1211 * try_to_del_timer_sync - Try to deactivate a timer
1212 * @timer: timer to delete
1213 *
1214 * This function tries to deactivate a timer. Upon successful (ret >= 0)
1215 * exit the timer is not queued and the handler is not running on any CPU.
1216 */
1217int try_to_del_timer_sync(struct timer_list *timer)
1218{
1219 struct timer_base *base;
1220 unsigned long flags;
1221 int ret = -1;
1222
1223 debug_assert_init(timer);
1224
1225 base = lock_timer_base(timer, &flags);
1226
1227 if (base->running_timer != timer)
1228 ret = detach_if_pending(timer, base, true);
1229
1230 raw_spin_unlock_irqrestore(&base->lock, flags);
1231
1232 return ret;
1233}
1234EXPORT_SYMBOL(try_to_del_timer_sync);
1235
1236#ifdef CONFIG_SMP
1237/**
1238 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1239 * @timer: the timer to be deactivated
1240 *
1241 * This function only differs from del_timer() on SMP: besides deactivating
1242 * the timer it also makes sure the handler has finished executing on other
1243 * CPUs.
1244 *
1245 * Synchronization rules: Callers must prevent restarting of the timer,
1246 * otherwise this function is meaningless. It must not be called from
1247 * interrupt contexts unless the timer is an irqsafe one. The caller must
1248 * not hold locks which would prevent completion of the timer's
1249 * handler. The timer's handler must not call add_timer_on(). Upon exit the
1250 * timer is not queued and the handler is not running on any CPU.
1251 *
1252 * Note: For !irqsafe timers, you must not hold locks that are held in
1253 * interrupt context while calling this function. Even if the lock has
1254 * nothing to do with the timer in question. Here's why:
1255 *
1256 * CPU0 CPU1
1257 * ---- ----
1258 * <SOFTIRQ>
1259 * call_timer_fn();
1260 * base->running_timer = mytimer;
1261 * spin_lock_irq(somelock);
1262 * <IRQ>
1263 * spin_lock(somelock);
1264 * del_timer_sync(mytimer);
1265 * while (base->running_timer == mytimer);
1266 *
1267 * Now del_timer_sync() will never return and never release somelock.
1268 * The interrupt on the other CPU is waiting to grab somelock but
1269 * it has interrupted the softirq that CPU0 is waiting to finish.
1270 *
1271 * The function returns whether it has deactivated a pending timer or not.
1272 */
1273int del_timer_sync(struct timer_list *timer)
1274{
1275#ifdef CONFIG_LOCKDEP
1276 unsigned long flags;
1277
1278 /*
1279 * If lockdep gives a backtrace here, please reference
1280 * the synchronization rules above.
1281 */
1282 local_irq_save(flags);
1283 lock_map_acquire(&timer->lockdep_map);
1284 lock_map_release(&timer->lockdep_map);
1285 local_irq_restore(flags);
1286#endif
1287 /*
1288 * don't use it in hardirq context, because it
1289 * could lead to deadlock.
1290 */
1291 WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
1292 for (;;) {
1293 int ret = try_to_del_timer_sync(timer);
1294 if (ret >= 0)
1295 return ret;
1296 cpu_relax();
1297 }
1298}
1299EXPORT_SYMBOL(del_timer_sync);
1300#endif
1301
1302static void call_timer_fn(struct timer_list *timer, void (*fn)(struct timer_list *))
1303{
1304 int count = preempt_count();
1305
1306#ifdef CONFIG_LOCKDEP
1307 /*
1308 * It is permissible to free the timer from inside the
1309 * function that is called from it, this we need to take into
1310 * account for lockdep too. To avoid bogus "held lock freed"
1311 * warnings as well as problems when looking into
1312 * timer->lockdep_map, make a copy and use that here.
1313 */
1314 struct lockdep_map lockdep_map;
1315
1316 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1317#endif
1318 /*
1319 * Couple the lock chain with the lock chain at
1320 * del_timer_sync() by acquiring the lock_map around the fn()
1321 * call here and in del_timer_sync().
1322 */
1323 lock_map_acquire(&lockdep_map);
1324
1325 trace_timer_expire_entry(timer);
1326 fn(timer);
1327 trace_timer_expire_exit(timer);
1328
1329 lock_map_release(&lockdep_map);
1330
1331 if (count != preempt_count()) {
1332 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
1333 fn, count, preempt_count());
1334 /*
1335 * Restore the preempt count. That gives us a decent
1336 * chance to survive and extract information. If the
1337 * callback kept a lock held, bad luck, but not worse
1338 * than the BUG() we had.
1339 */
1340 preempt_count_set(count);
1341 }
1342}
1343
1344static void expire_timers(struct timer_base *base, struct hlist_head *head)
1345{
1346 while (!hlist_empty(head)) {
1347 struct timer_list *timer;
1348 void (*fn)(struct timer_list *);
1349
1350 timer = hlist_entry(head->first, struct timer_list, entry);
1351
1352 base->running_timer = timer;
1353 detach_timer(timer, true);
1354
1355 fn = timer->function;
1356
1357 if (timer->flags & TIMER_IRQSAFE) {
1358 raw_spin_unlock(&base->lock);
1359 call_timer_fn(timer, fn);
1360 raw_spin_lock(&base->lock);
1361 } else {
1362 raw_spin_unlock_irq(&base->lock);
1363 call_timer_fn(timer, fn);
1364 raw_spin_lock_irq(&base->lock);
1365 }
1366 }
1367}
1368
1369static int __collect_expired_timers(struct timer_base *base,
1370 struct hlist_head *heads)
1371{
1372 unsigned long clk = base->clk;
1373 struct hlist_head *vec;
1374 int i, levels = 0;
1375 unsigned int idx;
1376
1377 for (i = 0; i < LVL_DEPTH; i++) {
1378 idx = (clk & LVL_MASK) + i * LVL_SIZE;
1379
1380 if (__test_and_clear_bit(idx, base->pending_map)) {
1381 vec = base->vectors + idx;
1382 hlist_move_list(vec, heads++);
1383 levels++;
1384 }
1385 /* Is it time to look at the next level? */
1386 if (clk & LVL_CLK_MASK)
1387 break;
1388 /* Shift clock for the next level granularity */
1389 clk >>= LVL_CLK_SHIFT;
1390 }
1391 return levels;
1392}
1393
1394#ifdef CONFIG_NO_HZ_COMMON
1395/*
1396 * Find the next pending bucket of a level. Search from level start (@offset)
1397 * + @clk upwards and if nothing there, search from start of the level
1398 * (@offset) up to @offset + clk.
1399 */
1400static int next_pending_bucket(struct timer_base *base, unsigned offset,
1401 unsigned clk)
1402{
1403 unsigned pos, start = offset + clk;
1404 unsigned end = offset + LVL_SIZE;
1405
1406 pos = find_next_bit(base->pending_map, end, start);
1407 if (pos < end)
1408 return pos - start;
1409
1410 pos = find_next_bit(base->pending_map, start, offset);
1411 return pos < start ? pos + LVL_SIZE - start : -1;
1412}

/*
 * Search the first expiring timer in the various clock levels. Caller must
 * hold base->lock.
 */
static unsigned long __next_timer_interrupt(struct timer_base *base)
{
        unsigned long clk, next, adj;
        unsigned lvl, offset = 0;

        next = base->clk + NEXT_TIMER_MAX_DELTA;
        clk = base->clk;
        for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
                int pos = next_pending_bucket(base, offset, clk & LVL_MASK);

                if (pos >= 0) {
                        unsigned long tmp = clk + (unsigned long) pos;

                        tmp <<= LVL_SHIFT(lvl);
                        if (time_before(tmp, next))
                                next = tmp;
                }
                /*
                 * Clock for the next level. If the current level clock lower
                 * bits are zero, we look at the next level as is. If not we
                 * need to advance it by one because that's going to be the
                 * next expiring bucket in that level. base->clk is the next
                 * expiring jiffie. So in case of:
                 *
                 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
                 *  0    0    0    0    0    0
                 *
                 * we have to look at all levels @index 0. With
                 *
                 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
                 *  0    0    0    0    0    2
                 *
                 * LVL0 has the next expiring bucket @index 2. The upper
                 * levels have the next expiring bucket @index 1.
                 *
                 * In case that the propagation wraps the next level the same
                 * rules apply:
                 *
                 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
                 *  0    0    0    0    F    2
                 *
                 * So after looking at LVL0 we get:
                 *
                 * LVL5 LVL4 LVL3 LVL2 LVL1
                 *  0    0    0    1    0
                 *
                 * So no propagation from LVL1 to LVL2 because that happened
                 * with the add already, but then we need to propagate further
                 * from LVL2 to LVL3.
                 *
                 * So the simple check whether the lower bits of the current
                 * level are 0 or not is sufficient for all cases.
                 */
                adj = clk & LVL_CLK_MASK ? 1 : 0;
                clk >>= LVL_CLK_SHIFT;
                clk += adj;
        }
        return next;
}

/*
 * Check whether the next hrtimer event is before the next timer wheel
 * event:
 */
static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
{
        u64 nextevt = hrtimer_get_next_event();

        /*
         * If high resolution timers are enabled, hrtimer_get_next_event()
         * returns KTIME_MAX, so @expires always wins this comparison.
         */
        if (expires <= nextevt)
                return expires;

        /*
         * If the next timer is already expired, return the tick base
         * time so the tick is fired immediately.
         */
        if (nextevt <= basem)
                return basem;

        /*
         * Round up to the next jiffy. High resolution timers are off, so
         * the hrtimers are expired from the tick and we need to make sure
         * that this tick really expires the timer to avoid a ping pong
         * with the nohz stop code.
         *
         * Use DIV_ROUND_UP_ULL to prevent gcc from calling __divdi3.
         */
        return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
}
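
/*
 * Editorial worked example (not part of the original file): with HZ = 250
 * (TICK_NSEC = 4,000,000 ns) and nextevt = 10,000,001 ns,
 * DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) = 3, so the function above returns
 * 12,000,000 ns, i.e. the first tick boundary at or after the hrtimer
 * expiry.
 */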

/**
 * get_next_timer_interrupt - return the time (clock mono) of the next timer
 * @basej: base time jiffies
 * @basem: base time clock monotonic
 *
 * Returns the tick aligned clock monotonic time of the next pending
 * timer or KTIME_MAX if no timer is pending.
 */
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
        u64 expires = KTIME_MAX;
        unsigned long nextevt;
        bool is_max_delta;

        /*
         * Pretend that there is no timer pending if the cpu is offline.
         * Possible pending timers will be migrated later to an active cpu.
         */
        if (cpu_is_offline(smp_processor_id()))
                return expires;

        raw_spin_lock(&base->lock);
        nextevt = __next_timer_interrupt(base);
        is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
        base->next_expiry = nextevt;
        /*
         * We have a fresh next event. Check whether we can forward the
         * base. We can only do that when @basej is past base->clk;
         * otherwise we might rewind base->clk.
         */
        if (time_after(basej, base->clk)) {
                if (time_after(nextevt, basej))
                        base->clk = basej;
                else if (time_after(nextevt, base->clk))
                        base->clk = nextevt;
        }

        if (time_before_eq(nextevt, basej)) {
                expires = basem;
                base->is_idle = false;
        } else {
                if (!is_max_delta)
                        expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
                /*
                 * If we expect to sleep more than a tick, mark the base idle.
                 * The tick is also stopped, so any timer added afterwards
                 * must forward the base clk itself to keep granularity small.
                 * This idle logic is only maintained for the BASE_STD base;
                 * deferrable timers may still see large granularity skew
                 * (by design).
                 */
                if ((expires - basem) > TICK_NSEC) {
                        base->must_forward_clk = true;
                        base->is_idle = true;
                }
        }
        raw_spin_unlock(&base->lock);

        return cmp_next_hrtimer_event(basem, expires);
}
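
/*
 * Editorial sketch (not part of the original file): how a NOHZ-style caller
 * could turn the return value into a sleep length in nanoseconds. The real
 * consumer of this interface lives in kernel/time/tick-sched.c; the helper
 * below and its name are purely illustrative.
 */
static inline u64 example_next_timer_delta_ns(void)
{
        u64 basem = ktime_get();
        u64 nextevt = get_next_timer_interrupt(jiffies, basem);

        /* KTIME_MAX means no pending timer, i.e. no wheel-imposed bound. */
        return nextevt == KTIME_MAX ? KTIME_MAX : nextevt - basem;
}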

/**
 * timer_clear_idle - Clear the idle state of the timer base
 *
 * Called with interrupts disabled
 */
void timer_clear_idle(void)
{
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

        /*
         * We do this unlocked. The worst outcome is a remote enqueue sending
         * a pointless IPI, but taking the lock would just make the window
         * for sending the IPI a few instructions smaller, at the cost of
         * taking the lock in the idle-exit path.
         */
        base->is_idle = false;
}

static int collect_expired_timers(struct timer_base *base,
                                  struct hlist_head *heads)
{
        /*
         * NOHZ optimization. After a long idle sleep we need to forward the
         * base to current jiffies. Avoid a loop by searching the bitfield for
         * the next expiring timer.
         */
        if ((long)(jiffies - base->clk) > 2) {
                unsigned long next = __next_timer_interrupt(base);

                /*
                 * If the next timer is ahead of time, forward to current
                 * jiffies; otherwise forward to the next expiry time:
                 */
                if (time_after(next, jiffies)) {
                        /*
                         * The call site will increment base->clk and then
                         * terminate the expiry loop immediately.
                         */
                        base->clk = jiffies;
                        return 0;
                }
                base->clk = next;
        }
        return __collect_expired_timers(base, heads);
}
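
/*
 * Editorial worked example (not part of the original file): assume the CPU
 * slept for a long time, base->clk = 1000 and jiffies = 5000. If the
 * earliest pending timer expires around 4000, base->clk is forwarded
 * straight to that value and expiry processing starts there instead of
 * stepping through thousands of empty buckets. If the earliest timer
 * expires at 7000 (after jiffies), base->clk is set to jiffies and 0 is
 * returned, so the caller's expiry loop terminates after a single
 * increment.
 */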
#else
static inline int collect_expired_timers(struct timer_base *base,
                                         struct hlist_head *heads)
{
        return __collect_expired_timers(base, heads);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;

        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
        run_local_timers();
        rcu_check_callbacks(user_tick);
#ifdef CONFIG_IRQ_WORK
        if (in_irq())
                irq_work_tick();
#endif
        scheduler_tick();
        if (IS_ENABLED(CONFIG_POSIX_TIMERS))
                run_posix_cpu_timers(p);
}

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer base to be processed.
 */
static inline void __run_timers(struct timer_base *base)
{
        struct hlist_head heads[LVL_DEPTH];
        int levels;

        if (!time_after_eq(jiffies, base->clk))
                return;

        raw_spin_lock_irq(&base->lock);

        while (time_after_eq(jiffies, base->clk)) {

                levels = collect_expired_timers(base, heads);
                base->clk++;

                while (levels--)
                        expire_timers(base, heads + levels);
        }
        base->running_timer = NULL;
        raw_spin_unlock_irq(&base->lock);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

        /*
         * must_forward_clk must be cleared before running timers so that any
         * timer functions that call mod_timer() will not try to forward the
         * base. The idle tracking / clock forwarding logic is only used with
         * BASE_STD timers.
         *
         * The deferrable base does not do idle tracking at all, so we do
         * not forward it. This can result in very large variations in
         * granularity for deferrable timers, but they can be deferred for
         * long periods due to idle.
         */
        base->must_forward_clk = false;

        __run_timers(base);
        if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
                __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

        hrtimer_run_queues();
        /* Raise the softirq only if required. */
        if (time_before(jiffies, base->clk)) {
                if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
                        return;
                /* CPU is awake, so check the deferrable base. */
                base++;
                if (time_before(jiffies, base->clk))
                        return;
        }
        raise_softirq(TIMER_SOFTIRQ);
}

/*
 * Since schedule_timeout()'s timer is defined on the stack, it must store
 * the target task on the stack as well.
 */
struct process_timer {
        struct timer_list timer;
        struct task_struct *task;
};

static void process_timeout(struct timer_list *t)
{
        struct process_timer *timeout = from_timer(timeout, t, timer);

        wake_up_process(timeout->task);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * Returns 0 when the timer has expired, otherwise the remaining time in
 * jiffies will be returned. In all cases the return value is guaranteed
 * to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
        struct process_timer timer;
        unsigned long expire;

        switch (timeout) {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * This special case is for the caller's convenience, nothing
                 * more. We could have carved MAX_SCHEDULE_TIMEOUT out of the
                 * negative range, but returning a valid offset (>= 0) lets
                 * the caller do whatever it wants with the return value.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of paranoia. Note that the retval will be 0
                 * since no part of the kernel is supposed to check for a
                 * negative retval from schedule_timeout() (it should never
                 * happen anyway). The printk() below just tells you that
                 * something has gone wrong, and where.
                 */
                if (timeout < 0) {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx\n", timeout);
                        dump_stack();
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        timer.task = current;
        timer_setup_on_stack(&timer.timer, process_timeout, 0);
        __mod_timer(&timer.timer, expire, 0);
        schedule();
        del_singleshot_timer_sync(&timer.timer);

        /* Remove the timer from the object tracker */
        destroy_timer_on_stack(&timer.timer);

        timeout = expire - jiffies;

 out:
        return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
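
/*
 * Editorial sketch (not part of the original file): the canonical wait loop
 * described in the kernel-doc above. The task state is set *before* the
 * condition is checked so that a concurrent wake_up_process() cannot be
 * lost, and the remaining jiffies returned by schedule_timeout() keep the
 * overall timeout bounded across spurious wakeups. The helper and its
 * condition callback are hypothetical.
 */
static inline signed long
example_wait_event_timeout(bool (*condition)(void *), void *arg,
                           signed long timeout)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        while (!condition(arg) && timeout) {
                timeout = schedule_timeout(timeout);
                set_current_state(TASK_UNINTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return timeout;
}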

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
        __set_current_state(TASK_KILLABLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/*
 * Like schedule_timeout_uninterruptible(), except this task will not
 * contribute to load average.
 */
signed long __sched schedule_timeout_idle(signed long timeout)
{
        __set_current_state(TASK_IDLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_idle);

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
{
        struct timer_list *timer;
        int cpu = new_base->cpu;

        while (!hlist_empty(head)) {
                timer = hlist_entry(head->first, struct timer_list, entry);
                detach_timer(timer, false);
                timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
                internal_add_timer(new_base, timer);
        }
}

int timers_prepare_cpu(unsigned int cpu)
{
        struct timer_base *base;
        int b;

        for (b = 0; b < NR_BASES; b++) {
                base = per_cpu_ptr(&timer_bases[b], cpu);
                base->clk = jiffies;
                base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
                base->is_idle = false;
                base->must_forward_clk = true;
        }
        return 0;
}

int timers_dead_cpu(unsigned int cpu)
{
        struct timer_base *old_base;
        struct timer_base *new_base;
        int b, i;

        BUG_ON(cpu_online(cpu));

        for (b = 0; b < NR_BASES; b++) {
                old_base = per_cpu_ptr(&timer_bases[b], cpu);
                new_base = get_cpu_ptr(&timer_bases[b]);
                /*
                 * The caller is globally serialized and nobody else
                 * takes two locks at once, so deadlock is not possible.
                 */
                raw_spin_lock_irq(&new_base->lock);
                raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

                /*
                 * The current CPU's base clock might be stale. Update it
                 * before moving the timers over.
                 */
                forward_timer_base(new_base);

                BUG_ON(old_base->running_timer);

                for (i = 0; i < WHEEL_SIZE; i++)
                        migrate_timer_list(new_base, old_base->vectors + i);

                raw_spin_unlock(&old_base->lock);
                raw_spin_unlock_irq(&new_base->lock);
                put_cpu_ptr(&timer_bases);
        }
        return 0;
}

#endif /* CONFIG_HOTPLUG_CPU */

static void __init init_timer_cpu(int cpu)
{
        struct timer_base *base;
        int i;

        for (i = 0; i < NR_BASES; i++) {
                base = per_cpu_ptr(&timer_bases[i], cpu);
                base->cpu = cpu;
                raw_spin_lock_init(&base->lock);
                base->clk = jiffies;
        }
}

static void __init init_timer_cpus(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                init_timer_cpu(cpu);
}

void __init init_timers(void)
{
        init_timer_cpus();
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout)
                timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);
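
/*
 * Editorial note (not part of the original file): the "+ 1" above guarantees
 * a sleep of at least the requested time. For example, at HZ = 100 (10 ms
 * per jiffy) msleep(1) converts to msecs_to_jiffies(1) + 1 = 2 jiffies, so
 * the actual sleep is roughly 10-20 ms depending on where in the current
 * jiffy the call happens.
 */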

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout && !signal_pending(current))
                timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);

/**
 * usleep_range - Sleep for an approximate time
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 *
 * In non-atomic context where the exact wakeup time is flexible, use
 * usleep_range() instead of udelay(). The sleep improves responsiveness
 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
 * power usage by allowing hrtimers to take advantage of an already-
 * scheduled interrupt instead of scheduling a new one just for this sleep.
 */
void __sched usleep_range(unsigned long min, unsigned long max)
{
        ktime_t exp = ktime_add_us(ktime_get(), min);
        u64 delta = (u64)(max - min) * NSEC_PER_USEC;

        for (;;) {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                /* Do not return before the requested sleep time has elapsed */
                if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
                        break;
        }
}
EXPORT_SYMBOL(usleep_range);
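
/*
 * Editorial sketch (not part of the original file): polling a slow device
 * with usleep_range() instead of udelay(), as the kernel-doc above
 * recommends for non-atomic context. The poll callback, the ready bit and
 * the retry count are hypothetical.
 */
static inline int example_poll_until_ready(u32 (*read_status)(void *),
                                           void *dev, u32 ready_bit)
{
        int retries = 100;

        while (retries--) {
                if (read_status(dev) & ready_bit)
                        return 0;
                /* Any wakeup in the 100-200 us window is good enough. */
                usleep_range(100, 200);
        }
        return -ETIMEDOUT;
}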