1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Kernel internal timers
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
8 *
9 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
10 * "A Kernel Model for Precision Timekeeping" by Dave Mills
11 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
12 * serialize accesses to xtime/lost_ticks).
13 * Copyright (C) 1998 Andrea Arcangeli
14 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
15 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
16 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
17 * Copyright (C) 2000, 2001, 2002 Ingo Molnar
18 * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
19 */
20
21#include <linux/kernel_stat.h>
22#include <linux/export.h>
23#include <linux/interrupt.h>
24#include <linux/percpu.h>
25#include <linux/init.h>
26#include <linux/mm.h>
27#include <linux/swap.h>
28#include <linux/pid_namespace.h>
29#include <linux/notifier.h>
30#include <linux/thread_info.h>
31#include <linux/time.h>
32#include <linux/jiffies.h>
33#include <linux/posix-timers.h>
34#include <linux/cpu.h>
35#include <linux/syscalls.h>
36#include <linux/delay.h>
37#include <linux/tick.h>
38#include <linux/kallsyms.h>
39#include <linux/irq_work.h>
40#include <linux/sched/signal.h>
41#include <linux/sched/sysctl.h>
42#include <linux/sched/nohz.h>
43#include <linux/sched/debug.h>
44#include <linux/slab.h>
45#include <linux/compat.h>
46#include <linux/random.h>
47#include <linux/sysctl.h>
48
49#include <linux/uaccess.h>
50#include <asm/unistd.h>
51#include <asm/div64.h>
52#include <asm/timex.h>
53#include <asm/io.h>
54
55#include "tick-internal.h"
56#include "timer_migration.h"
57
58#define CREATE_TRACE_POINTS
59#include <trace/events/timer.h>
60
61__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
62
63EXPORT_SYMBOL(jiffies_64);
64
65/*
66 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
67 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
68 * level has a different granularity.
69 *
70 * The level granularity is: LVL_CLK_DIV ^ level
71 * The level clock frequency is: HZ / (LVL_CLK_DIV ^ level)
72 *
73 * The array level of a newly armed timer depends on the relative expiry
74 * time. The farther the expiry time is away the higher the array level and
75 * therefore the granularity becomes.
76 *
77 * Contrary to the original timer wheel implementation, which aims for 'exact'
78 * expiry of the timers, this implementation removes the need for recascading
79 * the timers into the lower array levels. The previous 'classic' timer wheel
80 * implementation of the kernel already violated the 'exact' expiry by adding
81 * slack to the expiry time to provide batched expiration. The granularity
82 * levels provide implicit batching.
83 *
84 * This is an optimization of the original timer wheel implementation for the
85 * majority of the timer wheel use cases: timeouts. The vast majority of
86 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
87 * the timeout expires it indicates that normal operation is disturbed, so it
88 * does not matter much whether the timeout comes with a slight delay.
89 *
90 * The only exception to this are networking timers with a small expiry
91 * time. They rely on the granularity. Those fit into the first wheel level,
92 * which has HZ granularity.
93 *
94 * We don't have cascading anymore. Timers with an expiry time above the
95 * capacity of the last wheel level are force expired at the maximum timeout
96 * value of the last wheel level. From data sampling we know that the maximum
97 * value observed is 5 days (network connection tracking), so this should not
98 * be an issue.
99 *
100 * The currently chosen array constant values are a good compromise between
101 * array size and granularity.
102 *
103 * This results in the following granularity and range levels:
104 *
105 * HZ 1000 steps
106 * Level Offset Granularity Range
107 * 0 0 1 ms 0 ms - 63 ms
108 * 1 64 8 ms 64 ms - 511 ms
109 * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s)
110 * 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s)
111 * 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m)
112 * 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m)
113 * 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h)
114 * 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d)
115 * 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d)
116 *
117 * HZ 300
118 * Level Offset Granularity Range
119 * 0 0 3 ms 0 ms - 210 ms
120 * 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s)
121 * 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s)
122 * 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m)
123 * 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m)
124 * 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h)
125 * 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h)
126 * 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d)
127 * 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
128 *
129 * HZ 250
130 * Level Offset Granularity Range
131 * 0 0 4 ms 0 ms - 255 ms
132 * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s)
133 * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s)
134 * 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m)
135 * 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m)
136 * 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h)
137 * 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h)
138 * 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d)
139 * 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
140 *
141 * HZ 100
142 * Level Offset Granularity Range
143 * 0 0 10 ms 0 ms - 630 ms
144 * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s)
145 * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s)
146 * 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m)
147 * 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m)
148 * 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h)
149 * 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d)
150 * 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
151 */
152
153/* Clock divisor for the next level */
154#define LVL_CLK_SHIFT 3
155#define LVL_CLK_DIV (1UL << LVL_CLK_SHIFT)
156#define LVL_CLK_MASK (LVL_CLK_DIV - 1)
157#define LVL_SHIFT(n) ((n) * LVL_CLK_SHIFT)
158#define LVL_GRAN(n) (1UL << LVL_SHIFT(n))
159
160/*
161 * The time start value for each level to select the bucket at enqueue
162 * time. We start from the last possible delta of the previous level
163 * so that we can later add an extra LVL_GRAN(n) to n (see calc_index()).
164 */
165#define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
166
167/* Size of each clock level */
168#define LVL_BITS 6
169#define LVL_SIZE (1UL << LVL_BITS)
170#define LVL_MASK (LVL_SIZE - 1)
171#define LVL_OFFS(n) ((n) * LVL_SIZE)
172
173/* Level depth */
174#if HZ > 100
175# define LVL_DEPTH 9
176# else
177# define LVL_DEPTH 8
178#endif
179
180/* The cutoff (max. capacity of the wheel) */
181#define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH))
182#define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
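/*
 * Illustrative sketch (not kernel code) of how the constants above map a
 * relative expiry to a wheel level, assuming HZ=1000 and LVL_CLK_SHIFT=3:
 *
 *	unsigned long delta = 100;		// 100 jiffies = 100 ms ahead
 *
 *	// LVL_START(1) == 63 and LVL_START(2) == 63 << 3 == 504, so a
 *	// delta of 100 is placed in level 1, where LVL_GRAN(1) == 8,
 *	// i.e. the timer gets 8 ms granularity.
 *
 * The real selection is done by calc_wheel_index() further down; the
 * numbers simply restate the HZ 1000 table above.
 */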
183
184/*
185 * The resulting wheel size. If NOHZ is configured, three wheels are
186 * allocated: separate storage for local, global and deferrable timers.
187 */
188#define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH)
189
190#ifdef CONFIG_NO_HZ_COMMON
191/*
192 * If multiple bases need to be locked, use the base ordering for lock
193 * nesting, i.e. lowest number first.
194 */
195# define NR_BASES 3
196# define BASE_LOCAL 0
197# define BASE_GLOBAL 1
198# define BASE_DEF 2
199#else
200# define NR_BASES 1
201# define BASE_LOCAL 0
202# define BASE_GLOBAL 0
203# define BASE_DEF 0
204#endif
205
206/**
207 * struct timer_base - Per CPU timer base (number of bases depends on config)
208 * @lock: Lock protecting the timer_base
209 * @running_timer: When expiring timers, the lock is dropped. To make
210 * sure not to race against deleting/modifying a
211 * currently running timer, the pointer is set to the
212 * timer which is currently being expired. If no timer is
213 * running, the pointer is NULL.
214 * @expiry_lock: PREEMPT_RT only: Lock is taken in softirq around
215 * timer expiry callback execution and when trying to
216 * delete a running timer when the first attempt was
217 * not successful. It prevents priority inversion
218 * when the callback was preempted on a remote CPU and
219 * a caller tries to delete the running timer. It also
220 * prevents a live lock, when the task which tries to
221 * delete a timer preempted the softirq thread which
222 * is running the timer callback function.
223 * @timer_waiters: PREEMPT_RT only: Tells whether there is a waiter
224 * waiting for the end of the timer callback function
225 * execution.
226 * @clk: clock of the timer base; is updated before enqueue
227 * of a timer; during expiry, it is 1 offset ahead of
228 * jiffies to avoid endless requeuing to current
229 * jiffies
230 * @next_expiry: expiry value of the first timer; it is updated when
231 * finding the next timer and during enqueue; the
232 * value is not valid, when next_expiry_recalc is set
233 * @cpu: Number of CPU the timer base belongs to
234 * @next_expiry_recalc: States whether a recalculation of next_expiry is
235 * required. Value is set to true when a timer was
236 * deleted.
237 * @is_idle: Is set, when timer_base is idle. It is triggered by NOHZ
238 * code. This state is only used in the standard
239 * base. Deferrable timers, which are enqueued remotely,
240 * never wake up an idle CPU, so there is no need to
241 * support it for the deferrable base.
242 * @timers_pending: Is set, when a timer is pending in the base. It is only
243 * reliable when next_expiry_recalc is not set.
244 * @pending_map: bitmap of the timer wheel; each bit reflects a
245 * bucket of the wheel. When a bit is set, at least a
246 * single timer is enqueued in the related bucket.
247 * @vectors: Array of lists; Each array member reflects a bucket
248 * of the timer wheel. The list contains all timers
249 * which are enqueued into a specific bucket.
250 */
251struct timer_base {
252 raw_spinlock_t lock;
253 struct timer_list *running_timer;
254#ifdef CONFIG_PREEMPT_RT
255 spinlock_t expiry_lock;
256 atomic_t timer_waiters;
257#endif
258 unsigned long clk;
259 unsigned long next_expiry;
260 unsigned int cpu;
261 bool next_expiry_recalc;
262 bool is_idle;
263 bool timers_pending;
264 DECLARE_BITMAP(pending_map, WHEEL_SIZE);
265 struct hlist_head vectors[WHEEL_SIZE];
266} ____cacheline_aligned;
267
268static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
269
270#ifdef CONFIG_NO_HZ_COMMON
271
272static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
273static DEFINE_MUTEX(timer_keys_mutex);
274
275static void timer_update_keys(struct work_struct *work);
276static DECLARE_WORK(timer_update_work, timer_update_keys);
277
278#ifdef CONFIG_SMP
279static unsigned int sysctl_timer_migration = 1;
280
281DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
282
283static void timers_update_migration(void)
284{
285 if (sysctl_timer_migration && tick_nohz_active)
286 static_branch_enable(&timers_migration_enabled);
287 else
288 static_branch_disable(&timers_migration_enabled);
289}
290
291#ifdef CONFIG_SYSCTL
292static int timer_migration_handler(struct ctl_table *table, int write,
293 void *buffer, size_t *lenp, loff_t *ppos)
294{
295 int ret;
296
297 mutex_lock(&timer_keys_mutex);
298 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
299 if (!ret && write)
300 timers_update_migration();
301 mutex_unlock(&timer_keys_mutex);
302 return ret;
303}
304
305static struct ctl_table timer_sysctl[] = {
306 {
307 .procname = "timer_migration",
308 .data = &sysctl_timer_migration,
309 .maxlen = sizeof(unsigned int),
310 .mode = 0644,
311 .proc_handler = timer_migration_handler,
312 .extra1 = SYSCTL_ZERO,
313 .extra2 = SYSCTL_ONE,
314 },
315 {}
316};
317
318static int __init timer_sysctl_init(void)
319{
320 register_sysctl("kernel", timer_sysctl);
321 return 0;
322}
323device_initcall(timer_sysctl_init);
324#endif /* CONFIG_SYSCTL */
325#else /* CONFIG_SMP */
326static inline void timers_update_migration(void) { }
327#endif /* !CONFIG_SMP */
328
329static void timer_update_keys(struct work_struct *work)
330{
331 mutex_lock(&timer_keys_mutex);
332 timers_update_migration();
333 static_branch_enable(&timers_nohz_active);
334 mutex_unlock(&timer_keys_mutex);
335}
336
337void timers_update_nohz(void)
338{
339 schedule_work(&timer_update_work);
340}
341
342static inline bool is_timers_nohz_active(void)
343{
344 return static_branch_unlikely(&timers_nohz_active);
345}
346#else
347static inline bool is_timers_nohz_active(void) { return false; }
348#endif /* NO_HZ_COMMON */
349
350static unsigned long round_jiffies_common(unsigned long j, int cpu,
351 bool force_up)
352{
353 int rem;
354 unsigned long original = j;
355
356 /*
357 * We don't want all cpus firing their timers at once hitting the
358 * same lock or cachelines, so we skew each extra cpu with an extra
359 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
360 * already did this.
361 * The skew is done by adding 3*cpunr, then rounding, then subtracting this
362 * extra offset again.
363 */
364 j += cpu * 3;
365
366 rem = j % HZ;
367
368 /*
369 * If the target jiffy is just after a whole second (which can happen
370 * due to delays of the timer irq, long irq off times etc etc) then
371 * we should round down to the whole second, not up. Use 1/4th second
372 * as cutoff for this rounding as an extreme upper bound for this.
373 * But never round down if @force_up is set.
374 */
375 if (rem < HZ/4 && !force_up) /* round down */
376 j = j - rem;
377 else /* round up */
378 j = j - rem + HZ;
379
380 /* now that we have rounded, subtract the extra skew again */
381 j -= cpu * 3;
382
383 /*
384 * Make sure j is still in the future. Otherwise return the
385 * unmodified value.
386 */
387 return time_is_after_jiffies(j) ? j : original;
388}
389
390/**
391 * __round_jiffies - function to round jiffies to a full second
392 * @j: the time in (absolute) jiffies that should be rounded
393 * @cpu: the processor number on which the timeout will happen
394 *
395 * __round_jiffies() rounds an absolute time in the future (in jiffies)
396 * up or down to (approximately) full seconds. This is useful for timers
397 * for which the exact time they fire does not matter too much, as long as
398 * they fire approximately every X seconds.
399 *
400 * By rounding these timers to whole seconds, all such timers will fire
401 * at the same time, rather than at various times spread out. The goal
402 * of this is to have the CPU wake up less, which saves power.
403 *
404 * The exact rounding is skewed for each processor to avoid all
405 * processors firing at the exact same time, which could lead
406 * to lock contention or spurious cache line bouncing.
407 *
408 * The return value is the rounded version of the @j parameter.
409 */
410unsigned long __round_jiffies(unsigned long j, int cpu)
411{
412 return round_jiffies_common(j, cpu, false);
413}
414EXPORT_SYMBOL_GPL(__round_jiffies);
415
416/**
417 * __round_jiffies_relative - function to round jiffies to a full second
418 * @j: the time in (relative) jiffies that should be rounded
419 * @cpu: the processor number on which the timeout will happen
420 *
421 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
422 * up or down to (approximately) full seconds. This is useful for timers
423 * for which the exact time they fire does not matter too much, as long as
424 * they fire approximately every X seconds.
425 *
426 * By rounding these timers to whole seconds, all such timers will fire
427 * at the same time, rather than at various times spread out. The goal
428 * of this is to have the CPU wake up less, which saves power.
429 *
430 * The exact rounding is skewed for each processor to avoid all
431 * processors firing at the exact same time, which could lead
432 * to lock contention or spurious cache line bouncing.
433 *
434 * The return value is the rounded version of the @j parameter.
435 */
436unsigned long __round_jiffies_relative(unsigned long j, int cpu)
437{
438 unsigned long j0 = jiffies;
439
440 /* Use j0 because jiffies might change while we run */
441 return round_jiffies_common(j + j0, cpu, false) - j0;
442}
443EXPORT_SYMBOL_GPL(__round_jiffies_relative);
444
445/**
446 * round_jiffies - function to round jiffies to a full second
447 * @j: the time in (absolute) jiffies that should be rounded
448 *
449 * round_jiffies() rounds an absolute time in the future (in jiffies)
450 * up or down to (approximately) full seconds. This is useful for timers
451 * for which the exact time they fire does not matter too much, as long as
452 * they fire approximately every X seconds.
453 *
454 * By rounding these timers to whole seconds, all such timers will fire
455 * at the same time, rather than at various times spread out. The goal
456 * of this is to have the CPU wake up less, which saves power.
457 *
458 * The return value is the rounded version of the @j parameter.
459 */
460unsigned long round_jiffies(unsigned long j)
461{
462 return round_jiffies_common(j, raw_smp_processor_id(), false);
463}
464EXPORT_SYMBOL_GPL(round_jiffies);
465
466/**
467 * round_jiffies_relative - function to round jiffies to a full second
468 * @j: the time in (relative) jiffies that should be rounded
469 *
470 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
471 * up or down to (approximately) full seconds. This is useful for timers
472 * for which the exact time they fire does not matter too much, as long as
473 * they fire approximately every X seconds.
474 *
475 * By rounding these timers to whole seconds, all such timers will fire
476 * at the same time, rather than at various times spread out. The goal
477 * of this is to have the CPU wake up less, which saves power.
478 *
479 * The return value is the rounded version of the @j parameter.
480 */
481unsigned long round_jiffies_relative(unsigned long j)
482{
483 return __round_jiffies_relative(j, raw_smp_processor_id());
484}
485EXPORT_SYMBOL_GPL(round_jiffies_relative);
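/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * a periodic housekeeping timer that only needs roughly one second
 * resolution can batch its wakeups with other timers by rounding:
 *
 *	static void housekeeping_fn(struct timer_list *t)
 *	{
 *		// ... do the periodic work ...
 *		mod_timer(t, round_jiffies(jiffies + 5 * HZ));
 *	}
 *
 * round_jiffies_relative() is the variant to use when a relative delta
 * is needed, e.g. for queue_delayed_work().
 */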
486
487/**
488 * __round_jiffies_up - function to round jiffies up to a full second
489 * @j: the time in (absolute) jiffies that should be rounded
490 * @cpu: the processor number on which the timeout will happen
491 *
492 * This is the same as __round_jiffies() except that it will never
493 * round down. This is useful for timeouts for which the exact time
494 * of firing does not matter too much, as long as they don't fire too
495 * early.
496 */
497unsigned long __round_jiffies_up(unsigned long j, int cpu)
498{
499 return round_jiffies_common(j, cpu, true);
500}
501EXPORT_SYMBOL_GPL(__round_jiffies_up);
502
503/**
504 * __round_jiffies_up_relative - function to round jiffies up to a full second
505 * @j: the time in (relative) jiffies that should be rounded
506 * @cpu: the processor number on which the timeout will happen
507 *
508 * This is the same as __round_jiffies_relative() except that it will never
509 * round down. This is useful for timeouts for which the exact time
510 * of firing does not matter too much, as long as they don't fire too
511 * early.
512 */
513unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
514{
515 unsigned long j0 = jiffies;
516
517 /* Use j0 because jiffies might change while we run */
518 return round_jiffies_common(j + j0, cpu, true) - j0;
519}
520EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
521
522/**
523 * round_jiffies_up - function to round jiffies up to a full second
524 * @j: the time in (absolute) jiffies that should be rounded
525 *
526 * This is the same as round_jiffies() except that it will never
527 * round down. This is useful for timeouts for which the exact time
528 * of firing does not matter too much, as long as they don't fire too
529 * early.
530 */
531unsigned long round_jiffies_up(unsigned long j)
532{
533 return round_jiffies_common(j, raw_smp_processor_id(), true);
534}
535EXPORT_SYMBOL_GPL(round_jiffies_up);
536
537/**
538 * round_jiffies_up_relative - function to round jiffies up to a full second
539 * @j: the time in (relative) jiffies that should be rounded
540 *
541 * This is the same as round_jiffies_relative() except that it will never
542 * round down. This is useful for timeouts for which the exact time
543 * of firing does not matter too much, as long as they don't fire too
544 * early.
545 */
546unsigned long round_jiffies_up_relative(unsigned long j)
547{
548 return __round_jiffies_up_relative(j, raw_smp_processor_id());
549}
550EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
551
552
553static inline unsigned int timer_get_idx(struct timer_list *timer)
554{
555 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
556}
557
558static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
559{
560 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
561 idx << TIMER_ARRAYSHIFT;
562}
563
564/*
565 * Helper function to calculate the array index for a given expiry
566 * time.
567 */
568static inline unsigned calc_index(unsigned long expires, unsigned lvl,
569 unsigned long *bucket_expiry)
570{
571
572 /*
573 * The timer wheel has to guarantee that a timer does not fire
574 * early. Early expiry can happen due to:
575 * - Timer is armed at the edge of a tick
576 * - Truncation of the expiry time in the outer wheel levels
577 *
578 * Round up with level granularity to prevent this.
579 */
580 expires = (expires >> LVL_SHIFT(lvl)) + 1;
581 *bucket_expiry = expires << LVL_SHIFT(lvl);
582 return LVL_OFFS(lvl) + (expires & LVL_MASK);
583}
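/*
 * Worked example for the rounding above (illustrative only), assuming
 * HZ=1000, lvl == 1 (LVL_SHIFT(1) == 3) and expires == 100:
 *
 *	expires = (100 >> 3) + 1 = 13;
 *	*bucket_expiry = 13 << 3 = 104;		// never earlier than 100
 *	return LVL_OFFS(1) + (13 & LVL_MASK);	// 64 + 13 = 77
 *
 * The '+ 1' rounds up to the next granularity boundary of the level, so
 * truncation can never make the timer fire early.
 */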
584
585static int calc_wheel_index(unsigned long expires, unsigned long clk,
586 unsigned long *bucket_expiry)
587{
588 unsigned long delta = expires - clk;
589 unsigned int idx;
590
591 if (delta < LVL_START(1)) {
592 idx = calc_index(expires, 0, bucket_expiry);
593 } else if (delta < LVL_START(2)) {
594 idx = calc_index(expires, 1, bucket_expiry);
595 } else if (delta < LVL_START(3)) {
596 idx = calc_index(expires, 2, bucket_expiry);
597 } else if (delta < LVL_START(4)) {
598 idx = calc_index(expires, 3, bucket_expiry);
599 } else if (delta < LVL_START(5)) {
600 idx = calc_index(expires, 4, bucket_expiry);
601 } else if (delta < LVL_START(6)) {
602 idx = calc_index(expires, 5, bucket_expiry);
603 } else if (delta < LVL_START(7)) {
604 idx = calc_index(expires, 6, bucket_expiry);
605 } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
606 idx = calc_index(expires, 7, bucket_expiry);
607 } else if ((long) delta < 0) {
608 idx = clk & LVL_MASK;
609 *bucket_expiry = clk;
610 } else {
611 /*
612 * Force expire obscene large timeouts to expire at the
613 * capacity limit of the wheel.
614 */
615 if (delta >= WHEEL_TIMEOUT_CUTOFF)
616 expires = clk + WHEEL_TIMEOUT_MAX;
617
618 idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry);
619 }
620 return idx;
621}
622
623static void
624trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
625{
626 /*
627 * Deferrable timers do not prevent the CPU from entering dynticks and
628 * are not taken into account on the idle/nohz_full path. An IPI when a
629 * new deferrable timer is enqueued will wake up the remote CPU but
630 * nothing will be done with the deferrable timer base. Therefore skip
631 * the remote IPI for deferrable timers completely.
632 */
633 if (!is_timers_nohz_active() || timer->flags & TIMER_DEFERRABLE)
634 return;
635
636 /*
637 * We might have to IPI the remote CPU if the base is idle and the
638 * timer is pinned. If it is a non-pinned timer, it is only queued
639 * on the remote CPU when the timer was running during queueing. Then
640 * everything is handled by the remote CPU anyway. If the other CPU is
641 * on the way to idle then it can't set base->is_idle as we hold
642 * the base lock:
643 */
644 if (base->is_idle) {
645 WARN_ON_ONCE(!(timer->flags & TIMER_PINNED ||
646 tick_nohz_full_cpu(base->cpu)));
647 wake_up_nohz_cpu(base->cpu);
648 }
649}
650
651/*
652 * Enqueue the timer into the hash bucket, mark it pending in
653 * the bitmap, store the index in the timer flags then wake up
654 * the target CPU if needed.
655 */
656static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
657 unsigned int idx, unsigned long bucket_expiry)
658{
659
660 hlist_add_head(&timer->entry, base->vectors + idx);
661 __set_bit(idx, base->pending_map);
662 timer_set_idx(timer, idx);
663
664 trace_timer_start(timer, bucket_expiry);
665
666 /*
667 * Check whether this is the new first expiring timer. The
668 * effective expiry time of the timer is required here
669 * (bucket_expiry) instead of timer->expires.
670 */
671 if (time_before(bucket_expiry, base->next_expiry)) {
672 /*
673 * Set the next expiry time and kick the CPU so it
674 * can reevaluate the wheel:
675 */
676 base->next_expiry = bucket_expiry;
677 base->timers_pending = true;
678 base->next_expiry_recalc = false;
679 trigger_dyntick_cpu(base, timer);
680 }
681}
682
683static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
684{
685 unsigned long bucket_expiry;
686 unsigned int idx;
687
688 idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
689 enqueue_timer(base, timer, idx, bucket_expiry);
690}
691
692#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
693
694static const struct debug_obj_descr timer_debug_descr;
695
696struct timer_hint {
697 void (*function)(struct timer_list *t);
698 long offset;
699};
700
701#define TIMER_HINT(fn, container, timr, hintfn) \
702 { \
703 .function = fn, \
704 .offset = offsetof(container, hintfn) - \
705 offsetof(container, timr) \
706 }
707
708static const struct timer_hint timer_hints[] = {
709 TIMER_HINT(delayed_work_timer_fn,
710 struct delayed_work, timer, work.func),
711 TIMER_HINT(kthread_delayed_work_timer_fn,
712 struct kthread_delayed_work, timer, work.func),
713};
714
715static void *timer_debug_hint(void *addr)
716{
717 struct timer_list *timer = addr;
718 int i;
719
720 for (i = 0; i < ARRAY_SIZE(timer_hints); i++) {
721 if (timer_hints[i].function == timer->function) {
722 void (**fn)(void) = addr + timer_hints[i].offset;
723
724 return *fn;
725 }
726 }
727
728 return timer->function;
729}
730
731static bool timer_is_static_object(void *addr)
732{
733 struct timer_list *timer = addr;
734
735 return (timer->entry.pprev == NULL &&
736 timer->entry.next == TIMER_ENTRY_STATIC);
737}
738
739/*
740 * timer_fixup_init is called when:
741 * - an active object is initialized
742 */
743static bool timer_fixup_init(void *addr, enum debug_obj_state state)
744{
745 struct timer_list *timer = addr;
746
747 switch (state) {
748 case ODEBUG_STATE_ACTIVE:
749 del_timer_sync(timer);
750 debug_object_init(timer, &timer_debug_descr);
751 return true;
752 default:
753 return false;
754 }
755}
756
757/* Stub timer callback for improperly used timers. */
758static void stub_timer(struct timer_list *unused)
759{
760 WARN_ON(1);
761}
762
763/*
764 * timer_fixup_activate is called when:
765 * - an active object is activated
766 * - an unknown non-static object is activated
767 */
768static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
769{
770 struct timer_list *timer = addr;
771
772 switch (state) {
773 case ODEBUG_STATE_NOTAVAILABLE:
774 timer_setup(timer, stub_timer, 0);
775 return true;
776
777 case ODEBUG_STATE_ACTIVE:
778 WARN_ON(1);
779 fallthrough;
780 default:
781 return false;
782 }
783}
784
785/*
786 * timer_fixup_free is called when:
787 * - an active object is freed
788 */
789static bool timer_fixup_free(void *addr, enum debug_obj_state state)
790{
791 struct timer_list *timer = addr;
792
793 switch (state) {
794 case ODEBUG_STATE_ACTIVE:
795 del_timer_sync(timer);
796 debug_object_free(timer, &timer_debug_descr);
797 return true;
798 default:
799 return false;
800 }
801}
802
803/*
804 * timer_fixup_assert_init is called when:
805 * - an untracked/uninit-ed object is found
806 */
807static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
808{
809 struct timer_list *timer = addr;
810
811 switch (state) {
812 case ODEBUG_STATE_NOTAVAILABLE:
813 timer_setup(timer, stub_timer, 0);
814 return true;
815 default:
816 return false;
817 }
818}
819
820static const struct debug_obj_descr timer_debug_descr = {
821 .name = "timer_list",
822 .debug_hint = timer_debug_hint,
823 .is_static_object = timer_is_static_object,
824 .fixup_init = timer_fixup_init,
825 .fixup_activate = timer_fixup_activate,
826 .fixup_free = timer_fixup_free,
827 .fixup_assert_init = timer_fixup_assert_init,
828};
829
830static inline void debug_timer_init(struct timer_list *timer)
831{
832 debug_object_init(timer, &timer_debug_descr);
833}
834
835static inline void debug_timer_activate(struct timer_list *timer)
836{
837 debug_object_activate(timer, &timer_debug_descr);
838}
839
840static inline void debug_timer_deactivate(struct timer_list *timer)
841{
842 debug_object_deactivate(timer, &timer_debug_descr);
843}
844
845static inline void debug_timer_assert_init(struct timer_list *timer)
846{
847 debug_object_assert_init(timer, &timer_debug_descr);
848}
849
850static void do_init_timer(struct timer_list *timer,
851 void (*func)(struct timer_list *),
852 unsigned int flags,
853 const char *name, struct lock_class_key *key);
854
855void init_timer_on_stack_key(struct timer_list *timer,
856 void (*func)(struct timer_list *),
857 unsigned int flags,
858 const char *name, struct lock_class_key *key)
859{
860 debug_object_init_on_stack(timer, &timer_debug_descr);
861 do_init_timer(timer, func, flags, name, key);
862}
863EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
864
865void destroy_timer_on_stack(struct timer_list *timer)
866{
867 debug_object_free(timer, &timer_debug_descr);
868}
869EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
870
871#else
872static inline void debug_timer_init(struct timer_list *timer) { }
873static inline void debug_timer_activate(struct timer_list *timer) { }
874static inline void debug_timer_deactivate(struct timer_list *timer) { }
875static inline void debug_timer_assert_init(struct timer_list *timer) { }
876#endif
877
878static inline void debug_init(struct timer_list *timer)
879{
880 debug_timer_init(timer);
881 trace_timer_init(timer);
882}
883
884static inline void debug_deactivate(struct timer_list *timer)
885{
886 debug_timer_deactivate(timer);
887 trace_timer_cancel(timer);
888}
889
890static inline void debug_assert_init(struct timer_list *timer)
891{
892 debug_timer_assert_init(timer);
893}
894
895static void do_init_timer(struct timer_list *timer,
896 void (*func)(struct timer_list *),
897 unsigned int flags,
898 const char *name, struct lock_class_key *key)
899{
900 timer->entry.pprev = NULL;
901 timer->function = func;
902 if (WARN_ON_ONCE(flags & ~TIMER_INIT_FLAGS))
903 flags &= TIMER_INIT_FLAGS;
904 timer->flags = flags | raw_smp_processor_id();
905 lockdep_init_map(&timer->lockdep_map, name, key, 0);
906}
907
908/**
909 * init_timer_key - initialize a timer
910 * @timer: the timer to be initialized
911 * @func: timer callback function
912 * @flags: timer flags
913 * @name: name of the timer
914 * @key: lockdep class key of the fake lock used for tracking timer
915 * sync lock dependencies
916 *
917 * init_timer_key() must be done to a timer prior to calling *any* of the
918 * other timer functions.
919 */
920void init_timer_key(struct timer_list *timer,
921 void (*func)(struct timer_list *), unsigned int flags,
922 const char *name, struct lock_class_key *key)
923{
924 debug_init(timer);
925 do_init_timer(timer, func, flags, name, key);
926}
927EXPORT_SYMBOL(init_timer_key);
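/*
 * Illustrative usage sketch (hypothetical driver code, not from this
 * file): timers are normally initialized via the timer_setup() wrapper,
 * which ends up calling init_timer_key():
 *
 *	struct mydev {
 *		struct timer_list watchdog;
 *	};
 *
 *	static void mydev_watchdog_fn(struct timer_list *t)
 *	{
 *		struct mydev *dev = from_timer(dev, t, watchdog);
 *		// ... handle the timeout for dev ...
 *	}
 *
 *	timer_setup(&dev->watchdog, mydev_watchdog_fn, 0);
 */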
928
929static inline void detach_timer(struct timer_list *timer, bool clear_pending)
930{
931 struct hlist_node *entry = &timer->entry;
932
933 debug_deactivate(timer);
934
935 __hlist_del(entry);
936 if (clear_pending)
937 entry->pprev = NULL;
938 entry->next = LIST_POISON2;
939}
940
941static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
942 bool clear_pending)
943{
944 unsigned idx = timer_get_idx(timer);
945
946 if (!timer_pending(timer))
947 return 0;
948
949 if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) {
950 __clear_bit(idx, base->pending_map);
951 base->next_expiry_recalc = true;
952 }
953
954 detach_timer(timer, clear_pending);
955 return 1;
956}
957
958static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
959{
960 int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
961 struct timer_base *base;
962
963 base = per_cpu_ptr(&timer_bases[index], cpu);
964
965 /*
966 * If the timer is deferrable and NO_HZ_COMMON is set then we need
967 * to use the deferrable base.
968 */
969 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
970 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
971 return base;
972}
973
974static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
975{
976 int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
977 struct timer_base *base;
978
979 base = this_cpu_ptr(&timer_bases[index]);
980
981 /*
982 * If the timer is deferrable and NO_HZ_COMMON is set then we need
983 * to use the deferrable base.
984 */
985 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
986 base = this_cpu_ptr(&timer_bases[BASE_DEF]);
987 return base;
988}
989
990static inline struct timer_base *get_timer_base(u32 tflags)
991{
992 return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
993}
994
995static inline void __forward_timer_base(struct timer_base *base,
996 unsigned long basej)
997{
998 /*
999 * Check whether we can forward the base. We can only do that when
1000 * @basej is past base->clk otherwise we might rewind base->clk.
1001 */
1002 if (time_before_eq(basej, base->clk))
1003 return;
1004
1005 /*
1006 * If the next expiry value is > jiffies, then we fast forward to
1007 * jiffies otherwise we forward to the next expiry value.
1008 */
1009 if (time_after(base->next_expiry, basej)) {
1010 base->clk = basej;
1011 } else {
1012 if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
1013 return;
1014 base->clk = base->next_expiry;
1015 }
1016
1017}
1018
1019static inline void forward_timer_base(struct timer_base *base)
1020{
1021 __forward_timer_base(base, READ_ONCE(jiffies));
1022}
1023
1024/*
1025 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
1026 * that all timers which are tied to this base are locked, and the base itself
1027 * is locked too.
1028 *
1029 * So __run_timers/migrate_timers can safely modify all timers which could
1030 * be found in the base->vectors array.
1031 *
1032 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
1033 * to wait until the migration is done.
1034 */
1035static struct timer_base *lock_timer_base(struct timer_list *timer,
1036 unsigned long *flags)
1037 __acquires(timer->base->lock)
1038{
1039 for (;;) {
1040 struct timer_base *base;
1041 u32 tf;
1042
1043 /*
1044 * We need to use READ_ONCE() here, otherwise the compiler
1045 * might re-read @tf between the check for TIMER_MIGRATING
1046 * and spin_lock().
1047 */
1048 tf = READ_ONCE(timer->flags);
1049
1050 if (!(tf & TIMER_MIGRATING)) {
1051 base = get_timer_base(tf);
1052 raw_spin_lock_irqsave(&base->lock, *flags);
1053 if (timer->flags == tf)
1054 return base;
1055 raw_spin_unlock_irqrestore(&base->lock, *flags);
1056 }
1057 cpu_relax();
1058 }
1059}
1060
1061#define MOD_TIMER_PENDING_ONLY 0x01
1062#define MOD_TIMER_REDUCE 0x02
1063#define MOD_TIMER_NOTPENDING 0x04
1064
1065static inline int
1066__mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
1067{
1068 unsigned long clk = 0, flags, bucket_expiry;
1069 struct timer_base *base, *new_base;
1070 unsigned int idx = UINT_MAX;
1071 int ret = 0;
1072
1073 debug_assert_init(timer);
1074
1075 /*
1076 * This is a common optimization triggered by the networking code - if
1077 * the timer is re-modified to have the same timeout or ends up in the
1078 * same array bucket then just return:
1079 */
1080 if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) {
1081 /*
1082 * The downside of this optimization is that it can result in
1083 * larger granularity than you would get from adding a new
1084 * timer with this expiry.
1085 */
1086 long diff = timer->expires - expires;
1087
1088 if (!diff)
1089 return 1;
1090 if (options & MOD_TIMER_REDUCE && diff <= 0)
1091 return 1;
1092
1093 /*
1094 * We lock timer base and calculate the bucket index right
1095 * here. If the timer ends up in the same bucket, then we
1096 * just update the expiry time and avoid the whole
1097 * dequeue/enqueue dance.
1098 */
1099 base = lock_timer_base(timer, &flags);
1100 /*
1101 * Has @timer been shutdown? This needs to be evaluated
1102 * while holding base lock to prevent a race against the
1103 * shutdown code.
1104 */
1105 if (!timer->function)
1106 goto out_unlock;
1107
1108 forward_timer_base(base);
1109
1110 if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
1111 time_before_eq(timer->expires, expires)) {
1112 ret = 1;
1113 goto out_unlock;
1114 }
1115
1116 clk = base->clk;
1117 idx = calc_wheel_index(expires, clk, &bucket_expiry);
1118
1119 /*
1120 * Retrieve and compare the array index of the pending
1121 * timer. If it matches set the expiry to the new value so a
1122 * subsequent call will exit in the expires check above.
1123 */
1124 if (idx == timer_get_idx(timer)) {
1125 if (!(options & MOD_TIMER_REDUCE))
1126 timer->expires = expires;
1127 else if (time_after(timer->expires, expires))
1128 timer->expires = expires;
1129 ret = 1;
1130 goto out_unlock;
1131 }
1132 } else {
1133 base = lock_timer_base(timer, &flags);
1134 /*
1135 * Has @timer been shutdown? This needs to be evaluated
1136 * while holding base lock to prevent a race against the
1137 * shutdown code.
1138 */
1139 if (!timer->function)
1140 goto out_unlock;
1141
1142 forward_timer_base(base);
1143 }
1144
1145 ret = detach_if_pending(timer, base, false);
1146 if (!ret && (options & MOD_TIMER_PENDING_ONLY))
1147 goto out_unlock;
1148
1149 new_base = get_timer_this_cpu_base(timer->flags);
1150
1151 if (base != new_base) {
1152 /*
1153 * We are trying to schedule the timer on the new base.
1154 * However we can't change timer's base while it is running,
1155 * otherwise timer_delete_sync() can't detect that the timer's
1156 * handler yet has not finished. This also guarantees that the
1157 * timer is serialized wrt itself.
1158 */
1159 if (likely(base->running_timer != timer)) {
1160 /* See the comment in lock_timer_base() */
1161 timer->flags |= TIMER_MIGRATING;
1162
1163 raw_spin_unlock(&base->lock);
1164 base = new_base;
1165 raw_spin_lock(&base->lock);
1166 WRITE_ONCE(timer->flags,
1167 (timer->flags & ~TIMER_BASEMASK) | base->cpu);
1168 forward_timer_base(base);
1169 }
1170 }
1171
1172 debug_timer_activate(timer);
1173
1174 timer->expires = expires;
1175 /*
1176 * If 'idx' was calculated above and the base time did not advance
1177 * between calculating 'idx' and possibly switching the base, only
1178 * enqueue_timer() is required. Otherwise we need to (re)calculate
1179 * the wheel index via internal_add_timer().
1180 */
1181 if (idx != UINT_MAX && clk == base->clk)
1182 enqueue_timer(base, timer, idx, bucket_expiry);
1183 else
1184 internal_add_timer(base, timer);
1185
1186out_unlock:
1187 raw_spin_unlock_irqrestore(&base->lock, flags);
1188
1189 return ret;
1190}
1191
1192/**
1193 * mod_timer_pending - Modify a pending timer's timeout
1194 * @timer: The pending timer to be modified
1195 * @expires: New absolute timeout in jiffies
1196 *
1197 * mod_timer_pending() is the same for pending timers as mod_timer(), but
1198 * will not activate inactive timers.
1199 *
1200 * If @timer->function == NULL then the start operation is silently
1201 * discarded.
1202 *
1203 * Return:
1204 * * %0 - The timer was inactive and not modified or was in
1205 * shutdown state and the operation was discarded
1206 * * %1 - The timer was active and requeued to expire at @expires
1207 */
1208int mod_timer_pending(struct timer_list *timer, unsigned long expires)
1209{
1210 return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
1211}
1212EXPORT_SYMBOL(mod_timer_pending);
1213
1214/**
1215 * mod_timer - Modify a timer's timeout
1216 * @timer: The timer to be modified
1217 * @expires: New absolute timeout in jiffies
1218 *
1219 * mod_timer(timer, expires) is equivalent to:
1220 *
1221 * del_timer(timer); timer->expires = expires; add_timer(timer);
1222 *
1223 * mod_timer() is more efficient than the above open coded sequence. In
1224 * case that the timer is inactive, the del_timer() part is a NOP. The
1225 * timer is in any case activated with the new expiry time @expires.
1226 *
1227 * Note that if there are multiple unserialized concurrent users of the
1228 * same timer, then mod_timer() is the only safe way to modify the timeout,
1229 * since add_timer() cannot modify an already running timer.
1230 *
1231 * If @timer->function == NULL then the start operation is silently
1232 * discarded. In this case the return value is 0 and meaningless.
1233 *
1234 * Return:
1235 * * %0 - The timer was inactive and started or was in shutdown
1236 * state and the operation was discarded
1237 * * %1 - The timer was active and requeued to expire at @expires or
1238 * the timer was active and not modified because @expires did
1239 * not change the effective expiry time
1240 */
1241int mod_timer(struct timer_list *timer, unsigned long expires)
1242{
1243 return __mod_timer(timer, expires, 0);
1244}
1245EXPORT_SYMBOL(mod_timer);
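/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * the typical "kick the watchdog" pattern, which works whether or not
 * the timer is currently queued:
 *
 *	// (Re)arm the watchdog to fire two seconds from now.
 *	mod_timer(&dev->watchdog, jiffies + 2 * HZ);
 *
 * If the new expiry lands in the same wheel bucket as the old one,
 * __mod_timer() above only updates timer->expires and skips the
 * dequeue/enqueue dance.
 */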
1246
1247/**
1248 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
1249 * @timer: The timer to be modified
1250 * @expires: New absolute timeout in jiffies
1251 *
1252 * timer_reduce() is very similar to mod_timer(), except that it will only
1253 * modify an enqueued timer if that would reduce the expiration time. If
1254 * @timer is not enqueued it starts the timer.
1255 *
1256 * If @timer->function == NULL then the start operation is silently
1257 * discarded.
1258 *
1259 * Return:
1260 * * %0 - The timer was inactive and started or was in shutdown
1261 * state and the operation was discarded
1262 * * %1 - The timer was active and requeued to expire at @expires or
1263 * the timer was active and not modified because @expires
1264 * did not change the effective expiry time such that the
1265 * timer would expire earlier than already scheduled
1266 */
1267int timer_reduce(struct timer_list *timer, unsigned long expires)
1268{
1269 return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
1270}
1271EXPORT_SYMBOL(timer_reduce);
1272
1273/**
1274 * add_timer - Start a timer
1275 * @timer: The timer to be started
1276 *
1277 * Start @timer to expire at @timer->expires in the future. @timer->expires
1278 * is the absolute expiry time measured in 'jiffies'. When the timer expires
1279 * timer->function(timer) will be invoked from soft interrupt context.
1280 *
1281 * The @timer->expires and @timer->function fields must be set prior
1282 * to calling this function.
1283 *
1284 * If @timer->function == NULL then the start operation is silently
1285 * discarded.
1286 *
1287 * If @timer->expires is already in the past @timer will be queued to
1288 * expire at the next timer tick.
1289 *
1290 * This can only operate on an inactive timer. Attempts to invoke this on
1291 * an active timer are rejected with a warning.
1292 */
1293void add_timer(struct timer_list *timer)
1294{
1295 if (WARN_ON_ONCE(timer_pending(timer)))
1296 return;
1297 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
1298}
1299EXPORT_SYMBOL(add_timer);
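/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * add_timer() requires ->expires to be set up front and the timer to be
 * inactive:
 *
 *	dev->watchdog.expires = jiffies + HZ;
 *	add_timer(&dev->watchdog);
 *
 * For a timer which might already be queued, mod_timer() is the safe
 * way to change the expiry instead.
 */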
1300
1301/**
1302 * add_timer_local() - Start a timer on the local CPU
1303 * @timer: The timer to be started
1304 *
1305 * Same as add_timer() except that the timer flag TIMER_PINNED is set.
1306 *
1307 * See add_timer() for further details.
1308 */
1309void add_timer_local(struct timer_list *timer)
1310{
1311 if (WARN_ON_ONCE(timer_pending(timer)))
1312 return;
1313 timer->flags |= TIMER_PINNED;
1314 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
1315}
1316EXPORT_SYMBOL(add_timer_local);
1317
1318/**
1319 * add_timer_global() - Start a timer without TIMER_PINNED flag set
1320 * @timer: The timer to be started
1321 *
1322 * Same as add_timer() except that the timer flag TIMER_PINNED is unset.
1323 *
1324 * See add_timer() for further details.
1325 */
1326void add_timer_global(struct timer_list *timer)
1327{
1328 if (WARN_ON_ONCE(timer_pending(timer)))
1329 return;
1330 timer->flags &= ~TIMER_PINNED;
1331 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
1332}
1333EXPORT_SYMBOL(add_timer_global);
1334
1335/**
1336 * add_timer_on - Start a timer on a particular CPU
1337 * @timer: The timer to be started
1338 * @cpu: The CPU to start it on
1339 *
1340 * Same as add_timer() except that it starts the timer on the given CPU and
1341 * the TIMER_PINNED flag is set. If the timer should not be pinned the next
1342 * time it is armed, add_timer_global() should be used instead as it unsets
1343 * the TIMER_PINNED flag.
1344 *
1345 * See add_timer() for further details.
1346 */
1347void add_timer_on(struct timer_list *timer, int cpu)
1348{
1349 struct timer_base *new_base, *base;
1350 unsigned long flags;
1351
1352 debug_assert_init(timer);
1353
1354 if (WARN_ON_ONCE(timer_pending(timer)))
1355 return;
1356
1357 /* Make sure timer flags have TIMER_PINNED flag set */
1358 timer->flags |= TIMER_PINNED;
1359
1360 new_base = get_timer_cpu_base(timer->flags, cpu);
1361
1362 /*
1363 * If @timer was on a different CPU, it should be migrated with the
1364 * old base locked to prevent other operations proceeding with the
1365 * wrong base locked. See lock_timer_base().
1366 */
1367 base = lock_timer_base(timer, &flags);
1368 /*
1369 * Has @timer been shutdown? This needs to be evaluated while
1370 * holding base lock to prevent a race against the shutdown code.
1371 */
1372 if (!timer->function)
1373 goto out_unlock;
1374
1375 if (base != new_base) {
1376 timer->flags |= TIMER_MIGRATING;
1377
1378 raw_spin_unlock(&base->lock);
1379 base = new_base;
1380 raw_spin_lock(&base->lock);
1381 WRITE_ONCE(timer->flags,
1382 (timer->flags & ~TIMER_BASEMASK) | cpu);
1383 }
1384 forward_timer_base(base);
1385
1386 debug_timer_activate(timer);
1387 internal_add_timer(base, timer);
1388out_unlock:
1389 raw_spin_unlock_irqrestore(&base->lock, flags);
1390}
1391EXPORT_SYMBOL_GPL(add_timer_on);
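/*
 * Illustrative usage sketch (hypothetical code, not from this file):
 * arming a previously initialized per-CPU timer on every online CPU,
 * assuming a DEFINE_PER_CPU(struct timer_list, mydrv_timer) somewhere:
 *
 *	int cpu;
 *
 *	for_each_online_cpu(cpu) {
 *		struct timer_list *t = per_cpu_ptr(&mydrv_timer, cpu);
 *
 *		t->expires = jiffies + HZ;
 *		add_timer_on(t, cpu);
 *	}
 */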
1392
1393/**
1394 * __timer_delete - Internal function: Deactivate a timer
1395 * @timer: The timer to be deactivated
1396 * @shutdown: If true, this indicates that the timer is about to be
1397 * shutdown permanently.
1398 *
1399 * If @shutdown is true then @timer->function is set to NULL under the
1400 * timer base lock which prevents further rearming of the timer. In that
1401 * case any attempt to rearm @timer after this function returns will be
1402 * silently ignored.
1403 *
1404 * Return:
1405 * * %0 - The timer was not pending
1406 * * %1 - The timer was pending and deactivated
1407 */
1408static int __timer_delete(struct timer_list *timer, bool shutdown)
1409{
1410 struct timer_base *base;
1411 unsigned long flags;
1412 int ret = 0;
1413
1414 debug_assert_init(timer);
1415
1416 /*
1417 * If @shutdown is set then the lock has to be taken whether the
1418 * timer is pending or not to protect against a concurrent rearm
1419 * which might hit between the lockless pending check and the lock
1420 * acquisition. By taking the lock it is ensured that such a newly
1421 * enqueued timer is dequeued and cannot end up with
1422 * timer->function == NULL in the expiry code.
1423 *
1424 * If timer->function is currently executed, then this makes sure
1425 * that the callback cannot requeue the timer.
1426 */
1427 if (timer_pending(timer) || shutdown) {
1428 base = lock_timer_base(timer, &flags);
1429 ret = detach_if_pending(timer, base, true);
1430 if (shutdown)
1431 timer->function = NULL;
1432 raw_spin_unlock_irqrestore(&base->lock, flags);
1433 }
1434
1435 return ret;
1436}
1437
1438/**
1439 * timer_delete - Deactivate a timer
1440 * @timer: The timer to be deactivated
1441 *
1442 * The function only deactivates a pending timer, but contrary to
1443 * timer_delete_sync() it does not take into account whether the timer's
1444 * callback function is concurrently executed on a different CPU or not.
1445 * It neither prevents rearming of the timer. If @timer can be rearmed
1446 * concurrently then the return value of this function is meaningless.
1447 *
1448 * Return:
1449 * * %0 - The timer was not pending
1450 * * %1 - The timer was pending and deactivated
1451 */
1452int timer_delete(struct timer_list *timer)
1453{
1454 return __timer_delete(timer, false);
1455}
1456EXPORT_SYMBOL(timer_delete);
1457
1458/**
1459 * timer_shutdown - Deactivate a timer and prevent rearming
1460 * @timer: The timer to be deactivated
1461 *
1462 * The function does not wait for a possibly running timer callback on a
1463 * different CPU but it prevents rearming of the timer. Any attempt to arm
1464 * @timer after this function returns will be silently ignored.
1465 *
1466 * This function is useful for teardown code and should only be used when
1467 * timer_shutdown_sync() cannot be invoked due to locking or context constraints.
1468 *
1469 * Return:
1470 * * %0 - The timer was not pending
1471 * * %1 - The timer was pending
1472 */
1473int timer_shutdown(struct timer_list *timer)
1474{
1475 return __timer_delete(timer, true);
1476}
1477EXPORT_SYMBOL_GPL(timer_shutdown);
1478
1479/**
1480 * __try_to_del_timer_sync - Internal function: Try to deactivate a timer
1481 * @timer: Timer to deactivate
1482 * @shutdown: If true, this indicates that the timer is about to be
1483 * shutdown permanently.
1484 *
1485 * If @shutdown is true then @timer->function is set to NULL under the
1486 * timer base lock which prevents further rearming of the timer. Any
1487 * attempt to rearm @timer after this function returns will be silently
1488 * ignored.
1489 *
1490 * This function cannot guarantee that the timer cannot be rearmed
1491 * right after dropping the base lock if @shutdown is false. That
1492 * needs to be prevented by the calling code if necessary.
1493 *
1494 * Return:
1495 * * %0 - The timer was not pending
1496 * * %1 - The timer was pending and deactivated
1497 * * %-1 - The timer callback function is running on a different CPU
1498 */
1499static int __try_to_del_timer_sync(struct timer_list *timer, bool shutdown)
1500{
1501 struct timer_base *base;
1502 unsigned long flags;
1503 int ret = -1;
1504
1505 debug_assert_init(timer);
1506
1507 base = lock_timer_base(timer, &flags);
1508
1509 if (base->running_timer != timer)
1510 ret = detach_if_pending(timer, base, true);
1511 if (shutdown)
1512 timer->function = NULL;
1513
1514 raw_spin_unlock_irqrestore(&base->lock, flags);
1515
1516 return ret;
1517}
1518
1519/**
1520 * try_to_del_timer_sync - Try to deactivate a timer
1521 * @timer: Timer to deactivate
1522 *
1523 * This function tries to deactivate a timer. On success the timer is not
1524 * queued and the timer callback function is not running on any CPU.
1525 *
1526 * This function does not guarantee that the timer cannot be rearmed right
1527 * after dropping the base lock. That needs to be prevented by the calling
1528 * code if necessary.
1529 *
1530 * Return:
1531 * * %0 - The timer was not pending
1532 * * %1 - The timer was pending and deactivated
1533 * * %-1 - The timer callback function is running on a different CPU
1534 */
1535int try_to_del_timer_sync(struct timer_list *timer)
1536{
1537 return __try_to_del_timer_sync(timer, false);
1538}
1539EXPORT_SYMBOL(try_to_del_timer_sync);
1540
1541#ifdef CONFIG_PREEMPT_RT
1542static __init void timer_base_init_expiry_lock(struct timer_base *base)
1543{
1544 spin_lock_init(&base->expiry_lock);
1545}
1546
1547static inline void timer_base_lock_expiry(struct timer_base *base)
1548{
1549 spin_lock(&base->expiry_lock);
1550}
1551
1552static inline void timer_base_unlock_expiry(struct timer_base *base)
1553{
1554 spin_unlock(&base->expiry_lock);
1555}
1556
1557/*
1558 * The counterpart to del_timer_wait_running().
1559 *
1560 * If there is a waiter for base->expiry_lock, then it was waiting for the
1561 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
1562 * the waiter to acquire the lock and make progress.
1563 */
1564static void timer_sync_wait_running(struct timer_base *base)
1565{
1566 if (atomic_read(&base->timer_waiters)) {
1567 raw_spin_unlock_irq(&base->lock);
1568 spin_unlock(&base->expiry_lock);
1569 spin_lock(&base->expiry_lock);
1570 raw_spin_lock_irq(&base->lock);
1571 }
1572}
1573
1574/*
1575 * This function is called on PREEMPT_RT kernels when the fast path
1576 * deletion of a timer failed because the timer callback function was
1577 * running.
1578 *
1579 * This prevents priority inversion, if the softirq thread on a remote CPU
1580 * got preempted, and it prevents a live lock when the task which tries to
1581 * delete a timer preempted the softirq thread running the timer callback
1582 * function.
1583 */
1584static void del_timer_wait_running(struct timer_list *timer)
1585{
1586 u32 tf;
1587
1588 tf = READ_ONCE(timer->flags);
1589 if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
1590 struct timer_base *base = get_timer_base(tf);
1591
1592 /*
1593 * Mark the base as contended and grab the expiry lock,
1594 * which is held by the softirq across the timer
1595 * callback. Drop the lock immediately so the softirq can
1596 * expire the next timer. In theory the timer could already
1597 * be running again, but that's highly unlikely and just
1598 * causes another wait loop.
1599 */
1600 atomic_inc(&base->timer_waiters);
1601 spin_lock_bh(&base->expiry_lock);
1602 atomic_dec(&base->timer_waiters);
1603 spin_unlock_bh(&base->expiry_lock);
1604 }
1605}
1606#else
1607static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
1608static inline void timer_base_lock_expiry(struct timer_base *base) { }
1609static inline void timer_base_unlock_expiry(struct timer_base *base) { }
1610static inline void timer_sync_wait_running(struct timer_base *base) { }
1611static inline void del_timer_wait_running(struct timer_list *timer) { }
1612#endif
1613
1614/**
1615 * __timer_delete_sync - Internal function: Deactivate a timer and wait
1616 * for the handler to finish.
1617 * @timer: The timer to be deactivated
1618 * @shutdown: If true, @timer->function will be set to NULL under the
1619 * timer base lock which prevents rearming of @timer
1620 *
1621 * If @shutdown is not set the timer can be rearmed later. If the timer can
1622 * be rearmed concurrently, i.e. after dropping the base lock then the
1623 * return value is meaningless.
1624 *
1625 * If @shutdown is set then @timer->function is set to NULL under timer
1626 * base lock which prevents rearming of the timer. Any attempt to rearm
1627 * a shutdown timer is silently ignored.
1628 *
1629 * If the timer should be reused after shutdown it has to be initialized
1630 * again.
1631 *
1632 * Return:
1633 * * %0 - The timer was not pending
1634 * * %1 - The timer was pending and deactivated
1635 */
1636static int __timer_delete_sync(struct timer_list *timer, bool shutdown)
1637{
1638 int ret;
1639
1640#ifdef CONFIG_LOCKDEP
1641 unsigned long flags;
1642
1643 /*
1644 * If lockdep gives a backtrace here, please reference
1645 * the synchronization rules above.
1646 */
1647 local_irq_save(flags);
1648 lock_map_acquire(&timer->lockdep_map);
1649 lock_map_release(&timer->lockdep_map);
1650 local_irq_restore(flags);
1651#endif
1652 /*
1653 * Don't use this function in hardirq context, because it
1654 * could lead to deadlock.
1655 */
1656 WARN_ON(in_hardirq() && !(timer->flags & TIMER_IRQSAFE));
1657
1658 /*
1659 * Must be able to sleep on PREEMPT_RT because of the slowpath in
1660 * del_timer_wait_running().
1661 */
1662 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
1663 lockdep_assert_preemption_enabled();
1664
1665 do {
1666 ret = __try_to_del_timer_sync(timer, shutdown);
1667
1668 if (unlikely(ret < 0)) {
1669 del_timer_wait_running(timer);
1670 cpu_relax();
1671 }
1672 } while (ret < 0);
1673
1674 return ret;
1675}
1676
1677/**
1678 * timer_delete_sync - Deactivate a timer and wait for the handler to finish.
1679 * @timer: The timer to be deactivated
1680 *
1681 * Synchronization rules: Callers must prevent restarting of the timer,
1682 * otherwise this function is meaningless. It must not be called from
1683 * interrupt contexts unless the timer is an irqsafe one. The caller must
1684 * not hold locks which would prevent completion of the timer's callback
1685 * function. The timer's handler must not call add_timer_on(). Upon exit
1686 * the timer is not queued and the handler is not running on any CPU.
1687 *
1688 * For !irqsafe timers, the caller must not hold locks that are held in
1689 * interrupt context. Even if the lock has nothing to do with the timer in
1690 * question. Here's why::
1691 *
1692 * CPU0 CPU1
1693 * ---- ----
1694 * <SOFTIRQ>
1695 * call_timer_fn();
1696 * base->running_timer = mytimer;
1697 * spin_lock_irq(somelock);
1698 * <IRQ>
1699 * spin_lock(somelock);
1700 * timer_delete_sync(mytimer);
1701 * while (base->running_timer == mytimer);
1702 *
1703 * Now timer_delete_sync() will never return and never release somelock.
1704 * The interrupt on the other CPU is waiting to grab somelock but it has
1705 * interrupted the softirq that CPU0 is waiting to finish.
1706 *
1707 * This function cannot guarantee that the timer is not rearmed again by
1708 * some concurrent or preempting code, right after it dropped the base
1709 * lock. If there is the possibility of a concurrent rearm then the return
1710 * value of the function is meaningless.
1711 *
1712 * If such a guarantee is needed, e.g. for teardown situations then use
1713 * timer_shutdown_sync() instead.
1714 *
1715 * Return:
1716 * * %0 - The timer was not pending
1717 * * %1 - The timer was pending and deactivated
1718 */
1719int timer_delete_sync(struct timer_list *timer)
1720{
1721 return __timer_delete_sync(timer, false);
1722}
1723EXPORT_SYMBOL(timer_delete_sync);
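
/*
 * Minimal usage sketch for timer_delete_sync(), illustrative only: the
 * struct and function names (my_device, my_timer_fn, my_remove) are
 * hypothetical and not part of any kernel API.
 *
 *	struct my_device {
 *		struct timer_list timer;
 *		spinlock_t lock;
 *	};
 *
 *	static void my_timer_fn(struct timer_list *t)
 *	{
 *		struct my_device *dev = from_timer(dev, t, timer);
 *
 *		spin_lock(&dev->lock);
 *		// handle the timeout, do not rearm dev->timer
 *		spin_unlock(&dev->lock);
 *	}
 *
 *	static void my_remove(struct my_device *dev)
 *	{
 *		// Process context. dev->lock must not be held here,
 *		// otherwise my_timer_fn() could never complete.
 *		timer_delete_sync(&dev->timer);
 *	}
 */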
1724
1725/**
1726 * timer_shutdown_sync - Shutdown a timer and prevent rearming
1727 * @timer: The timer to be shutdown
1728 *
1729 * When the function returns it is guaranteed that:
1730 * - @timer is not queued
1731 * - The callback function of @timer is not running
1732 * - @timer cannot be enqueued again. Any attempt to rearm
1733 * @timer is silently ignored.
1734 *
1735 * See timer_delete_sync() for synchronization rules.
1736 *
1737 * This function is useful for final teardown of an infrastructure where
1738 * the timer is subject to a circular dependency problem.
1739 *
1740 * A common pattern for this is a timer and a workqueue where the timer can
1741 * schedule work and work can arm the timer. On shutdown the workqueue must
1742 * be destroyed and the timer must be prevented from rearming. Unless the
 * code has conditionals like 'if (mything->in_shutdown)' to prevent that,
 * there is no way to get this correct with timer_delete_sync().
 *
 * timer_shutdown_sync() solves this problem. The correct ordering of
1747 * calls in this case is:
1748 *
1749 * timer_shutdown_sync(&mything->timer);
 *	destroy_workqueue(mything->workqueue);
1751 *
1752 * After this 'mything' can be safely freed.
1753 *
1754 * This obviously implies that the timer is not required to be functional
1755 * for the rest of the shutdown operation.
1756 *
1757 * Return:
1758 * * %0 - The timer was not pending
1759 * * %1 - The timer was pending
1760 */
1761int timer_shutdown_sync(struct timer_list *timer)
1762{
1763 return __timer_delete_sync(timer, true);
1764}
1765EXPORT_SYMBOL_GPL(timer_shutdown_sync);
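
/*
 * Minimal teardown sketch for the timer/workqueue pattern described in the
 * kernel-doc above, illustrative only: 'struct mything', its members and
 * my_shutdown() are hypothetical. The sketch assumes the timer queues work
 * on the workqueue and the work may rearm the timer.
 *
 *	struct mything {
 *		struct timer_list timer;
 *		struct work_struct work;
 *		struct workqueue_struct *workqueue;
 *	};
 *
 *	static void my_shutdown(struct mything *m)
 *	{
 *		// From here on the timer cannot queue new work; any
 *		// attempt to rearm it is silently ignored.
 *		timer_shutdown_sync(&m->timer);
 *
 *		// Flush and free the workqueue. A work item which still
 *		// tries to rearm the timer is harmless now.
 *		destroy_workqueue(m->workqueue);
 *
 *		kfree(m);
 *	}
 */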
1766
1767static void call_timer_fn(struct timer_list *timer,
1768 void (*fn)(struct timer_list *),
1769 unsigned long baseclk)
1770{
1771 int count = preempt_count();
1772
1773#ifdef CONFIG_LOCKDEP
1774 /*
	 * It is permissible to free the timer from inside the function that
	 * is called from it; we need to take that into account for lockdep
	 * too. To avoid bogus "held lock freed"
1778 * warnings as well as problems when looking into
1779 * timer->lockdep_map, make a copy and use that here.
1780 */
1781 struct lockdep_map lockdep_map;
1782
1783 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1784#endif
1785 /*
1786 * Couple the lock chain with the lock chain at
1787 * timer_delete_sync() by acquiring the lock_map around the fn()
1788 * call here and in timer_delete_sync().
1789 */
1790 lock_map_acquire(&lockdep_map);
1791
1792 trace_timer_expire_entry(timer, baseclk);
1793 fn(timer);
1794 trace_timer_expire_exit(timer);
1795
1796 lock_map_release(&lockdep_map);
1797
1798 if (count != preempt_count()) {
1799 WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n",
1800 fn, count, preempt_count());
1801 /*
1802 * Restore the preempt count. That gives us a decent
1803 * chance to survive and extract information. If the
1804 * callback kept a lock held, bad luck, but not worse
1805 * than the BUG() we had.
1806 */
1807 preempt_count_set(count);
1808 }
1809}
1810
1811static void expire_timers(struct timer_base *base, struct hlist_head *head)
1812{
1813 /*
1814 * This value is required only for tracing. base->clk was
1815 * incremented directly before expire_timers was called. But expiry
1816 * is related to the old base->clk value.
1817 */
1818 unsigned long baseclk = base->clk - 1;
1819
1820 while (!hlist_empty(head)) {
1821 struct timer_list *timer;
1822 void (*fn)(struct timer_list *);
1823
1824 timer = hlist_entry(head->first, struct timer_list, entry);
1825
1826 base->running_timer = timer;
1827 detach_timer(timer, true);
1828
1829 fn = timer->function;
1830
1831 if (WARN_ON_ONCE(!fn)) {
1832 /* Should never happen. Emphasis on should! */
1833 base->running_timer = NULL;
1834 continue;
1835 }
1836
1837 if (timer->flags & TIMER_IRQSAFE) {
1838 raw_spin_unlock(&base->lock);
1839 call_timer_fn(timer, fn, baseclk);
1840 raw_spin_lock(&base->lock);
1841 base->running_timer = NULL;
1842 } else {
1843 raw_spin_unlock_irq(&base->lock);
1844 call_timer_fn(timer, fn, baseclk);
1845 raw_spin_lock_irq(&base->lock);
1846 base->running_timer = NULL;
1847 timer_sync_wait_running(base);
1848 }
1849 }
1850}
1851
1852static int collect_expired_timers(struct timer_base *base,
1853 struct hlist_head *heads)
1854{
1855 unsigned long clk = base->clk = base->next_expiry;
1856 struct hlist_head *vec;
1857 int i, levels = 0;
1858 unsigned int idx;
1859
1860 for (i = 0; i < LVL_DEPTH; i++) {
1861 idx = (clk & LVL_MASK) + i * LVL_SIZE;
1862
1863 if (__test_and_clear_bit(idx, base->pending_map)) {
1864 vec = base->vectors + idx;
1865 hlist_move_list(vec, heads++);
1866 levels++;
1867 }
1868 /* Is it time to look at the next level? */
1869 if (clk & LVL_CLK_MASK)
1870 break;
1871 /* Shift clock for the next level granularity */
1872 clk >>= LVL_CLK_SHIFT;
1873 }
1874 return levels;
1875}
1876
1877/*
1878 * Find the next pending bucket of a level. Search from level start (@offset)
 * + @clk upwards and if nothing is found there, search from the start of
 * the level (@offset) up to @offset + @clk.
1881 */
1882static int next_pending_bucket(struct timer_base *base, unsigned offset,
1883 unsigned clk)
1884{
1885 unsigned pos, start = offset + clk;
1886 unsigned end = offset + LVL_SIZE;
1887
1888 pos = find_next_bit(base->pending_map, end, start);
1889 if (pos < end)
1890 return pos - start;
1891
1892 pos = find_next_bit(base->pending_map, start, offset);
1893 return pos < start ? pos + LVL_SIZE - start : -1;
1894}
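
/*
 * Worked example for the search above, assuming the default LVL_SIZE of 64:
 * with @offset = 0 and @clk = 60 the first search covers buckets 60..63. If
 * none of them is pending, the second search covers buckets 0..59 and the
 * returned distance is wrapped by LVL_SIZE, e.g. a pending bucket 2 yields
 * 2 + 64 - 60 = 6.
 */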
1895
1896/*
1897 * Search the first expiring timer in the various clock levels. Caller must
1898 * hold base->lock.
1899 *
1900 * Store next expiry time in base->next_expiry.
1901 */
1902static void next_expiry_recalc(struct timer_base *base)
1903{
1904 unsigned long clk, next, adj;
1905 unsigned lvl, offset = 0;
1906
1907 next = base->clk + NEXT_TIMER_MAX_DELTA;
1908 clk = base->clk;
1909 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
1910 int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
1911 unsigned long lvl_clk = clk & LVL_CLK_MASK;
1912
1913 if (pos >= 0) {
1914 unsigned long tmp = clk + (unsigned long) pos;
1915
1916 tmp <<= LVL_SHIFT(lvl);
1917 if (time_before(tmp, next))
1918 next = tmp;
1919
1920 /*
1921 * If the next expiration happens before we reach
1922 * the next level, no need to check further.
1923 */
1924 if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK))
1925 break;
1926 }
1927 /*
1928 * Clock for the next level. If the current level clock lower
		 * bits are zero, we look at the next level as is. If not, we
1930 * need to advance it by one because that's going to be the
1931 * next expiring bucket in that level. base->clk is the next
1932 * expiring jiffie. So in case of:
1933 *
1934 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1935 * 0 0 0 0 0 0
1936 *
1937 * we have to look at all levels @index 0. With
1938 *
1939 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1940 * 0 0 0 0 0 2
1941 *
1942 * LVL0 has the next expiring bucket @index 2. The upper
1943 * levels have the next expiring bucket @index 1.
1944 *
1945 * In case that the propagation wraps the next level the same
1946 * rules apply:
1947 *
1948 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1949 * 0 0 0 0 F 2
1950 *
1951 * So after looking at LVL0 we get:
1952 *
1953 * LVL5 LVL4 LVL3 LVL2 LVL1
1954 * 0 0 0 1 0
1955 *
1956 * So no propagation from LVL1 to LVL2 because that happened
1957 * with the add already, but then we need to propagate further
1958 * from LVL2 to LVL3.
1959 *
1960 * So the simple check whether the lower bits of the current
1961 * level are 0 or not is sufficient for all cases.
1962 */
1963 adj = lvl_clk ? 1 : 0;
1964 clk >>= LVL_CLK_SHIFT;
1965 clk += adj;
1966 }
1967
1968 base->next_expiry = next;
1969 base->next_expiry_recalc = false;
1970 base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);
1971}
1972
1973#ifdef CONFIG_NO_HZ_COMMON
1974/*
 * Check if the next hrtimer event is before the next timer wheel
1976 * event:
1977 */
1978static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
1979{
1980 u64 nextevt = hrtimer_get_next_event();
1981
1982 /*
	 * If high resolution timers are enabled,
1984 * hrtimer_get_next_event() returns KTIME_MAX.
1985 */
1986 if (expires <= nextevt)
1987 return expires;
1988
1989 /*
1990 * If the next timer is already expired, return the tick base
1991 * time so the tick is fired immediately.
1992 */
1993 if (nextevt <= basem)
1994 return basem;
1995
1996 /*
1997 * Round up to the next jiffie. High resolution timers are
1998 * off, so the hrtimers are expired in the tick and we need to
1999 * make sure that this tick really expires the timer to avoid
2000 * a ping pong of the nohz stop code.
2001 *
2002 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
2003 */
2004 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
2005}
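
/*
 * Worked example for the rounding above, assuming HZ=1000 and therefore
 * TICK_NSEC = 1000000: a next event at nextevt = 2300000 ns becomes
 * DIV_ROUND_UP_ULL(2300000, 1000000) * 1000000 = 3000000 ns, i.e. it is
 * pushed out to the next full tick boundary so that the tick which fires
 * there really expires the timer.
 */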
2006
2007static unsigned long next_timer_interrupt(struct timer_base *base,
2008 unsigned long basej)
2009{
2010 if (base->next_expiry_recalc)
2011 next_expiry_recalc(base);
2012
2013 /*
2014 * Move next_expiry for the empty base into the future to prevent an
2015 * unnecessary raise of the timer softirq when the next_expiry value
2016 * will be reached even if there is no timer pending.
2017 *
2018 * This update is also required to make timer_base::next_expiry values
	 * easily comparable to find out which base holds the first pending timer.
2020 */
2021 if (!base->timers_pending)
2022 base->next_expiry = basej + NEXT_TIMER_MAX_DELTA;
2023
2024 return base->next_expiry;
2025}
2026
2027static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem,
2028 struct timer_base *base_local,
2029 struct timer_base *base_global,
2030 struct timer_events *tevt)
2031{
2032 unsigned long nextevt, nextevt_local, nextevt_global;
2033 bool local_first;
2034
2035 nextevt_local = next_timer_interrupt(base_local, basej);
2036 nextevt_global = next_timer_interrupt(base_global, basej);
2037
2038 local_first = time_before_eq(nextevt_local, nextevt_global);
2039
2040 nextevt = local_first ? nextevt_local : nextevt_global;
2041
2042 /*
	 * If @nextevt is at most one tick away, use @nextevt and store
2044 * it in the local expiry value. The next global event is irrelevant in
2045 * this case and can be left as KTIME_MAX.
2046 */
2047 if (time_before_eq(nextevt, basej + 1)) {
2048 /* If we missed a tick already, force 0 delta */
2049 if (time_before(nextevt, basej))
2050 nextevt = basej;
2051 tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC;
2052
2053 /*
		 * This is required for the remote check only but it doesn't
		 * hurt when it is done for both call sites:
		 *
		 * * The remote callers will only take care of the global timers
		 *   as local timers will be handled by the CPU itself. If
		 *   tevt->global is not updated with an already missed first
		 *   global timer, it might be missed completely.
		 *
		 * * The local callers will ignore tevt->global anyway, when
		 *   @nextevt is at most one tick away.
2064 */
2065 if (!local_first)
2066 tevt->global = tevt->local;
2067 return nextevt;
2068 }
2069
2070 /*
2071 * Update tevt.* values:
2072 *
2073 * If the local queue expires first, then the global event can be
2074 * ignored. If the global queue is empty, nothing to do either.
2075 */
2076 if (!local_first && base_global->timers_pending)
2077 tevt->global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
2078
2079 if (base_local->timers_pending)
2080 tevt->local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
2081
2082 return nextevt;
2083}
2084
2085# ifdef CONFIG_SMP
2086/**
2087 * fetch_next_timer_interrupt_remote() - Store next timers into @tevt
2088 * @basej: base time jiffies
2089 * @basem: base time clock monotonic
2090 * @tevt: Pointer to the storage for the expiry values
2091 * @cpu: Remote CPU
2092 *
2093 * Stores the next pending local and global timer expiry values in the
 * struct pointed to by @tevt. If a queue is empty, the corresponding
 * field is set to KTIME_MAX. If the local event expires before the
 * global event, the global event is set to KTIME_MAX as well.
2097 *
2098 * Caller needs to make sure timer base locks are held (use
2099 * timer_lock_remote_bases() for this purpose).
2100 */
2101void fetch_next_timer_interrupt_remote(unsigned long basej, u64 basem,
2102 struct timer_events *tevt,
2103 unsigned int cpu)
2104{
2105 struct timer_base *base_local, *base_global;
2106
2107 /* Preset local / global events */
2108 tevt->local = tevt->global = KTIME_MAX;
2109
2110 base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
2111 base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
2112
2113 lockdep_assert_held(&base_local->lock);
2114 lockdep_assert_held(&base_global->lock);
2115
2116 fetch_next_timer_interrupt(basej, basem, base_local, base_global, tevt);
2117}
2118
2119/**
2120 * timer_unlock_remote_bases - unlock timer bases of cpu
2121 * @cpu: Remote CPU
2122 *
2123 * Unlocks the remote timer bases.
2124 */
2125void timer_unlock_remote_bases(unsigned int cpu)
2126 __releases(timer_bases[BASE_LOCAL]->lock)
2127 __releases(timer_bases[BASE_GLOBAL]->lock)
2128{
2129 struct timer_base *base_local, *base_global;
2130
2131 base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
2132 base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
2133
2134 raw_spin_unlock(&base_global->lock);
2135 raw_spin_unlock(&base_local->lock);
2136}
2137
2138/**
2139 * timer_lock_remote_bases - lock timer bases of cpu
2140 * @cpu: Remote CPU
2141 *
2142 * Locks the remote timer bases.
2143 */
2144void timer_lock_remote_bases(unsigned int cpu)
2145 __acquires(timer_bases[BASE_LOCAL]->lock)
2146 __acquires(timer_bases[BASE_GLOBAL]->lock)
2147{
2148 struct timer_base *base_local, *base_global;
2149
2150 base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
2151 base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
2152
2153 lockdep_assert_irqs_disabled();
2154
2155 raw_spin_lock(&base_local->lock);
2156 raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
2157}
2158
2159/**
2160 * timer_base_is_idle() - Return whether timer base is set idle
2161 *
 * Returns the is_idle value of the local timer base.
2163 */
2164bool timer_base_is_idle(void)
2165{
2166 return __this_cpu_read(timer_bases[BASE_LOCAL].is_idle);
2167}
2168
2169static void __run_timer_base(struct timer_base *base);
2170
2171/**
2172 * timer_expire_remote() - expire global timers of cpu
2173 * @cpu: Remote CPU
2174 *
2175 * Expire timers of global base of remote CPU.
2176 */
2177void timer_expire_remote(unsigned int cpu)
2178{
2179 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
2180
2181 __run_timer_base(base);
2182}
2183
2184static void timer_use_tmigr(unsigned long basej, u64 basem,
2185 unsigned long *nextevt, bool *tick_stop_path,
2186 bool timer_base_idle, struct timer_events *tevt)
2187{
2188 u64 next_tmigr;
2189
2190 if (timer_base_idle)
2191 next_tmigr = tmigr_cpu_new_timer(tevt->global);
2192 else if (tick_stop_path)
2193 next_tmigr = tmigr_cpu_deactivate(tevt->global);
2194 else
2195 next_tmigr = tmigr_quick_check(tevt->global);
2196
2197 /*
	 * If the CPU is the last one going idle in the timer migration
	 * hierarchy, make sure the CPU will wake up in time to handle remote
	 * timers. next_tmigr == KTIME_MAX if other CPUs are still active.
2201 */
2202 if (next_tmigr < tevt->local) {
2203 u64 tmp;
2204
2205 /* If we missed a tick already, force 0 delta */
2206 if (next_tmigr < basem)
2207 next_tmigr = basem;
2208
2209 tmp = div_u64(next_tmigr - basem, TICK_NSEC);
2210
2211 *nextevt = basej + (unsigned long)tmp;
2212 tevt->local = next_tmigr;
2213 }
2214}
2215# else
2216static void timer_use_tmigr(unsigned long basej, u64 basem,
2217 unsigned long *nextevt, bool *tick_stop_path,
2218 bool timer_base_idle, struct timer_events *tevt)
2219{
2220 /*
2221 * Make sure first event is written into tevt->local to not miss a
2222 * timer on !SMP systems.
2223 */
2224 tevt->local = min_t(u64, tevt->local, tevt->global);
2225}
2226# endif /* CONFIG_SMP */
2227
2228static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
2229 bool *idle)
2230{
2231 struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
2232 struct timer_base *base_local, *base_global;
2233 unsigned long nextevt;
2234 bool idle_is_possible;
2235
2236 /*
2237 * When the CPU is offline, the tick is cancelled and nothing is supposed
2238 * to try to stop it.
2239 */
2240 if (WARN_ON_ONCE(cpu_is_offline(smp_processor_id()))) {
2241 if (idle)
2242 *idle = true;
2243 return tevt.local;
2244 }
2245
2246 base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
2247 base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
2248
2249 raw_spin_lock(&base_local->lock);
2250 raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
2251
2252 nextevt = fetch_next_timer_interrupt(basej, basem, base_local,
2253 base_global, &tevt);
2254
2255 /*
	 * If the next event is only one jiffie ahead, there is no need to call
	 * timer migration hierarchy related functions. The value for the next
	 * global timer in the @tevt struct is then KTIME_MAX. This is also
	 * true when the timer base is idle.
2260 *
2261 * The proper timer migration hierarchy function depends on the callsite
2262 * and whether timer base is idle or not. @nextevt will be updated when
2263 * this CPU needs to handle the first timer migration hierarchy
2264 * event. See timer_use_tmigr() for detailed information.
2265 */
2266 idle_is_possible = time_after(nextevt, basej + 1);
2267 if (idle_is_possible)
2268 timer_use_tmigr(basej, basem, &nextevt, idle,
2269 base_local->is_idle, &tevt);
2270
2271 /*
2272 * We have a fresh next event. Check whether we can forward the
2273 * base.
2274 */
2275 __forward_timer_base(base_local, basej);
2276 __forward_timer_base(base_global, basej);
2277
2278 /*
2279 * Set base->is_idle only when caller is timer_base_try_to_set_idle()
2280 */
2281 if (idle) {
2282 /*
2283 * Bases are idle if the next event is more than a tick
2284 * away. Caution: @nextevt could have changed by enqueueing a
2285 * global timer into timer migration hierarchy. Therefore a new
2286 * check is required here.
2287 *
2288 * If the base is marked idle then any timer add operation must
2289 * forward the base clk itself to keep granularity small. This
2290 * idle logic is only maintained for the BASE_LOCAL and
		 * BASE_GLOBAL bases; deferrable timers may still see large
2292 * granularity skew (by design).
2293 */
2294 if (!base_local->is_idle && time_after(nextevt, basej + 1)) {
2295 base_local->is_idle = true;
2296 /*
2297 * Global timers queued locally while running in a task
2298 * in nohz_full mode need a self-IPI to kick reprogramming
2299 * in IRQ tail.
2300 */
2301 if (tick_nohz_full_cpu(base_local->cpu))
2302 base_global->is_idle = true;
2303 trace_timer_base_idle(true, base_local->cpu);
2304 }
2305 *idle = base_local->is_idle;
2306
2307 /*
2308 * When timer base is not set idle, undo the effect of
2309 * tmigr_cpu_deactivate() to prevent inconsistent states - active
2310 * timer base but inactive timer migration hierarchy.
2311 *
2312 * When timer base was already marked idle, nothing will be
2313 * changed here.
2314 */
2315 if (!base_local->is_idle && idle_is_possible)
2316 tmigr_cpu_activate();
2317 }
2318
2319 raw_spin_unlock(&base_global->lock);
2320 raw_spin_unlock(&base_local->lock);
2321
2322 return cmp_next_hrtimer_event(basem, tevt.local);
2323}
2324
2325/**
2326 * get_next_timer_interrupt() - return the time (clock mono) of the next timer
2327 * @basej: base time jiffies
2328 * @basem: base time clock monotonic
2329 *
2330 * Returns the tick aligned clock monotonic time of the next pending timer or
2331 * KTIME_MAX if no timer is pending. If timer of global base was queued into
 * KTIME_MAX if no timer is pending. If a timer of the global base was queued
 * into the timer migration hierarchy, the first global timer is not taken
 * into account. If this CPU was the last one of the timer migration hierarchy
 * going idle, the first global event is taken into account.
2336u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
2337{
2338 return __get_next_timer_interrupt(basej, basem, NULL);
2339}
2340
2341/**
2342 * timer_base_try_to_set_idle() - Try to set the idle state of the timer bases
2343 * @basej: base time jiffies
2344 * @basem: base time clock monotonic
2345 * @idle: pointer to store the value of timer_base->is_idle on return;
 *	  *idle indicates whether the tick was already stopped
2347 *
2348 * Returns the tick aligned clock monotonic time of the next pending timer or
2349 * KTIME_MAX if no timer is pending. When tick was already stopped KTIME_MAX is
2350 * returned as well.
2351 */
2352u64 timer_base_try_to_set_idle(unsigned long basej, u64 basem, bool *idle)
2353{
2354 if (*idle)
2355 return KTIME_MAX;
2356
2357 return __get_next_timer_interrupt(basej, basem, idle);
2358}
2359
2360/**
2361 * timer_clear_idle - Clear the idle state of the timer base
2362 *
2363 * Called with interrupts disabled
2364 */
2365void timer_clear_idle(void)
2366{
2367 /*
2368 * We do this unlocked. The worst outcome is a remote pinned timer
2369 * enqueue sending a pointless IPI, but taking the lock would just
2370 * make the window for sending the IPI a few instructions smaller
	 * at the cost of taking the lock in the exit from idle
2372 * path. Required for BASE_LOCAL only.
2373 */
2374 __this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false);
2375 if (tick_nohz_full_cpu(smp_processor_id()))
2376 __this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false);
2377 trace_timer_base_idle(false, smp_processor_id());
2378
2379 /* Activate without holding the timer_base->lock */
2380 tmigr_cpu_activate();
2381}
2382#endif
2383
2384/**
2385 * __run_timers - run all expired timers (if any) on this CPU.
2386 * @base: the timer vector to be processed.
2387 */
2388static inline void __run_timers(struct timer_base *base)
2389{
2390 struct hlist_head heads[LVL_DEPTH];
2391 int levels;
2392
2393 lockdep_assert_held(&base->lock);
2394
2395 if (base->running_timer)
2396 return;
2397
2398 while (time_after_eq(jiffies, base->clk) &&
2399 time_after_eq(jiffies, base->next_expiry)) {
2400 levels = collect_expired_timers(base, heads);
2401 /*
2402 * The two possible reasons for not finding any expired
2403 * timer at this clk are that all matching timers have been
2404 * dequeued or no timer has been queued since
2405 * base::next_expiry was set to base::clk +
2406 * NEXT_TIMER_MAX_DELTA.
2407 */
		WARN_ON_ONCE(!levels && !base->next_expiry_recalc &&
			     base->timers_pending);
2410 /*
2411 * While executing timers, base->clk is set 1 offset ahead of
2412 * jiffies to avoid endless requeuing to current jiffies.
2413 */
2414 base->clk++;
2415 next_expiry_recalc(base);
2416
2417 while (levels--)
2418 expire_timers(base, heads + levels);
2419 }
2420}
2421
2422static void __run_timer_base(struct timer_base *base)
2423{
2424 if (time_before(jiffies, base->next_expiry))
2425 return;
2426
2427 timer_base_lock_expiry(base);
2428 raw_spin_lock_irq(&base->lock);
2429 __run_timers(base);
2430 raw_spin_unlock_irq(&base->lock);
2431 timer_base_unlock_expiry(base);
2432}
2433
2434static void run_timer_base(int index)
2435{
2436 struct timer_base *base = this_cpu_ptr(&timer_bases[index]);
2437
2438 __run_timer_base(base);
2439}
2440
2441/*
 * This function runs expired timers in bottom half (softirq) context.
2443 */
2444static __latent_entropy void run_timer_softirq(struct softirq_action *h)
2445{
2446 run_timer_base(BASE_LOCAL);
2447 if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) {
2448 run_timer_base(BASE_GLOBAL);
2449 run_timer_base(BASE_DEF);
2450
2451 if (is_timers_nohz_active())
2452 tmigr_handle_remote();
2453 }
2454}
2455
2456/*
2457 * Called by the local, per-CPU timer interrupt on SMP.
2458 */
2459static void run_local_timers(void)
2460{
2461 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
2462
2463 hrtimer_run_queues();
2464
2465 for (int i = 0; i < NR_BASES; i++, base++) {
2466 /* Raise the softirq only if required. */
2467 if (time_after_eq(jiffies, base->next_expiry) ||
2468 (i == BASE_DEF && tmigr_requires_handle_remote())) {
2469 raise_softirq(TIMER_SOFTIRQ);
2470 return;
2471 }
2472 }
2473}
2474
2475/*
2476 * Called from the timer interrupt handler to charge one tick to the current
2477 * process. user_tick is 1 if the tick is user time, 0 for system.
2478 */
2479void update_process_times(int user_tick)
2480{
2481 struct task_struct *p = current;
2482
2483 /* Note: this timer irq context must be accounted for as well. */
2484 account_process_tick(p, user_tick);
2485 run_local_timers();
2486 rcu_sched_clock_irq(user_tick);
2487#ifdef CONFIG_IRQ_WORK
2488 if (in_irq())
2489 irq_work_tick();
2490#endif
2491 scheduler_tick();
2492 if (IS_ENABLED(CONFIG_POSIX_TIMERS))
2493 run_posix_cpu_timers();
2494}
2495
2496/*
2497 * Since schedule_timeout()'s timer is defined on the stack, it must store
2498 * the target task on the stack as well.
2499 */
2500struct process_timer {
2501 struct timer_list timer;
2502 struct task_struct *task;
2503};
2504
2505static void process_timeout(struct timer_list *t)
2506{
2507 struct process_timer *timeout = from_timer(timeout, t, timer);
2508
2509 wake_up_process(timeout->task);
2510}
2511
2512/**
2513 * schedule_timeout - sleep until timeout
2514 * @timeout: timeout value in jiffies
2515 *
2516 * Make the current task sleep until @timeout jiffies have elapsed.
2517 * The function behavior depends on the current task state
2518 * (see also set_current_state() description):
2519 *
2520 * %TASK_RUNNING - the scheduler is called, but the task does not sleep
2521 * at all. That happens because sched_submit_work() does nothing for
2522 * tasks in %TASK_RUNNING state.
2523 *
2524 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
2525 * pass before the routine returns unless the current task is explicitly
2526 * woken up, (e.g. by wake_up_process()).
2527 *
2528 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2529 * delivered to the current task or the current task is explicitly woken
2530 * up.
2531 *
2532 * The current task state is guaranteed to be %TASK_RUNNING when this
2533 * routine returns.
2534 *
2535 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
2536 * the CPU away without a bound on the timeout. In this case the return
2537 * value will be %MAX_SCHEDULE_TIMEOUT.
2538 *
2539 * Returns 0 when the timer has expired otherwise the remaining time in
2540 * jiffies will be returned. In all cases the return value is guaranteed
2541 * to be non-negative.
2542 */
2543signed long __sched schedule_timeout(signed long timeout)
2544{
2545 struct process_timer timer;
2546 unsigned long expire;
2547
	switch (timeout) {
2550 case MAX_SCHEDULE_TIMEOUT:
2551 /*
		 * These two special cases are useful for the caller's
		 * convenience. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
2557 */
2558 schedule();
2559 goto out;
2560 default:
2561 /*
		 * Another bit of PARANOIA. Note that the retval will be
		 * 0 since no piece of the kernel is supposed to check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
2567 */
2568 if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout value %lx\n",
			       timeout);
2571 dump_stack();
2572 __set_current_state(TASK_RUNNING);
2573 goto out;
2574 }
2575 }
2576
2577 expire = timeout + jiffies;
2578
2579 timer.task = current;
2580 timer_setup_on_stack(&timer.timer, process_timeout, 0);
2581 __mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
2582 schedule();
2583 del_timer_sync(&timer.timer);
2584
2585 /* Remove the timer from the object tracker */
2586 destroy_timer_on_stack(&timer.timer);
2587
2588 timeout = expire - jiffies;
2589
2590 out:
2591 return timeout < 0 ? 0 : timeout;
2592}
2593EXPORT_SYMBOL(schedule_timeout);
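
/*
 * Minimal usage sketch, illustrative only ('remaining' and
 * handle_early_wakeup() are hypothetical). The task state has to be set
 * before the call, otherwise schedule_timeout() does not sleep at all:
 *
 *	// Sleep for up to 5 seconds, but wake up early on a signal.
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(5 * HZ);
 *	if (remaining)
 *		// Woken early; 'remaining' jiffies of the timeout were left.
 *		handle_early_wakeup();
 */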
2594
2595/*
2596 * We can use __set_current_state() here because schedule_timeout() calls
2597 * schedule() unconditionally.
2598 */
2599signed long __sched schedule_timeout_interruptible(signed long timeout)
2600{
2601 __set_current_state(TASK_INTERRUPTIBLE);
2602 return schedule_timeout(timeout);
2603}
2604EXPORT_SYMBOL(schedule_timeout_interruptible);
2605
2606signed long __sched schedule_timeout_killable(signed long timeout)
2607{
2608 __set_current_state(TASK_KILLABLE);
2609 return schedule_timeout(timeout);
2610}
2611EXPORT_SYMBOL(schedule_timeout_killable);
2612
2613signed long __sched schedule_timeout_uninterruptible(signed long timeout)
2614{
2615 __set_current_state(TASK_UNINTERRUPTIBLE);
2616 return schedule_timeout(timeout);
2617}
2618EXPORT_SYMBOL(schedule_timeout_uninterruptible);
2619
2620/*
2621 * Like schedule_timeout_uninterruptible(), except this task will not contribute
2622 * to load average.
2623 */
2624signed long __sched schedule_timeout_idle(signed long timeout)
2625{
2626 __set_current_state(TASK_IDLE);
2627 return schedule_timeout(timeout);
2628}
2629EXPORT_SYMBOL(schedule_timeout_idle);
2630
2631#ifdef CONFIG_HOTPLUG_CPU
2632static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
2633{
2634 struct timer_list *timer;
2635 int cpu = new_base->cpu;
2636
2637 while (!hlist_empty(head)) {
2638 timer = hlist_entry(head->first, struct timer_list, entry);
2639 detach_timer(timer, false);
2640 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
2641 internal_add_timer(new_base, timer);
2642 }
2643}
2644
2645int timers_prepare_cpu(unsigned int cpu)
2646{
2647 struct timer_base *base;
2648 int b;
2649
2650 for (b = 0; b < NR_BASES; b++) {
2651 base = per_cpu_ptr(&timer_bases[b], cpu);
2652 base->clk = jiffies;
2653 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
2654 base->next_expiry_recalc = false;
2655 base->timers_pending = false;
2656 base->is_idle = false;
2657 }
2658 return 0;
2659}
2660
2661int timers_dead_cpu(unsigned int cpu)
2662{
2663 struct timer_base *old_base;
2664 struct timer_base *new_base;
2665 int b, i;
2666
2667 for (b = 0; b < NR_BASES; b++) {
2668 old_base = per_cpu_ptr(&timer_bases[b], cpu);
2669 new_base = get_cpu_ptr(&timer_bases[b]);
2670 /*
2671 * The caller is globally serialized and nobody else
		 * takes two locks at once, so deadlock is not possible.
2673 */
2674 raw_spin_lock_irq(&new_base->lock);
2675 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
2676
2677 /*
		 * The current CPU's base clock might be stale. Update it
2679 * before moving the timers over.
2680 */
2681 forward_timer_base(new_base);
2682
2683 WARN_ON_ONCE(old_base->running_timer);
2684 old_base->running_timer = NULL;
2685
2686 for (i = 0; i < WHEEL_SIZE; i++)
2687 migrate_timer_list(new_base, old_base->vectors + i);
2688
2689 raw_spin_unlock(&old_base->lock);
2690 raw_spin_unlock_irq(&new_base->lock);
2691 put_cpu_ptr(&timer_bases);
2692 }
2693 return 0;
2694}
2695
2696#endif /* CONFIG_HOTPLUG_CPU */
2697
2698static void __init init_timer_cpu(int cpu)
2699{
2700 struct timer_base *base;
2701 int i;
2702
2703 for (i = 0; i < NR_BASES; i++) {
2704 base = per_cpu_ptr(&timer_bases[i], cpu);
2705 base->cpu = cpu;
2706 raw_spin_lock_init(&base->lock);
2707 base->clk = jiffies;
2708 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
2709 timer_base_init_expiry_lock(base);
2710 }
2711}
2712
2713static void __init init_timer_cpus(void)
2714{
2715 int cpu;
2716
2717 for_each_possible_cpu(cpu)
2718 init_timer_cpu(cpu);
2719}
2720
2721void __init init_timers(void)
2722{
2723 init_timer_cpus();
2724 posix_cputimers_init_work();
2725 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
2726}
2727
2728/**
2729 * msleep - sleep safely even with waitqueue interruptions
2730 * @msecs: Time in milliseconds to sleep for
2731 */
2732void msleep(unsigned int msecs)
2733{
2734 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
2735
2736 while (timeout)
2737 timeout = schedule_timeout_uninterruptible(timeout);
2738}
2739
2740EXPORT_SYMBOL(msleep);
2741
2742/**
2743 * msleep_interruptible - sleep waiting for signals
2744 * @msecs: Time in milliseconds to sleep for
2745 */
2746unsigned long msleep_interruptible(unsigned int msecs)
2747{
2748 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
2749
2750 while (timeout && !signal_pending(current))
2751 timeout = schedule_timeout_interruptible(timeout);
2752 return jiffies_to_msecs(timeout);
2753}
2754
2755EXPORT_SYMBOL(msleep_interruptible);
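
/*
 * Minimal usage sketch, illustrative only: msleep() for delays which must
 * not be cut short, msleep_interruptible() when a pending signal should
 * abort the wait early.
 *
 *	msleep(20);			// sleep for at least ~20ms
 *
 *	if (msleep_interruptible(1000))
 *		// A signal is pending; the return value is the remaining
 *		// time of the requested sleep in milliseconds.
 *		return -ERESTARTSYS;
 */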
2756
2757/**
2758 * usleep_range_state - Sleep for an approximate time in a given state
2759 * @min: Minimum time in usecs to sleep
2760 * @max: Maximum time in usecs to sleep
 * @state: State in which the current task will sleep
2762 *
2763 * In non-atomic context where the exact wakeup time is flexible, use
2764 * usleep_range_state() instead of udelay(). The sleep improves responsiveness
2765 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
2766 * power usage by allowing hrtimers to take advantage of an already-
2767 * scheduled interrupt instead of scheduling a new one just for this sleep.
2768 */
2769void __sched usleep_range_state(unsigned long min, unsigned long max,
2770 unsigned int state)
2771{
2772 ktime_t exp = ktime_add_us(ktime_get(), min);
2773 u64 delta = (u64)(max - min) * NSEC_PER_USEC;
2774
2775 for (;;) {
2776 __set_current_state(state);
2777 /* Do not return before the requested sleep time has elapsed */
2778 if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
2779 break;
2780 }
2781}
2782EXPORT_SYMBOL(usleep_range_state);
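
/*
 * Minimal usage sketch, illustrative only: drivers normally use the
 * usleep_range() wrapper from <linux/delay.h>, which calls
 * usleep_range_state() with TASK_UNINTERRUPTIBLE.
 *
 *	// Give the hardware 100-200us to settle; the slack allows the
 *	// wakeup to be coalesced with an already scheduled hrtimer.
 *	usleep_range(100, 200);
 */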
61/*
62 * per-CPU timer vector definitions:
63 */
64#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
65#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
66#define TVN_SIZE (1 << TVN_BITS)
67#define TVR_SIZE (1 << TVR_BITS)
68#define TVN_MASK (TVN_SIZE - 1)
69#define TVR_MASK (TVR_SIZE - 1)
70#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
71
72struct tvec {
73 struct hlist_head vec[TVN_SIZE];
74};
75
76struct tvec_root {
77 struct hlist_head vec[TVR_SIZE];
78};
79
80struct tvec_base {
81 spinlock_t lock;
82 struct timer_list *running_timer;
83 unsigned long timer_jiffies;
84 unsigned long next_timer;
85 unsigned long active_timers;
86 unsigned long all_timers;
87 int cpu;
88 bool migration_enabled;
89 bool nohz_active;
90 struct tvec_root tv1;
91 struct tvec tv2;
92 struct tvec tv3;
93 struct tvec tv4;
94 struct tvec tv5;
95} ____cacheline_aligned;
96
97
98static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
99
100#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
101unsigned int sysctl_timer_migration = 1;
102
103void timers_update_migration(bool update_nohz)
104{
105 bool on = sysctl_timer_migration && tick_nohz_active;
106 unsigned int cpu;
107
108 /* Avoid the loop, if nothing to update */
109 if (this_cpu_read(tvec_bases.migration_enabled) == on)
110 return;
111
112 for_each_possible_cpu(cpu) {
113 per_cpu(tvec_bases.migration_enabled, cpu) = on;
114 per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
115 if (!update_nohz)
116 continue;
117 per_cpu(tvec_bases.nohz_active, cpu) = true;
118 per_cpu(hrtimer_bases.nohz_active, cpu) = true;
119 }
120}
121
122int timer_migration_handler(struct ctl_table *table, int write,
123 void __user *buffer, size_t *lenp,
124 loff_t *ppos)
125{
126 static DEFINE_MUTEX(mutex);
127 int ret;
128
129 mutex_lock(&mutex);
130 ret = proc_dointvec(table, write, buffer, lenp, ppos);
131 if (!ret && write)
132 timers_update_migration(false);
133 mutex_unlock(&mutex);
134 return ret;
135}
136
137static inline struct tvec_base *get_target_base(struct tvec_base *base,
138 int pinned)
139{
140 if (pinned || !base->migration_enabled)
141 return this_cpu_ptr(&tvec_bases);
142 return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
143}
144#else
145static inline struct tvec_base *get_target_base(struct tvec_base *base,
146 int pinned)
147{
148 return this_cpu_ptr(&tvec_bases);
149}
150#endif
151
152static unsigned long round_jiffies_common(unsigned long j, int cpu,
153 bool force_up)
154{
155 int rem;
156 unsigned long original = j;
157
158 /*
159 * We don't want all cpus firing their timers at once hitting the
160 * same lock or cachelines, so we skew each extra cpu with an extra
161 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
162 * already did this.
163 * The skew is done by adding 3*cpunr, then round, then subtract this
164 * extra offset again.
165 */
166 j += cpu * 3;
167
168 rem = j % HZ;
169
170 /*
171 * If the target jiffie is just after a whole second (which can happen
172 * due to delays of the timer irq, long irq off times etc etc) then
173 * we should round down to the whole second, not up. Use 1/4th second
174 * as cutoff for this rounding as an extreme upper bound for this.
175 * But never round down if @force_up is set.
176 */
177 if (rem < HZ/4 && !force_up) /* round down */
178 j = j - rem;
179 else /* round up */
180 j = j - rem + HZ;
181
182 /* now that we have rounded, subtract the extra skew again */
183 j -= cpu * 3;
184
185 /*
186 * Make sure j is still in the future. Otherwise return the
187 * unmodified value.
188 */
189 return time_is_after_jiffies(j) ? j : original;
190}
191
192/**
193 * __round_jiffies - function to round jiffies to a full second
194 * @j: the time in (absolute) jiffies that should be rounded
195 * @cpu: the processor number on which the timeout will happen
196 *
197 * __round_jiffies() rounds an absolute time in the future (in jiffies)
198 * up or down to (approximately) full seconds. This is useful for timers
199 * for which the exact time they fire does not matter too much, as long as
200 * they fire approximately every X seconds.
201 *
202 * By rounding these timers to whole seconds, all such timers will fire
203 * at the same time, rather than at various times spread out. The goal
204 * of this is to have the CPU wake up less, which saves power.
205 *
206 * The exact rounding is skewed for each processor to avoid all
207 * processors firing at the exact same time, which could lead
208 * to lock contention or spurious cache line bouncing.
209 *
210 * The return value is the rounded version of the @j parameter.
211 */
212unsigned long __round_jiffies(unsigned long j, int cpu)
213{
214 return round_jiffies_common(j, cpu, false);
215}
216EXPORT_SYMBOL_GPL(__round_jiffies);
217
218/**
219 * __round_jiffies_relative - function to round jiffies to a full second
220 * @j: the time in (relative) jiffies that should be rounded
221 * @cpu: the processor number on which the timeout will happen
222 *
223 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
224 * up or down to (approximately) full seconds. This is useful for timers
225 * for which the exact time they fire does not matter too much, as long as
226 * they fire approximately every X seconds.
227 *
228 * By rounding these timers to whole seconds, all such timers will fire
229 * at the same time, rather than at various times spread out. The goal
230 * of this is to have the CPU wake up less, which saves power.
231 *
232 * The exact rounding is skewed for each processor to avoid all
233 * processors firing at the exact same time, which could lead
234 * to lock contention or spurious cache line bouncing.
235 *
236 * The return value is the rounded version of the @j parameter.
237 */
238unsigned long __round_jiffies_relative(unsigned long j, int cpu)
239{
240 unsigned long j0 = jiffies;
241
242 /* Use j0 because jiffies might change while we run */
243 return round_jiffies_common(j + j0, cpu, false) - j0;
244}
245EXPORT_SYMBOL_GPL(__round_jiffies_relative);
246
247/**
248 * round_jiffies - function to round jiffies to a full second
249 * @j: the time in (absolute) jiffies that should be rounded
250 *
251 * round_jiffies() rounds an absolute time in the future (in jiffies)
252 * up or down to (approximately) full seconds. This is useful for timers
253 * for which the exact time they fire does not matter too much, as long as
254 * they fire approximately every X seconds.
255 *
256 * By rounding these timers to whole seconds, all such timers will fire
257 * at the same time, rather than at various times spread out. The goal
258 * of this is to have the CPU wake up less, which saves power.
259 *
260 * The return value is the rounded version of the @j parameter.
261 */
262unsigned long round_jiffies(unsigned long j)
263{
264 return round_jiffies_common(j, raw_smp_processor_id(), false);
265}
266EXPORT_SYMBOL_GPL(round_jiffies);
267
268/**
269 * round_jiffies_relative - function to round jiffies to a full second
270 * @j: the time in (relative) jiffies that should be rounded
271 *
272 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
273 * up or down to (approximately) full seconds. This is useful for timers
274 * for which the exact time they fire does not matter too much, as long as
275 * they fire approximately every X seconds.
276 *
277 * By rounding these timers to whole seconds, all such timers will fire
278 * at the same time, rather than at various times spread out. The goal
279 * of this is to have the CPU wake up less, which saves power.
280 *
281 * The return value is the rounded version of the @j parameter.
282 */
283unsigned long round_jiffies_relative(unsigned long j)
284{
285 return __round_jiffies_relative(j, raw_smp_processor_id());
286}
287EXPORT_SYMBOL_GPL(round_jiffies_relative);
288
289/**
290 * __round_jiffies_up - function to round jiffies up to a full second
291 * @j: the time in (absolute) jiffies that should be rounded
292 * @cpu: the processor number on which the timeout will happen
293 *
294 * This is the same as __round_jiffies() except that it will never
295 * round down. This is useful for timeouts for which the exact time
296 * of firing does not matter too much, as long as they don't fire too
297 * early.
298 */
299unsigned long __round_jiffies_up(unsigned long j, int cpu)
300{
301 return round_jiffies_common(j, cpu, true);
302}
303EXPORT_SYMBOL_GPL(__round_jiffies_up);
304
305/**
306 * __round_jiffies_up_relative - function to round jiffies up to a full second
307 * @j: the time in (relative) jiffies that should be rounded
308 * @cpu: the processor number on which the timeout will happen
309 *
310 * This is the same as __round_jiffies_relative() except that it will never
311 * round down. This is useful for timeouts for which the exact time
312 * of firing does not matter too much, as long as they don't fire too
313 * early.
314 */
315unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
316{
317 unsigned long j0 = jiffies;
318
319 /* Use j0 because jiffies might change while we run */
320 return round_jiffies_common(j + j0, cpu, true) - j0;
321}
322EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
323
324/**
325 * round_jiffies_up - function to round jiffies up to a full second
326 * @j: the time in (absolute) jiffies that should be rounded
327 *
328 * This is the same as round_jiffies() except that it will never
329 * round down. This is useful for timeouts for which the exact time
330 * of firing does not matter too much, as long as they don't fire too
331 * early.
332 */
333unsigned long round_jiffies_up(unsigned long j)
334{
335 return round_jiffies_common(j, raw_smp_processor_id(), true);
336}
337EXPORT_SYMBOL_GPL(round_jiffies_up);
338
339/**
340 * round_jiffies_up_relative - function to round jiffies up to a full second
341 * @j: the time in (relative) jiffies that should be rounded
342 *
343 * This is the same as round_jiffies_relative() except that it will never
344 * round down. This is useful for timeouts for which the exact time
345 * of firing does not matter too much, as long as they don't fire too
346 * early.
347 */
348unsigned long round_jiffies_up_relative(unsigned long j)
349{
350 return __round_jiffies_up_relative(j, raw_smp_processor_id());
351}
352EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
353
354/**
355 * set_timer_slack - set the allowed slack for a timer
356 * @timer: the timer to be modified
357 * @slack_hz: the amount of time (in jiffies) allowed for rounding
358 *
359 * Set the amount of time, in jiffies, that a certain timer has
360 * in terms of slack. By setting this value, the timer subsystem
361 * will schedule the actual timer somewhere between
362 * the time mod_timer() asks for, and that time plus the slack.
363 *
364 * By setting the slack to -1, a percentage of the delay is used
365 * instead.
366 */
367void set_timer_slack(struct timer_list *timer, int slack_hz)
368{
369 timer->slack = slack_hz;
370}
371EXPORT_SYMBOL_GPL(set_timer_slack);
372
373static void
374__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
375{
376 unsigned long expires = timer->expires;
377 unsigned long idx = expires - base->timer_jiffies;
378 struct hlist_head *vec;
379
380 if (idx < TVR_SIZE) {
381 int i = expires & TVR_MASK;
382 vec = base->tv1.vec + i;
383 } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
384 int i = (expires >> TVR_BITS) & TVN_MASK;
385 vec = base->tv2.vec + i;
386 } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
387 int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
388 vec = base->tv3.vec + i;
389 } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
390 int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
391 vec = base->tv4.vec + i;
392 } else if ((signed long) idx < 0) {
393 /*
394 * Can happen if you add a timer with expires == jiffies,
395 * or you set a timer to go off in the past
396 */
397 vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
398 } else {
399 int i;
400 /* If the timeout is larger than MAX_TVAL (on 64-bit
401 * architectures or with CONFIG_BASE_SMALL=1) then we
402 * use the maximum timeout.
403 */
404 if (idx > MAX_TVAL) {
405 idx = MAX_TVAL;
406 expires = idx + base->timer_jiffies;
407 }
408 i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
409 vec = base->tv5.vec + i;
410 }
411
412 hlist_add_head(&timer->entry, vec);
413}
414
415static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
416{
417 /* Advance base->jiffies, if the base is empty */
418 if (!base->all_timers++)
419 base->timer_jiffies = jiffies;
420
421 __internal_add_timer(base, timer);
422 /*
423 * Update base->active_timers and base->next_timer
424 */
425 if (!(timer->flags & TIMER_DEFERRABLE)) {
426 if (!base->active_timers++ ||
427 time_before(timer->expires, base->next_timer))
428 base->next_timer = timer->expires;
429 }
430
431 /*
432 * Check whether the other CPU is in dynticks mode and needs
433 * to be triggered to reevaluate the timer wheel.
434 * We are protected against the other CPU fiddling
435 * with the timer by holding the timer base lock. This also
436 * makes sure that a CPU on the way to stop its tick can not
437 * evaluate the timer wheel.
438 *
439 * Spare the IPI for deferrable timers on idle targets though.
440 * The next busy ticks will take care of it. Except full dynticks
441 * require special care against races with idle_cpu(), lets deal
442 * with that later.
443 */
444 if (base->nohz_active) {
445 if (!(timer->flags & TIMER_DEFERRABLE) ||
446 tick_nohz_full_cpu(base->cpu))
447 wake_up_nohz_cpu(base->cpu);
448 }
449}
450
451#ifdef CONFIG_TIMER_STATS
452void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
453{
454 if (timer->start_site)
455 return;
456
457 timer->start_site = addr;
458 memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
459 timer->start_pid = current->pid;
460}
461
462static void timer_stats_account_timer(struct timer_list *timer)
463{
464 void *site;
465
466 /*
467 * start_site can be concurrently reset by
468 * timer_stats_timer_clear_start_info()
469 */
470 site = READ_ONCE(timer->start_site);
471 if (likely(!site))
472 return;
473
474 timer_stats_update_stats(timer, timer->start_pid, site,
475 timer->function, timer->start_comm,
476 timer->flags);
477}
478
479#else
480static void timer_stats_account_timer(struct timer_list *timer) {}
481#endif
482
483#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
484
485static struct debug_obj_descr timer_debug_descr;
486
487static void *timer_debug_hint(void *addr)
488{
489 return ((struct timer_list *) addr)->function;
490}
491
492/*
493 * fixup_init is called when:
494 * - an active object is initialized
495 */
496static int timer_fixup_init(void *addr, enum debug_obj_state state)
497{
498 struct timer_list *timer = addr;
499
500 switch (state) {
501 case ODEBUG_STATE_ACTIVE:
502 del_timer_sync(timer);
503 debug_object_init(timer, &timer_debug_descr);
504 return 1;
505 default:
506 return 0;
507 }
508}
509
510/* Stub timer callback for improperly used timers. */
511static void stub_timer(unsigned long data)
512{
513 WARN_ON(1);
514}
515
516/*
517 * fixup_activate is called when:
518 * - an active object is activated
519 * - an unknown object is activated (might be a statically initialized object)
520 */
521static int timer_fixup_activate(void *addr, enum debug_obj_state state)
522{
523 struct timer_list *timer = addr;
524
525 switch (state) {
526
527 case ODEBUG_STATE_NOTAVAILABLE:
528 /*
529 * This is not really a fixup. The timer was
530 * statically initialized. We just make sure that it
531 * is tracked in the object tracker.
532 */
533 if (timer->entry.pprev == NULL &&
534 timer->entry.next == TIMER_ENTRY_STATIC) {
535 debug_object_init(timer, &timer_debug_descr);
536 debug_object_activate(timer, &timer_debug_descr);
537 return 0;
538 } else {
539 setup_timer(timer, stub_timer, 0);
540 return 1;
541 }
542 return 0;
543
544 case ODEBUG_STATE_ACTIVE:
545 WARN_ON(1);
546
547 default:
548 return 0;
549 }
550}
551
552/*
553 * fixup_free is called when:
554 * - an active object is freed
555 */
556static int timer_fixup_free(void *addr, enum debug_obj_state state)
557{
558 struct timer_list *timer = addr;
559
560 switch (state) {
561 case ODEBUG_STATE_ACTIVE:
562 del_timer_sync(timer);
563 debug_object_free(timer, &timer_debug_descr);
564 return 1;
565 default:
566 return 0;
567 }
568}
569
570/*
571 * fixup_assert_init is called when:
572 * - an untracked/uninit-ed object is found
573 */
574static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
575{
576 struct timer_list *timer = addr;
577
578 switch (state) {
579 case ODEBUG_STATE_NOTAVAILABLE:
580 if (timer->entry.next == TIMER_ENTRY_STATIC) {
581 /*
582 * This is not really a fixup. The timer was
583 * statically initialized. We just make sure that it
584 * is tracked in the object tracker.
585 */
586 debug_object_init(timer, &timer_debug_descr);
587 return 0;
588 } else {
589 setup_timer(timer, stub_timer, 0);
590 return 1;
591 }
592 default:
593 return 0;
594 }
595}
596
597static struct debug_obj_descr timer_debug_descr = {
598 .name = "timer_list",
599 .debug_hint = timer_debug_hint,
600 .fixup_init = timer_fixup_init,
601 .fixup_activate = timer_fixup_activate,
602 .fixup_free = timer_fixup_free,
603 .fixup_assert_init = timer_fixup_assert_init,
604};
605
606static inline void debug_timer_init(struct timer_list *timer)
607{
608 debug_object_init(timer, &timer_debug_descr);
609}
610
611static inline void debug_timer_activate(struct timer_list *timer)
612{
613 debug_object_activate(timer, &timer_debug_descr);
614}
615
616static inline void debug_timer_deactivate(struct timer_list *timer)
617{
618 debug_object_deactivate(timer, &timer_debug_descr);
619}
620
621static inline void debug_timer_free(struct timer_list *timer)
622{
623 debug_object_free(timer, &timer_debug_descr);
624}
625
626static inline void debug_timer_assert_init(struct timer_list *timer)
627{
628 debug_object_assert_init(timer, &timer_debug_descr);
629}
630
631static void do_init_timer(struct timer_list *timer, unsigned int flags,
632 const char *name, struct lock_class_key *key);
633
634void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
635 const char *name, struct lock_class_key *key)
636{
637 debug_object_init_on_stack(timer, &timer_debug_descr);
638 do_init_timer(timer, flags, name, key);
639}
640EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
641
642void destroy_timer_on_stack(struct timer_list *timer)
643{
644 debug_object_free(timer, &timer_debug_descr);
645}
646EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
647
648#else
649static inline void debug_timer_init(struct timer_list *timer) { }
650static inline void debug_timer_activate(struct timer_list *timer) { }
651static inline void debug_timer_deactivate(struct timer_list *timer) { }
652static inline void debug_timer_assert_init(struct timer_list *timer) { }
653#endif
654
655static inline void debug_init(struct timer_list *timer)
656{
657 debug_timer_init(timer);
658 trace_timer_init(timer);
659}
660
661static inline void
662debug_activate(struct timer_list *timer, unsigned long expires)
663{
664 debug_timer_activate(timer);
665 trace_timer_start(timer, expires, timer->flags);
666}
667
668static inline void debug_deactivate(struct timer_list *timer)
669{
670 debug_timer_deactivate(timer);
671 trace_timer_cancel(timer);
672}
673
674static inline void debug_assert_init(struct timer_list *timer)
675{
676 debug_timer_assert_init(timer);
677}
678
679static void do_init_timer(struct timer_list *timer, unsigned int flags,
680 const char *name, struct lock_class_key *key)
681{
682 timer->entry.pprev = NULL;
683 timer->flags = flags | raw_smp_processor_id();
684 timer->slack = -1;
685#ifdef CONFIG_TIMER_STATS
686 timer->start_site = NULL;
687 timer->start_pid = -1;
688 memset(timer->start_comm, 0, TASK_COMM_LEN);
689#endif
690 lockdep_init_map(&timer->lockdep_map, name, key, 0);
691}
692
693/**
694 * init_timer_key - initialize a timer
695 * @timer: the timer to be initialized
696 * @flags: timer flags
697 * @name: name of the timer
698 * @key: lockdep class key of the fake lock used for tracking timer
699 * sync lock dependencies
700 *
701 * init_timer_key() must be called on a timer before calling *any* of the
702 * other timer functions.
703 */
704void init_timer_key(struct timer_list *timer, unsigned int flags,
705 const char *name, struct lock_class_key *key)
706{
707 debug_init(timer);
708 do_init_timer(timer, flags, name, key);
709}
710EXPORT_SYMBOL(init_timer_key);
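/*
 * Illustrative sketch (not part of this file): typical use of the timer API
 * above in a driver of this era. struct my_dev, my_timeout() and
 * my_dev_start() are hypothetical names.
 *
 *	struct my_dev {
 *		struct timer_list timer;
 *	};
 *
 *	static void my_timeout(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *
 *		pr_debug("my_dev %p: timed out\n", dev);
 *	}
 *
 *	static void my_dev_start(struct my_dev *dev)
 *	{
 *		setup_timer(&dev->timer, my_timeout, (unsigned long)dev);
 *		mod_timer(&dev->timer, jiffies + msecs_to_jiffies(100));
 *	}
 */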
711
712static inline void detach_timer(struct timer_list *timer, bool clear_pending)
713{
714 struct hlist_node *entry = &timer->entry;
715
716 debug_deactivate(timer);
717
718 __hlist_del(entry);
719 if (clear_pending)
720 entry->pprev = NULL;
721 entry->next = LIST_POISON2;
722}
723
724static inline void
725detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
726{
727 detach_timer(timer, true);
728 if (!(timer->flags & TIMER_DEFERRABLE))
729 base->active_timers--;
730 base->all_timers--;
731}
732
733static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
734 bool clear_pending)
735{
736 if (!timer_pending(timer))
737 return 0;
738
739 detach_timer(timer, clear_pending);
740 if (!(timer->flags & TIMER_DEFERRABLE)) {
741 base->active_timers--;
742 if (timer->expires == base->next_timer)
743 base->next_timer = base->timer_jiffies;
744 }
745 /* If this was the last timer, advance base->timer_jiffies */
746 if (!--base->all_timers)
747 base->timer_jiffies = jiffies;
748 return 1;
749}
750
751/*
752 * We are using hashed locking: holding per_cpu(tvec_bases).lock
753 * means that all timers which are tied to this base via timer->base are
754 * locked, and the base itself is locked too.
755 *
756 * So __run_timers/migrate_timers can safely modify all timers which could
757 * be found on ->tvX lists.
758 *
759 * When a timer is taken off the list for migration, the TIMER_MIGRATING flag
760 * is set, so lock_timer_base() spins until the new base is visible in timer->flags.
761 */
762static struct tvec_base *lock_timer_base(struct timer_list *timer,
763 unsigned long *flags)
764 __acquires(timer->base->lock)
765{
766 for (;;) {
767 u32 tf = timer->flags;
768 struct tvec_base *base;
769
770 if (!(tf & TIMER_MIGRATING)) {
771 base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
772 spin_lock_irqsave(&base->lock, *flags);
773 if (timer->flags == tf)
774 return base;
775 spin_unlock_irqrestore(&base->lock, *flags);
776 }
777 cpu_relax();
778 }
779}
780
781static inline int
782__mod_timer(struct timer_list *timer, unsigned long expires,
783 bool pending_only, int pinned)
784{
785 struct tvec_base *base, *new_base;
786 unsigned long flags;
787 int ret = 0;
788
789 timer_stats_timer_set_start_info(timer);
790 BUG_ON(!timer->function);
791
792 base = lock_timer_base(timer, &flags);
793
794 ret = detach_if_pending(timer, base, false);
795 if (!ret && pending_only)
796 goto out_unlock;
797
798 debug_activate(timer, expires);
799
800 new_base = get_target_base(base, pinned);
801
802 if (base != new_base) {
803 /*
804 * We are trying to schedule the timer on the local CPU.
805 * However we can't change timer's base while it is running,
806 * otherwise del_timer_sync() can't detect that the timer's
807 * handler has not yet finished. This also guarantees that
808 * the timer is serialized wrt itself.
809 */
810 if (likely(base->running_timer != timer)) {
811 /* See the comment in lock_timer_base() */
812 timer->flags |= TIMER_MIGRATING;
813
814 spin_unlock(&base->lock);
815 base = new_base;
816 spin_lock(&base->lock);
817 WRITE_ONCE(timer->flags,
818 (timer->flags & ~TIMER_BASEMASK) | base->cpu);
819 }
820 }
821
822 timer->expires = expires;
823 internal_add_timer(base, timer);
824
825out_unlock:
826 spin_unlock_irqrestore(&base->lock, flags);
827
828 return ret;
829}
830
831/**
832 * mod_timer_pending - modify a pending timer's timeout
833 * @timer: the pending timer to be modified
834 * @expires: new timeout in jiffies
835 *
836 * mod_timer_pending() is the same for pending timers as mod_timer(),
837 * but will not re-activate and modify already deleted timers.
838 *
839 * It is useful for unserialized use of timers.
840 */
841int mod_timer_pending(struct timer_list *timer, unsigned long expires)
842{
843 return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
844}
845EXPORT_SYMBOL(mod_timer_pending);
846
847/*
848 * Decide where to put the timer while taking the slack into account
849 *
850 * Algorithm:
851 * 1) calculate the maximum (absolute) time
852 * 2) find the highest bit in which expires and the new maximum differ
853 * 3) use this bit to build a mask
854 * 4) use the mask to round down the maximum time, so that all the
855 * trailing bits are zero
856 */
857static inline
858unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
859{
860 unsigned long expires_limit, mask;
861 int bit;
862
863 if (timer->slack >= 0) {
864 expires_limit = expires + timer->slack;
865 } else {
866 long delta = expires - jiffies;
867
868 if (delta < 256)
869 return expires;
870
871 expires_limit = expires + delta / 256;
872 }
873 mask = expires ^ expires_limit;
874 if (mask == 0)
875 return expires;
876
877 bit = __fls(mask);
878
879 mask = (1UL << bit) - 1;
880
881 expires_limit = expires_limit & ~(mask);
882
883 return expires_limit;
884}
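/*
 * Worked example of the rounding above (illustrative only). With the default
 * slack of -1 and expires = jiffies + 1024:
 *
 *	delta         = 1024
 *	expires_limit = expires + 1024 / 256		(= expires + 4)
 *	mask          = expires ^ expires_limit
 *	bit           = __fls(mask)
 *	expires_limit &= ~((1UL << bit) - 1)
 *
 * The further away the expiry is, the more low-order bits get cleared, so
 * timers that expire in the same neighbourhood land in the same bucket and
 * can be expired together.
 */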
885
886/**
887 * mod_timer - modify a timer's timeout
888 * @timer: the timer to be modified
889 * @expires: new timeout in jiffies
890 *
891 * mod_timer() is a more efficient way to update the expire field of an
892 * active timer (if the timer is inactive it will be activated)
893 *
894 * mod_timer(timer, expires) is equivalent to:
895 *
896 * del_timer(timer); timer->expires = expires; add_timer(timer);
897 *
898 * Note that if there are multiple unserialized concurrent users of the
899 * same timer, then mod_timer() is the only safe way to modify the timeout,
900 * since add_timer() cannot modify an already running timer.
901 *
902 * The function returns whether it has modified a pending timer or not.
903 * (i.e. mod_timer() of an inactive timer returns 0, mod_timer() of an
904 * active timer returns 1.)
905 */
906int mod_timer(struct timer_list *timer, unsigned long expires)
907{
908 expires = apply_slack(timer, expires);
909
910 /*
911 * This is a common optimization triggered by the
912 * networking code - if the timer is re-modified
913 * to be the same thing then just return:
914 */
915 if (timer_pending(timer) && timer->expires == expires)
916 return 1;
917
918 return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
919}
920EXPORT_SYMBOL(mod_timer);
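/*
 * Illustrative sketch (not part of this file): a common mod_timer() user is
 * a watchdog that is pushed further into the future on every sign of
 * activity. dev and dev->watchdog are hypothetical.
 *
 *	static void my_dev_kick_watchdog(struct my_dev *dev)
 *	{
 *		mod_timer(&dev->watchdog, jiffies + 2 * HZ);
 *	}
 *
 * If the watchdog was still pending this only moves its expiry; if it had
 * already fired or was never armed, it is (re)activated, exactly as the
 * kerneldoc above describes.
 */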
921
922/**
923 * mod_timer_pinned - modify a timer's timeout
924 * @timer: the timer to be modified
925 * @expires: new timeout in jiffies
926 *
927 * mod_timer_pinned() is a way to update the expire field of an
928 * active timer (if the timer is inactive it will be activated)
929 * and to ensure that the timer is scheduled on the current CPU.
930 *
931 * Note that this does not prevent the timer from being migrated
932 * when the current CPU goes offline. If this is a problem for
933 * you, use CPU-hotplug notifiers to handle it correctly, for
934 * example, cancelling the timer when the corresponding CPU goes
935 * offline.
936 *
937 * mod_timer_pinned(timer, expires) is equivalent to:
938 *
939 * del_timer(timer); timer->expires = expires; add_timer(timer);
940 */
941int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
942{
943 if (timer->expires == expires && timer_pending(timer))
944 return 1;
945
946 return __mod_timer(timer, expires, false, TIMER_PINNED);
947}
948EXPORT_SYMBOL(mod_timer_pinned);
949
950/**
951 * add_timer - start a timer
952 * @timer: the timer to be added
953 *
954 * The kernel will do a ->function(->data) callback from the
955 * timer interrupt at the ->expires point in the future. The
956 * current time is 'jiffies'.
957 *
958 * The timer's ->expires, ->function (and if the handler uses it, ->data)
959 * fields must be set prior to calling this function.
960 *
961 * Timers with an ->expires field in the past will be executed in the next
962 * timer tick.
963 */
964void add_timer(struct timer_list *timer)
965{
966 BUG_ON(timer_pending(timer));
967 mod_timer(timer, timer->expires);
968}
969EXPORT_SYMBOL(add_timer);
970
971/**
972 * add_timer_on - start a timer on a particular CPU
973 * @timer: the timer to be added
974 * @cpu: the CPU to start it on
975 *
976 * This is not very scalable on SMP. Double adds are not possible.
977 */
978void add_timer_on(struct timer_list *timer, int cpu)
979{
980 struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
981 struct tvec_base *base;
982 unsigned long flags;
983
984 timer_stats_timer_set_start_info(timer);
985 BUG_ON(timer_pending(timer) || !timer->function);
986
987 /*
988 * If @timer was on a different CPU, it should be migrated with the
989 * old base locked to prevent other operations proceeding with the
990 * wrong base locked. See lock_timer_base().
991 */
992 base = lock_timer_base(timer, &flags);
993 if (base != new_base) {
994 timer->flags |= TIMER_MIGRATING;
995
996 spin_unlock(&base->lock);
997 base = new_base;
998 spin_lock(&base->lock);
999 WRITE_ONCE(timer->flags,
1000 (timer->flags & ~TIMER_BASEMASK) | cpu);
1001 }
1002
1003 debug_activate(timer, timer->expires);
1004 internal_add_timer(base, timer);
1005 spin_unlock_irqrestore(&base->lock, flags);
1006}
1007EXPORT_SYMBOL_GPL(add_timer_on);
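/*
 * Illustrative sketch (not part of this file): arming a timer on a specific
 * CPU, e.g. for per-CPU housekeeping work. 'timer' and 'cpu' are assumed to
 * be set up by the caller.
 *
 *	timer->expires = jiffies + HZ;
 *	add_timer_on(timer, cpu);
 *
 * The timer must not already be pending and ->function must be set, as the
 * BUG_ON() above enforces.
 */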
1008
1009/**
1010 * del_timer - deactivate a timer.
1011 * @timer: the timer to be deactivated
1012 *
1013 * del_timer() deactivates a timer - this works on both active and inactive
1014 * timers.
1015 *
1016 * The function returns whether it has deactivated a pending timer or not.
1017 * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an
1018 * active timer returns 1.)
1019 */
1020int del_timer(struct timer_list *timer)
1021{
1022 struct tvec_base *base;
1023 unsigned long flags;
1024 int ret = 0;
1025
1026 debug_assert_init(timer);
1027
1028 timer_stats_timer_clear_start_info(timer);
1029 if (timer_pending(timer)) {
1030 base = lock_timer_base(timer, &flags);
1031 ret = detach_if_pending(timer, base, true);
1032 spin_unlock_irqrestore(&base->lock, flags);
1033 }
1034
1035 return ret;
1036}
1037EXPORT_SYMBOL(del_timer);
1038
1039/**
1040 * try_to_del_timer_sync - Try to deactivate a timer
1041 * @timer: timer to deactivate
1042 *
1043 * This function tries to deactivate a timer. Upon successful (ret >= 0)
1044 * exit the timer is not queued and the handler is not running on any CPU.
1045 */
1046int try_to_del_timer_sync(struct timer_list *timer)
1047{
1048 struct tvec_base *base;
1049 unsigned long flags;
1050 int ret = -1;
1051
1052 debug_assert_init(timer);
1053
1054 base = lock_timer_base(timer, &flags);
1055
1056 if (base->running_timer != timer) {
1057 timer_stats_timer_clear_start_info(timer);
1058 ret = detach_if_pending(timer, base, true);
1059 }
1060 spin_unlock_irqrestore(&base->lock, flags);
1061
1062 return ret;
1063}
1064EXPORT_SYMBOL(try_to_del_timer_sync);
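/*
 * Illustrative sketch (not part of this file): try_to_del_timer_sync() suits
 * callers that hold a lock which the timer handler also takes, so they must
 * not spin in del_timer_sync(). Dropping the lock between attempts lets a
 * running handler finish. dev, dev->lock and dev->timer are hypothetical.
 *
 *	int ret;
 *
 *	do {
 *		spin_lock_irq(&dev->lock);
 *		ret = try_to_del_timer_sync(&dev->timer);
 *		spin_unlock_irq(&dev->lock);
 *	} while (ret < 0);
 */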
1065
1066#ifdef CONFIG_SMP
1067/**
1068 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1069 * @timer: the timer to be deactivated
1070 *
1071 * This function only differs from del_timer() on SMP: besides deactivating
1072 * the timer it also makes sure the handler has finished executing on other
1073 * CPUs.
1074 *
1075 * Synchronization rules: Callers must prevent restarting of the timer,
1076 * otherwise this function is meaningless. It must not be called from
1077 * interrupt contexts unless the timer is an irqsafe one. The caller must
1078 * not hold locks which would prevent completion of the timer's
1079 * handler. The timer's handler must not call add_timer_on(). Upon exit the
1080 * timer is not queued and the handler is not running on any CPU.
1081 *
1082 * Note: For !irqsafe timers, you must not hold locks that are held in
1083 * interrupt context while calling this function. Even if the lock has
1084 * nothing to do with the timer in question. Here's why:
1085 *
1086 *    CPU0                             CPU1
1087 *    ----                             ----
1088 *                                     <SOFTIRQ>
1089 *                                       call_timer_fn();
1090 *                                       base->running_timer = mytimer;
1091 *    spin_lock_irq(somelock);
1092 *                                     <IRQ>
1093 *                                        spin_lock(somelock);
1094 *    del_timer_sync(mytimer);
1095 *      while (base->running_timer == mytimer);
1096 *
1097 * Now del_timer_sync() will never return and never release somelock.
1098 * The interrupt on the other CPU is waiting to grab somelock but
1099 * it has interrupted the softirq that CPU0 is waiting to finish.
1100 *
1101 * The function returns whether it has deactivated a pending timer or not.
1102 */
1103int del_timer_sync(struct timer_list *timer)
1104{
1105#ifdef CONFIG_LOCKDEP
1106 unsigned long flags;
1107
1108 /*
1109 * If lockdep gives a backtrace here, please reference
1110 * the synchronization rules above.
1111 */
1112 local_irq_save(flags);
1113 lock_map_acquire(&timer->lockdep_map);
1114 lock_map_release(&timer->lockdep_map);
1115 local_irq_restore(flags);
1116#endif
1117 /*
1118 * Don't use del_timer_sync() from hardirq context for non-irqsafe
1119 * timers, because waiting for a running handler there could deadlock.
1120 */
1121 WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
1122 for (;;) {
1123 int ret = try_to_del_timer_sync(timer);
1124 if (ret >= 0)
1125 return ret;
1126 cpu_relax();
1127 }
1128}
1129EXPORT_SYMBOL(del_timer_sync);
1130#endif
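/*
 * Illustrative sketch (not part of this file): the teardown order that the
 * rules above imply. The handler is assumed to check dev->shutting_down
 * before re-arming itself; dev and its members are hypothetical.
 *
 *	dev->shutting_down = true;
 *	del_timer_sync(&dev->timer);
 *	kfree(dev);
 */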
1131
1132static int cascade(struct tvec_base *base, struct tvec *tv, int index)
1133{
1134 /* cascade all the timers from tv up one level */
1135 struct timer_list *timer;
1136 struct hlist_node *tmp;
1137 struct hlist_head tv_list;
1138
1139 hlist_move_list(tv->vec + index, &tv_list);
1140
1141 /*
1142 * We are removing _all_ timers from the list, so we
1143 * don't have to detach them individually.
1144 */
1145 hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
1146 /* No accounting, while moving them */
1147 __internal_add_timer(base, timer);
1148 }
1149
1150 return index;
1151}
1152
1153static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1154 unsigned long data)
1155{
1156 int count = preempt_count();
1157
1158#ifdef CONFIG_LOCKDEP
1159 /*
1160 * It is permissible to free the timer from inside the
1161 * function that is called from it, this we need to take into
1162 * account for lockdep too. To avoid bogus "held lock freed"
1163 * warnings as well as problems when looking into
1164 * timer->lockdep_map, make a copy and use that here.
1165 */
1166 struct lockdep_map lockdep_map;
1167
1168 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1169#endif
1170 /*
1171 * Couple the lock chain with the lock chain at
1172 * del_timer_sync() by acquiring the lock_map around the fn()
1173 * call here and in del_timer_sync().
1174 */
1175 lock_map_acquire(&lockdep_map);
1176
1177 trace_timer_expire_entry(timer);
1178 fn(data);
1179 trace_timer_expire_exit(timer);
1180
1181 lock_map_release(&lockdep_map);
1182
1183 if (count != preempt_count()) {
1184 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
1185 fn, count, preempt_count());
1186 /*
1187 * Restore the preempt count. That gives us a decent
1188 * chance to survive and extract information. If the
1189 * callback kept a lock held, bad luck, but not worse
1190 * than the BUG() we had.
1191 */
1192 preempt_count_set(count);
1193 }
1194}
1195
1196#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
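/*
 * Worked example (assuming the usual !CONFIG_BASE_SMALL values TVR_BITS = 8,
 * TVN_BITS = 6): tv1 covers 256 jiffies, so 'index' below becomes 0 once
 * every 256 jiffies and the tv2 slot INDEX(0) (bits 8..13 of timer_jiffies)
 * is cascaded down. tv3 (slot INDEX(1), bits 14..19) is only cascaded when
 * INDEX(0) is also 0, i.e. every 16384 jiffies, and so on for tv4 and tv5.
 */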
1197
1198/**
1199 * __run_timers - run all expired timers (if any) on this CPU.
1200 * @base: the timer vector to be processed.
1201 *
1202 * This function cascades the timer vectors as needed and executes all
1203 * expired timers.
1204 */
1205static inline void __run_timers(struct tvec_base *base)
1206{
1207 struct timer_list *timer;
1208
1209 spin_lock_irq(&base->lock);
1210
1211 while (time_after_eq(jiffies, base->timer_jiffies)) {
1212 struct hlist_head work_list;
1213 struct hlist_head *head = &work_list;
1214 int index;
1215
1216 if (!base->all_timers) {
1217 base->timer_jiffies = jiffies;
1218 break;
1219 }
1220
1221 index = base->timer_jiffies & TVR_MASK;
1222
1223 /*
1224 * Cascade timers:
1225 */
1226 if (!index &&
1227 (!cascade(base, &base->tv2, INDEX(0))) &&
1228 (!cascade(base, &base->tv3, INDEX(1))) &&
1229 !cascade(base, &base->tv4, INDEX(2)))
1230 cascade(base, &base->tv5, INDEX(3));
1231 ++base->timer_jiffies;
1232 hlist_move_list(base->tv1.vec + index, head);
1233 while (!hlist_empty(head)) {
1234 void (*fn)(unsigned long);
1235 unsigned long data;
1236 bool irqsafe;
1237
1238 timer = hlist_entry(head->first, struct timer_list, entry);
1239 fn = timer->function;
1240 data = timer->data;
1241 irqsafe = timer->flags & TIMER_IRQSAFE;
1242
1243 timer_stats_account_timer(timer);
1244
1245 base->running_timer = timer;
1246 detach_expired_timer(timer, base);
1247
1248 if (irqsafe) {
1249 spin_unlock(&base->lock);
1250 call_timer_fn(timer, fn, data);
1251 spin_lock(&base->lock);
1252 } else {
1253 spin_unlock_irq(&base->lock);
1254 call_timer_fn(timer, fn, data);
1255 spin_lock_irq(&base->lock);
1256 }
1257 }
1258 }
1259 base->running_timer = NULL;
1260 spin_unlock_irq(&base->lock);
1261}
1262
1263#ifdef CONFIG_NO_HZ_COMMON
1264/*
1265 * Find out when the next timer event is due to happen. This is used
1266 * by the NOHZ code to decide how long a CPU can stay idle without a tick.
1267 * This function needs to be called with interrupts disabled.
1268 */
1269static unsigned long __next_timer_interrupt(struct tvec_base *base)
1270{
1271 unsigned long timer_jiffies = base->timer_jiffies;
1272 unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
1273 int index, slot, array, found = 0;
1274 struct timer_list *nte;
1275 struct tvec *varray[4];
1276
1277 /* Look for timer events in tv1. */
1278 index = slot = timer_jiffies & TVR_MASK;
1279 do {
1280 hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
1281 if (nte->flags & TIMER_DEFERRABLE)
1282 continue;
1283
1284 found = 1;
1285 expires = nte->expires;
1286 /* Look at the cascade bucket(s)? */
1287 if (!index || slot < index)
1288 goto cascade;
1289 return expires;
1290 }
1291 slot = (slot + 1) & TVR_MASK;
1292 } while (slot != index);
1293
1294cascade:
1295 /* Calculate the next cascade event */
1296 if (index)
1297 timer_jiffies += TVR_SIZE - index;
1298 timer_jiffies >>= TVR_BITS;
1299
1300 /* Check tv2-tv5. */
1301 varray[0] = &base->tv2;
1302 varray[1] = &base->tv3;
1303 varray[2] = &base->tv4;
1304 varray[3] = &base->tv5;
1305
1306 for (array = 0; array < 4; array++) {
1307 struct tvec *varp = varray[array];
1308
1309 index = slot = timer_jiffies & TVN_MASK;
1310 do {
1311 hlist_for_each_entry(nte, varp->vec + slot, entry) {
1312 if (nte->flags & TIMER_DEFERRABLE)
1313 continue;
1314
1315 found = 1;
1316 if (time_before(nte->expires, expires))
1317 expires = nte->expires;
1318 }
1319 /*
1320 * Are we still searching for the first timer, or are
1321 * we looking up the cascade buckets?
1322 */
1323 if (found) {
1324 /* Look at the cascade bucket(s)? */
1325 if (!index || slot < index)
1326 break;
1327 return expires;
1328 }
1329 slot = (slot + 1) & TVN_MASK;
1330 } while (slot != index);
1331
1332 if (index)
1333 timer_jiffies += TVN_SIZE - index;
1334 timer_jiffies >>= TVN_BITS;
1335 }
1336 return expires;
1337}
1338
1339/*
1340 * Check, if the next hrtimer event is before the next timer wheel
1341 * event:
1342 */
1343static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
1344{
1345 u64 nextevt = hrtimer_get_next_event();
1346
1347 /*
1348 * If high resolution timers are enabled, hrtimer_get_next_event()
1349 * returns KTIME_MAX, so the timer wheel expiry below is used as-is.
1350 */
1351 if (expires <= nextevt)
1352 return expires;
1353
1354 /*
1355 * If the next timer is already expired, return the tick base
1356 * time so the tick is fired immediately.
1357 */
1358 if (nextevt <= basem)
1359 return basem;
1360
1361 /*
1362 * Round up to the next jiffie. High resolution timers are
1363 * off, so the hrtimers are expired in the tick and we need to
1364 * make sure that this tick really expires the timer to avoid
1365 * a ping pong of the nohz stop code.
1366 *
1367 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
1368 */
1369 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
1370}
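/*
 * Worked example of the rounding above (illustrative, assuming HZ = 1000 so
 * TICK_NSEC = 1000000): nextevt = 2500000 ns is rounded up to 3000000 ns,
 * i.e. to the next full tick, which is the tick that will actually expire
 * the hrtimer when high resolution mode is off.
 */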
1371
1372/**
1373 * get_next_timer_interrupt - return the time (clock mono) of the next timer
1374 * @basej: base time jiffies
1375 * @basem: base time clock monotonic
1376 *
1377 * Returns the tick aligned clock monotonic time of the next pending
1378 * timer or KTIME_MAX if no timer is pending.
1379 */
1380u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1381{
1382 struct tvec_base *base = this_cpu_ptr(&tvec_bases);
1383 u64 expires = KTIME_MAX;
1384 unsigned long nextevt;
1385
1386 /*
1387 * Pretend that there is no timer pending if the cpu is offline.
1388 * Possible pending timers will be migrated later to an active cpu.
1389 */
1390 if (cpu_is_offline(smp_processor_id()))
1391 return expires;
1392
1393 spin_lock(&base->lock);
1394 if (base->active_timers) {
1395 if (time_before_eq(base->next_timer, base->timer_jiffies))
1396 base->next_timer = __next_timer_interrupt(base);
1397 nextevt = base->next_timer;
1398 if (time_before_eq(nextevt, basej))
1399 expires = basem;
1400 else
1401 expires = basem + (nextevt - basej) * TICK_NSEC;
1402 }
1403 spin_unlock(&base->lock);
1404
1405 return cmp_next_hrtimer_event(basem, expires);
1406}
1407#endif
1408
1409/*
1410 * Called from the timer interrupt handler to charge one tick to the current
1411 * process. user_tick is 1 if the tick is user time, 0 for system.
1412 */
1413void update_process_times(int user_tick)
1414{
1415 struct task_struct *p = current;
1416
1417 /* Note: this timer irq context must be accounted for as well. */
1418 account_process_tick(p, user_tick);
1419 run_local_timers();
1420 rcu_check_callbacks(user_tick);
1421#ifdef CONFIG_IRQ_WORK
1422 if (in_irq())
1423 irq_work_tick();
1424#endif
1425 scheduler_tick();
1426 run_posix_cpu_timers(p);
1427}
1428
1429/*
1430 * This function runs the expired timers in bottom half (softirq) context.
1431 */
1432static void run_timer_softirq(struct softirq_action *h)
1433{
1434 struct tvec_base *base = this_cpu_ptr(&tvec_bases);
1435
1436 if (time_after_eq(jiffies, base->timer_jiffies))
1437 __run_timers(base);
1438}
1439
1440/*
1441 * Called by the local, per-CPU timer interrupt on SMP.
1442 */
1443void run_local_timers(void)
1444{
1445 hrtimer_run_queues();
1446 raise_softirq(TIMER_SOFTIRQ);
1447}
1448
1449#ifdef __ARCH_WANT_SYS_ALARM
1450
1451/*
1452 * For backwards compatibility? This can be done in libc so Alpha
1453 * and all newer ports shouldn't need it.
1454 */
1455SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1456{
1457 return alarm_setitimer(seconds);
1458}
1459
1460#endif
1461
1462static void process_timeout(unsigned long __data)
1463{
1464 wake_up_process((struct task_struct *)__data);
1465}
1466
1467/**
1468 * schedule_timeout - sleep until timeout
1469 * @timeout: timeout value in jiffies
1470 *
1471 * Make the current task sleep until @timeout jiffies have
1472 * elapsed. The routine will return immediately unless
1473 * the current task state has been set (see set_current_state()).
1474 *
1475 * You can set the task state as follows -
1476 *
1477 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1478 * pass before the routine returns. The routine will return 0
1479 *
1480 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1481 * delivered to the current task. In this case the remaining time
1482 * in jiffies will be returned, or 0 if the timer expired in time
1483 *
1484 * The current task state is guaranteed to be TASK_RUNNING when this
1485 * routine returns.
1486 *
1487 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1488 * the CPU away without a bound on the timeout. In this case the return
1489 * value will be %MAX_SCHEDULE_TIMEOUT.
1490 *
1491 * In all cases the return value is guaranteed to be non-negative.
1492 */
1493signed long __sched schedule_timeout(signed long timeout)
1494{
1495 struct timer_list timer;
1496 unsigned long expire;
1497
1498 switch (timeout)
1499 {
1500 case MAX_SCHEDULE_TIMEOUT:
1501 /*
1502 * These two special cases are there for the caller's
1503 * convenience. Nothing more. We could take
1504 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1505 * but I'd like to return a valid offset (>= 0) to allow
1506 * the caller to do everything it wants with the retval.
1507 */
1508 schedule();
1509 goto out;
1510 default:
1511 /*
1512 * Another bit of paranoia. Note that the retval will be
1513 * 0, since no piece of the kernel is supposed to check
1514 * for a negative retval of schedule_timeout() (it
1515 * should never happen anyway). You just get the printk()
1516 * that tells you if something has gone wrong, and where.
1517 */
1518 if (timeout < 0) {
1519 printk(KERN_ERR "schedule_timeout: wrong timeout "
1520 "value %lx\n", timeout);
1521 dump_stack();
1522 current->state = TASK_RUNNING;
1523 goto out;
1524 }
1525 }
1526
1527 expire = timeout + jiffies;
1528
1529 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
1530 __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
1531 schedule();
1532 del_singleshot_timer_sync(&timer);
1533
1534 /* Remove the timer from the object tracker */
1535 destroy_timer_on_stack(&timer);
1536
1537 timeout = expire - jiffies;
1538
1539 out:
1540 return timeout < 0 ? 0 : timeout;
1541}
1542EXPORT_SYMBOL(schedule_timeout);
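/*
 * Illustrative sketch (not part of this file): as the kerneldoc above says,
 * the task state must be set before calling schedule_timeout(). A typical
 * interruptible wait of up to one second:
 *
 *	signed long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *	if (remaining)
 *		pr_debug("woken early, %ld jiffies left\n", remaining);
 */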
1543
1544/*
1545 * We can use __set_current_state() here because schedule_timeout() calls
1546 * schedule() unconditionally.
1547 */
1548signed long __sched schedule_timeout_interruptible(signed long timeout)
1549{
1550 __set_current_state(TASK_INTERRUPTIBLE);
1551 return schedule_timeout(timeout);
1552}
1553EXPORT_SYMBOL(schedule_timeout_interruptible);
1554
1555signed long __sched schedule_timeout_killable(signed long timeout)
1556{
1557 __set_current_state(TASK_KILLABLE);
1558 return schedule_timeout(timeout);
1559}
1560EXPORT_SYMBOL(schedule_timeout_killable);
1561
1562signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1563{
1564 __set_current_state(TASK_UNINTERRUPTIBLE);
1565 return schedule_timeout(timeout);
1566}
1567EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1568
1569/*
1570 * Like schedule_timeout_uninterruptible(), except this task will not contribute
1571 * to load average.
1572 */
1573signed long __sched schedule_timeout_idle(signed long timeout)
1574{
1575 __set_current_state(TASK_IDLE);
1576 return schedule_timeout(timeout);
1577}
1578EXPORT_SYMBOL(schedule_timeout_idle);
1579
1580#ifdef CONFIG_HOTPLUG_CPU
1581static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
1582{
1583 struct timer_list *timer;
1584 int cpu = new_base->cpu;
1585
1586 while (!hlist_empty(head)) {
1587 timer = hlist_entry(head->first, struct timer_list, entry);
1588 /* We ignore the accounting on the dying cpu */
1589 detach_timer(timer, false);
1590 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
1591 internal_add_timer(new_base, timer);
1592 }
1593}
1594
1595static void migrate_timers(int cpu)
1596{
1597 struct tvec_base *old_base;
1598 struct tvec_base *new_base;
1599 int i;
1600
1601 BUG_ON(cpu_online(cpu));
1602 old_base = per_cpu_ptr(&tvec_bases, cpu);
1603 new_base = get_cpu_ptr(&tvec_bases);
1604 /*
1605 * The caller is globally serialized and nobody else
1606 * takes two locks at once, so deadlock is not possible.
1607 */
1608 spin_lock_irq(&new_base->lock);
1609 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1610
1611 BUG_ON(old_base->running_timer);
1612
1613 for (i = 0; i < TVR_SIZE; i++)
1614 migrate_timer_list(new_base, old_base->tv1.vec + i);
1615 for (i = 0; i < TVN_SIZE; i++) {
1616 migrate_timer_list(new_base, old_base->tv2.vec + i);
1617 migrate_timer_list(new_base, old_base->tv3.vec + i);
1618 migrate_timer_list(new_base, old_base->tv4.vec + i);
1619 migrate_timer_list(new_base, old_base->tv5.vec + i);
1620 }
1621
1622 old_base->active_timers = 0;
1623 old_base->all_timers = 0;
1624
1625 spin_unlock(&old_base->lock);
1626 spin_unlock_irq(&new_base->lock);
1627 put_cpu_ptr(&tvec_bases);
1628}
1629
1630static int timer_cpu_notify(struct notifier_block *self,
1631 unsigned long action, void *hcpu)
1632{
1633 switch (action) {
1634 case CPU_DEAD:
1635 case CPU_DEAD_FROZEN:
1636 migrate_timers((long)hcpu);
1637 break;
1638 default:
1639 break;
1640 }
1641
1642 return NOTIFY_OK;
1643}
1644
1645static inline void timer_register_cpu_notifier(void)
1646{
1647 cpu_notifier(timer_cpu_notify, 0);
1648}
1649#else
1650static inline void timer_register_cpu_notifier(void) { }
1651#endif /* CONFIG_HOTPLUG_CPU */
1652
1653static void __init init_timer_cpu(int cpu)
1654{
1655 struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
1656
1657 base->cpu = cpu;
1658 spin_lock_init(&base->lock);
1659
1660 base->timer_jiffies = jiffies;
1661 base->next_timer = base->timer_jiffies;
1662}
1663
1664static void __init init_timer_cpus(void)
1665{
1666 int cpu;
1667
1668 for_each_possible_cpu(cpu)
1669 init_timer_cpu(cpu);
1670}
1671
1672void __init init_timers(void)
1673{
1674 init_timer_cpus();
1675 init_timer_stats();
1676 timer_register_cpu_notifier();
1677 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
1678}
1679
1680/**
1681 * msleep - sleep safely even with waitqueue interruptions
1682 * @msecs: Time in milliseconds to sleep for
1683 */
1684void msleep(unsigned int msecs)
1685{
1686 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1687
1688 while (timeout)
1689 timeout = schedule_timeout_uninterruptible(timeout);
1690}
1691
1692EXPORT_SYMBOL(msleep);
1693
1694/**
1695 * msleep_interruptible - sleep waiting for signals
1696 * @msecs: Time in milliseconds to sleep for
1697 */
1698unsigned long msleep_interruptible(unsigned int msecs)
1699{
1700 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1701
1702 while (timeout && !signal_pending(current))
1703 timeout = schedule_timeout_interruptible(timeout);
1704 return jiffies_to_msecs(timeout);
1705}
1706
1707EXPORT_SYMBOL(msleep_interruptible);
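/*
 * Illustrative sketch (not part of this file): unlike msleep(),
 * msleep_interruptible() can return early when a signal is pending and
 * reports how many milliseconds of the requested sleep were left:
 *
 *	unsigned long left = msleep_interruptible(500);
 *
 *	if (left)
 *		return -ERESTARTSYS;
 */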
1708
1709static void __sched do_usleep_range(unsigned long min, unsigned long max)
1710{
1711 ktime_t kmin;
1712 u64 delta;
1713
1714 kmin = ktime_set(0, min * NSEC_PER_USEC);
1715 delta = (u64)(max - min) * NSEC_PER_USEC;
1716 schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
1717}
1718
1719/**
1720 * usleep_range - Drop-in replacement for udelay where wakeup is flexible
1721 * @min: Minimum time in usecs to sleep
1722 * @max: Maximum time in usecs to sleep
1723 */
1724void __sched usleep_range(unsigned long min, unsigned long max)
1725{
1726 __set_current_state(TASK_UNINTERRUPTIBLE);
1727 do_usleep_range(min, max);
1728}
1729EXPORT_SYMBOL(usleep_range);
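/*
 * Illustrative sketch (not part of this file): usleep_range() is meant for
 * non-atomic context where udelay() would needlessly burn CPU; the min/max
 * window lets the scheduler coalesce nearby wakeups. Hypothetical example,
 * letting hardware settle for 100-200 microseconds after a register write:
 *
 *	writel(val, dev->base + MY_REG_CTRL);
 *	usleep_range(100, 200);
 */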