   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  Kernel internal timers
   4 *
   5 *  Copyright (C) 1991, 1992  Linus Torvalds
   6 *
   7 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
   8 *
   9 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
  10 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
  11 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
  12 *              serialize accesses to xtime/lost_ticks).
  13 *                              Copyright (C) 1998  Andrea Arcangeli
  14 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
  15 *  2002-05-31	Move sys_sysinfo here and make its locking sane, Robert Love
  16 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
  17 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
  18 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
  19 */
  20
  21#include <linux/kernel_stat.h>
  22#include <linux/export.h>
  23#include <linux/interrupt.h>
  24#include <linux/percpu.h>
  25#include <linux/init.h>
  26#include <linux/mm.h>
  27#include <linux/swap.h>
  28#include <linux/pid_namespace.h>
  29#include <linux/notifier.h>
  30#include <linux/thread_info.h>
  31#include <linux/time.h>
  32#include <linux/jiffies.h>
  33#include <linux/posix-timers.h>
  34#include <linux/cpu.h>
  35#include <linux/syscalls.h>
  36#include <linux/delay.h>
  37#include <linux/tick.h>
  38#include <linux/kallsyms.h>
  39#include <linux/irq_work.h>
  40#include <linux/sched/signal.h>
  41#include <linux/sched/sysctl.h>
  42#include <linux/sched/nohz.h>
  43#include <linux/sched/debug.h>
  44#include <linux/slab.h>
  45#include <linux/compat.h>
  46#include <linux/random.h>
  47#include <linux/sysctl.h>
  48
  49#include <linux/uaccess.h>
  50#include <asm/unistd.h>
  51#include <asm/div64.h>
  52#include <asm/timex.h>
  53#include <asm/io.h>
  54
  55#include "tick-internal.h"
  56
  57#define CREATE_TRACE_POINTS
  58#include <trace/events/timer.h>
  59
  60__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
  61
  62EXPORT_SYMBOL(jiffies_64);
  63
  64/*
  65 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
  66 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
  67 * level has a different granularity.
  68 *
  69 * The level granularity is:		LVL_CLK_DIV ^ lvl
  70 * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
  71 *
  72 * The array level of a newly armed timer depends on the relative expiry
  73 * time. The farther away the expiry time is, the higher the array level
  74 * and therefore the coarser the granularity becomes.
  75 *
  76 * Contrary to the original timer wheel implementation, which aims for 'exact'
  77 * expiry of the timers, this implementation removes the need for recascading
  78 * the timers into the lower array levels. The previous 'classic' timer wheel
  79 * implementation of the kernel already violated the 'exact' expiry by adding
  80 * slack to the expiry time to provide batched expiration. The granularity
  81 * levels provide implicit batching.
  82 *
  83 * This is an optimization of the original timer wheel implementation for the
  84 * majority of the timer wheel use cases: timeouts. The vast majority of
  85 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
  86 * the timeout expires it indicates that normal operation is disturbed, so it
  87 * does not matter much whether the timeout comes with a slight delay.
  88 *
  89 * The only exception to this is networking timers with a small expiry
  90 * time. They rely on the granularity. Those fit into the first wheel level,
  91 * which has HZ granularity.
  92 *
  93 * We don't have cascading anymore. Timers with an expiry time above the
  94 * capacity of the last wheel level are force expired at the maximum timeout
  95 * value of the last wheel level. From data sampling we know that the maximum
  96 * value observed is 5 days (network connection tracking), so this should not
  97 * be an issue.
  98 *
  99 * The currently chosen array constants are a good compromise between
 100 * array size and granularity.
 101 *
 102 * This results in the following granularity and range levels:
 103 *
 104 * HZ 1000 steps
 105 * Level Offset  Granularity            Range
 106 *  0      0         1 ms                0 ms -         63 ms
 107 *  1     64         8 ms               64 ms -        511 ms
 108 *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
 109 *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
 110 *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
 111 *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
 112 *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
 113 *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
 114 *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
 115 *
 116 * HZ  300
 117 * Level Offset  Granularity            Range
 118 *  0	   0         3 ms                0 ms -        210 ms
 119 *  1	  64        26 ms              213 ms -       1703 ms (213ms - ~1s)
 120 *  2	 128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
 121 *  3	 192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
 122 *  4	 256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
 123 *  5	 320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
 124 *  6	 384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
 125 *  7	 448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
 126 *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 127 *
 128 * HZ  250
 129 * Level Offset  Granularity            Range
 130 *  0	   0         4 ms                0 ms -        255 ms
 131 *  1	  64        32 ms              256 ms -       2047 ms (256ms - ~2s)
 132 *  2	 128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
 133 *  3	 192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
 134 *  4	 256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
 135 *  5	 320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
 136 *  6	 384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
 137 *  7	 448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
 138 *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 139 *
 140 * HZ  100
 141 * Level Offset  Granularity            Range
 142 *  0	   0         10 ms               0 ms -        630 ms
 143 *  1	  64         80 ms             640 ms -       5110 ms (640ms - ~5s)
 144 *  2	 128        640 ms            5120 ms -      40950 ms (~5s - ~40s)
 145 *  3	 192       5120 ms (~5s)     40960 ms -     327670 ms (~40s - ~5m)
 146 *  4	 256      40960 ms (~40s)   327680 ms -    2621430 ms (~5m - ~43m)
 147 *  5	 320     327680 ms (~5m)   2621440 ms -   20971510 ms (~43m - ~5h)
 148 *  6	 384    2621440 ms (~43m) 20971520 ms -  167772150 ms (~5h - ~1d)
 149 *  7	 448   20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
 150 */
 151
 152/* Clock divisor for the next level */
 153#define LVL_CLK_SHIFT	3
 154#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
 155#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
 156#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
 157#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))
 158
 159/*
 160 * The time start value for each level to select the bucket at enqueue
 161 * time. We start from the last possible delta of the previous level
 162 * so that we can later add an extra LVL_GRAN(n) to n (see calc_index()).
 163 */
 164#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
 165
 166/* Size of each clock level */
 167#define LVL_BITS	6
 168#define LVL_SIZE	(1UL << LVL_BITS)
 169#define LVL_MASK	(LVL_SIZE - 1)
 170#define LVL_OFFS(n)	((n) * LVL_SIZE)
 171
 172/* Level depth */
 173#if HZ > 100
 174# define LVL_DEPTH	9
 175# else
 176# define LVL_DEPTH	8
 177#endif
 178
 179/* The cutoff (max. capacity of the wheel) */
 180#define WHEEL_TIMEOUT_CUTOFF	(LVL_START(LVL_DEPTH))
 181#define WHEEL_TIMEOUT_MAX	(WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
 182
 183/*
 184 * The resulting wheel size. If NOHZ is configured we allocate two
 185 * wheels so that we have separate storage for the deferrable timers.
 186 */
 187#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)
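/*
 * Illustrative worked example (not part of the original file): with the
 * constants above, level 2 of the wheel is laid out as follows, in
 * jiffies and independent of HZ:
 *
 *	LVL_GRAN(2)  == 1UL << (2 * LVL_CLK_SHIFT)	==   64
 *	LVL_START(2) == 63UL << (1 * LVL_CLK_SHIFT)	==  504
 *	LVL_START(3) == 63UL << (2 * LVL_CLK_SHIFT)	== 4032
 *	LVL_OFFS(2)  == 2 * LVL_SIZE			==  128
 *
 * A timer whose delta to base->clk lies in [504, 4032) jiffies is thus
 * queued into one of the 64 buckets at offsets 128..191 with a
 * granularity of 64 jiffies, which corresponds to the level 2 rows of
 * the tables above (64 ms at HZ 1000, 640 ms at HZ 100).
 */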
 188
 189#ifdef CONFIG_NO_HZ_COMMON
 190# define NR_BASES	2
 191# define BASE_STD	0
 192# define BASE_DEF	1
 193#else
 194# define NR_BASES	1
 195# define BASE_STD	0
 196# define BASE_DEF	0
 197#endif
 198
 199struct timer_base {
 200	raw_spinlock_t		lock;
 201	struct timer_list	*running_timer;
 202#ifdef CONFIG_PREEMPT_RT
 203	spinlock_t		expiry_lock;
 204	atomic_t		timer_waiters;
 205#endif
 206	unsigned long		clk;
 207	unsigned long		next_expiry;
 208	unsigned int		cpu;
 209	bool			next_expiry_recalc;
 210	bool			is_idle;
 211	bool			timers_pending;
 212	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
 213	struct hlist_head	vectors[WHEEL_SIZE];
 214} ____cacheline_aligned;
 215
 216static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
 217
 218#ifdef CONFIG_NO_HZ_COMMON
 219
 220static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
 221static DEFINE_MUTEX(timer_keys_mutex);
 222
 223static void timer_update_keys(struct work_struct *work);
 224static DECLARE_WORK(timer_update_work, timer_update_keys);
 225
 226#ifdef CONFIG_SMP
 227static unsigned int sysctl_timer_migration = 1;
 228
 229DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
 230
 231static void timers_update_migration(void)
 232{
 233	if (sysctl_timer_migration && tick_nohz_active)
 234		static_branch_enable(&timers_migration_enabled);
 235	else
 236		static_branch_disable(&timers_migration_enabled);
 237}
 238
 239#ifdef CONFIG_SYSCTL
 240static int timer_migration_handler(struct ctl_table *table, int write,
 241			    void *buffer, size_t *lenp, loff_t *ppos)
 242{
 243	int ret;
 244
 245	mutex_lock(&timer_keys_mutex);
 246	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 247	if (!ret && write)
 248		timers_update_migration();
 249	mutex_unlock(&timer_keys_mutex);
 250	return ret;
 251}
 252
 253static struct ctl_table timer_sysctl[] = {
 254	{
 255		.procname	= "timer_migration",
 256		.data		= &sysctl_timer_migration,
 257		.maxlen		= sizeof(unsigned int),
 258		.mode		= 0644,
 259		.proc_handler	= timer_migration_handler,
 260		.extra1		= SYSCTL_ZERO,
 261		.extra2		= SYSCTL_ONE,
 262	},
 263	{}
 264};
 265
 266static int __init timer_sysctl_init(void)
 267{
 268	register_sysctl("kernel", timer_sysctl);
 269	return 0;
 270}
 271device_initcall(timer_sysctl_init);
 272#endif /* CONFIG_SYSCTL */
 273#else /* CONFIG_SMP */
 274static inline void timers_update_migration(void) { }
 275#endif /* !CONFIG_SMP */
 276
 277static void timer_update_keys(struct work_struct *work)
 278{
 279	mutex_lock(&timer_keys_mutex);
 280	timers_update_migration();
 281	static_branch_enable(&timers_nohz_active);
 282	mutex_unlock(&timer_keys_mutex);
 283}
 284
 285void timers_update_nohz(void)
 286{
 287	schedule_work(&timer_update_work);
 288}
 289
 290static inline bool is_timers_nohz_active(void)
 291{
 292	return static_branch_unlikely(&timers_nohz_active);
 293}
 294#else
 295static inline bool is_timers_nohz_active(void) { return false; }
 296#endif /* NO_HZ_COMMON */
 297
 298static unsigned long round_jiffies_common(unsigned long j, int cpu,
 299		bool force_up)
 300{
 301	int rem;
 302	unsigned long original = j;
 303
 304	/*
 305	 * We don't want all cpus firing their timers at once hitting the
 306	 * same lock or cachelines, so we skew each extra cpu with an extra
 307	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
 308	 * already did this.
 309	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
 310	 * this extra offset again.
 311	 */
 312	j += cpu * 3;
 313
 314	rem = j % HZ;
 315
 316	/*
 317	 * If the target jiffy is just after a whole second (which can happen
 318	 * due to delays of the timer irq, long irq off times, etc.) then
 319	 * we should round down to the whole second, not up. Use 1/4th second
 320	 * as cutoff for this rounding as an extreme upper bound for this.
 321	 * But never round down if @force_up is set.
 322	 */
 323	if (rem < HZ/4 && !force_up) /* round down */
 324		j = j - rem;
 325	else /* round up */
 326		j = j - rem + HZ;
 327
 328	/* now that we have rounded, subtract the extra skew again */
 329	j -= cpu * 3;
 330
 331	/*
 332	 * Make sure j is still in the future. Otherwise return the
 333	 * unmodified value.
 334	 */
 335	return time_is_after_jiffies(j) ? j : original;
 336}
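/*
 * Illustrative only (not part of the original file): with HZ=1000 and
 * cpu == 2, a target of j == 5 * HZ + 900 is processed as:
 *
 *	j += 2 * 3;		// 5906, apply the per-cpu skew
 *	rem = j % HZ;		// 906, >= HZ/4 so round up
 *	j = j - rem + HZ;	// 6000
 *	j -= 2 * 3;		// 5994, remove the skew again
 *
 * so this CPU fires 6 jiffies before the full second, while other CPUs
 * end up at slightly different offsets instead of all piling up on the
 * same tick.
 */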
 337
 338/**
 339 * __round_jiffies - function to round jiffies to a full second
 340 * @j: the time in (absolute) jiffies that should be rounded
 341 * @cpu: the processor number on which the timeout will happen
 342 *
 343 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 344 * up or down to (approximately) full seconds. This is useful for timers
 345 * for which the exact time they fire does not matter too much, as long as
 346 * they fire approximately every X seconds.
 347 *
 348 * By rounding these timers to whole seconds, all such timers will fire
 349 * at the same time, rather than at various times spread out. The goal
 350 * of this is to have the CPU wake up less, which saves power.
 351 *
 352 * The exact rounding is skewed for each processor to avoid all
 353 * processors firing at the exact same time, which could lead
 354 * to lock contention or spurious cache line bouncing.
 355 *
 356 * The return value is the rounded version of the @j parameter.
 357 */
 358unsigned long __round_jiffies(unsigned long j, int cpu)
 359{
 360	return round_jiffies_common(j, cpu, false);
 361}
 362EXPORT_SYMBOL_GPL(__round_jiffies);
 363
 364/**
 365 * __round_jiffies_relative - function to round jiffies to a full second
 366 * @j: the time in (relative) jiffies that should be rounded
 367 * @cpu: the processor number on which the timeout will happen
 368 *
 369 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 370 * up or down to (approximately) full seconds. This is useful for timers
 371 * for which the exact time they fire does not matter too much, as long as
 372 * they fire approximately every X seconds.
 373 *
 374 * By rounding these timers to whole seconds, all such timers will fire
 375 * at the same time, rather than at various times spread out. The goal
 376 * of this is to have the CPU wake up less, which saves power.
 377 *
 378 * The exact rounding is skewed for each processor to avoid all
 379 * processors firing at the exact same time, which could lead
 380 * to lock contention or spurious cache line bouncing.
 381 *
 382 * The return value is the rounded version of the @j parameter.
 383 */
 384unsigned long __round_jiffies_relative(unsigned long j, int cpu)
 385{
 386	unsigned long j0 = jiffies;
 387
 388	/* Use j0 because jiffies might change while we run */
 389	return round_jiffies_common(j + j0, cpu, false) - j0;
 390}
 391EXPORT_SYMBOL_GPL(__round_jiffies_relative);
 392
 393/**
 394 * round_jiffies - function to round jiffies to a full second
 395 * @j: the time in (absolute) jiffies that should be rounded
 396 *
 397 * round_jiffies() rounds an absolute time in the future (in jiffies)
 398 * up or down to (approximately) full seconds. This is useful for timers
 399 * for which the exact time they fire does not matter too much, as long as
 400 * they fire approximately every X seconds.
 401 *
 402 * By rounding these timers to whole seconds, all such timers will fire
 403 * at the same time, rather than at various times spread out. The goal
 404 * of this is to have the CPU wake up less, which saves power.
 405 *
 406 * The return value is the rounded version of the @j parameter.
 407 */
 408unsigned long round_jiffies(unsigned long j)
 409{
 410	return round_jiffies_common(j, raw_smp_processor_id(), false);
 411}
 412EXPORT_SYMBOL_GPL(round_jiffies);
 413
 414/**
 415 * round_jiffies_relative - function to round jiffies to a full second
 416 * @j: the time in (relative) jiffies that should be rounded
 417 *
 418 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 419 * up or down to (approximately) full seconds. This is useful for timers
 420 * for which the exact time they fire does not matter too much, as long as
 421 * they fire approximately every X seconds.
 422 *
 423 * By rounding these timers to whole seconds, all such timers will fire
 424 * at the same time, rather than at various times spread out. The goal
 425 * of this is to have the CPU wake up less, which saves power.
 426 *
 427 * The return value is the rounded version of the @j parameter.
 428 */
 429unsigned long round_jiffies_relative(unsigned long j)
 430{
 431	return __round_jiffies_relative(j, raw_smp_processor_id());
 432}
 433EXPORT_SYMBOL_GPL(round_jiffies_relative);
 434
 435/**
 436 * __round_jiffies_up - function to round jiffies up to a full second
 437 * @j: the time in (absolute) jiffies that should be rounded
 438 * @cpu: the processor number on which the timeout will happen
 439 *
 440 * This is the same as __round_jiffies() except that it will never
 441 * round down.  This is useful for timeouts for which the exact time
 442 * of firing does not matter too much, as long as they don't fire too
 443 * early.
 444 */
 445unsigned long __round_jiffies_up(unsigned long j, int cpu)
 446{
 447	return round_jiffies_common(j, cpu, true);
 448}
 449EXPORT_SYMBOL_GPL(__round_jiffies_up);
 450
 451/**
 452 * __round_jiffies_up_relative - function to round jiffies up to a full second
 453 * @j: the time in (relative) jiffies that should be rounded
 454 * @cpu: the processor number on which the timeout will happen
 455 *
 456 * This is the same as __round_jiffies_relative() except that it will never
 457 * round down.  This is useful for timeouts for which the exact time
 458 * of firing does not matter too much, as long as they don't fire too
 459 * early.
 460 */
 461unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
 462{
 463	unsigned long j0 = jiffies;
 464
 465	/* Use j0 because jiffies might change while we run */
 466	return round_jiffies_common(j + j0, cpu, true) - j0;
 467}
 468EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
 469
 470/**
 471 * round_jiffies_up - function to round jiffies up to a full second
 472 * @j: the time in (absolute) jiffies that should be rounded
 473 *
 474 * This is the same as round_jiffies() except that it will never
 475 * round down.  This is useful for timeouts for which the exact time
 476 * of firing does not matter too much, as long as they don't fire too
 477 * early.
 478 */
 479unsigned long round_jiffies_up(unsigned long j)
 480{
 481	return round_jiffies_common(j, raw_smp_processor_id(), true);
 482}
 483EXPORT_SYMBOL_GPL(round_jiffies_up);
 484
 485/**
 486 * round_jiffies_up_relative - function to round jiffies up to a full second
 487 * @j: the time in (relative) jiffies that should be rounded
 488 *
 489 * This is the same as round_jiffies_relative() except that it will never
 490 * round down.  This is useful for timeouts for which the exact time
 491 * of firing does not matter too much, as long as they don't fire too
 492 * early.
 493 */
 494unsigned long round_jiffies_up_relative(unsigned long j)
 495{
 496	return __round_jiffies_up_relative(j, raw_smp_processor_id());
 497}
 498EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
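/*
 * Illustrative usage sketch (not part of the original file; 'my_timer'
 * and 'my_work' are made-up names): a periodic housekeeping job that
 * does not care about exact expiry can batch its wakeups with other
 * such timers by rounding to a full second:
 *
 *	mod_timer(&my_timer, round_jiffies(jiffies + 10 * HZ));
 *
 * or, for a relative delay such as delayed work:
 *
 *	schedule_delayed_work(&my_work, round_jiffies_relative(10 * HZ));
 *
 * The _up variants are used instead when firing early would be a
 * problem.
 */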
 499
 500
 501static inline unsigned int timer_get_idx(struct timer_list *timer)
 502{
 503	return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
 504}
 505
 506static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
 507{
 508	timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
 509			idx << TIMER_ARRAYSHIFT;
 510}
 511
 512/*
 513 * Helper function to calculate the array index for a given expiry
 514 * time.
 515 */
 516static inline unsigned calc_index(unsigned long expires, unsigned lvl,
 517				  unsigned long *bucket_expiry)
 518{
 519
 520	/*
 521	 * The timer wheel has to guarantee that a timer does not fire
 522	 * early. Early expiry can happen due to:
 523	 * - Timer is armed at the edge of a tick
 524	 * - Truncation of the expiry time in the outer wheel levels
 525	 *
 526	 * Round up with level granularity to prevent this.
 527	 */
 528	expires = (expires >> LVL_SHIFT(lvl)) + 1;
 529	*bucket_expiry = expires << LVL_SHIFT(lvl);
 530	return LVL_OFFS(lvl) + (expires & LVL_MASK);
 531}
 532
 533static int calc_wheel_index(unsigned long expires, unsigned long clk,
 534			    unsigned long *bucket_expiry)
 535{
 536	unsigned long delta = expires - clk;
 537	unsigned int idx;
 538
 539	if (delta < LVL_START(1)) {
 540		idx = calc_index(expires, 0, bucket_expiry);
 541	} else if (delta < LVL_START(2)) {
 542		idx = calc_index(expires, 1, bucket_expiry);
 543	} else if (delta < LVL_START(3)) {
 544		idx = calc_index(expires, 2, bucket_expiry);
 545	} else if (delta < LVL_START(4)) {
 546		idx = calc_index(expires, 3, bucket_expiry);
 547	} else if (delta < LVL_START(5)) {
 548		idx = calc_index(expires, 4, bucket_expiry);
 549	} else if (delta < LVL_START(6)) {
 550		idx = calc_index(expires, 5, bucket_expiry);
 551	} else if (delta < LVL_START(7)) {
 552		idx = calc_index(expires, 6, bucket_expiry);
 553	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
 554		idx = calc_index(expires, 7, bucket_expiry);
 555	} else if ((long) delta < 0) {
 556		idx = clk & LVL_MASK;
 557		*bucket_expiry = clk;
 558	} else {
 559		/*
 560		 * Force expire obscene large timeouts to expire at the
 561		 * capacity limit of the wheel.
 562		 */
 563		if (delta >= WHEEL_TIMEOUT_CUTOFF)
 564			expires = clk + WHEEL_TIMEOUT_MAX;
 565
 566		idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry);
 567	}
 568	return idx;
 569}
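/*
 * Illustrative only (not part of the original file): assume HZ=1000,
 * base->clk == 1000 and timer->expires == 1800. The delta of 800 lies
 * in [LVL_START(2), LVL_START(3)) == [504, 4032), so calc_index() runs
 * with lvl == 2:
 *
 *	expires = (1800 >> LVL_SHIFT(2)) + 1;	// (1800 >> 6) + 1 == 29
 *	*bucket_expiry = 29 << LVL_SHIFT(2);	// 1856, effective expiry
 *	idx = LVL_OFFS(2) + (29 & LVL_MASK);	// 128 + 29 == 157
 *
 * The timer fires up to 56 jiffies later than requested, which is within
 * the 64 jiffy granularity of level 2, and never earlier.
 */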
 570
 571static void
 572trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
 573{
 574	if (!is_timers_nohz_active())
 575		return;
 576
 577	/*
 578	 * TODO: This wants some optimizing similar to the code below, but we
 579	 * will do that when we switch from push to pull for deferrable timers.
 580	 */
 581	if (timer->flags & TIMER_DEFERRABLE) {
 582		if (tick_nohz_full_cpu(base->cpu))
 583			wake_up_nohz_cpu(base->cpu);
 584		return;
 585	}
 586
 587	/*
 588	 * We might have to IPI the remote CPU if the base is idle and the
 589	 * timer is not deferrable. If the other CPU is on the way to idle
 590	 * then it can't set base->is_idle as we hold the base lock:
 591	 */
 592	if (base->is_idle)
 593		wake_up_nohz_cpu(base->cpu);
 594}
 595
 596/*
 597 * Enqueue the timer into the hash bucket, mark it pending in
 598 * the bitmap, store the index in the timer flags then wake up
 599 * the target CPU if needed.
 600 */
 601static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
 602			  unsigned int idx, unsigned long bucket_expiry)
 603{
 604
 605	hlist_add_head(&timer->entry, base->vectors + idx);
 606	__set_bit(idx, base->pending_map);
 607	timer_set_idx(timer, idx);
 608
 609	trace_timer_start(timer, timer->expires, timer->flags);
 610
 611	/*
 612	 * Check whether this is the new first expiring timer. The
 613	 * effective expiry time of the timer is required here
 614	 * (bucket_expiry) instead of timer->expires.
 615	 */
 616	if (time_before(bucket_expiry, base->next_expiry)) {
 617		/*
 618		 * Set the next expiry time and kick the CPU so it
 619		 * can reevaluate the wheel:
 620		 */
 621		base->next_expiry = bucket_expiry;
 622		base->timers_pending = true;
 623		base->next_expiry_recalc = false;
 624		trigger_dyntick_cpu(base, timer);
 625	}
 626}
 627
 628static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
 629{
 630	unsigned long bucket_expiry;
 631	unsigned int idx;
 632
 633	idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
 634	enqueue_timer(base, timer, idx, bucket_expiry);
 635}
 636
 637#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
 638
 639static const struct debug_obj_descr timer_debug_descr;
 640
 641struct timer_hint {
 642	void	(*function)(struct timer_list *t);
 643	long	offset;
 644};
 645
 646#define TIMER_HINT(fn, container, timr, hintfn)			\
 647	{							\
 648		.function = fn,					\
 649		.offset	  = offsetof(container, hintfn) -	\
 650			    offsetof(container, timr)		\
 651	}
 652
 653static const struct timer_hint timer_hints[] = {
 654	TIMER_HINT(delayed_work_timer_fn,
 655		   struct delayed_work, timer, work.func),
 656	TIMER_HINT(kthread_delayed_work_timer_fn,
 657		   struct kthread_delayed_work, timer, work.func),
 658};
 659
 660static void *timer_debug_hint(void *addr)
 661{
 662	struct timer_list *timer = addr;
 663	int i;
 664
 665	for (i = 0; i < ARRAY_SIZE(timer_hints); i++) {
 666		if (timer_hints[i].function == timer->function) {
 667			void (**fn)(void) = addr + timer_hints[i].offset;
 668
 669			return *fn;
 670		}
 671	}
 672
 673	return timer->function;
 674}
 675
 676static bool timer_is_static_object(void *addr)
 677{
 678	struct timer_list *timer = addr;
 679
 680	return (timer->entry.pprev == NULL &&
 681		timer->entry.next == TIMER_ENTRY_STATIC);
 682}
 683
 684/*
 685 * fixup_init is called when:
 686 * - an active object is initialized
 687 */
 688static bool timer_fixup_init(void *addr, enum debug_obj_state state)
 689{
 690	struct timer_list *timer = addr;
 691
 692	switch (state) {
 693	case ODEBUG_STATE_ACTIVE:
 694		del_timer_sync(timer);
 695		debug_object_init(timer, &timer_debug_descr);
 696		return true;
 697	default:
 698		return false;
 699	}
 700}
 701
 702/* Stub timer callback for improperly used timers. */
 703static void stub_timer(struct timer_list *unused)
 704{
 705	WARN_ON(1);
 706}
 707
 708/*
 709 * fixup_activate is called when:
 710 * - an active object is activated
 711 * - an unknown non-static object is activated
 712 */
 713static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
 714{
 715	struct timer_list *timer = addr;
 716
 717	switch (state) {
 718	case ODEBUG_STATE_NOTAVAILABLE:
 719		timer_setup(timer, stub_timer, 0);
 720		return true;
 721
 722	case ODEBUG_STATE_ACTIVE:
 723		WARN_ON(1);
 724		fallthrough;
 725	default:
 726		return false;
 727	}
 728}
 729
 730/*
 731 * fixup_free is called when:
 732 * - an active object is freed
 733 */
 734static bool timer_fixup_free(void *addr, enum debug_obj_state state)
 735{
 736	struct timer_list *timer = addr;
 737
 738	switch (state) {
 739	case ODEBUG_STATE_ACTIVE:
 740		del_timer_sync(timer);
 741		debug_object_free(timer, &timer_debug_descr);
 742		return true;
 743	default:
 744		return false;
 745	}
 746}
 747
 748/*
 749 * fixup_assert_init is called when:
 750 * - an untracked/uninit-ed object is found
 751 */
 752static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
 753{
 754	struct timer_list *timer = addr;
 755
 756	switch (state) {
 757	case ODEBUG_STATE_NOTAVAILABLE:
 758		timer_setup(timer, stub_timer, 0);
 759		return true;
 760	default:
 761		return false;
 762	}
 763}
 764
 765static const struct debug_obj_descr timer_debug_descr = {
 766	.name			= "timer_list",
 767	.debug_hint		= timer_debug_hint,
 768	.is_static_object	= timer_is_static_object,
 769	.fixup_init		= timer_fixup_init,
 770	.fixup_activate		= timer_fixup_activate,
 771	.fixup_free		= timer_fixup_free,
 772	.fixup_assert_init	= timer_fixup_assert_init,
 773};
 774
 775static inline void debug_timer_init(struct timer_list *timer)
 776{
 777	debug_object_init(timer, &timer_debug_descr);
 778}
 779
 780static inline void debug_timer_activate(struct timer_list *timer)
 781{
 782	debug_object_activate(timer, &timer_debug_descr);
 783}
 784
 785static inline void debug_timer_deactivate(struct timer_list *timer)
 786{
 787	debug_object_deactivate(timer, &timer_debug_descr);
 788}
 789
 790static inline void debug_timer_assert_init(struct timer_list *timer)
 791{
 792	debug_object_assert_init(timer, &timer_debug_descr);
 793}
 794
 795static void do_init_timer(struct timer_list *timer,
 796			  void (*func)(struct timer_list *),
 797			  unsigned int flags,
 798			  const char *name, struct lock_class_key *key);
 799
 800void init_timer_on_stack_key(struct timer_list *timer,
 801			     void (*func)(struct timer_list *),
 802			     unsigned int flags,
 803			     const char *name, struct lock_class_key *key)
 804{
 805	debug_object_init_on_stack(timer, &timer_debug_descr);
 806	do_init_timer(timer, func, flags, name, key);
 807}
 808EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
 809
 810void destroy_timer_on_stack(struct timer_list *timer)
 811{
 812	debug_object_free(timer, &timer_debug_descr);
 813}
 814EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
 815
 816#else
 817static inline void debug_timer_init(struct timer_list *timer) { }
 818static inline void debug_timer_activate(struct timer_list *timer) { }
 819static inline void debug_timer_deactivate(struct timer_list *timer) { }
 820static inline void debug_timer_assert_init(struct timer_list *timer) { }
 821#endif
 822
 823static inline void debug_init(struct timer_list *timer)
 824{
 825	debug_timer_init(timer);
 826	trace_timer_init(timer);
 827}
 828
 829static inline void debug_deactivate(struct timer_list *timer)
 830{
 831	debug_timer_deactivate(timer);
 832	trace_timer_cancel(timer);
 833}
 834
 835static inline void debug_assert_init(struct timer_list *timer)
 836{
 837	debug_timer_assert_init(timer);
 838}
 839
 840static void do_init_timer(struct timer_list *timer,
 841			  void (*func)(struct timer_list *),
 842			  unsigned int flags,
 843			  const char *name, struct lock_class_key *key)
 844{
 845	timer->entry.pprev = NULL;
 846	timer->function = func;
 847	if (WARN_ON_ONCE(flags & ~TIMER_INIT_FLAGS))
 848		flags &= TIMER_INIT_FLAGS;
 849	timer->flags = flags | raw_smp_processor_id();
 850	lockdep_init_map(&timer->lockdep_map, name, key, 0);
 851}
 852
 853/**
 854 * init_timer_key - initialize a timer
 855 * @timer: the timer to be initialized
 856 * @func: timer callback function
 857 * @flags: timer flags
 858 * @name: name of the timer
 859 * @key: lockdep class key of the fake lock used for tracking timer
 860 *       sync lock dependencies
 861 *
 862 * init_timer_key() must be done to a timer prior to calling *any* of the
 863 * other timer functions.
 864 */
 865void init_timer_key(struct timer_list *timer,
 866		    void (*func)(struct timer_list *), unsigned int flags,
 867		    const char *name, struct lock_class_key *key)
 868{
 869	debug_init(timer);
 870	do_init_timer(timer, func, flags, name, key);
 871}
 872EXPORT_SYMBOL(init_timer_key);
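/*
 * Illustrative usage sketch (not part of the original file; 'struct
 * my_dev' and 'my_dev_timeout' are made-up names): callers normally
 * reach init_timer_key() through the timer_setup() wrapper and recover
 * their containing object with from_timer():
 *
 *	struct my_dev {
 *		struct timer_list timer;
 *		...
 *	};
 *
 *	static void my_dev_timeout(struct timer_list *t)
 *	{
 *		struct my_dev *dev = from_timer(dev, t, timer);
 *		...
 *	}
 *
 *	timer_setup(&dev->timer, my_dev_timeout, 0);
 *	mod_timer(&dev->timer, jiffies + msecs_to_jiffies(100));
 */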
 873
 874static inline void detach_timer(struct timer_list *timer, bool clear_pending)
 875{
 876	struct hlist_node *entry = &timer->entry;
 877
 878	debug_deactivate(timer);
 879
 880	__hlist_del(entry);
 881	if (clear_pending)
 882		entry->pprev = NULL;
 883	entry->next = LIST_POISON2;
 884}
 885
 886static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
 887			     bool clear_pending)
 888{
 889	unsigned idx = timer_get_idx(timer);
 890
 891	if (!timer_pending(timer))
 892		return 0;
 893
 894	if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) {
 895		__clear_bit(idx, base->pending_map);
 896		base->next_expiry_recalc = true;
 897	}
 898
 899	detach_timer(timer, clear_pending);
 900	return 1;
 901}
 902
 903static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
 904{
 905	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
 906
 907	/*
 908	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
 909	 * to use the deferrable base.
 910	 */
 911	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
 912		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
 913	return base;
 914}
 915
 916static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
 917{
 918	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 919
 920	/*
 921	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
 922	 * to use the deferrable base.
 923	 */
 924	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
 925		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
 926	return base;
 927}
 928
 929static inline struct timer_base *get_timer_base(u32 tflags)
 930{
 931	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
 932}
 933
 934static inline struct timer_base *
 935get_target_base(struct timer_base *base, unsigned tflags)
 936{
 937#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 938	if (static_branch_likely(&timers_migration_enabled) &&
 939	    !(tflags & TIMER_PINNED))
 940		return get_timer_cpu_base(tflags, get_nohz_timer_target());
 941#endif
 942	return get_timer_this_cpu_base(tflags);
 943}
 944
 945static inline void forward_timer_base(struct timer_base *base)
 946{
 947	unsigned long jnow = READ_ONCE(jiffies);
 948
 949	/*
 950	 * No need to forward if we are close enough below jiffies.
 951	 * Also while executing timers, base->clk is 1 offset ahead
 952	 * of jiffies to avoid endless requeuing to current jiffies.
 953	 */
 954	if ((long)(jnow - base->clk) < 1)
 955		return;
 956
 957	/*
 958	 * If the next expiry value is > jiffies, then we fast forward to
 959	 * jiffies otherwise we forward to the next expiry value.
 960	 */
 961	if (time_after(base->next_expiry, jnow)) {
 962		base->clk = jnow;
 963	} else {
 964		if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
 965			return;
 966		base->clk = base->next_expiry;
 967	}
 968}
 969
 970
 971/*
 972 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
 973 * that all timers which are tied to this base are locked, and the base itself
 974 * is locked too.
 975 *
 976 * So __run_timers/migrate_timers can safely modify all timers which could
 977 * be found in the base->vectors array.
 978 *
 979 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
 980 * to wait until the migration is done.
 981 */
 982static struct timer_base *lock_timer_base(struct timer_list *timer,
 983					  unsigned long *flags)
 984	__acquires(timer->base->lock)
 985{
 986	for (;;) {
 987		struct timer_base *base;
 988		u32 tf;
 989
 990		/*
 991		 * We need to use READ_ONCE() here, otherwise the compiler
 992		 * might re-read @tf between the check for TIMER_MIGRATING
 993		 * and spin_lock().
 994		 */
 995		tf = READ_ONCE(timer->flags);
 996
 997		if (!(tf & TIMER_MIGRATING)) {
 998			base = get_timer_base(tf);
 999			raw_spin_lock_irqsave(&base->lock, *flags);
1000			if (timer->flags == tf)
1001				return base;
1002			raw_spin_unlock_irqrestore(&base->lock, *flags);
1003		}
1004		cpu_relax();
1005	}
1006}
1007
1008#define MOD_TIMER_PENDING_ONLY		0x01
1009#define MOD_TIMER_REDUCE		0x02
1010#define MOD_TIMER_NOTPENDING		0x04
1011
1012static inline int
1013__mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
1014{
1015	unsigned long clk = 0, flags, bucket_expiry;
1016	struct timer_base *base, *new_base;
1017	unsigned int idx = UINT_MAX;
1018	int ret = 0;
1019
1020	debug_assert_init(timer);
1021
1022	/*
1023	 * This is a common optimization triggered by the networking code - if
1024	 * the timer is re-modified to have the same timeout or ends up in the
1025	 * same array bucket then just return:
1026	 */
1027	if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) {
1028		/*
1029		 * The downside of this optimization is that it can result in
1030		 * larger granularity than you would get from adding a new
1031		 * timer with this expiry.
1032		 */
1033		long diff = timer->expires - expires;
1034
1035		if (!diff)
1036			return 1;
1037		if (options & MOD_TIMER_REDUCE && diff <= 0)
1038			return 1;
1039
1040		/*
1041		 * We lock timer base and calculate the bucket index right
1042		 * here. If the timer ends up in the same bucket, then we
1043		 * just update the expiry time and avoid the whole
1044		 * dequeue/enqueue dance.
1045		 */
1046		base = lock_timer_base(timer, &flags);
1047		/*
1048		 * Has @timer been shutdown? This needs to be evaluated
1049		 * while holding base lock to prevent a race against the
1050		 * shutdown code.
1051		 */
1052		if (!timer->function)
1053			goto out_unlock;
1054
1055		forward_timer_base(base);
1056
1057		if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
1058		    time_before_eq(timer->expires, expires)) {
1059			ret = 1;
1060			goto out_unlock;
1061		}
1062
1063		clk = base->clk;
1064		idx = calc_wheel_index(expires, clk, &bucket_expiry);
1065
1066		/*
1067		 * Retrieve and compare the array index of the pending
1068		 * timer. If it matches set the expiry to the new value so a
1069		 * subsequent call will exit in the expires check above.
1070		 */
1071		if (idx == timer_get_idx(timer)) {
1072			if (!(options & MOD_TIMER_REDUCE))
1073				timer->expires = expires;
1074			else if (time_after(timer->expires, expires))
1075				timer->expires = expires;
1076			ret = 1;
1077			goto out_unlock;
1078		}
1079	} else {
1080		base = lock_timer_base(timer, &flags);
1081		/*
1082		 * Has @timer been shutdown? This needs to be evaluated
1083		 * while holding base lock to prevent a race against the
1084		 * shutdown code.
1085		 */
1086		if (!timer->function)
1087			goto out_unlock;
1088
1089		forward_timer_base(base);
1090	}
1091
1092	ret = detach_if_pending(timer, base, false);
1093	if (!ret && (options & MOD_TIMER_PENDING_ONLY))
1094		goto out_unlock;
1095
1096	new_base = get_target_base(base, timer->flags);
1097
1098	if (base != new_base) {
1099		/*
1100		 * We are trying to schedule the timer on the new base.
1101		 * However we can't change timer's base while it is running,
1102		 * otherwise timer_delete_sync() can't detect that the timer's
1103		 * handler yet has not finished. This also guarantees that the
1104		 * handler has not yet finished. This also guarantees that the
1105		 */
1106		if (likely(base->running_timer != timer)) {
1107			/* See the comment in lock_timer_base() */
1108			timer->flags |= TIMER_MIGRATING;
1109
1110			raw_spin_unlock(&base->lock);
1111			base = new_base;
1112			raw_spin_lock(&base->lock);
1113			WRITE_ONCE(timer->flags,
1114				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
1115			forward_timer_base(base);
1116		}
1117	}
1118
1119	debug_timer_activate(timer);
1120
1121	timer->expires = expires;
1122	/*
1123	 * If 'idx' was calculated above and the base time did not advance
1124	 * between calculating 'idx' and possibly switching the base, only
1125	 * enqueue_timer() is required. Otherwise we need to (re)calculate
1126	 * the wheel index via internal_add_timer().
1127	 */
1128	if (idx != UINT_MAX && clk == base->clk)
1129		enqueue_timer(base, timer, idx, bucket_expiry);
1130	else
1131		internal_add_timer(base, timer);
1132
1133out_unlock:
1134	raw_spin_unlock_irqrestore(&base->lock, flags);
1135
1136	return ret;
1137}
1138
1139/**
1140 * mod_timer_pending - Modify a pending timer's timeout
1141 * @timer:	The pending timer to be modified
1142 * @expires:	New absolute timeout in jiffies
1143 *
1144 * mod_timer_pending() is the same for pending timers as mod_timer(), but
1145 * will not activate inactive timers.
1146 *
1147 * If @timer->function == NULL then the start operation is silently
1148 * discarded.
1149 *
1150 * Return:
1151 * * %0 - The timer was inactive and not modified or was in
1152 *	  shutdown state and the operation was discarded
1153 * * %1 - The timer was active and requeued to expire at @expires
1154 */
1155int mod_timer_pending(struct timer_list *timer, unsigned long expires)
1156{
1157	return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
1158}
1159EXPORT_SYMBOL(mod_timer_pending);
1160
1161/**
1162 * mod_timer - Modify a timer's timeout
1163 * @timer:	The timer to be modified
1164 * @expires:	New absolute timeout in jiffies
1165 *
1166 * mod_timer(timer, expires) is equivalent to:
1167 *
1168 *     del_timer(timer); timer->expires = expires; add_timer(timer);
1169 *
1170 * mod_timer() is more efficient than the above open coded sequence. In
1171 * case that the timer is inactive, the del_timer() part is a NOP. The
1172 * timer is in any case activated with the new expiry time @expires.
1173 *
1174 * Note that if there are multiple unserialized concurrent users of the
1175 * same timer, then mod_timer() is the only safe way to modify the timeout,
1176 * since add_timer() cannot modify an already running timer.
1177 *
1178 * If @timer->function == NULL then the start operation is silently
1179 * discarded. In this case the return value is 0 and meaningless.
1180 *
1181 * Return:
1182 * * %0 - The timer was inactive and started or was in shutdown
1183 *	  state and the operation was discarded
1184 * * %1 - The timer was active and requeued to expire at @expires or
1185 *	  the timer was active and not modified because @expires did
1186 *	  not change the effective expiry time
1187 */
1188int mod_timer(struct timer_list *timer, unsigned long expires)
1189{
1190	return __mod_timer(timer, expires, 0);
1191}
1192EXPORT_SYMBOL(mod_timer);
1193
1194/**
1195 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
1196 * @timer:	The timer to be modified
1197 * @expires:	New absolute timeout in jiffies
1198 *
1199 * timer_reduce() is very similar to mod_timer(), except that it will only
1200 * modify an enqueued timer if that would reduce the expiration time. If
1201 * @timer is not enqueued it starts the timer.
1202 *
1203 * If @timer->function == NULL then the start operation is silently
1204 * discarded.
1205 *
1206 * Return:
1207 * * %0 - The timer was inactive and started or was in shutdown
1208 *	  state and the operation was discarded
1209 * * %1 - The timer was active and requeued to expire at @expires or
1210 *	  the timer was active and not modified because @expires
1211 *	  did not change the effective expiry time such that the
1212 *	  timer would expire earlier than already scheduled
1213 */
1214int timer_reduce(struct timer_list *timer, unsigned long expires)
1215{
1216	return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
1217}
1218EXPORT_SYMBOL(timer_reduce);
1219
1220/**
1221 * add_timer - Start a timer
1222 * @timer:	The timer to be started
1223 *
1224 * Start @timer to expire at @timer->expires in the future. @timer->expires
1225 * is the absolute expiry time measured in 'jiffies'. When the timer expires
1226 * timer->function(timer) will be invoked from soft interrupt context.
1227 *
1228 * The @timer->expires and @timer->function fields must be set prior
1229 * to calling this function.
1230 *
1231 * If @timer->function == NULL then the start operation is silently
1232 * discarded.
1233 *
1234 * If @timer->expires is already in the past @timer will be queued to
1235 * expire at the next timer tick.
1236 *
1237 * This can only operate on an inactive timer. Attempts to invoke this on
1238 * an active timer are rejected with a warning.
1239 */
1240void add_timer(struct timer_list *timer)
1241{
1242	if (WARN_ON_ONCE(timer_pending(timer)))
1243		return;
1244	__mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
1245}
1246EXPORT_SYMBOL(add_timer);
1247
1248/**
1249 * add_timer_on - Start a timer on a particular CPU
1250 * @timer:	The timer to be started
1251 * @cpu:	The CPU to start it on
1252 *
1253 * Same as add_timer() except that it starts the timer on the given CPU.
1254 *
1255 * See add_timer() for further details.
1256 */
1257void add_timer_on(struct timer_list *timer, int cpu)
1258{
1259	struct timer_base *new_base, *base;
1260	unsigned long flags;
1261
1262	debug_assert_init(timer);
1263
1264	if (WARN_ON_ONCE(timer_pending(timer)))
1265		return;
1266
1267	new_base = get_timer_cpu_base(timer->flags, cpu);
1268
1269	/*
1270	 * If @timer was on a different CPU, it should be migrated with the
1271	 * old base locked to prevent other operations proceeding with the
1272	 * wrong base locked.  See lock_timer_base().
1273	 */
1274	base = lock_timer_base(timer, &flags);
1275	/*
1276	 * Has @timer been shutdown? This needs to be evaluated while
1277	 * holding base lock to prevent a race against the shutdown code.
1278	 */
1279	if (!timer->function)
1280		goto out_unlock;
1281
1282	if (base != new_base) {
1283		timer->flags |= TIMER_MIGRATING;
1284
1285		raw_spin_unlock(&base->lock);
1286		base = new_base;
1287		raw_spin_lock(&base->lock);
1288		WRITE_ONCE(timer->flags,
1289			   (timer->flags & ~TIMER_BASEMASK) | cpu);
1290	}
1291	forward_timer_base(base);
1292
1293	debug_timer_activate(timer);
1294	internal_add_timer(base, timer);
1295out_unlock:
1296	raw_spin_unlock_irqrestore(&base->lock, flags);
1297}
1298EXPORT_SYMBOL_GPL(add_timer_on);
1299
1300/**
1301 * __timer_delete - Internal function: Deactivate a timer
1302 * @timer:	The timer to be deactivated
1303 * @shutdown:	If true, this indicates that the timer is about to be
1304 *		shutdown permanently.
1305 *
1306 * If @shutdown is true then @timer->function is set to NULL under the
1307 * timer base lock which prevents further rearming of the timer. In that
1308 * case any attempt to rearm @timer after this function returns will be
1309 * silently ignored.
1310 *
1311 * Return:
1312 * * %0 - The timer was not pending
1313 * * %1 - The timer was pending and deactivated
1314 */
1315static int __timer_delete(struct timer_list *timer, bool shutdown)
1316{
1317	struct timer_base *base;
1318	unsigned long flags;
1319	int ret = 0;
1320
1321	debug_assert_init(timer);
1322
1323	/*
1324	 * If @shutdown is set then the lock has to be taken whether the
1325	 * timer is pending or not to protect against a concurrent rearm
1326	 * which might hit between the lockless pending check and the lock
1327	 * acquisition. By taking the lock it is ensured that such a newly
1328	 * enqueued timer is dequeued and cannot end up with
1329	 * timer->function == NULL in the expiry code.
1330	 *
1331	 * If timer->function is currently being executed, then this makes sure
1332	 * that the callback cannot requeue the timer.
1333	 */
1334	if (timer_pending(timer) || shutdown) {
1335		base = lock_timer_base(timer, &flags);
1336		ret = detach_if_pending(timer, base, true);
1337		if (shutdown)
1338			timer->function = NULL;
1339		raw_spin_unlock_irqrestore(&base->lock, flags);
1340	}
1341
1342	return ret;
1343}
1344
1345/**
1346 * timer_delete - Deactivate a timer
1347 * @timer:	The timer to be deactivated
1348 *
1349 * The function only deactivates a pending timer, but contrary to
1350 * timer_delete_sync() it does not take into account whether the timer's
1351 * callback function is concurrently executed on a different CPU or not.
1352 * Nor does it prevent rearming of the timer. If @timer can be rearmed
1353 * concurrently then the return value of this function is meaningless.
1354 *
1355 * Return:
1356 * * %0 - The timer was not pending
1357 * * %1 - The timer was pending and deactivated
1358 */
1359int timer_delete(struct timer_list *timer)
1360{
1361	return __timer_delete(timer, false);
1362}
1363EXPORT_SYMBOL(timer_delete);
1364
1365/**
1366 * timer_shutdown - Deactivate a timer and prevent rearming
1367 * @timer:	The timer to be deactivated
1368 *
1369 * The function does not wait for a possibly running timer callback on a
1370 * different CPU, but it prevents rearming of the timer. Any attempt to arm
1371 * @timer after this function returns will be silently ignored.
1372 *
1373 * This function is useful for teardown code and should only be used when
1374 * timer_shutdown_sync() cannot be invoked due to locking or context constraints.
1375 *
1376 * Return:
1377 * * %0 - The timer was not pending
1378 * * %1 - The timer was pending
1379 */
1380int timer_shutdown(struct timer_list *timer)
1381{
1382	return __timer_delete(timer, true);
1383}
1384EXPORT_SYMBOL_GPL(timer_shutdown);
1385
1386/**
1387 * __try_to_del_timer_sync - Internal function: Try to deactivate a timer
1388 * @timer:	Timer to deactivate
1389 * @shutdown:	If true, this indicates that the timer is about to be
1390 *		shutdown permanently.
1391 *
1392 * If @shutdown is true then @timer->function is set to NULL under the
1393 * timer base lock which prevents further rearming of the timer. Any
1394 * attempt to rearm @timer after this function returns will be silently
1395 * ignored.
1396 *
1397 * This function cannot guarantee that the timer cannot be rearmed
1398 * right after dropping the base lock if @shutdown is false. That
1399 * needs to be prevented by the calling code if necessary.
1400 *
1401 * Return:
1402 * * %0  - The timer was not pending
1403 * * %1  - The timer was pending and deactivated
1404 * * %-1 - The timer callback function is running on a different CPU
1405 */
1406static int __try_to_del_timer_sync(struct timer_list *timer, bool shutdown)
1407{
1408	struct timer_base *base;
1409	unsigned long flags;
1410	int ret = -1;
1411
1412	debug_assert_init(timer);
1413
1414	base = lock_timer_base(timer, &flags);
1415
1416	if (base->running_timer != timer)
1417		ret = detach_if_pending(timer, base, true);
1418	if (shutdown)
1419		timer->function = NULL;
1420
1421	raw_spin_unlock_irqrestore(&base->lock, flags);
1422
1423	return ret;
1424}
1425
1426/**
1427 * try_to_del_timer_sync - Try to deactivate a timer
1428 * @timer:	Timer to deactivate
1429 *
1430 * This function tries to deactivate a timer. On success the timer is not
1431 * queued and the timer callback function is not running on any CPU.
1432 *
1433 * This function does not guarantee that the timer cannot be rearmed right
1434 * after dropping the base lock. That needs to be prevented by the calling
1435 * code if necessary.
1436 *
1437 * Return:
1438 * * %0  - The timer was not pending
1439 * * %1  - The timer was pending and deactivated
1440 * * %-1 - The timer callback function is running on a different CPU
1441 */
1442int try_to_del_timer_sync(struct timer_list *timer)
1443{
1444	return __try_to_del_timer_sync(timer, false);
1445}
1446EXPORT_SYMBOL(try_to_del_timer_sync);
1447
1448#ifdef CONFIG_PREEMPT_RT
1449static __init void timer_base_init_expiry_lock(struct timer_base *base)
1450{
1451	spin_lock_init(&base->expiry_lock);
1452}
1453
1454static inline void timer_base_lock_expiry(struct timer_base *base)
1455{
1456	spin_lock(&base->expiry_lock);
1457}
1458
1459static inline void timer_base_unlock_expiry(struct timer_base *base)
1460{
1461	spin_unlock(&base->expiry_lock);
1462}
1463
1464/*
1465 * The counterpart to del_timer_wait_running().
1466 *
1467 * If there is a waiter for base->expiry_lock, then it was waiting for the
1468 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
1469 * the waiter to acquire the lock and make progress.
1470 */
1471static void timer_sync_wait_running(struct timer_base *base)
1472{
1473	if (atomic_read(&base->timer_waiters)) {
1474		raw_spin_unlock_irq(&base->lock);
1475		spin_unlock(&base->expiry_lock);
1476		spin_lock(&base->expiry_lock);
1477		raw_spin_lock_irq(&base->lock);
1478	}
1479}
1480
1481/*
1482 * This function is called on PREEMPT_RT kernels when the fast path
1483 * deletion of a timer failed because the timer callback function was
1484 * running.
1485 *
1486 * This prevents priority inversion, if the softirq thread on a remote CPU
1487 * got preempted, and it prevents a live lock when the task which tries to
1488 * delete a timer preempted the softirq thread running the timer callback
1489 * function.
1490 */
1491static void del_timer_wait_running(struct timer_list *timer)
1492{
1493	u32 tf;
1494
1495	tf = READ_ONCE(timer->flags);
1496	if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
1497		struct timer_base *base = get_timer_base(tf);
1498
1499		/*
1500		 * Mark the base as contended and grab the expiry lock,
1501		 * which is held by the softirq across the timer
1502		 * callback. Drop the lock immediately so the softirq can
1503		 * expire the next timer. In theory the timer could already
1504		 * be running again, but that's more than unlikely and just
1505		 * causes another wait loop.
1506		 */
1507		atomic_inc(&base->timer_waiters);
1508		spin_lock_bh(&base->expiry_lock);
1509		atomic_dec(&base->timer_waiters);
1510		spin_unlock_bh(&base->expiry_lock);
1511	}
1512}
1513#else
1514static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
1515static inline void timer_base_lock_expiry(struct timer_base *base) { }
1516static inline void timer_base_unlock_expiry(struct timer_base *base) { }
1517static inline void timer_sync_wait_running(struct timer_base *base) { }
1518static inline void del_timer_wait_running(struct timer_list *timer) { }
1519#endif
1520
1521/**
1522 * __timer_delete_sync - Internal function: Deactivate a timer and wait
1523 *			 for the handler to finish.
1524 * @timer:	The timer to be deactivated
1525 * @shutdown:	If true, @timer->function will be set to NULL under the
1526 *		timer base lock which prevents rearming of @timer
1527 *
1528 * If @shutdown is not set the timer can be rearmed later. If the timer can
1529 * be rearmed concurrently, i.e. after dropping the base lock then the
1530 * return value is meaningless.
1531 *
1532 * If @shutdown is set then @timer->function is set to NULL under timer
1533 * base lock which prevents rearming of the timer. Any attempt to rearm
1534 * a shutdown timer is silently ignored.
1535 *
1536 * If the timer should be reused after shutdown it has to be initialized
1537 * again.
1538 *
1539 * Return:
1540 * * %0	- The timer was not pending
1541 * * %1	- The timer was pending and deactivated
1542 */
1543static int __timer_delete_sync(struct timer_list *timer, bool shutdown)
1544{
1545	int ret;
1546
1547#ifdef CONFIG_LOCKDEP
1548	unsigned long flags;
1549
1550	/*
1551	 * If lockdep gives a backtrace here, please reference
1552	 * the synchronization rules above.
1553	 */
1554	local_irq_save(flags);
1555	lock_map_acquire(&timer->lockdep_map);
1556	lock_map_release(&timer->lockdep_map);
1557	local_irq_restore(flags);
1558#endif
1559	/*
1560	 * don't use it in hardirq context, because it
1561	 * could lead to deadlock.
1562	 */
1563	WARN_ON(in_hardirq() && !(timer->flags & TIMER_IRQSAFE));
1564
1565	/*
1566	 * Must be able to sleep on PREEMPT_RT because of the slowpath in
1567	 * del_timer_wait_running().
1568	 */
1569	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
1570		lockdep_assert_preemption_enabled();
1571
1572	do {
1573		ret = __try_to_del_timer_sync(timer, shutdown);
1574
1575		if (unlikely(ret < 0)) {
1576			del_timer_wait_running(timer);
1577			cpu_relax();
1578		}
1579	} while (ret < 0);
1580
1581	return ret;
1582}
1583
1584/**
1585 * timer_delete_sync - Deactivate a timer and wait for the handler to finish.
1586 * @timer:	The timer to be deactivated
1587 *
1588 * Synchronization rules: Callers must prevent restarting of the timer,
1589 * otherwise this function is meaningless. It must not be called from
1590 * interrupt contexts unless the timer is an irqsafe one. The caller must
1591 * not hold locks which would prevent completion of the timer's callback
1592 * function. The timer's handler must not call add_timer_on(). Upon exit
1593 * the timer is not queued and the handler is not running on any CPU.
1594 *
1595 * For !irqsafe timers, the caller must not hold locks that are held in
1596 * interrupt context. Even if the lock has nothing to do with the timer in
1597 * question.  Here's why::
1598 *
1599 *    CPU0                             CPU1
1600 *    ----                             ----
1601 *                                     <SOFTIRQ>
1602 *                                       call_timer_fn();
1603 *                                       base->running_timer = mytimer;
1604 *    spin_lock_irq(somelock);
1605 *                                     <IRQ>
1606 *                                        spin_lock(somelock);
1607 *    timer_delete_sync(mytimer);
1608 *    while (base->running_timer == mytimer);
1609 *
1610 * Now timer_delete_sync() will never return and never release somelock.
1611 * The interrupt on the other CPU is waiting to grab somelock but it has
1612 * interrupted the softirq that CPU0 is waiting to finish.
1613 *
1614 * This function cannot guarantee that the timer is not rearmed again by
1615 * some concurrent or preempting code, right after it dropped the base
1616 * lock. If there is the possibility of a concurrent rearm then the return
1617 * value of the function is meaningless.
1618 *
1619 * If such a guarantee is needed, e.g. for teardown situations then use
1620 * timer_shutdown_sync() instead.
1621 *
1622 * Return:
1623 * * %0	- The timer was not pending
1624 * * %1	- The timer was pending and deactivated
1625 */
1626int timer_delete_sync(struct timer_list *timer)
1627{
1628	return __timer_delete_sync(timer, false);
1629}
1630EXPORT_SYMBOL(timer_delete_sync);
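/*
 * Usage sketch (hypothetical driver code, not part of this file): cancel a
 * self-rearming poll timer during teardown.  The driver clears its own
 * 'running' flag before the sync delete so the callback cannot rearm the
 * timer again, which keeps the return value meaningful.
 */
struct foo_dev {
	struct timer_list poll_timer;
	bool running;
};

static void foo_poll(struct timer_list *t)
{
	struct foo_dev *foo = from_timer(foo, t, poll_timer);

	/* Do the periodic work, then rearm only while still running. */
	if (READ_ONCE(foo->running))
		mod_timer(&foo->poll_timer, jiffies + HZ);
}

static void foo_stop(struct foo_dev *foo)
{
	WRITE_ONCE(foo->running, false);	/* prevent further rearming */
	timer_delete_sync(&foo->poll_timer);	/* also waits for foo_poll() */
}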
1631
1632/**
1633 * timer_shutdown_sync - Shutdown a timer and prevent rearming
1634 * @timer: The timer to be shutdown
1635 *
1636 * When the function returns it is guaranteed that:
1637 *   - @timer is not queued
1638 *   - The callback function of @timer is not running
1639 *   - @timer cannot be enqueued again. Any attempt to rearm
1640 *     @timer is silently ignored.
1641 *
1642 * See timer_delete_sync() for synchronization rules.
1643 *
1644 * This function is useful for final teardown of an infrastructure where
1645 * the timer is subject to a circular dependency problem.
1646 *
1647 * A common pattern for this is a timer and a workqueue where the timer can
1648 * schedule work and work can arm the timer. On shutdown the workqueue must
1649 * be destroyed and the timer must be prevented from rearming. Unless the
1650 * code has conditionals like 'if (mything->in_shutdown)' to prevent that,
1651 * there is no way to get this correct with timer_delete_sync().
1652 *
1653 * timer_shutdown_sync() solves this problem. The correct ordering of
1654 * calls in this case is:
1655 *
1656 *	timer_shutdown_sync(&mything->timer);
1657 *	workqueue_destroy(&mything->workqueue);
1658 *
1659 * After this 'mything' can be safely freed.
1660 *
1661 * This obviously implies that the timer is not required to be functional
1662 * for the rest of the shutdown operation.
1663 *
1664 * Return:
1665 * * %0 - The timer was not pending
1666 * * %1 - The timer was pending
1667 */
1668int timer_shutdown_sync(struct timer_list *timer)
1669{
1670	return __timer_delete_sync(timer, true);
1671}
1672EXPORT_SYMBOL_GPL(timer_shutdown_sync);
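/*
 * Teardown sketch for the timer/workqueue pattern described above.  The
 * 'mything' structure and functions are hypothetical, not part of this file.
 * Because the work item may rearm the timer, the timer is shut down first so
 * that any later rearm attempt is silently ignored; only then is the
 * workqueue drained and destroyed.
 */
static void mything_teardown(struct mything *thing)
{
	/* After this, thing->timer cannot fire or be rearmed anymore. */
	timer_shutdown_sync(&thing->timer);
	/* Flushes pending work items; none of them can restart the timer. */
	destroy_workqueue(thing->workqueue);
	kfree(thing);
}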
1673
1674static void call_timer_fn(struct timer_list *timer,
1675			  void (*fn)(struct timer_list *),
1676			  unsigned long baseclk)
1677{
1678	int count = preempt_count();
1679
1680#ifdef CONFIG_LOCKDEP
1681	/*
1682	 * It is permissible to free the timer from inside the
1683	 * function that is called from it. This needs to be taken into
1684	 * account for lockdep too. To avoid bogus "held lock freed"
1685	 * warnings as well as problems when looking into
1686	 * timer->lockdep_map, make a copy and use that here.
1687	 */
1688	struct lockdep_map lockdep_map;
1689
1690	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1691#endif
1692	/*
1693	 * Couple the lock chain with the lock chain at
1694	 * timer_delete_sync() by acquiring the lock_map around the fn()
1695	 * call here and in timer_delete_sync().
1696	 */
1697	lock_map_acquire(&lockdep_map);
1698
1699	trace_timer_expire_entry(timer, baseclk);
1700	fn(timer);
1701	trace_timer_expire_exit(timer);
1702
1703	lock_map_release(&lockdep_map);
1704
1705	if (count != preempt_count()) {
1706		WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n",
1707			  fn, count, preempt_count());
1708		/*
1709		 * Restore the preempt count. That gives us a decent
1710		 * chance to survive and extract information. If the
1711		 * callback kept a lock held, bad luck, but not worse
1712		 * than the BUG() we had.
1713		 */
1714		preempt_count_set(count);
1715	}
1716}
1717
1718static void expire_timers(struct timer_base *base, struct hlist_head *head)
1719{
1720	/*
1721	 * This value is required only for tracing. base->clk was
1722	 * incremented directly before expire_timers was called. But expiry
1723	 * is related to the old base->clk value.
1724	 */
1725	unsigned long baseclk = base->clk - 1;
1726
1727	while (!hlist_empty(head)) {
1728		struct timer_list *timer;
1729		void (*fn)(struct timer_list *);
1730
1731		timer = hlist_entry(head->first, struct timer_list, entry);
1732
1733		base->running_timer = timer;
1734		detach_timer(timer, true);
1735
1736		fn = timer->function;
1737
1738		if (WARN_ON_ONCE(!fn)) {
1739			/* Should never happen. Emphasis on should! */
1740			base->running_timer = NULL;
1741			continue;
1742		}
1743
1744		if (timer->flags & TIMER_IRQSAFE) {
1745			raw_spin_unlock(&base->lock);
1746			call_timer_fn(timer, fn, baseclk);
1747			raw_spin_lock(&base->lock);
1748			base->running_timer = NULL;
1749		} else {
1750			raw_spin_unlock_irq(&base->lock);
1751			call_timer_fn(timer, fn, baseclk);
1752			raw_spin_lock_irq(&base->lock);
1753			base->running_timer = NULL;
1754			timer_sync_wait_running(base);
1755		}
1756	}
1757}
1758
1759static int collect_expired_timers(struct timer_base *base,
1760				  struct hlist_head *heads)
1761{
1762	unsigned long clk = base->clk = base->next_expiry;
1763	struct hlist_head *vec;
1764	int i, levels = 0;
1765	unsigned int idx;
1766
1767	for (i = 0; i < LVL_DEPTH; i++) {
1768		idx = (clk & LVL_MASK) + i * LVL_SIZE;
1769
1770		if (__test_and_clear_bit(idx, base->pending_map)) {
1771			vec = base->vectors + idx;
1772			hlist_move_list(vec, heads++);
1773			levels++;
1774		}
1775		/* Is it time to look at the next level? */
1776		if (clk & LVL_CLK_MASK)
1777			break;
1778		/* Shift clock for the next level granularity */
1779		clk >>= LVL_CLK_SHIFT;
1780	}
1781	return levels;
1782}
1783
1784/*
1785 * Find the next pending bucket of a level. Search from level start (@offset)
1786 * + @clk upwards and, if nothing is found there, search from the start of
1787 * the level (@offset) up to @offset + @clk.
1788 */
1789static int next_pending_bucket(struct timer_base *base, unsigned offset,
1790			       unsigned clk)
1791{
1792	unsigned pos, start = offset + clk;
1793	unsigned end = offset + LVL_SIZE;
1794
1795	pos = find_next_bit(base->pending_map, end, start);
1796	if (pos < end)
1797		return pos - start;
1798
1799	pos = find_next_bit(base->pending_map, start, offset);
1800	return pos < start ? pos + LVL_SIZE - start : -1;
1801}
1802
1803/*
1804 * Search the first expiring timer in the various clock levels. Caller must
1805 * hold base->lock.
1806 */
1807static unsigned long __next_timer_interrupt(struct timer_base *base)
1808{
1809	unsigned long clk, next, adj;
1810	unsigned lvl, offset = 0;
1811
1812	next = base->clk + NEXT_TIMER_MAX_DELTA;
1813	clk = base->clk;
1814	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
1815		int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
1816		unsigned long lvl_clk = clk & LVL_CLK_MASK;
1817
1818		if (pos >= 0) {
1819			unsigned long tmp = clk + (unsigned long) pos;
1820
1821			tmp <<= LVL_SHIFT(lvl);
1822			if (time_before(tmp, next))
1823				next = tmp;
1824
1825			/*
1826			 * If the next expiration happens before we reach
1827			 * the next level, no need to check further.
1828			 */
1829			if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK))
1830				break;
1831		}
1832		/*
1833		 * Clock for the next level. If the current level clock lower
1834		 * bits are zero, we look at the next level as is. If not we
1835		 * need to advance it by one because that's going to be the
1836		 * next expiring bucket in that level. base->clk is the next
1837		 * expiring jiffy. So in case of:
1838		 *
1839		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1840		 *  0    0    0    0    0    0
1841		 *
1842		 * we have to look at all levels @index 0. With
1843		 *
1844		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1845		 *  0    0    0    0    0    2
1846		 *
1847		 * LVL0 has the next expiring bucket @index 2. The upper
1848		 * levels have the next expiring bucket @index 1.
1849		 *
1850		 * In case that the propagation wraps the next level the same
1851		 * rules apply:
1852		 *
1853		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1854		 *  0    0    0    0    F    2
1855		 *
1856		 * So after looking at LVL0 we get:
1857		 *
1858		 * LVL5 LVL4 LVL3 LVL2 LVL1
1859		 *  0    0    0    1    0
1860		 *
1861		 * So no propagation from LVL1 to LVL2 because that happened
1862		 * with the add already, but then we need to propagate further
1863		 * from LVL2 to LVL3.
1864		 *
1865		 * So the simple check whether the lower bits of the current
1866		 * level are 0 or not is sufficient for all cases.
1867		 */
1868		adj = lvl_clk ? 1 : 0;
1869		clk >>= LVL_CLK_SHIFT;
1870		clk += adj;
1871	}
1872
1873	base->next_expiry_recalc = false;
1874	base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);
1875
1876	return next;
1877}
1878
1879#ifdef CONFIG_NO_HZ_COMMON
1880/*
1881 * Check whether the next hrtimer event is before the next timer wheel
1882 * event:
1883 */
1884static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
1885{
1886	u64 nextevt = hrtimer_get_next_event();
1887
1888	/*
1889	 * If high resolution timers are enabled
1890	 * hrtimer_get_next_event() returns KTIME_MAX.
1891	 */
1892	if (expires <= nextevt)
1893		return expires;
1894
1895	/*
1896	 * If the next timer is already expired, return the tick base
1897	 * time so the tick is fired immediately.
1898	 */
1899	if (nextevt <= basem)
1900		return basem;
1901
1902	/*
1903	 * Round up to the next jiffie. High resolution timers are
1904	 * off, so the hrtimers are expired in the tick and we need to
1905	 * make sure that this tick really expires the timer to avoid
1906	 * a ping pong of the nohz stop code.
1907	 *
1908	 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
1909	 */
1910	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
1911}
1912
1913/**
1914 * get_next_timer_interrupt - return the time (clock mono) of the next timer
1915 * @basej:	base time jiffies
1916 * @basem:	base time clock monotonic
1917 *
1918 * Returns the tick aligned clock monotonic time of the next pending
1919 * timer or KTIME_MAX if no timer is pending.
1920 */
1921u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1922{
1923	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1924	u64 expires = KTIME_MAX;
1925	unsigned long nextevt;
1926
1927	/*
1928	 * Pretend that there is no timer pending if the cpu is offline.
1929	 * Possible pending timers will be migrated later to an active cpu.
1930	 */
1931	if (cpu_is_offline(smp_processor_id()))
1932		return expires;
1933
1934	raw_spin_lock(&base->lock);
1935	if (base->next_expiry_recalc)
1936		base->next_expiry = __next_timer_interrupt(base);
1937	nextevt = base->next_expiry;
1938
1939	/*
1940	 * We have a fresh next event. Check whether we can forward the
1941	 * base. We can only do that when @basej is past base->clk,
1942	 * otherwise we might rewind base->clk.
1943	 */
1944	if (time_after(basej, base->clk)) {
1945		if (time_after(nextevt, basej))
1946			base->clk = basej;
1947		else if (time_after(nextevt, base->clk))
1948			base->clk = nextevt;
1949	}
1950
1951	if (time_before_eq(nextevt, basej)) {
1952		expires = basem;
1953		base->is_idle = false;
1954	} else {
1955		if (base->timers_pending)
1956			expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
1957		/*
1958		 * If we expect to sleep more than a tick, mark the base idle.
1959		 * Also the tick is stopped so any added timer must forward
1960		 * the base clk itself to keep granularity small. This idle
1961		 * logic is only maintained for the BASE_STD base; deferrable
1962		 * timers may still see large granularity skew (by design).
1963		 */
1964		if ((expires - basem) > TICK_NSEC)
1965			base->is_idle = true;
1966	}
1967	raw_spin_unlock(&base->lock);
1968
1969	return cmp_next_hrtimer_event(basem, expires);
1970}
1971
1972/**
1973 * timer_clear_idle - Clear the idle state of the timer base
1974 *
1975 * Called with interrupts disabled
1976 */
1977void timer_clear_idle(void)
1978{
1979	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1980
1981	/*
1982	 * We do this unlocked. The worst outcome is a remote enqueue sending
1983	 * a pointless IPI, but taking the lock would just make the window for
1984	 * sending the IPI a few instructions smaller for the cost of taking
1985	 * the lock in the exit from idle path.
1986	 */
1987	base->is_idle = false;
1988}
1989#endif
1990
1991/**
1992 * __run_timers - run all expired timers (if any) on this CPU.
1993 * @base: the timer vector to be processed.
1994 */
1995static inline void __run_timers(struct timer_base *base)
1996{
1997	struct hlist_head heads[LVL_DEPTH];
1998	int levels;
1999
2000	if (time_before(jiffies, base->next_expiry))
2001		return;
2002
2003	timer_base_lock_expiry(base);
2004	raw_spin_lock_irq(&base->lock);
2005
2006	while (time_after_eq(jiffies, base->clk) &&
2007	       time_after_eq(jiffies, base->next_expiry)) {
2008		levels = collect_expired_timers(base, heads);
2009		/*
2010		 * The two possible reasons for not finding any expired
2011		 * timer at this clk are that all matching timers have been
2012		 * dequeued or no timer has been queued since
2013		 * base::next_expiry was set to base::clk +
2014		 * NEXT_TIMER_MAX_DELTA.
2015		 */
2016		WARN_ON_ONCE(!levels && !base->next_expiry_recalc
2017			     && base->timers_pending);
2018		base->clk++;
2019		base->next_expiry = __next_timer_interrupt(base);
2020
2021		while (levels--)
2022			expire_timers(base, heads + levels);
2023	}
2024	raw_spin_unlock_irq(&base->lock);
2025	timer_base_unlock_expiry(base);
2026}
2027
2028/*
2029 * This function runs expired timers in bottom half (softirq) context.
2030 */
2031static __latent_entropy void run_timer_softirq(struct softirq_action *h)
2032{
2033	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
2034
2035	__run_timers(base);
2036	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
2037		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
2038}
2039
2040/*
2041 * Called by the local, per-CPU timer interrupt on SMP.
2042 */
2043static void run_local_timers(void)
2044{
2045	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
2046
2047	hrtimer_run_queues();
2048	/* Raise the softirq only if required. */
2049	if (time_before(jiffies, base->next_expiry)) {
2050		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
2051			return;
2052		/* CPU is awake, so check the deferrable base. */
2053		base++;
2054		if (time_before(jiffies, base->next_expiry))
2055			return;
2056	}
2057	raise_softirq(TIMER_SOFTIRQ);
2058}
2059
2060/*
2061 * Called from the timer interrupt handler to charge one tick to the current
2062 * process.  user_tick is 1 if the tick is user time, 0 for system.
2063 */
2064void update_process_times(int user_tick)
2065{
2066	struct task_struct *p = current;
2067
2068	/* Note: this timer irq context must be accounted for as well. */
2069	account_process_tick(p, user_tick);
2070	run_local_timers();
2071	rcu_sched_clock_irq(user_tick);
2072#ifdef CONFIG_IRQ_WORK
2073	if (in_irq())
2074		irq_work_tick();
2075#endif
2076	scheduler_tick();
2077	if (IS_ENABLED(CONFIG_POSIX_TIMERS))
2078		run_posix_cpu_timers();
2079}
2080
2081/*
2082 * Since schedule_timeout()'s timer is defined on the stack, it must store
2083 * the target task on the stack as well.
2084 */
2085struct process_timer {
2086	struct timer_list timer;
2087	struct task_struct *task;
2088};
2089
2090static void process_timeout(struct timer_list *t)
2091{
2092	struct process_timer *timeout = from_timer(timeout, t, timer);
2093
2094	wake_up_process(timeout->task);
2095}
2096
2097/**
2098 * schedule_timeout - sleep until timeout
2099 * @timeout: timeout value in jiffies
2100 *
2101 * Make the current task sleep until @timeout jiffies have elapsed.
2102 * The function behavior depends on the current task state
2103 * (see also set_current_state() description):
2104 *
2105 * %TASK_RUNNING - the scheduler is called, but the task does not sleep
2106 * at all. That happens because sched_submit_work() does nothing for
2107 * tasks in %TASK_RUNNING state.
2108 *
2109 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
2110 * pass before the routine returns unless the current task is explicitly
2111 * woken up (e.g. by wake_up_process()).
2112 *
2113 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2114 * delivered to the current task or the current task is explicitly woken
2115 * up.
2116 *
2117 * The current task state is guaranteed to be %TASK_RUNNING when this
2118 * routine returns.
2119 *
2120 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
2121 * the CPU away without a bound on the timeout. In this case the return
2122 * value will be %MAX_SCHEDULE_TIMEOUT.
2123 *
2124 * Returns 0 when the timer has expired otherwise the remaining time in
2125 * jiffies will be returned. In all cases the return value is guaranteed
2126 * to be non-negative.
2127 */
2128signed long __sched schedule_timeout(signed long timeout)
2129{
2130	struct process_timer timer;
2131	unsigned long expire;
2132
2133	switch (timeout)
2134	{
2135	case MAX_SCHEDULE_TIMEOUT:
2136		/*
2137		 * These two special cases exist purely for the caller's
2138		 * convenience. Nothing more. We could treat one of the
2139		 * negative values as MAX_SCHEDULE_TIMEOUT, but returning a
2140		 * valid offset (>= 0) lets the caller do whatever it wants
2141		 * with the retval.
2142		 */
2143		schedule();
2144		goto out;
2145	default:
2146		/*
2147		 * Another bit of paranoia. Note that the retval will be
2148		 * 0, since no piece of the kernel is supposed to check
2149		 * for a negative retval of schedule_timeout() (it should
2150		 * never happen anyway). The printk() below simply tells
2151		 * you if something has gone wrong and where.
2152		 */
2153		if (timeout < 0) {
2154			printk(KERN_ERR "schedule_timeout: wrong timeout "
2155				"value %lx\n", timeout);
2156			dump_stack();
2157			__set_current_state(TASK_RUNNING);
2158			goto out;
2159		}
2160	}
2161
2162	expire = timeout + jiffies;
2163
2164	timer.task = current;
2165	timer_setup_on_stack(&timer.timer, process_timeout, 0);
2166	__mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
2167	schedule();
2168	del_timer_sync(&timer.timer);
2169
2170	/* Remove the timer from the object tracker */
2171	destroy_timer_on_stack(&timer.timer);
2172
2173	timeout = expire - jiffies;
2174
2175 out:
2176	return timeout < 0 ? 0 : timeout;
2177}
2178EXPORT_SYMBOL(schedule_timeout);
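/*
 * Usage sketch (hypothetical, not part of this file): sleep interruptibly
 * for up to one second while waiting for an event that is signalled by a
 * wake_up_process() elsewhere.  The task state must be set before calling
 * schedule_timeout(), otherwise the task remains runnable and does not
 * sleep at all.  'foo_dev' and 'event_arrived' are made-up names.
 */
static int foo_wait_for_event(struct foo_dev *foo)
{
	signed long remaining;

	set_current_state(TASK_INTERRUPTIBLE);
	remaining = schedule_timeout(msecs_to_jiffies(1000));

	if (signal_pending(current))
		return -ERESTARTSYS;	/* woken by a signal */
	if (!remaining && !READ_ONCE(foo->event_arrived))
		return -ETIMEDOUT;	/* full second elapsed, no event */
	return 0;
}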
2179
2180/*
2181 * We can use __set_current_state() here because schedule_timeout() calls
2182 * schedule() unconditionally.
2183 */
2184signed long __sched schedule_timeout_interruptible(signed long timeout)
2185{
2186	__set_current_state(TASK_INTERRUPTIBLE);
2187	return schedule_timeout(timeout);
2188}
2189EXPORT_SYMBOL(schedule_timeout_interruptible);
2190
2191signed long __sched schedule_timeout_killable(signed long timeout)
2192{
2193	__set_current_state(TASK_KILLABLE);
2194	return schedule_timeout(timeout);
2195}
2196EXPORT_SYMBOL(schedule_timeout_killable);
2197
2198signed long __sched schedule_timeout_uninterruptible(signed long timeout)
2199{
2200	__set_current_state(TASK_UNINTERRUPTIBLE);
2201	return schedule_timeout(timeout);
2202}
2203EXPORT_SYMBOL(schedule_timeout_uninterruptible);
2204
2205/*
2206 * Like schedule_timeout_uninterruptible(), except this task will not contribute
2207 * to load average.
2208 */
2209signed long __sched schedule_timeout_idle(signed long timeout)
2210{
2211	__set_current_state(TASK_IDLE);
2212	return schedule_timeout(timeout);
2213}
2214EXPORT_SYMBOL(schedule_timeout_idle);
2215
2216#ifdef CONFIG_HOTPLUG_CPU
2217static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
2218{
2219	struct timer_list *timer;
2220	int cpu = new_base->cpu;
2221
2222	while (!hlist_empty(head)) {
2223		timer = hlist_entry(head->first, struct timer_list, entry);
2224		detach_timer(timer, false);
2225		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
2226		internal_add_timer(new_base, timer);
2227	}
2228}
2229
2230int timers_prepare_cpu(unsigned int cpu)
2231{
2232	struct timer_base *base;
2233	int b;
2234
2235	for (b = 0; b < NR_BASES; b++) {
2236		base = per_cpu_ptr(&timer_bases[b], cpu);
2237		base->clk = jiffies;
2238		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
2239		base->next_expiry_recalc = false;
2240		base->timers_pending = false;
2241		base->is_idle = false;
2242	}
2243	return 0;
2244}
2245
2246int timers_dead_cpu(unsigned int cpu)
2247{
2248	struct timer_base *old_base;
2249	struct timer_base *new_base;
2250	int b, i;
2251
2252	for (b = 0; b < NR_BASES; b++) {
2253		old_base = per_cpu_ptr(&timer_bases[b], cpu);
2254		new_base = get_cpu_ptr(&timer_bases[b]);
2255		/*
2256		 * The caller is globally serialized and nobody else
2257		 * takes two locks at once, deadlock is not possible.
2258		 */
2259		raw_spin_lock_irq(&new_base->lock);
2260		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
2261
2262		/*
2263		 * The current CPUs base clock might be stale. Update it
2264		 * before moving the timers over.
2265		 */
2266		forward_timer_base(new_base);
2267
2268		WARN_ON_ONCE(old_base->running_timer);
2269		old_base->running_timer = NULL;
2270
2271		for (i = 0; i < WHEEL_SIZE; i++)
2272			migrate_timer_list(new_base, old_base->vectors + i);
2273
2274		raw_spin_unlock(&old_base->lock);
2275		raw_spin_unlock_irq(&new_base->lock);
2276		put_cpu_ptr(&timer_bases);
2277	}
2278	return 0;
2279}
2280
2281#endif /* CONFIG_HOTPLUG_CPU */
2282
2283static void __init init_timer_cpu(int cpu)
2284{
2285	struct timer_base *base;
2286	int i;
2287
2288	for (i = 0; i < NR_BASES; i++) {
2289		base = per_cpu_ptr(&timer_bases[i], cpu);
2290		base->cpu = cpu;
2291		raw_spin_lock_init(&base->lock);
2292		base->clk = jiffies;
2293		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
2294		timer_base_init_expiry_lock(base);
2295	}
2296}
2297
2298static void __init init_timer_cpus(void)
2299{
2300	int cpu;
2301
2302	for_each_possible_cpu(cpu)
2303		init_timer_cpu(cpu);
2304}
2305
2306void __init init_timers(void)
2307{
2308	init_timer_cpus();
2309	posix_cputimers_init_work();
2310	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
2311}
2312
2313/**
2314 * msleep - sleep safely even with waitqueue interruptions
2315 * @msecs: Time in milliseconds to sleep for
2316 */
2317void msleep(unsigned int msecs)
2318{
2319	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
2320
2321	while (timeout)
2322		timeout = schedule_timeout_uninterruptible(timeout);
2323}
2324
2325EXPORT_SYMBOL(msleep);
2326
2327/**
2328 * msleep_interruptible - sleep waiting for signals
2329 * @msecs: Time in milliseconds to sleep for
2330 */
2331unsigned long msleep_interruptible(unsigned int msecs)
2332{
2333	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
2334
2335	while (timeout && !signal_pending(current))
2336		timeout = schedule_timeout_interruptible(timeout);
2337	return jiffies_to_msecs(timeout);
2338}
2339
2340EXPORT_SYMBOL(msleep_interruptible);
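/*
 * Usage sketch (hypothetical, not part of this file): a polite delay inside
 * a long-running loop.  A non-zero return value means a signal interrupted
 * the sleep and reports roughly how many milliseconds were left.
 */
static int foo_throttle_step(void)
{
	if (msleep_interruptible(100))
		return -EINTR;	/* let the caller bail out promptly */
	return 0;
}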
2341
2342/**
2343 * usleep_range_state - Sleep for an approximate time in a given state
2344 * @min:	Minimum time in usecs to sleep
2345 * @max:	Maximum time in usecs to sleep
2346 * @state:	State the current task will be in while sleeping
2347 *
2348 * In non-atomic context where the exact wakeup time is flexible, use
2349 * usleep_range_state() instead of udelay().  The sleep improves responsiveness
2350 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
2351 * power usage by allowing hrtimers to take advantage of an already-
2352 * scheduled interrupt instead of scheduling a new one just for this sleep.
2353 */
2354void __sched usleep_range_state(unsigned long min, unsigned long max,
2355				unsigned int state)
2356{
2357	ktime_t exp = ktime_add_us(ktime_get(), min);
2358	u64 delta = (u64)(max - min) * NSEC_PER_USEC;
2359
2360	for (;;) {
2361		__set_current_state(state);
2362		/* Do not return before the requested sleep time has elapsed */
2363		if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
2364			break;
2365	}
2366}
2367EXPORT_SYMBOL(usleep_range_state);
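/*
 * Usage sketch (hypothetical register names, not part of this file): most
 * callers use the usleep_range() wrapper, which sleeps in TASK_UNINTERRUPTIBLE
 * state.  The min/max window lets the hrtimer code coalesce the wakeup with
 * an interrupt that is already scheduled.
 */
static void foo_reset_settle(struct foo_dev *foo)
{
	writel(FOO_RESET, foo->regs + FOO_CTRL);	/* FOO_* are made up */
	usleep_range(200, 300);				/* settle for 200-300 us */
}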
v4.6
 
   1/*
   2 *  linux/kernel/timer.c
   3 *
   4 *  Kernel internal timers
   5 *
   6 *  Copyright (C) 1991, 1992  Linus Torvalds
   7 *
   8 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
   9 *
  10 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
  11 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
  12 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
  13 *              serialize accesses to xtime/lost_ticks).
  14 *                              Copyright (C) 1998  Andrea Arcangeli
  15 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
  16 *  2002-05-31	Move sys_sysinfo here and make its locking sane, Robert Love
  17 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
  18 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
  19 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
  20 */
  21
  22#include <linux/kernel_stat.h>
  23#include <linux/export.h>
  24#include <linux/interrupt.h>
  25#include <linux/percpu.h>
  26#include <linux/init.h>
  27#include <linux/mm.h>
  28#include <linux/swap.h>
  29#include <linux/pid_namespace.h>
  30#include <linux/notifier.h>
  31#include <linux/thread_info.h>
  32#include <linux/time.h>
  33#include <linux/jiffies.h>
  34#include <linux/posix-timers.h>
  35#include <linux/cpu.h>
  36#include <linux/syscalls.h>
  37#include <linux/delay.h>
  38#include <linux/tick.h>
  39#include <linux/kallsyms.h>
  40#include <linux/irq_work.h>
  41#include <linux/sched.h>
  42#include <linux/sched/sysctl.h>
 
 
  43#include <linux/slab.h>
  44#include <linux/compat.h>
 
 
  45
  46#include <asm/uaccess.h>
  47#include <asm/unistd.h>
  48#include <asm/div64.h>
  49#include <asm/timex.h>
  50#include <asm/io.h>
  51
  52#include "tick-internal.h"
  53
  54#define CREATE_TRACE_POINTS
  55#include <trace/events/timer.h>
  56
  57__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
  58
  59EXPORT_SYMBOL(jiffies_64);
  60
  61/*
  62 * per-CPU timer vector definitions:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  63 */
  64#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
  65#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
  66#define TVN_SIZE (1 << TVN_BITS)
  67#define TVR_SIZE (1 << TVR_BITS)
  68#define TVN_MASK (TVN_SIZE - 1)
  69#define TVR_MASK (TVR_SIZE - 1)
  70#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
  71
  72struct tvec {
  73	struct hlist_head vec[TVN_SIZE];
  74};
 
 
 
 
 
 
  75
  76struct tvec_root {
  77	struct hlist_head vec[TVR_SIZE];
  78};
 
 
 
 
 
 
 
 
 
 
 
 
 
  79
  80struct tvec_base {
  81	spinlock_t lock;
  82	struct timer_list *running_timer;
  83	unsigned long timer_jiffies;
  84	unsigned long next_timer;
  85	unsigned long active_timers;
  86	unsigned long all_timers;
  87	int cpu;
  88	bool migration_enabled;
  89	bool nohz_active;
  90	struct tvec_root tv1;
  91	struct tvec tv2;
  92	struct tvec tv3;
  93	struct tvec tv4;
  94	struct tvec tv5;
  95} ____cacheline_aligned;
  96
 
  97
  98static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
 
  99
 100#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 101unsigned int sysctl_timer_migration = 1;
 102
 103void timers_update_migration(bool update_nohz)
 104{
 105	bool on = sysctl_timer_migration && tick_nohz_active;
 106	unsigned int cpu;
 107
 108	/* Avoid the loop, if nothing to update */
 109	if (this_cpu_read(tvec_bases.migration_enabled) == on)
 110		return;
 111
 112	for_each_possible_cpu(cpu) {
 113		per_cpu(tvec_bases.migration_enabled, cpu) = on;
 114		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
 115		if (!update_nohz)
 116			continue;
 117		per_cpu(tvec_bases.nohz_active, cpu) = true;
 118		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
 119	}
 120}
 121
 122int timer_migration_handler(struct ctl_table *table, int write,
 123			    void __user *buffer, size_t *lenp,
 124			    loff_t *ppos)
 125{
 126	static DEFINE_MUTEX(mutex);
 127	int ret;
 128
 129	mutex_lock(&mutex);
 130	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 131	if (!ret && write)
 132		timers_update_migration(false);
 133	mutex_unlock(&mutex);
 134	return ret;
 135}
 136
 137static inline struct tvec_base *get_target_base(struct tvec_base *base,
 138						int pinned)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 139{
 140	if (pinned || !base->migration_enabled)
 141		return this_cpu_ptr(&tvec_bases);
 142	return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
 
 143}
 144#else
 145static inline struct tvec_base *get_target_base(struct tvec_base *base,
 146						int pinned)
 
 
 
 
 147{
 148	return this_cpu_ptr(&tvec_bases);
 149}
 150#endif
 
 
 151
 152static unsigned long round_jiffies_common(unsigned long j, int cpu,
 153		bool force_up)
 154{
 155	int rem;
 156	unsigned long original = j;
 157
 158	/*
 159	 * We don't want all cpus firing their timers at once hitting the
 160	 * same lock or cachelines, so we skew each extra cpu with an extra
 161	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
 162	 * already did this.
 163	 * The skew is done by adding 3*cpunr, then round, then subtract this
 164	 * extra offset again.
 165	 */
 166	j += cpu * 3;
 167
 168	rem = j % HZ;
 169
 170	/*
 171	 * If the target jiffie is just after a whole second (which can happen
 172	 * due to delays of the timer irq, long irq off times etc etc) then
 173	 * we should round down to the whole second, not up. Use 1/4th second
 174	 * as cutoff for this rounding as an extreme upper bound for this.
 175	 * But never round down if @force_up is set.
 176	 */
 177	if (rem < HZ/4 && !force_up) /* round down */
 178		j = j - rem;
 179	else /* round up */
 180		j = j - rem + HZ;
 181
 182	/* now that we have rounded, subtract the extra skew again */
 183	j -= cpu * 3;
 184
 185	/*
 186	 * Make sure j is still in the future. Otherwise return the
 187	 * unmodified value.
 188	 */
 189	return time_is_after_jiffies(j) ? j : original;
 190}
 191
 192/**
 193 * __round_jiffies - function to round jiffies to a full second
 194 * @j: the time in (absolute) jiffies that should be rounded
 195 * @cpu: the processor number on which the timeout will happen
 196 *
 197 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 198 * up or down to (approximately) full seconds. This is useful for timers
 199 * for which the exact time they fire does not matter too much, as long as
 200 * they fire approximately every X seconds.
 201 *
 202 * By rounding these timers to whole seconds, all such timers will fire
 203 * at the same time, rather than at various times spread out. The goal
 204 * of this is to have the CPU wake up less, which saves power.
 205 *
 206 * The exact rounding is skewed for each processor to avoid all
 207 * processors firing at the exact same time, which could lead
 208 * to lock contention or spurious cache line bouncing.
 209 *
 210 * The return value is the rounded version of the @j parameter.
 211 */
 212unsigned long __round_jiffies(unsigned long j, int cpu)
 213{
 214	return round_jiffies_common(j, cpu, false);
 215}
 216EXPORT_SYMBOL_GPL(__round_jiffies);
 217
 218/**
 219 * __round_jiffies_relative - function to round jiffies to a full second
 220 * @j: the time in (relative) jiffies that should be rounded
 221 * @cpu: the processor number on which the timeout will happen
 222 *
 223 * __round_jiffies_relative() rounds a time delta  in the future (in jiffies)
 224 * up or down to (approximately) full seconds. This is useful for timers
 225 * for which the exact time they fire does not matter too much, as long as
 226 * they fire approximately every X seconds.
 227 *
 228 * By rounding these timers to whole seconds, all such timers will fire
 229 * at the same time, rather than at various times spread out. The goal
 230 * of this is to have the CPU wake up less, which saves power.
 231 *
 232 * The exact rounding is skewed for each processor to avoid all
 233 * processors firing at the exact same time, which could lead
 234 * to lock contention or spurious cache line bouncing.
 235 *
 236 * The return value is the rounded version of the @j parameter.
 237 */
 238unsigned long __round_jiffies_relative(unsigned long j, int cpu)
 239{
 240	unsigned long j0 = jiffies;
 241
 242	/* Use j0 because jiffies might change while we run */
 243	return round_jiffies_common(j + j0, cpu, false) - j0;
 244}
 245EXPORT_SYMBOL_GPL(__round_jiffies_relative);
 246
 247/**
 248 * round_jiffies - function to round jiffies to a full second
 249 * @j: the time in (absolute) jiffies that should be rounded
 250 *
 251 * round_jiffies() rounds an absolute time in the future (in jiffies)
 252 * up or down to (approximately) full seconds. This is useful for timers
 253 * for which the exact time they fire does not matter too much, as long as
 254 * they fire approximately every X seconds.
 255 *
 256 * By rounding these timers to whole seconds, all such timers will fire
 257 * at the same time, rather than at various times spread out. The goal
 258 * of this is to have the CPU wake up less, which saves power.
 259 *
 260 * The return value is the rounded version of the @j parameter.
 261 */
 262unsigned long round_jiffies(unsigned long j)
 263{
 264	return round_jiffies_common(j, raw_smp_processor_id(), false);
 265}
 266EXPORT_SYMBOL_GPL(round_jiffies);
 267
 268/**
 269 * round_jiffies_relative - function to round jiffies to a full second
 270 * @j: the time in (relative) jiffies that should be rounded
 271 *
 272 * round_jiffies_relative() rounds a time delta  in the future (in jiffies)
 273 * up or down to (approximately) full seconds. This is useful for timers
 274 * for which the exact time they fire does not matter too much, as long as
 275 * they fire approximately every X seconds.
 276 *
 277 * By rounding these timers to whole seconds, all such timers will fire
 278 * at the same time, rather than at various times spread out. The goal
 279 * of this is to have the CPU wake up less, which saves power.
 280 *
 281 * The return value is the rounded version of the @j parameter.
 282 */
 283unsigned long round_jiffies_relative(unsigned long j)
 284{
 285	return __round_jiffies_relative(j, raw_smp_processor_id());
 286}
 287EXPORT_SYMBOL_GPL(round_jiffies_relative);
 288
 289/**
 290 * __round_jiffies_up - function to round jiffies up to a full second
 291 * @j: the time in (absolute) jiffies that should be rounded
 292 * @cpu: the processor number on which the timeout will happen
 293 *
 294 * This is the same as __round_jiffies() except that it will never
 295 * round down.  This is useful for timeouts for which the exact time
 296 * of firing does not matter too much, as long as they don't fire too
 297 * early.
 298 */
 299unsigned long __round_jiffies_up(unsigned long j, int cpu)
 300{
 301	return round_jiffies_common(j, cpu, true);
 302}
 303EXPORT_SYMBOL_GPL(__round_jiffies_up);
 304
 305/**
 306 * __round_jiffies_up_relative - function to round jiffies up to a full second
 307 * @j: the time in (relative) jiffies that should be rounded
 308 * @cpu: the processor number on which the timeout will happen
 309 *
 310 * This is the same as __round_jiffies_relative() except that it will never
 311 * round down.  This is useful for timeouts for which the exact time
 312 * of firing does not matter too much, as long as they don't fire too
 313 * early.
 314 */
 315unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
 316{
 317	unsigned long j0 = jiffies;
 318
 319	/* Use j0 because jiffies might change while we run */
 320	return round_jiffies_common(j + j0, cpu, true) - j0;
 321}
 322EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
 323
 324/**
 325 * round_jiffies_up - function to round jiffies up to a full second
 326 * @j: the time in (absolute) jiffies that should be rounded
 327 *
 328 * This is the same as round_jiffies() except that it will never
 329 * round down.  This is useful for timeouts for which the exact time
 330 * of firing does not matter too much, as long as they don't fire too
 331 * early.
 332 */
 333unsigned long round_jiffies_up(unsigned long j)
 334{
 335	return round_jiffies_common(j, raw_smp_processor_id(), true);
 336}
 337EXPORT_SYMBOL_GPL(round_jiffies_up);
 338
 339/**
 340 * round_jiffies_up_relative - function to round jiffies up to a full second
 341 * @j: the time in (relative) jiffies that should be rounded
 342 *
 343 * This is the same as round_jiffies_relative() except that it will never
 344 * round down.  This is useful for timeouts for which the exact time
 345 * of firing does not matter too much, as long as they don't fire too
 346 * early.
 347 */
 348unsigned long round_jiffies_up_relative(unsigned long j)
 349{
 350	return __round_jiffies_up_relative(j, raw_smp_processor_id());
 351}
 352EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
 353
 354/**
 355 * set_timer_slack - set the allowed slack for a timer
 356 * @timer: the timer to be modified
 357 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 358 *
 359 * Set the amount of time, in jiffies, that a certain timer has
 360 * in terms of slack. By setting this value, the timer subsystem
 361 * will schedule the actual timer somewhere between
 362 * the time mod_timer() asks for, and that time plus the slack.
 363 *
 364 * By setting the slack to -1, a percentage of the delay is used
 365 * instead.
 366 */
 367void set_timer_slack(struct timer_list *timer, int slack_hz)
 368{
 369	timer->slack = slack_hz;
 
 370}
 371EXPORT_SYMBOL_GPL(set_timer_slack);
 372
 373static void
 374__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 
 
 
 
 375{
 376	unsigned long expires = timer->expires;
 377	unsigned long idx = expires - base->timer_jiffies;
 378	struct hlist_head *vec;
 379
 380	if (idx < TVR_SIZE) {
 381		int i = expires & TVR_MASK;
 382		vec = base->tv1.vec + i;
 383	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
 384		int i = (expires >> TVR_BITS) & TVN_MASK;
 385		vec = base->tv2.vec + i;
 386	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
 387		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
 388		vec = base->tv3.vec + i;
 389	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
 390		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
 391		vec = base->tv4.vec + i;
 392	} else if ((signed long) idx < 0) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 393		/*
 394		 * Can happen if you add a timer with expires == jiffies,
 395		 * or you set a timer to go off in the past
 396		 */
 397		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
 398	} else {
 399		int i;
 400		/* If the timeout is larger than MAX_TVAL (on 64-bit
 401		 * architectures or with CONFIG_BASE_SMALL=1) then we
 402		 * use the maximum timeout.
 403		 */
 404		if (idx > MAX_TVAL) {
 405			idx = MAX_TVAL;
 406			expires = idx + base->timer_jiffies;
 407		}
 408		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
 409		vec = base->tv5.vec + i;
 410	}
 411
 412	hlist_add_head(&timer->entry, vec);
 413}
 414
 415static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 
 416{
 417	/* Advance base->jiffies, if the base is empty */
 418	if (!base->all_timers++)
 419		base->timer_jiffies = jiffies;
 420
 421	__internal_add_timer(base, timer);
 422	/*
 423	 * Update base->active_timers and base->next_timer
 
 424	 */
 425	if (!(timer->flags & TIMER_DEFERRABLE)) {
 426		if (!base->active_timers++ ||
 427		    time_before(timer->expires, base->next_timer))
 428			base->next_timer = timer->expires;
 429	}
 430
 431	/*
 432	 * Check whether the other CPU is in dynticks mode and needs
 433	 * to be triggered to reevaluate the timer wheel.
 434	 * We are protected against the other CPU fiddling
 435	 * with the timer by holding the timer base lock. This also
 436	 * makes sure that a CPU on the way to stop its tick can not
 437	 * evaluate the timer wheel.
 438	 *
 439	 * Spare the IPI for deferrable timers on idle targets though.
 440	 * The next busy ticks will take care of it. Except full dynticks
 441	 * require special care against races with idle_cpu(), lets deal
 442	 * with that later.
 443	 */
 444	if (base->nohz_active) {
 445		if (!(timer->flags & TIMER_DEFERRABLE) ||
 446		    tick_nohz_full_cpu(base->cpu))
 447			wake_up_nohz_cpu(base->cpu);
 448	}
 449}
 450
 451#ifdef CONFIG_TIMER_STATS
 452void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
 
 
 
 
 
 453{
 454	if (timer->start_site)
 455		return;
 456
 457	timer->start_site = addr;
 458	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
 459	timer->start_pid = current->pid;
 460}
 461
 462static void timer_stats_account_timer(struct timer_list *timer)
 463{
 464	void *site;
 465
 466	/*
 467	 * start_site can be concurrently reset by
 468	 * timer_stats_timer_clear_start_info()
 
 469	 */
 470	site = READ_ONCE(timer->start_site);
 471	if (likely(!site))
 472		return;
 
 
 
 
 
 
 
 
 
 
 
 
 
 473
 474	timer_stats_update_stats(timer, timer->start_pid, site,
 475				 timer->function, timer->start_comm,
 476				 timer->flags);
 477}
 478
 479#else
 480static void timer_stats_account_timer(struct timer_list *timer) {}
 481#endif
 
 
 
 
 
 482
 483#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
 
 
 
 
 
 484
 485static struct debug_obj_descr timer_debug_descr;
 
 
 
 
 
 486
 487static void *timer_debug_hint(void *addr)
 488{
 489	return ((struct timer_list *) addr)->function;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 490}
 491
 492/*
 493 * fixup_init is called when:
 494 * - an active object is initialized
 495 */
 496static int timer_fixup_init(void *addr, enum debug_obj_state state)
 497{
 498	struct timer_list *timer = addr;
 499
 500	switch (state) {
 501	case ODEBUG_STATE_ACTIVE:
 502		del_timer_sync(timer);
 503		debug_object_init(timer, &timer_debug_descr);
 504		return 1;
 505	default:
 506		return 0;
 507	}
 508}
 509
 510/* Stub timer callback for improperly used timers. */
 511static void stub_timer(unsigned long data)
 512{
 513	WARN_ON(1);
 514}
 515
 516/*
 517 * fixup_activate is called when:
 518 * - an active object is activated
 519 * - an unknown object is activated (might be a statically initialized object)
 520 */
 521static int timer_fixup_activate(void *addr, enum debug_obj_state state)
 522{
 523	struct timer_list *timer = addr;
 524
 525	switch (state) {
 526
 527	case ODEBUG_STATE_NOTAVAILABLE:
 528		/*
 529		 * This is not really a fixup. The timer was
 530		 * statically initialized. We just make sure that it
 531		 * is tracked in the object tracker.
 532		 */
 533		if (timer->entry.pprev == NULL &&
 534		    timer->entry.next == TIMER_ENTRY_STATIC) {
 535			debug_object_init(timer, &timer_debug_descr);
 536			debug_object_activate(timer, &timer_debug_descr);
 537			return 0;
 538		} else {
 539			setup_timer(timer, stub_timer, 0);
 540			return 1;
 541		}
 542		return 0;
 543
 544	case ODEBUG_STATE_ACTIVE:
 545		WARN_ON(1);
 546
 547	default:
 548		return 0;
 549	}
 550}
 551
 552/*
 553 * fixup_free is called when:
 554 * - an active object is freed
 555 */
 556static int timer_fixup_free(void *addr, enum debug_obj_state state)
 557{
 558	struct timer_list *timer = addr;
 559
 560	switch (state) {
 561	case ODEBUG_STATE_ACTIVE:
 562		del_timer_sync(timer);
 563		debug_object_free(timer, &timer_debug_descr);
 564		return 1;
 565	default:
 566		return 0;
 567	}
 568}
 569
 570/*
 571 * fixup_assert_init is called when:
 572 * - an untracked/uninit-ed object is found
 573 */
 574static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
 575{
 576	struct timer_list *timer = addr;
 577
 578	switch (state) {
 579	case ODEBUG_STATE_NOTAVAILABLE:
 580		if (timer->entry.next == TIMER_ENTRY_STATIC) {
 581			/*
 582			 * This is not really a fixup. The timer was
 583			 * statically initialized. We just make sure that it
 584			 * is tracked in the object tracker.
 585			 */
 586			debug_object_init(timer, &timer_debug_descr);
 587			return 0;
 588		} else {
 589			setup_timer(timer, stub_timer, 0);
 590			return 1;
 591		}
 592	default:
 593		return 0;
 594	}
 595}
 596
 597static struct debug_obj_descr timer_debug_descr = {
 598	.name			= "timer_list",
 599	.debug_hint		= timer_debug_hint,
 
 600	.fixup_init		= timer_fixup_init,
 601	.fixup_activate		= timer_fixup_activate,
 602	.fixup_free		= timer_fixup_free,
 603	.fixup_assert_init	= timer_fixup_assert_init,
 604};
 605
 606static inline void debug_timer_init(struct timer_list *timer)
 607{
 608	debug_object_init(timer, &timer_debug_descr);
 609}
 610
 611static inline void debug_timer_activate(struct timer_list *timer)
 612{
 613	debug_object_activate(timer, &timer_debug_descr);
 614}
 615
 616static inline void debug_timer_deactivate(struct timer_list *timer)
 617{
 618	debug_object_deactivate(timer, &timer_debug_descr);
 619}
 620
 621static inline void debug_timer_free(struct timer_list *timer)
 622{
 623	debug_object_free(timer, &timer_debug_descr);
 624}
 625
 626static inline void debug_timer_assert_init(struct timer_list *timer)
 627{
 628	debug_object_assert_init(timer, &timer_debug_descr);
 629}
 630
 631static void do_init_timer(struct timer_list *timer, unsigned int flags,
 
 
 632			  const char *name, struct lock_class_key *key);
 633
 634void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
 
 
 635			     const char *name, struct lock_class_key *key)
 636{
 637	debug_object_init_on_stack(timer, &timer_debug_descr);
 638	do_init_timer(timer, flags, name, key);
 639}
 640EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
 641
 642void destroy_timer_on_stack(struct timer_list *timer)
 643{
 644	debug_object_free(timer, &timer_debug_descr);
 645}
 646EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
 647
 648#else
 649static inline void debug_timer_init(struct timer_list *timer) { }
 650static inline void debug_timer_activate(struct timer_list *timer) { }
 651static inline void debug_timer_deactivate(struct timer_list *timer) { }
 652static inline void debug_timer_assert_init(struct timer_list *timer) { }
 653#endif
 654
 655static inline void debug_init(struct timer_list *timer)
 656{
 657	debug_timer_init(timer);
 658	trace_timer_init(timer);
 659}
 660
 661static inline void
 662debug_activate(struct timer_list *timer, unsigned long expires)
 663{
 664	debug_timer_activate(timer);
 665	trace_timer_start(timer, expires, timer->flags);
 666}
 667
 668static inline void debug_deactivate(struct timer_list *timer)
 669{
 670	debug_timer_deactivate(timer);
 671	trace_timer_cancel(timer);
 672}
 673
 674static inline void debug_assert_init(struct timer_list *timer)
 675{
 676	debug_timer_assert_init(timer);
 677}
 678
 679static void do_init_timer(struct timer_list *timer, unsigned int flags,
 
 
 680			  const char *name, struct lock_class_key *key)
 681{
 682	timer->entry.pprev = NULL;
 
 
 
 683	timer->flags = flags | raw_smp_processor_id();
 684	timer->slack = -1;
 685#ifdef CONFIG_TIMER_STATS
 686	timer->start_site = NULL;
 687	timer->start_pid = -1;
 688	memset(timer->start_comm, 0, TASK_COMM_LEN);
 689#endif
 690	lockdep_init_map(&timer->lockdep_map, name, key, 0);
 691}
 692
 693/**
 694 * init_timer_key - initialize a timer
 695 * @timer: the timer to be initialized
 
 696 * @flags: timer flags
 697 * @name: name of the timer
 698 * @key: lockdep class key of the fake lock used for tracking timer
 699 *       sync lock dependencies
 700 *
 701 * init_timer_key() must be done to a timer prior calling *any* of the
 702 * other timer functions.
 703 */
 704void init_timer_key(struct timer_list *timer, unsigned int flags,
 
 705		    const char *name, struct lock_class_key *key)
 706{
 707	debug_init(timer);
 708	do_init_timer(timer, flags, name, key);
 709}
 710EXPORT_SYMBOL(init_timer_key);
 711
 712static inline void detach_timer(struct timer_list *timer, bool clear_pending)
 713{
 714	struct hlist_node *entry = &timer->entry;
 715
 716	debug_deactivate(timer);
 717
 718	__hlist_del(entry);
 719	if (clear_pending)
 720		entry->pprev = NULL;
 721	entry->next = LIST_POISON2;
 722}
 723
 724static inline void
 725detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
 726{
 727	detach_timer(timer, true);
 728	if (!(timer->flags & TIMER_DEFERRABLE))
 729		base->active_timers--;
 730	base->all_timers--;
 731}
 732
 733static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
 734			     bool clear_pending)
 735{
 736	if (!timer_pending(timer))
 737		return 0;
 738
 
 
 
 
 
 739	detach_timer(timer, clear_pending);
 740	if (!(timer->flags & TIMER_DEFERRABLE)) {
 741		base->active_timers--;
 742		if (timer->expires == base->next_timer)
 743			base->next_timer = base->timer_jiffies;
 744	}
 745	/* If this was the last timer, advance base->jiffies */
 746	if (!--base->all_timers)
 747		base->timer_jiffies = jiffies;
 748	return 1;
 749}
 750
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 751/*
 752 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 753 * means that all timers which are tied to this base via timer->base are
 754 * locked, and the base itself is locked too.
 755 *
 756 * So __run_timers/migrate_timers can safely modify all timers which could
 757 * be found on ->tvX lists.
 758 *
 759 * When the timer's base is locked and removed from the list, the
 760 * TIMER_MIGRATING flag is set, FIXME
 761 */
 762static struct tvec_base *lock_timer_base(struct timer_list *timer,
 763					unsigned long *flags)
 764	__acquires(timer->base->lock)
 765{
 766	for (;;) {
 767		u32 tf = timer->flags;
 768		struct tvec_base *base;
 
 
 
 
 
 
 
 769
 770		if (!(tf & TIMER_MIGRATING)) {
 771			base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
 772			spin_lock_irqsave(&base->lock, *flags);
 773			if (timer->flags == tf)
 774				return base;
 775			spin_unlock_irqrestore(&base->lock, *flags);
 776		}
 777		cpu_relax();
 778	}
 779}
 780
 
 
 
 
 781static inline int
 782__mod_timer(struct timer_list *timer, unsigned long expires,
 783	    bool pending_only, int pinned)
 784{
 785	struct tvec_base *base, *new_base;
 786	unsigned long flags;
 
 787	int ret = 0;
 788
 789	timer_stats_timer_set_start_info(timer);
 790	BUG_ON(!timer->function);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 791
 792	base = lock_timer_base(timer, &flags);
 
 793
 794	ret = detach_if_pending(timer, base, false);
 795	if (!ret && pending_only)
 796		goto out_unlock;
 797
 798	debug_activate(timer, expires);
 799
 800	new_base = get_target_base(base, pinned);
 801
 802	if (base != new_base) {
 803		/*
 804		 * We are trying to schedule the timer on the local CPU.
 805		 * However we can't change timer's base while it is running,
 806		 * otherwise del_timer_sync() can't detect that the timer's
 807		 * handler yet has not finished. This also guarantees that
 808		 * the timer is serialized wrt itself.
 809		 */
 810		if (likely(base->running_timer != timer)) {
 811			/* See the comment in lock_timer_base() */
 812			timer->flags |= TIMER_MIGRATING;
 813
 814			spin_unlock(&base->lock);
 815			base = new_base;
 816			spin_lock(&base->lock);
 817			WRITE_ONCE(timer->flags,
 818				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
 
 819		}
 820	}
 821
 
 
 822	timer->expires = expires;
 823	internal_add_timer(base, timer);
 
 
 
 
 
 
 
 
 
 824
 825out_unlock:
 826	spin_unlock_irqrestore(&base->lock, flags);
 827
 828	return ret;
 829}
 830
 831/**
 832 * mod_timer_pending - modify a pending timer's timeout
 833 * @timer: the pending timer to be modified
 834 * @expires: new timeout in jiffies
 835 *
 836 * mod_timer_pending() is the same for pending timers as mod_timer(),
 837 * but will not re-activate and modify already deleted timers.
 838 *
 839 * It is useful for unserialized use of timers.
 
 
 
 
 
 
 840 */
 841int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 842{
 843	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
 844}
 845EXPORT_SYMBOL(mod_timer_pending);
 846
 847/*
 848 * Decide where to put the timer while taking the slack into account
 849 *
 850 * Algorithm:
 851 *   1) calculate the maximum (absolute) time
 852 *   2) calculate the highest bit where the expires and new max are different
 853 *   3) use this bit to make a mask
 854 *   4) use the bitmask to round down the maximum time, so that all last
 855 *      bits are zeros
 856 */
 857static inline
 858unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
 859{
 860	unsigned long expires_limit, mask;
 861	int bit;
 862
 863	if (timer->slack >= 0) {
 864		expires_limit = expires + timer->slack;
 865	} else {
 866		long delta = expires - jiffies;
 867
 868		if (delta < 256)
 869			return expires;
 870
 871		expires_limit = expires + delta / 256;
 872	}
 873	mask = expires ^ expires_limit;
 874	if (mask == 0)
 875		return expires;
 876
 877	bit = __fls(mask);
 878
 879	mask = (1UL << bit) - 1;
 880
 881	expires_limit = expires_limit & ~(mask);
 882
 883	return expires_limit;
 884}
 885
 886/**
 887 * mod_timer - modify a timer's timeout
 888 * @timer: the timer to be modified
 889 * @expires: new timeout in jiffies
 890 *
 891 * mod_timer() is a more efficient way to update the expire field of an
 892 * active timer (if the timer is inactive it will be activated)
 893 *
 894 * mod_timer(timer, expires) is equivalent to:
 895 *
 896 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 897 *
 
 
 
 
 898 * Note that if there are multiple unserialized concurrent users of the
 899 * same timer, then mod_timer() is the only safe way to modify the timeout,
 900 * since add_timer() cannot modify an already running timer.
 901 *
 902 * The function returns whether it has modified a pending timer or not.
 903 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 904 * active timer returns 1.)
 
 
 
 
 
 
 905 */
 906int mod_timer(struct timer_list *timer, unsigned long expires)
 907{
 908	expires = apply_slack(timer, expires);
 909
 910	/*
 911	 * This is a common optimization triggered by the
 912	 * networking code - if the timer is re-modified
 913	 * to be the same thing then just return:
 914	 */
 915	if (timer_pending(timer) && timer->expires == expires)
 916		return 1;
 917
 918	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
 919}
 920EXPORT_SYMBOL(mod_timer);
 921
 922/**
 923 * mod_timer_pinned - modify a timer's timeout
 924 * @timer: the timer to be modified
 925 * @expires: new timeout in jiffies
 926 *
 927 * mod_timer_pinned() is a way to update the expire field of an
 928 * active timer (if the timer is inactive it will be activated)
 929 * and to ensure that the timer is scheduled on the current CPU.
 930 *
 931 * Note that this does not prevent the timer from being migrated
 932 * when the current CPU goes offline.  If this is a problem for
 933 * you, use CPU-hotplug notifiers to handle it correctly, for
 934 * example, cancelling the timer when the corresponding CPU goes
 935 * offline.
 936 *
 937 * mod_timer_pinned(timer, expires) is equivalent to:
 
 938 *
 939 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 
 
 
 
 
 
 940 */
 941int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
 942{
 943	if (timer->expires == expires && timer_pending(timer))
 944		return 1;
 945
 946	return __mod_timer(timer, expires, false, TIMER_PINNED);
 947}
 948EXPORT_SYMBOL(mod_timer_pinned);
 949
 950/**
 951 * add_timer - start a timer
 952 * @timer: the timer to be added
 
 
 
 
 
 
 
 953 *
 954 * The kernel will do a ->function(->data) callback from the
 955 * timer interrupt at the ->expires point in the future. The
 956 * current time is 'jiffies'.
 957 *
 958 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 959 * fields must be set prior calling this function.
 960 *
 961 * Timers with an ->expires field in the past will be executed in the next
 962 * timer tick.
 963 */
 964void add_timer(struct timer_list *timer)
 965{
 966	BUG_ON(timer_pending(timer));
 967	mod_timer(timer, timer->expires);
 968}
 969EXPORT_SYMBOL(add_timer);
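/*
 * Usage sketch (illustrative; the names are hypothetical): add_timer()
 * expects ->expires, ->function and, if the handler uses it, ->data to be
 * filled in before the call:
 *
 *	init_timer(&dev->timer);
 *	dev->timer.function = my_timeout;
 *	dev->timer.data     = (unsigned long)dev;
 *	dev->timer.expires  = jiffies + HZ;
 *	add_timer(&dev->timer);
 *
 * For an already pending timer use mod_timer() instead; add_timer() on a
 * pending timer trips the BUG_ON() above.
 */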
 970
 971/**
 972 * add_timer_on - start a timer on a particular CPU
 973 * @timer: the timer to be added
 974 * @cpu: the CPU to start it on
 975 *
 976 * This is not very scalable on SMP. Double adds are not possible.
 977 */
 978void add_timer_on(struct timer_list *timer, int cpu)
 979{
 980	struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
 981	struct tvec_base *base;
 982	unsigned long flags;
 983
 984	timer_stats_timer_set_start_info(timer);
 985	BUG_ON(timer_pending(timer) || !timer->function);
 986
 987	/*
 988	 * If @timer was on a different CPU, it should be migrated with the
 989	 * old base locked to prevent other operations proceeding with the
 990	 * wrong base locked.  See lock_timer_base().
 991	 */
 992	base = lock_timer_base(timer, &flags);
 993	if (base != new_base) {
 994		timer->flags |= TIMER_MIGRATING;
 995
 996		spin_unlock(&base->lock);
 997		base = new_base;
 998		spin_lock(&base->lock);
 999		WRITE_ONCE(timer->flags,
1000			   (timer->flags & ~TIMER_BASEMASK) | cpu);
1001	}
1002
1003	debug_activate(timer, timer->expires);
1004	internal_add_timer(base, timer);
1005	spin_unlock_irqrestore(&base->lock, flags);
1006}
1007EXPORT_SYMBOL_GPL(add_timer_on);
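/*
 * Usage sketch (illustrative; per_cpu_poll and poll_fn are hypothetical,
 * with per_cpu_poll assumed to be a DEFINE_PER_CPU(struct timer_list, ...)
 * variable): starting one instance of a timer on each online CPU.
 *
 *	int cpu;
 *
 *	for_each_online_cpu(cpu) {
 *		struct timer_list *t = &per_cpu(per_cpu_poll, cpu);
 *
 *		setup_timer(t, poll_fn, (unsigned long)cpu);
 *		t->expires = jiffies + 1;
 *		add_timer_on(t, cpu);
 *	}
 *
 * As noted above, the timer must not already be pending, and the caller is
 * responsible for cleaning up when @cpu goes offline.
 */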
1008
1009/**
1010 * del_timer - deactivate a timer.
1011 * @timer: the timer to be deactivated
1012 *
1013 * del_timer() deactivates a timer - this works on both active and inactive
1014 * timers.
1015 *
1016 * The function returns whether it has deactivated a pending timer or not.
1017 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
1018 * active timer returns 1.)
1019 */
1020int del_timer(struct timer_list *timer)
1021{
1022	struct tvec_base *base;
1023	unsigned long flags;
1024	int ret = 0;
1025
1026	debug_assert_init(timer);
1027
1028	timer_stats_timer_clear_start_info(timer);
1029	if (timer_pending(timer)) {
1030		base = lock_timer_base(timer, &flags);
1031		ret = detach_if_pending(timer, base, true);
1032		spin_unlock_irqrestore(&base->lock, flags);
1033	}
1034
1035	return ret;
1036}
1037EXPORT_SYMBOL(del_timer);
1038
1039/**
1040 * try_to_del_timer_sync - Try to deactivate a timer
1041 * @timer: timer to deactivate
1042 *
1043 * This function tries to deactivate a timer. Upon successful (ret >= 0)
1044 * exit the timer is not queued and the handler is not running on any CPU.
1045 */
1046int try_to_del_timer_sync(struct timer_list *timer)
1047{
1048	struct tvec_base *base;
1049	unsigned long flags;
1050	int ret = -1;
1051
1052	debug_assert_init(timer);
1053
1054	base = lock_timer_base(timer, &flags);
1055
1056	if (base->running_timer != timer) {
1057		timer_stats_timer_clear_start_info(timer);
1058		ret = detach_if_pending(timer, base, true);
1059	}
1060	spin_unlock_irqrestore(&base->lock, flags);
1061
1062	return ret;
1063}
1064EXPORT_SYMBOL(try_to_del_timer_sync);
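/*
 * Usage sketch (illustrative; dev->lock and dev->timer are hypothetical):
 * try_to_del_timer_sync() lets a caller that holds a lock also taken by
 * the handler avoid the deadlock del_timer_sync() would cause, by dropping
 * the lock between attempts:
 *
 *	while (try_to_del_timer_sync(&dev->timer) < 0) {
 *		spin_unlock_irq(&dev->lock);
 *		cpu_relax();
 *		spin_lock_irq(&dev->lock);
 *	}
 *
 * A negative return means the handler is running right now; a return of
 * >= 0 means the timer is neither queued nor running anywhere.
 */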
1065
1066#ifdef CONFIG_SMP
1067/**
1068 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1069 * @timer: the timer to be deactivated
1070 *
1071 * This function only differs from del_timer() on SMP: besides deactivating
1072 * the timer it also makes sure the handler has finished executing on other
1073 * CPUs.
1074 *
1075 * Synchronization rules: Callers must prevent restarting of the timer,
1076 * otherwise this function is meaningless. It must not be called from
1077 * interrupt contexts unless the timer is an irqsafe one. The caller must
1078 * not hold locks which would prevent completion of the timer's
1079 * handler. The timer's handler must not call add_timer_on(). Upon exit the
1080 * timer is not queued and the handler is not running on any CPU.
1081 *
1082 * Note: For !irqsafe timers, you must not hold locks that are held in
1083 *   interrupt context while calling this function, even if the lock has
1084 *   nothing to do with the timer in question. Here's why:
1085 *
1086 *    CPU0                             CPU1
1087 *    ----                             ----
1088 *                                   <SOFTIRQ>
1089 *                                   call_timer_fn();
1090 *                                     base->running_timer = mytimer;
1091 *  spin_lock_irq(somelock);
1092 *                                     <IRQ>
1093 *                                        spin_lock(somelock);
1094 *  del_timer_sync(mytimer);
1095 *   while (base->running_timer == mytimer);
1096 *
1097 * Now del_timer_sync() will never return and never release somelock.
1098 * The interrupt on the other CPU is waiting to grab somelock but
1099 * it has interrupted the softirq that CPU0 is waiting to finish.
1100 *
1101 * The function returns whether it has deactivated a pending timer or not.
1102 */
1103int del_timer_sync(struct timer_list *timer)
1104{
1105#ifdef CONFIG_LOCKDEP
1106	unsigned long flags;
1107
1108	/*
1109	 * If lockdep gives a backtrace here, please reference
1110	 * the synchronization rules above.
1111	 */
1112	local_irq_save(flags);
1113	lock_map_acquire(&timer->lockdep_map);
1114	lock_map_release(&timer->lockdep_map);
1115	local_irq_restore(flags);
1116#endif
1117	/*
1118	 * don't use it in hardirq context, because it
1119	 * could lead to deadlock.
1120	 */
1121	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
1122	for (;;) {
1123		int ret = try_to_del_timer_sync(timer);
1124		if (ret >= 0)
1125			return ret;
1126		cpu_relax();
1127	}
1128}
1129EXPORT_SYMBOL(del_timer_sync);
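/*
 * Usage sketch (illustrative; my_dev and shutting_down are hypothetical):
 * typical teardown order -- first make sure nothing can re-arm the timer,
 * then wait for a possibly running handler:
 *
 *	dev->shutting_down = true;
 *	del_timer_sync(&dev->timer);
 *	kfree(dev);
 *
 * The flag stops the handler from re-arming itself; once del_timer_sync()
 * returns the handler cannot be running, so freeing the data is safe.
 * Calling this while holding a lock the (non-irqsafe) handler can take,
 * directly or via an interrupt, deadlocks as described above.
 */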
1130#endif
1131
1132static int cascade(struct tvec_base *base, struct tvec *tv, int index)
1133{
1134	/* cascade all the timers from tv up one level */
1135	struct timer_list *timer;
1136	struct hlist_node *tmp;
1137	struct hlist_head tv_list;
1138
1139	hlist_move_list(tv->vec + index, &tv_list);
1140
1141	/*
1142	 * We are removing _all_ timers from the list, so we
1143	 * don't have to detach them individually.
1144	 */
1145	hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
1146		/* No accounting, while moving them */
1147		__internal_add_timer(base, timer);
1148	}
1149
1150	return index;
1151}
1152
1153static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1154			  unsigned long data)
1155{
1156	int count = preempt_count();
1157
1158#ifdef CONFIG_LOCKDEP
1159	/*
1160	 * It is permissible to free the timer from inside the
1161	 * function that is called from it; we need to take this into
1162	 * account for lockdep too. To avoid bogus "held lock freed"
1163	 * warnings as well as problems when looking into
1164	 * timer->lockdep_map, make a copy and use that here.
1165	 */
1166	struct lockdep_map lockdep_map;
1167
1168	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1169#endif
1170	/*
1171	 * Couple the lock chain with the lock chain at
1172	 * del_timer_sync() by acquiring the lock_map around the fn()
1173	 * call here and in del_timer_sync().
1174	 */
1175	lock_map_acquire(&lockdep_map);
1176
1177	trace_timer_expire_entry(timer);
1178	fn(data);
1179	trace_timer_expire_exit(timer);
1180
1181	lock_map_release(&lockdep_map);
1182
1183	if (count != preempt_count()) {
1184		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
1185			  fn, count, preempt_count());
1186		/*
1187		 * Restore the preempt count. That gives us a decent
1188		 * chance to survive and extract information. If the
1189		 * callback kept a lock held, bad luck, but not worse
1190		 * than the BUG() we had.
1191		 */
1192		preempt_count_set(count);
1193	}
1194}
1195
1196#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
1197
1198/**
1199 * __run_timers - run all expired timers (if any) on this CPU.
1200 * @base: the timer vector to be processed.
1201 *
1202 * This function cascades all vectors and executes all expired timer
1203 * vectors.
1204 */
1205static inline void __run_timers(struct tvec_base *base)
1206{
1207	struct timer_list *timer;
1208
1209	spin_lock_irq(&base->lock);
1210
1211	while (time_after_eq(jiffies, base->timer_jiffies)) {
1212		struct hlist_head work_list;
1213		struct hlist_head *head = &work_list;
1214		int index;
1215
1216		if (!base->all_timers) {
1217			base->timer_jiffies = jiffies;
1218			break;
1219		}
1220
1221		index = base->timer_jiffies & TVR_MASK;
1222
1223		/*
1224		 * Cascade timers:
1225		 */
1226		if (!index &&
1227			(!cascade(base, &base->tv2, INDEX(0))) &&
1228				(!cascade(base, &base->tv3, INDEX(1))) &&
1229					!cascade(base, &base->tv4, INDEX(2)))
1230			cascade(base, &base->tv5, INDEX(3));
1231		++base->timer_jiffies;
1232		hlist_move_list(base->tv1.vec + index, head);
1233		while (!hlist_empty(head)) {
1234			void (*fn)(unsigned long);
1235			unsigned long data;
1236			bool irqsafe;
1237
1238			timer = hlist_entry(head->first, struct timer_list, entry);
1239			fn = timer->function;
1240			data = timer->data;
1241			irqsafe = timer->flags & TIMER_IRQSAFE;
1242
1243			timer_stats_account_timer(timer);
1244
1245			base->running_timer = timer;
1246			detach_expired_timer(timer, base);
1247
1248			if (irqsafe) {
1249				spin_unlock(&base->lock);
1250				call_timer_fn(timer, fn, data);
1251				spin_lock(&base->lock);
1252			} else {
1253				spin_unlock_irq(&base->lock);
1254				call_timer_fn(timer, fn, data);
1255				spin_lock_irq(&base->lock);
1256			}
1257		}
1258	}
1259	base->running_timer = NULL;
1260	spin_unlock_irq(&base->lock);
1261}
1262
1263#ifdef CONFIG_NO_HZ_COMMON
1264/*
1265 * Find out when the next timer event is due to happen. This
1266 * is used on S/390 to stop all activity when a CPU is idle.
1267 * This function needs to be called with interrupts disabled.
1268 */
1269static unsigned long __next_timer_interrupt(struct tvec_base *base)
1270{
1271	unsigned long timer_jiffies = base->timer_jiffies;
1272	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
1273	int index, slot, array, found = 0;
1274	struct timer_list *nte;
1275	struct tvec *varray[4];
1276
1277	/* Look for timer events in tv1. */
1278	index = slot = timer_jiffies & TVR_MASK;
1279	do {
1280		hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
1281			if (nte->flags & TIMER_DEFERRABLE)
1282				continue;
1283
1284			found = 1;
1285			expires = nte->expires;
1286			/* Look at the cascade bucket(s)? */
1287			if (!index || slot < index)
1288				goto cascade;
1289			return expires;
1290		}
1291		slot = (slot + 1) & TVR_MASK;
1292	} while (slot != index);
1293
1294cascade:
1295	/* Calculate the next cascade event */
1296	if (index)
1297		timer_jiffies += TVR_SIZE - index;
1298	timer_jiffies >>= TVR_BITS;
1299
1300	/* Check tv2-tv5. */
1301	varray[0] = &base->tv2;
1302	varray[1] = &base->tv3;
1303	varray[2] = &base->tv4;
1304	varray[3] = &base->tv5;
1305
1306	for (array = 0; array < 4; array++) {
1307		struct tvec *varp = varray[array];
1308
1309		index = slot = timer_jiffies & TVN_MASK;
1310		do {
1311			hlist_for_each_entry(nte, varp->vec + slot, entry) {
1312				if (nte->flags & TIMER_DEFERRABLE)
1313					continue;
1314
1315				found = 1;
1316				if (time_before(nte->expires, expires))
1317					expires = nte->expires;
1318			}
1319			/*
1320			 * Are we still searching for the first timer or are
1321			 * we looking up the cascade buckets?
1322			 */
1323			if (found) {
1324				/* Look at the cascade bucket(s)? */
1325				if (!index || slot < index)
1326					break;
1327				return expires;
1328			}
1329			slot = (slot + 1) & TVN_MASK;
1330		} while (slot != index);
1331
1332		if (index)
1333			timer_jiffies += TVN_SIZE - index;
1334		timer_jiffies >>= TVN_BITS;
1335	}
1336	return expires;
1337}
1338
1339/*
1340 * Check, if the next hrtimer event is before the next timer wheel
1341 * event:
1342 */
1343static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
1344{
1345	u64 nextevt = hrtimer_get_next_event();
1346
1347	/*
1348	 * If high resolution timers are enabled
1349	 * hrtimer_get_next_event() returns KTIME_MAX.
1350	 */
1351	if (expires <= nextevt)
1352		return expires;
1353
1354	/*
1355	 * If the next timer is already expired, return the tick base
1356	 * time so the tick is fired immediately.
1357	 */
1358	if (nextevt <= basem)
1359		return basem;
1360
1361	/*
1362	 * Round up to the next jiffie. High resolution timers are
1363	 * off, so the hrtimers are expired in the tick and we need to
1364	 * make sure that this tick really expires the timer to avoid
1365	 * a ping pong of the nohz stop code.
1366	 *
1367	 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
1368	 */
1369	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
1370}
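/*
 * Worked example of the rounding above (illustrative numbers only): with
 * HZ == 1000 (TICK_NSEC == 1000000) and nextevt == 2500000 ns, the result
 * is DIV_ROUND_UP_ULL(2500000, 1000000) * 1000000 == 3000000 ns, i.e. the
 * event is rounded up to the next full tick (3 ms) rather than reported
 * between ticks, which low-resolution mode could not honour anyway.
 */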
1371
1372/**
1373 * get_next_timer_interrupt - return the time (clock mono) of the next timer
1374 * @basej:	base time jiffies
1375 * @basem:	base time clock monotonic
1376 *
1377 * Returns the tick aligned clock monotonic time of the next pending
1378 * timer or KTIME_MAX if no timer is pending.
1379 */
1380u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1381{
1382	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
1383	u64 expires = KTIME_MAX;
1384	unsigned long nextevt;
1385
1386	/*
1387	 * Pretend that there is no timer pending if the cpu is offline.
1388	 * Possible pending timers will be migrated later to an active cpu.
1389	 */
1390	if (cpu_is_offline(smp_processor_id()))
1391		return expires;
1392
1393	spin_lock(&base->lock);
1394	if (base->active_timers) {
1395		if (time_before_eq(base->next_timer, base->timer_jiffies))
1396			base->next_timer = __next_timer_interrupt(base);
1397		nextevt = base->next_timer;
1398		if (time_before_eq(nextevt, basej))
1399			expires = basem;
1400		else
1401			expires = basem + (nextevt - basej) * TICK_NSEC;
1402	}
1403	spin_unlock(&base->lock);
1404
1405	return cmp_next_hrtimer_event(basem, expires);
1406}
1407#endif
1408
1409/*
1410 * Called from the timer interrupt handler to charge one tick to the current
1411 * process.  user_tick is 1 if the tick is user time, 0 for system.
1412 */
1413void update_process_times(int user_tick)
1414{
1415	struct task_struct *p = current;
1416
1417	/* Note: this timer irq context must be accounted for as well. */
1418	account_process_tick(p, user_tick);
1419	run_local_timers();
1420	rcu_check_callbacks(user_tick);
1421#ifdef CONFIG_IRQ_WORK
1422	if (in_irq())
1423		irq_work_tick();
1424#endif
1425	scheduler_tick();
1426	run_posix_cpu_timers(p);
1427}
1428
1429/*
1430 * This function runs timers and the timer-tq in bottom half context.
1431 */
1432static void run_timer_softirq(struct softirq_action *h)
1433{
1434	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
1435
1436	if (time_after_eq(jiffies, base->timer_jiffies))
1437		__run_timers(base);
1438}
1439
1440/*
1441 * Called by the local, per-CPU timer interrupt on SMP.
1442 */
1443void run_local_timers(void)
1444{
1445	hrtimer_run_queues();
1446	raise_softirq(TIMER_SOFTIRQ);
1447}
1448
1449#ifdef __ARCH_WANT_SYS_ALARM
1450
1451/*
1452 * For backwards compatibility?  This can be done in libc so Alpha
1453 * and all newer ports shouldn't need it.
1454 */
1455SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1456{
1457	return alarm_setitimer(seconds);
1458}
1459
1460#endif
1461
1462static void process_timeout(unsigned long __data)
1463{
1464	wake_up_process((struct task_struct *)__data);
1465}
1466
1467/**
1468 * schedule_timeout - sleep until timeout
1469 * @timeout: timeout value in jiffies
1470 *
1471 * Make the current task sleep until @timeout jiffies have
1472 * elapsed. The routine will return immediately unless
1473 * the current task state has been set (see set_current_state()).
1474 *
1475 * You can set the task state as follows -
1476 *
1477 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1478 * pass before the routine returns. The routine will return 0
1479 *
1480 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1481 * delivered to the current task. In this case the remaining time
1482 * in jiffies will be returned, or 0 if the timer expired in time
1483 *
1484 * The current task state is guaranteed to be TASK_RUNNING when this
1485 * routine returns.
1486 *
1487 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1488 * the CPU away without a bound on the timeout. In this case the return
1489 * value will be %MAX_SCHEDULE_TIMEOUT.
1490 *
1491 * In all cases the return value is guaranteed to be non-negative.
1492 */
1493signed long __sched schedule_timeout(signed long timeout)
1494{
1495	struct timer_list timer;
1496	unsigned long expire;
1497
1498	switch (timeout)
1499	{
1500	case MAX_SCHEDULE_TIMEOUT:
1501		/*
1502		 * These two special cases exist purely for the caller's
1503		 * convenience. Nothing more. We could have taken
1504		 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1505		 * but I'd like to return a valid offset (>=0) to allow
1506		 * the caller to do whatever it wants with the retval.
1507		 */
1508		schedule();
1509		goto out;
1510	default:
1511		/*
1512		 * Another bit of paranoia. Note that the retval will be
1513		 * 0, since no piece of the kernel is supposed to check
1514		 * for a negative retval of schedule_timeout() (it
1515		 * should never happen anyway). You just have the printk()
1516		 * that will tell you if something has gone wrong and where.
1517		 */
1518		if (timeout < 0) {
1519			printk(KERN_ERR "schedule_timeout: wrong timeout "
1520				"value %lx\n", timeout);
1521			dump_stack();
1522			current->state = TASK_RUNNING;
1523			goto out;
1524		}
1525	}
1526
1527	expire = timeout + jiffies;
1528
1529	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
1530	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
1531	schedule();
1532	del_singleshot_timer_sync(&timer);
1533
1534	/* Remove the timer from the object tracker */
1535	destroy_timer_on_stack(&timer);
1536
1537	timeout = expire - jiffies;
1538
1539 out:
1540	return timeout < 0 ? 0 : timeout;
1541}
1542EXPORT_SYMBOL(schedule_timeout);
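/*
 * Usage sketch (illustrative; the 500ms timeout is arbitrary): the caller
 * sets the task state first, otherwise schedule_timeout() returns
 * immediately:
 *
 *	long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(500));
 *	if (remaining > 0)
 *		pr_debug("woken early, %ld jiffies left\n", remaining);
 *
 * The helpers below (schedule_timeout_interruptible() and friends) bundle
 * the state setting with the call.
 */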
1543
1544/*
1545 * We can use __set_current_state() here because schedule_timeout() calls
1546 * schedule() unconditionally.
1547 */
1548signed long __sched schedule_timeout_interruptible(signed long timeout)
1549{
1550	__set_current_state(TASK_INTERRUPTIBLE);
1551	return schedule_timeout(timeout);
1552}
1553EXPORT_SYMBOL(schedule_timeout_interruptible);
1554
1555signed long __sched schedule_timeout_killable(signed long timeout)
1556{
1557	__set_current_state(TASK_KILLABLE);
1558	return schedule_timeout(timeout);
1559}
1560EXPORT_SYMBOL(schedule_timeout_killable);
1561
1562signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1563{
1564	__set_current_state(TASK_UNINTERRUPTIBLE);
1565	return schedule_timeout(timeout);
1566}
1567EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1568
1569/*
1570 * Like schedule_timeout_uninterruptible(), except this task will not contribute
1571 * to load average.
1572 */
1573signed long __sched schedule_timeout_idle(signed long timeout)
1574{
1575	__set_current_state(TASK_IDLE);
1576	return schedule_timeout(timeout);
1577}
1578EXPORT_SYMBOL(schedule_timeout_idle);
1579
1580#ifdef CONFIG_HOTPLUG_CPU
1581static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
1582{
1583	struct timer_list *timer;
1584	int cpu = new_base->cpu;
1585
1586	while (!hlist_empty(head)) {
1587		timer = hlist_entry(head->first, struct timer_list, entry);
1588		/* We ignore the accounting on the dying cpu */
1589		detach_timer(timer, false);
1590		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
1591		internal_add_timer(new_base, timer);
1592	}
1593}
1594
1595static void migrate_timers(int cpu)
1596{
1597	struct tvec_base *old_base;
1598	struct tvec_base *new_base;
1599	int i;
1600
1601	BUG_ON(cpu_online(cpu));
1602	old_base = per_cpu_ptr(&tvec_bases, cpu);
1603	new_base = get_cpu_ptr(&tvec_bases);
1604	/*
1605	 * The caller is globally serialized and nobody else
1606	 * takes two locks at once, so deadlock is not possible.
1607	 */
1608	spin_lock_irq(&new_base->lock);
1609	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1610
1611	BUG_ON(old_base->running_timer);
1612
1613	for (i = 0; i < TVR_SIZE; i++)
1614		migrate_timer_list(new_base, old_base->tv1.vec + i);
1615	for (i = 0; i < TVN_SIZE; i++) {
1616		migrate_timer_list(new_base, old_base->tv2.vec + i);
1617		migrate_timer_list(new_base, old_base->tv3.vec + i);
1618		migrate_timer_list(new_base, old_base->tv4.vec + i);
1619		migrate_timer_list(new_base, old_base->tv5.vec + i);
1620	}
1621
1622	old_base->active_timers = 0;
1623	old_base->all_timers = 0;
1624
1625	spin_unlock(&old_base->lock);
1626	spin_unlock_irq(&new_base->lock);
1627	put_cpu_ptr(&tvec_bases);
1628}
1629
1630static int timer_cpu_notify(struct notifier_block *self,
1631				unsigned long action, void *hcpu)
1632{
1633	switch (action) {
1634	case CPU_DEAD:
1635	case CPU_DEAD_FROZEN:
1636		migrate_timers((long)hcpu);
1637		break;
1638	default:
1639		break;
1640	}
1641
1642	return NOTIFY_OK;
1643}
1644
1645static inline void timer_register_cpu_notifier(void)
1646{
1647	cpu_notifier(timer_cpu_notify, 0);
1648}
1649#else
1650static inline void timer_register_cpu_notifier(void) { }
1651#endif /* CONFIG_HOTPLUG_CPU */
1652
1653static void __init init_timer_cpu(int cpu)
1654{
1655	struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
1656
1657	base->cpu = cpu;
1658	spin_lock_init(&base->lock);
1659
1660	base->timer_jiffies = jiffies;
1661	base->next_timer = base->timer_jiffies;
1662}
1663
1664static void __init init_timer_cpus(void)
1665{
1666	int cpu;
1667
1668	for_each_possible_cpu(cpu)
1669		init_timer_cpu(cpu);
1670}
1671
1672void __init init_timers(void)
1673{
1674	init_timer_cpus();
1675	init_timer_stats();
1676	timer_register_cpu_notifier();
1677	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
1678}
1679
1680/**
1681 * msleep - sleep safely even with waitqueue interruptions
1682 * @msecs: Time in milliseconds to sleep for
1683 */
1684void msleep(unsigned int msecs)
1685{
1686	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1687
1688	while (timeout)
1689		timeout = schedule_timeout_uninterruptible(timeout);
1690}
1691
1692EXPORT_SYMBOL(msleep);
1693
1694/**
1695 * msleep_interruptible - sleep waiting for signals
1696 * @msecs: Time in milliseconds to sleep for
1697 */
1698unsigned long msleep_interruptible(unsigned int msecs)
1699{
1700	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1701
1702	while (timeout && !signal_pending(current))
1703		timeout = schedule_timeout_interruptible(timeout);
1704	return jiffies_to_msecs(timeout);
1705}
1706
1707EXPORT_SYMBOL(msleep_interruptible);
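/*
 * Usage sketch (illustrative): msleep() always sleeps the full period,
 * while msleep_interruptible() bails out on a signal and reports what is
 * left:
 *
 *	msleep(20);
 *
 *	if (msleep_interruptible(1000))
 *		return -EINTR;
 *
 * Both are jiffy based; for sleeps shorter than a couple of jiffies
 * prefer usleep_range() below.
 */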
1708
1709static void __sched do_usleep_range(unsigned long min, unsigned long max)
1710{
1711	ktime_t kmin;
1712	u64 delta;
1713
1714	kmin = ktime_set(0, min * NSEC_PER_USEC);
1715	delta = (u64)(max - min) * NSEC_PER_USEC;
1716	schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
1717}
1718
1719/**
1720 * usleep_range - Drop-in replacement for udelay where wakeup is flexible
1721 * @min: Minimum time in usecs to sleep
1722 * @max: Maximum time in usecs to sleep
1723 */
1724void __sched usleep_range(unsigned long min, unsigned long max)
1725{
1726	__set_current_state(TASK_UNINTERRUPTIBLE);
1727	do_usleep_range(min, max);
1728}
1729EXPORT_SYMBOL(usleep_range);
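/*
 * Usage sketch (illustrative; the 100-200us window is arbitrary): a driver
 * waiting roughly 100us for hardware, giving the scheduler a window to
 * coalesce the wakeup with other events:
 *
 *	usleep_range(100, 200);
 *
 * Unlike udelay(), this sleeps and so must not be used in atomic context;
 * unlike msleep(), it is hrtimer based and therefore suitable for
 * sub-jiffy delays.
 */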