   1/*
   2 *  linux/kernel/timer.c
   3 *
   4 *  Kernel internal timers
   5 *
   6 *  Copyright (C) 1991, 1992  Linus Torvalds
   7 *
   8 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
   9 *
  10 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
  11 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
  12 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
  13 *              serialize accesses to xtime/lost_ticks).
  14 *                              Copyright (C) 1998  Andrea Arcangeli
  15 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
  16 *  2002-05-31	Move sys_sysinfo here and make its locking sane, Robert Love
  17 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
  18 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
  19 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
  20 */
  21
  22#include <linux/kernel_stat.h>
  23#include <linux/export.h>
  24#include <linux/interrupt.h>
  25#include <linux/percpu.h>
  26#include <linux/init.h>
  27#include <linux/mm.h>
  28#include <linux/swap.h>
  29#include <linux/pid_namespace.h>
  30#include <linux/notifier.h>
  31#include <linux/thread_info.h>
  32#include <linux/time.h>
  33#include <linux/jiffies.h>
  34#include <linux/posix-timers.h>
  35#include <linux/cpu.h>
  36#include <linux/syscalls.h>
  37#include <linux/delay.h>
  38#include <linux/tick.h>
  39#include <linux/kallsyms.h>
  40#include <linux/irq_work.h>
  41#include <linux/sched.h>
  42#include <linux/sched/sysctl.h>
  43#include <linux/slab.h>
  44#include <linux/compat.h>
  45
  46#include <asm/uaccess.h>
  47#include <asm/unistd.h>
  48#include <asm/div64.h>
  49#include <asm/timex.h>
  50#include <asm/io.h>
  51
  52#define CREATE_TRACE_POINTS
  53#include <trace/events/timer.h>
  54
  55__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
  56
  57EXPORT_SYMBOL(jiffies_64);
  58
  59/*
  60 * per-CPU timer vector definitions:
  61 */
  62#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
  63#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
  64#define TVN_SIZE (1 << TVN_BITS)
  65#define TVR_SIZE (1 << TVR_BITS)
  66#define TVN_MASK (TVN_SIZE - 1)
  67#define TVR_MASK (TVR_SIZE - 1)
  68#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
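
/*
 * Editor's note (not part of timer.c): a worked example of the wheel
 * geometry implied by the constants above, assuming CONFIG_BASE_SMALL=0:
 *
 *   TVR_BITS = 8  -> tv1 has 256 one-jiffy slots, covering the next
 *                    256 jiffies.
 *   TVN_BITS = 6  -> tv2..tv5 each have 64 slots:
 *     tv2 slots span 2^8  jiffies each, reaching out to 2^14 - 1,
 *     tv3 slots span 2^14 jiffies each, reaching out to 2^20 - 1,
 *     tv4 slots span 2^20 jiffies each, reaching out to 2^26 - 1,
 *     tv5 slots span 2^26 jiffies each, reaching out to 2^32 - 1 = MAX_TVAL.
 *
 * With HZ=1000 a tv1 slot is 1ms wide and MAX_TVAL is roughly 49.7 days.
 */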
  69
  70struct tvec {
  71	struct list_head vec[TVN_SIZE];
  72};
  73
  74struct tvec_root {
  75	struct list_head vec[TVR_SIZE];
  76};
  77
  78struct tvec_base {
  79	spinlock_t lock;
  80	struct timer_list *running_timer;
  81	unsigned long timer_jiffies;
  82	unsigned long next_timer;
  83	unsigned long active_timers;
  84	unsigned long all_timers;
  85	struct tvec_root tv1;
  86	struct tvec tv2;
  87	struct tvec tv3;
  88	struct tvec tv4;
  89	struct tvec tv5;
  90} ____cacheline_aligned;
  91
  92struct tvec_base boot_tvec_bases;
  93EXPORT_SYMBOL(boot_tvec_bases);
  94static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
  95
  96/* Functions below help us manage 'deferrable' flag */
  97static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
  98{
  99	return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
 100}
 101
 102static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
 103{
 104	return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
 105}
 106
 107static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
 108{
 109	return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
 110}
 111
 112static inline void
 113timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
 114{
 115	unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;
 116
 117	timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
 118}
 119
 120static unsigned long round_jiffies_common(unsigned long j, int cpu,
 121		bool force_up)
 122{
 123	int rem;
 124	unsigned long original = j;
 125
 126	/*
 127	 * We don't want all cpus firing their timers at once hitting the
 128	 * same lock or cachelines, so we skew each extra cpu with an extra
 129	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
 130	 * already did this.
 131	 * The skew is done by adding 3*cpunr, then round, then subtract this
 132	 * extra offset again.
 133	 */
 134	j += cpu * 3;
 135
 136	rem = j % HZ;
 137
 138	/*
 139	 * If the target jiffy is just after a whole second (which can happen
 140	 * due to delays of the timer irq, long irq-off times etc.) then
 141	 * we should round down to the whole second, not up. Use 1/4th of a
 142	 * second as the cutoff for this rounding, as an extreme upper bound.
 143	 * But never round down if @force_up is set.
 144	 */
 145	if (rem < HZ/4 && !force_up) /* round down */
 146		j = j - rem;
 147	else /* round up */
 148		j = j - rem + HZ;
 149
 150	/* now that we have rounded, subtract the extra skew again */
 151	j -= cpu * 3;
 152
 153	/*
 154	 * Make sure j is still in the future. Otherwise return the
 155	 * unmodified value.
 156	 */
 157	return time_is_after_jiffies(j) ? j : original;
 158}
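
/*
 * Editor's note (not part of timer.c): a worked example of the rounding
 * rule above, assuming HZ=1000 and ignoring the per-cpu skew.  A value
 * landing 200 jiffies past a second boundary (rem = 200 < HZ/4 = 250) is
 * rounded down to that boundary; one landing 300 jiffies past it
 * (rem = 300 >= 250) is rounded up to the next full second.  With
 * @force_up set, both cases round up.
 */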
 159
 160/**
 161 * __round_jiffies - function to round jiffies to a full second
 162 * @j: the time in (absolute) jiffies that should be rounded
 163 * @cpu: the processor number on which the timeout will happen
 164 *
 165 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 166 * up or down to (approximately) full seconds. This is useful for timers
 167 * for which the exact time they fire does not matter too much, as long as
 168 * they fire approximately every X seconds.
 169 *
 170 * By rounding these timers to whole seconds, all such timers will fire
 171 * at the same time, rather than at various times spread out. The goal
 172 * of this is to have the CPU wake up less, which saves power.
 173 *
 174 * The exact rounding is skewed for each processor to avoid all
 175 * processors firing at the exact same time, which could lead
 176 * to lock contention or spurious cache line bouncing.
 177 *
 178 * The return value is the rounded version of the @j parameter.
 179 */
 180unsigned long __round_jiffies(unsigned long j, int cpu)
 181{
 182	return round_jiffies_common(j, cpu, false);
 183}
 184EXPORT_SYMBOL_GPL(__round_jiffies);
 185
 186/**
 187 * __round_jiffies_relative - function to round jiffies to a full second
 188 * @j: the time in (relative) jiffies that should be rounded
 189 * @cpu: the processor number on which the timeout will happen
 190 *
 191 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 192 * up or down to (approximately) full seconds. This is useful for timers
 193 * for which the exact time they fire does not matter too much, as long as
 194 * they fire approximately every X seconds.
 195 *
 196 * By rounding these timers to whole seconds, all such timers will fire
 197 * at the same time, rather than at various times spread out. The goal
 198 * of this is to have the CPU wake up less, which saves power.
 199 *
 200 * The exact rounding is skewed for each processor to avoid all
 201 * processors firing at the exact same time, which could lead
 202 * to lock contention or spurious cache line bouncing.
 203 *
 204 * The return value is the rounded version of the @j parameter.
 205 */
 206unsigned long __round_jiffies_relative(unsigned long j, int cpu)
 207{
 208	unsigned long j0 = jiffies;
 209
 210	/* Use j0 because jiffies might change while we run */
 211	return round_jiffies_common(j + j0, cpu, false) - j0;
 212}
 213EXPORT_SYMBOL_GPL(__round_jiffies_relative);
 214
 215/**
 216 * round_jiffies - function to round jiffies to a full second
 217 * @j: the time in (absolute) jiffies that should be rounded
 218 *
 219 * round_jiffies() rounds an absolute time in the future (in jiffies)
 220 * up or down to (approximately) full seconds. This is useful for timers
 221 * for which the exact time they fire does not matter too much, as long as
 222 * they fire approximately every X seconds.
 223 *
 224 * By rounding these timers to whole seconds, all such timers will fire
 225 * at the same time, rather than at various times spread out. The goal
 226 * of this is to have the CPU wake up less, which saves power.
 227 *
 228 * The return value is the rounded version of the @j parameter.
 229 */
 230unsigned long round_jiffies(unsigned long j)
 231{
 232	return round_jiffies_common(j, raw_smp_processor_id(), false);
 233}
 234EXPORT_SYMBOL_GPL(round_jiffies);
 235
 236/**
 237 * round_jiffies_relative - function to round jiffies to a full second
 238 * @j: the time in (relative) jiffies that should be rounded
 239 *
 240 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 241 * up or down to (approximately) full seconds. This is useful for timers
 242 * for which the exact time they fire does not matter too much, as long as
 243 * they fire approximately every X seconds.
 244 *
 245 * By rounding these timers to whole seconds, all such timers will fire
 246 * at the same time, rather than at various times spread out. The goal
 247 * of this is to have the CPU wake up less, which saves power.
 248 *
 249 * The return value is the rounded version of the @j parameter.
 250 */
 251unsigned long round_jiffies_relative(unsigned long j)
 252{
 253	return __round_jiffies_relative(j, raw_smp_processor_id());
 254}
 255EXPORT_SYMBOL_GPL(round_jiffies_relative);
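
/*
 * Editor's note (not part of timer.c): a minimal usage sketch for the
 * rounding helpers above, assuming a hypothetical kernel module (with the
 * usual <linux/timer.h>/<linux/jiffies.h> includes) that runs periodic,
 * non-time-critical housekeeping.  The timer and callback names are
 * assumptions.
 */
#if 0	/* illustrative only */
static struct timer_list housekeeping_timer;

static void housekeeping_fn(unsigned long data)
{
	/* ... infrequent work that does not care about exact firing time ... */

	/* Re-arm roughly every 5 seconds, batched onto a full second. */
	mod_timer(&housekeeping_timer, round_jiffies(jiffies + 5 * HZ));
}
#endif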
 256
 257/**
 258 * __round_jiffies_up - function to round jiffies up to a full second
 259 * @j: the time in (absolute) jiffies that should be rounded
 260 * @cpu: the processor number on which the timeout will happen
 261 *
 262 * This is the same as __round_jiffies() except that it will never
 263 * round down.  This is useful for timeouts for which the exact time
 264 * of firing does not matter too much, as long as they don't fire too
 265 * early.
 266 */
 267unsigned long __round_jiffies_up(unsigned long j, int cpu)
 268{
 269	return round_jiffies_common(j, cpu, true);
 270}
 271EXPORT_SYMBOL_GPL(__round_jiffies_up);
 272
 273/**
 274 * __round_jiffies_up_relative - function to round jiffies up to a full second
 275 * @j: the time in (relative) jiffies that should be rounded
 276 * @cpu: the processor number on which the timeout will happen
 277 *
 278 * This is the same as __round_jiffies_relative() except that it will never
 279 * round down.  This is useful for timeouts for which the exact time
 280 * of firing does not matter too much, as long as they don't fire too
 281 * early.
 282 */
 283unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
 284{
 285	unsigned long j0 = jiffies;
 286
 287	/* Use j0 because jiffies might change while we run */
 288	return round_jiffies_common(j + j0, cpu, true) - j0;
 289}
 290EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
 291
 292/**
 293 * round_jiffies_up - function to round jiffies up to a full second
 294 * @j: the time in (absolute) jiffies that should be rounded
 295 *
 296 * This is the same as round_jiffies() except that it will never
 297 * round down.  This is useful for timeouts for which the exact time
 298 * of firing does not matter too much, as long as they don't fire too
 299 * early.
 300 */
 301unsigned long round_jiffies_up(unsigned long j)
 302{
 303	return round_jiffies_common(j, raw_smp_processor_id(), true);
 304}
 305EXPORT_SYMBOL_GPL(round_jiffies_up);
 306
 307/**
 308 * round_jiffies_up_relative - function to round jiffies up to a full second
 309 * @j: the time in (relative) jiffies that should be rounded
 310 *
 311 * This is the same as round_jiffies_relative() except that it will never
 312 * round down.  This is useful for timeouts for which the exact time
 313 * of firing does not matter too much, as long as they don't fire too
 314 * early.
 315 */
 316unsigned long round_jiffies_up_relative(unsigned long j)
 317{
 318	return __round_jiffies_up_relative(j, raw_smp_processor_id());
 319}
 320EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
 321
 322/**
 323 * set_timer_slack - set the allowed slack for a timer
 324 * @timer: the timer to be modified
 325 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 326 *
 327 * Set the amount of time, in jiffies, that a certain timer has
 328 * in terms of slack. By setting this value, the timer subsystem
 329 * will schedule the actual timer somewhere between
 330 * the time mod_timer() asks for, and that time plus the slack.
 331 *
 332 * By setting the slack to -1, a percentage of the delay is used
 333 * instead.
 334 */
 335void set_timer_slack(struct timer_list *timer, int slack_hz)
 336{
 337	timer->slack = slack_hz;
 338}
 339EXPORT_SYMBOL_GPL(set_timer_slack);
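
/*
 * Editor's note (not part of timer.c): a minimal sketch of set_timer_slack(),
 * assuming a hypothetical driver whose poll timer may fire up to ~100ms late
 * so the kernel can coalesce it with other timers.  my_device, slow_poll_fn
 * and the timer name are assumptions, not kernel APIs.
 */
#if 0	/* illustrative only */
static struct timer_list slow_poll_timer;

static void start_slow_polling(struct my_device *dev)
{
	setup_timer(&slow_poll_timer, slow_poll_fn, (unsigned long)dev);
	set_timer_slack(&slow_poll_timer, HZ / 10);	/* ~100ms of slack */
	mod_timer(&slow_poll_timer, jiffies + HZ);	/* first expiry in ~1s */
}
#endif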
 340
 341/*
 342 * If the list is empty, catch up ->timer_jiffies to the current time.
 343 * The caller must hold the tvec_base lock.  Returns true if the list
 344 * was empty and therefore ->timer_jiffies was updated.
 345 */
 346static bool catchup_timer_jiffies(struct tvec_base *base)
 347{
 348	if (!base->all_timers) {
 349		base->timer_jiffies = jiffies;
 350		return true;
 351	}
 352	return false;
 353}
 354
 355static void
 356__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 357{
 358	unsigned long expires = timer->expires;
 359	unsigned long idx = expires - base->timer_jiffies;
 360	struct list_head *vec;
 361
 362	if (idx < TVR_SIZE) {
 363		int i = expires & TVR_MASK;
 364		vec = base->tv1.vec + i;
 365	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
 366		int i = (expires >> TVR_BITS) & TVN_MASK;
 367		vec = base->tv2.vec + i;
 368	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
 369		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
 370		vec = base->tv3.vec + i;
 371	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
 372		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
 373		vec = base->tv4.vec + i;
 374	} else if ((signed long) idx < 0) {
 375		/*
 376		 * Can happen if you add a timer with expires == jiffies,
 377		 * or you set a timer to go off in the past
 378		 */
 379		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
 380	} else {
 381		int i;
 382		/* If the timeout is larger than MAX_TVAL (on 64-bit
 383		 * architectures or with CONFIG_BASE_SMALL=1) then we
 384		 * use the maximum timeout.
 385		 */
 386		if (idx > MAX_TVAL) {
 387			idx = MAX_TVAL;
 388			expires = idx + base->timer_jiffies;
 389		}
 390		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
 391		vec = base->tv5.vec + i;
 392	}
 393	/*
 394	 * Timers are FIFO:
 395	 */
 396	list_add_tail(&timer->entry, vec);
 397}
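
/*
 * Editor's note (not part of timer.c): worked examples of the bucketing
 * above, assuming CONFIG_BASE_SMALL=0 (TVR_BITS=8, TVN_BITS=6) and, for
 * simplicity, base->timer_jiffies == 0 so that idx == expires:
 *
 *   idx = 100    -> idx < 2^8,  tv1, slot = expires & 0xff
 *   idx = 5000   -> idx < 2^14, tv2, slot = (expires >> 8) & 0x3f
 *   idx = 100000 -> idx < 2^20, tv3, slot = (expires >> 14) & 0x3f
 *
 * A timer whose expiry is already in the past (idx computed as negative)
 * goes into the tv1 slot for the current timer_jiffies, so it is handled
 * on the next run of the timer softirq.
 */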
 398
 399static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 400{
 401	(void)catchup_timer_jiffies(base);
 402	__internal_add_timer(base, timer);
 403	/*
 404	 * Update base->active_timers and base->next_timer
 405	 */
 406	if (!tbase_get_deferrable(timer->base)) {
 407		if (!base->active_timers++ ||
 408		    time_before(timer->expires, base->next_timer))
 409			base->next_timer = timer->expires;
 410	}
 411	base->all_timers++;
 412}
 413
 414#ifdef CONFIG_TIMER_STATS
 415void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
 416{
 417	if (timer->start_site)
 418		return;
 419
 420	timer->start_site = addr;
 421	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
 422	timer->start_pid = current->pid;
 423}
 424
 425static void timer_stats_account_timer(struct timer_list *timer)
 426{
 427	unsigned int flag = 0;
 428
 429	if (likely(!timer->start_site))
 430		return;
 431	if (unlikely(tbase_get_deferrable(timer->base)))
 432		flag |= TIMER_STATS_FLAG_DEFERRABLE;
 433
 434	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
 435				 timer->function, timer->start_comm, flag);
 436}
 437
 438#else
 439static void timer_stats_account_timer(struct timer_list *timer) {}
 440#endif
 441
 442#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
 443
 444static struct debug_obj_descr timer_debug_descr;
 445
 446static void *timer_debug_hint(void *addr)
 447{
 448	return ((struct timer_list *) addr)->function;
 449}
 450
 451/*
 452 * fixup_init is called when:
 453 * - an active object is initialized
 454 */
 455static int timer_fixup_init(void *addr, enum debug_obj_state state)
 456{
 457	struct timer_list *timer = addr;
 458
 459	switch (state) {
 460	case ODEBUG_STATE_ACTIVE:
 461		del_timer_sync(timer);
 462		debug_object_init(timer, &timer_debug_descr);
 463		return 1;
 464	default:
 465		return 0;
 466	}
 467}
 468
 469/* Stub timer callback for improperly used timers. */
 470static void stub_timer(unsigned long data)
 471{
 472	WARN_ON(1);
 473}
 474
 475/*
 476 * fixup_activate is called when:
 477 * - an active object is activated
 478 * - an unknown object is activated (might be a statically initialized object)
 479 */
 480static int timer_fixup_activate(void *addr, enum debug_obj_state state)
 481{
 482	struct timer_list *timer = addr;
 483
 484	switch (state) {
 485
 486	case ODEBUG_STATE_NOTAVAILABLE:
 487		/*
 488		 * This is not really a fixup. The timer was
 489		 * statically initialized. We just make sure that it
 490		 * is tracked in the object tracker.
 491		 */
 492		if (timer->entry.next == NULL &&
 493		    timer->entry.prev == TIMER_ENTRY_STATIC) {
 494			debug_object_init(timer, &timer_debug_descr);
 495			debug_object_activate(timer, &timer_debug_descr);
 496			return 0;
 497		} else {
 498			setup_timer(timer, stub_timer, 0);
 499			return 1;
 500		}
 501		return 0;
 502
 503	case ODEBUG_STATE_ACTIVE:
 504		WARN_ON(1);
 505
 506	default:
 507		return 0;
 508	}
 509}
 510
 511/*
 512 * fixup_free is called when:
 513 * - an active object is freed
 514 */
 515static int timer_fixup_free(void *addr, enum debug_obj_state state)
 516{
 517	struct timer_list *timer = addr;
 518
 519	switch (state) {
 520	case ODEBUG_STATE_ACTIVE:
 521		del_timer_sync(timer);
 522		debug_object_free(timer, &timer_debug_descr);
 523		return 1;
 524	default:
 525		return 0;
 526	}
 527}
 528
 529/*
 530 * fixup_assert_init is called when:
 531 * - an untracked/uninit-ed object is found
 532 */
 533static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
 534{
 535	struct timer_list *timer = addr;
 536
 537	switch (state) {
 538	case ODEBUG_STATE_NOTAVAILABLE:
 539		if (timer->entry.prev == TIMER_ENTRY_STATIC) {
 540			/*
 541			 * This is not really a fixup. The timer was
 542			 * statically initialized. We just make sure that it
 543			 * is tracked in the object tracker.
 544			 */
 545			debug_object_init(timer, &timer_debug_descr);
 546			return 0;
 547		} else {
 548			setup_timer(timer, stub_timer, 0);
 549			return 1;
 550		}
 551	default:
 552		return 0;
 553	}
 554}
 555
 556static struct debug_obj_descr timer_debug_descr = {
 557	.name			= "timer_list",
 558	.debug_hint		= timer_debug_hint,
 559	.fixup_init		= timer_fixup_init,
 560	.fixup_activate		= timer_fixup_activate,
 561	.fixup_free		= timer_fixup_free,
 562	.fixup_assert_init	= timer_fixup_assert_init,
 563};
 564
 565static inline void debug_timer_init(struct timer_list *timer)
 566{
 567	debug_object_init(timer, &timer_debug_descr);
 568}
 569
 570static inline void debug_timer_activate(struct timer_list *timer)
 571{
 572	debug_object_activate(timer, &timer_debug_descr);
 573}
 574
 575static inline void debug_timer_deactivate(struct timer_list *timer)
 576{
 577	debug_object_deactivate(timer, &timer_debug_descr);
 578}
 579
 580static inline void debug_timer_free(struct timer_list *timer)
 581{
 582	debug_object_free(timer, &timer_debug_descr);
 583}
 584
 585static inline void debug_timer_assert_init(struct timer_list *timer)
 586{
 587	debug_object_assert_init(timer, &timer_debug_descr);
 588}
 589
 590static void do_init_timer(struct timer_list *timer, unsigned int flags,
 591			  const char *name, struct lock_class_key *key);
 592
 593void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
 594			     const char *name, struct lock_class_key *key)
 595{
 596	debug_object_init_on_stack(timer, &timer_debug_descr);
 597	do_init_timer(timer, flags, name, key);
 598}
 599EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
 600
 601void destroy_timer_on_stack(struct timer_list *timer)
 602{
 603	debug_object_free(timer, &timer_debug_descr);
 604}
 605EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
 606
 607#else
 608static inline void debug_timer_init(struct timer_list *timer) { }
 609static inline void debug_timer_activate(struct timer_list *timer) { }
 610static inline void debug_timer_deactivate(struct timer_list *timer) { }
 611static inline void debug_timer_assert_init(struct timer_list *timer) { }
 612#endif
 613
 614static inline void debug_init(struct timer_list *timer)
 615{
 616	debug_timer_init(timer);
 617	trace_timer_init(timer);
 618}
 619
 620static inline void
 621debug_activate(struct timer_list *timer, unsigned long expires)
 622{
 623	debug_timer_activate(timer);
 624	trace_timer_start(timer, expires);
 625}
 626
 627static inline void debug_deactivate(struct timer_list *timer)
 628{
 629	debug_timer_deactivate(timer);
 630	trace_timer_cancel(timer);
 631}
 632
 633static inline void debug_assert_init(struct timer_list *timer)
 634{
 635	debug_timer_assert_init(timer);
 636}
 637
 638static void do_init_timer(struct timer_list *timer, unsigned int flags,
 639			  const char *name, struct lock_class_key *key)
 640{
 641	struct tvec_base *base = __raw_get_cpu_var(tvec_bases);
 642
 643	timer->entry.next = NULL;
 644	timer->base = (void *)((unsigned long)base | flags);
 645	timer->slack = -1;
 646#ifdef CONFIG_TIMER_STATS
 647	timer->start_site = NULL;
 648	timer->start_pid = -1;
 649	memset(timer->start_comm, 0, TASK_COMM_LEN);
 650#endif
 651	lockdep_init_map(&timer->lockdep_map, name, key, 0);
 652}
 653
 654/**
 655 * init_timer_key - initialize a timer
 656 * @timer: the timer to be initialized
 657 * @flags: timer flags
 658 * @name: name of the timer
 659 * @key: lockdep class key of the fake lock used for tracking timer
 660 *       sync lock dependencies
 661 *
 662 * init_timer_key() must be done to a timer prior to calling *any* of the
 663 * other timer functions.
 664 */
 665void init_timer_key(struct timer_list *timer, unsigned int flags,
 666		    const char *name, struct lock_class_key *key)
 667{
 668	debug_init(timer);
 669	do_init_timer(timer, flags, name, key);
 670}
 671EXPORT_SYMBOL(init_timer_key);
 672
 673static inline void detach_timer(struct timer_list *timer, bool clear_pending)
 674{
 675	struct list_head *entry = &timer->entry;
 676
 677	debug_deactivate(timer);
 678
 679	__list_del(entry->prev, entry->next);
 680	if (clear_pending)
 681		entry->next = NULL;
 682	entry->prev = LIST_POISON2;
 683}
 684
 685static inline void
 686detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
 687{
 688	detach_timer(timer, true);
 689	if (!tbase_get_deferrable(timer->base))
 690		base->active_timers--;
 691	base->all_timers--;
 692	(void)catchup_timer_jiffies(base);
 693}
 694
 695static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
 696			     bool clear_pending)
 697{
 698	if (!timer_pending(timer))
 699		return 0;
 700
 701	detach_timer(timer, clear_pending);
 702	if (!tbase_get_deferrable(timer->base)) {
 703		base->active_timers--;
 704		if (timer->expires == base->next_timer)
 705			base->next_timer = base->timer_jiffies;
 706	}
 707	base->all_timers--;
 708	(void)catchup_timer_jiffies(base);
 709	return 1;
 710}
 711
 712/*
 713 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 714 * means that all timers which are tied to this base via timer->base are
 715 * locked, and the base itself is locked too.
 716 *
 717 * So __run_timers/migrate_timers can safely modify all timers which could
 718 * be found on ->tvX lists.
 719 *
 720 * When the timer's base is locked, and the timer removed from list, it is
 721 * possible to set timer->base = NULL and drop the lock: the timer remains
 722 * locked.
 723 */
 724static struct tvec_base *lock_timer_base(struct timer_list *timer,
 725					unsigned long *flags)
 726	__acquires(timer->base->lock)
 727{
 728	struct tvec_base *base;
 729
 730	for (;;) {
 731		struct tvec_base *prelock_base = timer->base;
 732		base = tbase_get_base(prelock_base);
 733		if (likely(base != NULL)) {
 734			spin_lock_irqsave(&base->lock, *flags);
 735			if (likely(prelock_base == timer->base))
 736				return base;
 737			/* The timer has migrated to another CPU */
 738			spin_unlock_irqrestore(&base->lock, *flags);
 739		}
 740		cpu_relax();
 741	}
 742}
 743
 744static inline int
 745__mod_timer(struct timer_list *timer, unsigned long expires,
 746						bool pending_only, int pinned)
 747{
 748	struct tvec_base *base, *new_base;
 749	unsigned long flags;
 750	int ret = 0, cpu;
 751
 752	timer_stats_timer_set_start_info(timer);
 753	BUG_ON(!timer->function);
 754
 755	base = lock_timer_base(timer, &flags);
 756
 757	ret = detach_if_pending(timer, base, false);
 758	if (!ret && pending_only)
 759		goto out_unlock;
 760
 761	debug_activate(timer, expires);
 762
 763	cpu = get_nohz_timer_target(pinned);
 764	new_base = per_cpu(tvec_bases, cpu);
 765
 766	if (base != new_base) {
 767		/*
 768		 * We are trying to schedule the timer on the local CPU.
 769		 * However we can't change timer's base while it is running,
 770		 * otherwise del_timer_sync() can't detect that the timer's
 771		 * handler has not yet finished. This also guarantees that
 772		 * the timer is serialized wrt itself.
 773		 */
 774		if (likely(base->running_timer != timer)) {
 775			/* See the comment in lock_timer_base() */
 776			timer_set_base(timer, NULL);
 777			spin_unlock(&base->lock);
 778			base = new_base;
 779			spin_lock(&base->lock);
 780			timer_set_base(timer, base);
 781		}
 782	}
 783
 784	timer->expires = expires;
 785	internal_add_timer(base, timer);
 786
 787out_unlock:
 788	spin_unlock_irqrestore(&base->lock, flags);
 789
 790	return ret;
 791}
 792
 793/**
 794 * mod_timer_pending - modify a pending timer's timeout
 795 * @timer: the pending timer to be modified
 796 * @expires: new timeout in jiffies
 797 *
 798 * mod_timer_pending() is the same for pending timers as mod_timer(),
 799 * but will not re-activate and modify already deleted timers.
 800 *
 801 * It is useful for unserialized use of timers.
 802 */
 803int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 804{
 805	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
 806}
 807EXPORT_SYMBOL(mod_timer_pending);
 808
 809/*
 810 * Decide where to put the timer while taking the slack into account
 811 *
 812 * Algorithm:
 813 *   1) calculate the maximum (absolute) time
 814 *   2) calculate the highest bit where the expires and new max are different
 815 *   3) use this bit to make a mask
 816 *   4) use the bitmask to round down the maximum time, so that all last
 817 *      bits are zeros
 818 */
 819static inline
 820unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
 821{
 822	unsigned long expires_limit, mask;
 823	int bit;
 824
 825	if (timer->slack >= 0) {
 826		expires_limit = expires + timer->slack;
 827	} else {
 828		long delta = expires - jiffies;
 829
 830		if (delta < 256)
 831			return expires;
 832
 833		expires_limit = expires + delta / 256;
 834	}
 835	mask = expires ^ expires_limit;
 836	if (mask == 0)
 837		return expires;
 838
 839	bit = find_last_bit(&mask, BITS_PER_LONG);
 840
 841	mask = (1UL << bit) - 1;
 842
 843	expires_limit = expires_limit & ~(mask);
 844
 845	return expires_limit;
 846}
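
/*
 * Editor's note (not part of timer.c): a worked example of the slack
 * algorithm above with the default timer->slack == -1.  Suppose
 * jiffies = 3072 and expires = 4096, so delta = 1024:
 *
 *   expires_limit = 4096 + 1024/256          = 4100 (0x1004)
 *   mask          = 0x1000 ^ 0x1004          = 0x4  (highest set bit: 2)
 *   mask          = (1UL << 2) - 1           = 0x3
 *   expires_limit = 0x1004 & ~0x3            = 4100
 *
 * The effective expiry becomes 4100, i.e. at most delta/256 = 4 jiffies
 * (~0.4%) later than requested, which lets the wheel batch this timer
 * with its neighbours.
 */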
 847
 848/**
 849 * mod_timer - modify a timer's timeout
 850 * @timer: the timer to be modified
 851 * @expires: new timeout in jiffies
 852 *
 853 * mod_timer() is a more efficient way to update the expire field of an
 854 * active timer (if the timer is inactive it will be activated)
 855 *
 856 * mod_timer(timer, expires) is equivalent to:
 857 *
 858 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 859 *
 860 * Note that if there are multiple unserialized concurrent users of the
 861 * same timer, then mod_timer() is the only safe way to modify the timeout,
 862 * since add_timer() cannot modify an already running timer.
 863 *
 864 * The function returns whether it has modified a pending timer or not.
 865 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 866 * active timer returns 1.)
 867 */
 868int mod_timer(struct timer_list *timer, unsigned long expires)
 869{
 870	expires = apply_slack(timer, expires);
 871
 872	/*
 873	 * This is a common optimization triggered by the
 874	 * networking code - if the timer is re-modified
 875	 * to be the same thing then just return:
 876	 */
 877	if (timer_pending(timer) && timer->expires == expires)
 878		return 1;
 879
 880	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
 881}
 882EXPORT_SYMBOL(mod_timer);
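
/*
 * Editor's note (not part of timer.c): a minimal sketch of the common
 * "re-arm from the handler" pattern that mod_timer() serves, assuming a
 * hypothetical driver that polls its hardware about once per second.
 * my_device and my_device_poll() are assumptions, not kernel APIs.
 */
#if 0	/* illustrative only */
static struct timer_list poll_timer;

static void poll_fn(unsigned long data)
{
	struct my_device *dev = (struct my_device *)data;

	my_device_poll(dev);
	mod_timer(&poll_timer, jiffies + HZ);	/* run again in ~1 second */
}
#endif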
 883
 884/**
 885 * mod_timer_pinned - modify a timer's timeout
 886 * @timer: the timer to be modified
 887 * @expires: new timeout in jiffies
 888 *
 889 * mod_timer_pinned() is a way to update the expire field of an
 890 * active timer (if the timer is inactive it will be activated)
 891 * and to ensure that the timer is scheduled on the current CPU.
 892 *
 893 * Note that this does not prevent the timer from being migrated
 894 * when the current CPU goes offline.  If this is a problem for
 895 * you, use CPU-hotplug notifiers to handle it correctly, for
 896 * example, cancelling the timer when the corresponding CPU goes
 897 * offline.
 898 *
 899 * mod_timer_pinned(timer, expires) is equivalent to:
 900 *
 901 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 902 */
 903int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
 904{
 905	if (timer->expires == expires && timer_pending(timer))
 906		return 1;
 907
 908	return __mod_timer(timer, expires, false, TIMER_PINNED);
 909}
 910EXPORT_SYMBOL(mod_timer_pinned);
 911
 912/**
 913 * add_timer - start a timer
 914 * @timer: the timer to be added
 915 *
 916 * The kernel will do a ->function(->data) callback from the
 917 * timer interrupt at the ->expires point in the future. The
 918 * current time is 'jiffies'.
 919 *
 920 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 921 * fields must be set prior to calling this function.
 922 *
 923 * Timers with an ->expires field in the past will be executed in the next
 924 * timer tick.
 925 */
 926void add_timer(struct timer_list *timer)
 927{
 928	BUG_ON(timer_pending(timer));
 929	mod_timer(timer, timer->expires);
 930}
 931EXPORT_SYMBOL(add_timer);
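
/*
 * Editor's note (not part of timer.c): a minimal one-shot sketch for
 * add_timer(), assuming a hypothetical 30-second watchdog.  ->expires and
 * ->function are set up before the timer is added, as required above;
 * wd_timeout_fn and my_device are assumptions.
 */
#if 0	/* illustrative only */
static struct timer_list wd_timer;

static void arm_watchdog(struct my_device *dev)
{
	setup_timer(&wd_timer, wd_timeout_fn, (unsigned long)dev);
	wd_timer.expires = jiffies + 30 * HZ;	/* fire in ~30 seconds */
	add_timer(&wd_timer);			/* must not already be pending */
}
#endif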
 932
 933/**
 934 * add_timer_on - start a timer on a particular CPU
 935 * @timer: the timer to be added
 936 * @cpu: the CPU to start it on
 937 *
 938 * This is not very scalable on SMP. Double adds are not possible.
 939 */
 940void add_timer_on(struct timer_list *timer, int cpu)
 941{
 942	struct tvec_base *base = per_cpu(tvec_bases, cpu);
 943	unsigned long flags;
 944
 945	timer_stats_timer_set_start_info(timer);
 946	BUG_ON(timer_pending(timer) || !timer->function);
 947	spin_lock_irqsave(&base->lock, flags);
 948	timer_set_base(timer, base);
 949	debug_activate(timer, timer->expires);
 950	internal_add_timer(base, timer);
 951	/*
 952	 * Check whether the other CPU is in dynticks mode and needs
 953	 * to be triggered to reevaluate the timer wheel.
 954	 * We are protected against the other CPU fiddling
 955	 * with the timer by holding the timer base lock. This also
 956	 * makes sure that a CPU on the way to stop its tick can not
 957	 * evaluate the timer wheel.
 958	 *
 959	 * Spare the IPI for deferrable timers on idle targets though.
 960	 * The next busy ticks will take care of it. Except full dynticks
 961	 * require special care against races with idle_cpu(), let's deal
 962	 * with that later.
 963	 */
 964	if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(cpu))
 965		wake_up_nohz_cpu(cpu);
 966
 967	spin_unlock_irqrestore(&base->lock, flags);
 968}
 969EXPORT_SYMBOL_GPL(add_timer_on);
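
/*
 * Editor's note (not part of timer.c): a minimal sketch of add_timer_on(),
 * assuming work that must run on one particular CPU (for instance the CPU
 * owning a per-CPU resource).  percpu_fn is an assumption.
 */
#if 0	/* illustrative only */
static void arm_timer_on_cpu(struct timer_list *timer, int cpu)
{
	setup_timer(timer, percpu_fn, (unsigned long)cpu);
	timer->expires = jiffies + HZ;
	add_timer_on(timer, cpu);	/* queue it on that CPU's wheel */
}
#endif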
 970
 971/**
 972 * del_timer - deactivate a timer.
 973 * @timer: the timer to be deactivated
 974 *
 975 * del_timer() deactivates a timer - this works on both active and inactive
 976 * timers.
 977 *
 978 * The function returns whether it has deactivated a pending timer or not.
 979 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 980 * active timer returns 1.)
 981 */
 982int del_timer(struct timer_list *timer)
 983{
 984	struct tvec_base *base;
 985	unsigned long flags;
 986	int ret = 0;
 987
 988	debug_assert_init(timer);
 989
 990	timer_stats_timer_clear_start_info(timer);
 991	if (timer_pending(timer)) {
 992		base = lock_timer_base(timer, &flags);
 993		ret = detach_if_pending(timer, base, true);
 994		spin_unlock_irqrestore(&base->lock, flags);
 995	}
 996
 997	return ret;
 998}
 999EXPORT_SYMBOL(del_timer);
1000
1001/**
1002 * try_to_del_timer_sync - Try to deactivate a timer
1003 * @timer: timer to delete
1004 *
1005 * This function tries to deactivate a timer. Upon successful (ret >= 0)
1006 * exit the timer is not queued and the handler is not running on any CPU.
1007 */
1008int try_to_del_timer_sync(struct timer_list *timer)
1009{
1010	struct tvec_base *base;
1011	unsigned long flags;
1012	int ret = -1;
1013
1014	debug_assert_init(timer);
1015
1016	base = lock_timer_base(timer, &flags);
1017
1018	if (base->running_timer != timer) {
1019		timer_stats_timer_clear_start_info(timer);
1020		ret = detach_if_pending(timer, base, true);
1021	}
1022	spin_unlock_irqrestore(&base->lock, flags);
1023
1024	return ret;
1025}
1026EXPORT_SYMBOL(try_to_del_timer_sync);
1027
1028#ifdef CONFIG_SMP
1029/**
1030 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1031 * @timer: the timer to be deactivated
1032 *
1033 * This function only differs from del_timer() on SMP: besides deactivating
1034 * the timer it also makes sure the handler has finished executing on other
1035 * CPUs.
1036 *
1037 * Synchronization rules: Callers must prevent restarting of the timer,
1038 * otherwise this function is meaningless. It must not be called from
1039 * interrupt contexts unless the timer is an irqsafe one. The caller must
1040 * not hold locks which would prevent completion of the timer's
1041 * handler. The timer's handler must not call add_timer_on(). Upon exit the
1042 * timer is not queued and the handler is not running on any CPU.
1043 *
1044 * Note: For !irqsafe timers, you must not hold locks that are held in
1045 *   interrupt context while calling this function. Even if the lock has
1046 *   nothing to do with the timer in question.  Here's why:
1047 *
1048 *    CPU0                             CPU1
1049 *    ----                             ----
1050 *                                   <SOFTIRQ>
1051 *                                   call_timer_fn();
1052 *                                     base->running_timer = mytimer;
1053 *  spin_lock_irq(somelock);
1054 *                                     <IRQ>
1055 *                                        spin_lock(somelock);
1056 *  del_timer_sync(mytimer);
1057 *   while (base->running_timer == mytimer);
1058 *
1059 * Now del_timer_sync() will never return and never release somelock.
1060 * The interrupt on the other CPU is waiting to grab somelock but
1061 * it has interrupted the softirq that CPU0 is waiting to finish.
1062 *
1063 * The function returns whether it has deactivated a pending timer or not.
1064 */
1065int del_timer_sync(struct timer_list *timer)
1066{
1067#ifdef CONFIG_LOCKDEP
1068	unsigned long flags;
1069
1070	/*
1071	 * If lockdep gives a backtrace here, please reference
1072	 * the synchronization rules above.
1073	 */
1074	local_irq_save(flags);
1075	lock_map_acquire(&timer->lockdep_map);
1076	lock_map_release(&timer->lockdep_map);
1077	local_irq_restore(flags);
1078#endif
1079	/*
1080	 * don't use it in hardirq context, because it
1081	 * could lead to deadlock.
1082	 */
1083	WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
1084	for (;;) {
1085		int ret = try_to_del_timer_sync(timer);
1086		if (ret >= 0)
1087			return ret;
1088		cpu_relax();
1089	}
1090}
1091EXPORT_SYMBOL(del_timer_sync);
1092#endif
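
/*
 * Editor's note (not part of timer.c): a minimal teardown sketch, assuming
 * a hypothetical driver remove path.  del_timer_sync() waits for a running
 * handler to finish, so it must not be called while holding a lock the
 * handler takes (see the deadlock diagram above), and the handler must be
 * prevented from re-arming itself first.
 */
#if 0	/* illustrative only */
static void my_driver_remove(struct my_device *dev)
{
	dev->shutting_down = true;	/* hypothetical flag poll_fn checks */
	del_timer_sync(&dev->poll_timer);
	kfree(dev);			/* handler has finished; safe to free */
}
#endif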
1093
1094static int cascade(struct tvec_base *base, struct tvec *tv, int index)
1095{
1096	/* cascade all the timers from tv up one level */
1097	struct timer_list *timer, *tmp;
1098	struct list_head tv_list;
1099
1100	list_replace_init(tv->vec + index, &tv_list);
1101
1102	/*
1103	 * We are removing _all_ timers from the list, so we
1104	 * don't have to detach them individually.
1105	 */
1106	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
1107		BUG_ON(tbase_get_base(timer->base) != base);
1108		/* No accounting, while moving them */
1109		__internal_add_timer(base, timer);
1110	}
1111
1112	return index;
1113}
1114
1115static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1116			  unsigned long data)
1117{
1118	int count = preempt_count();
1119
1120#ifdef CONFIG_LOCKDEP
1121	/*
1122	 * It is permissible to free the timer from inside the
1123	 * function that is called from it, this we need to take into
1124	 * account for lockdep too. To avoid bogus "held lock freed"
1125	 * warnings as well as problems when looking into
1126	 * timer->lockdep_map, make a copy and use that here.
1127	 */
1128	struct lockdep_map lockdep_map;
1129
1130	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1131#endif
1132	/*
1133	 * Couple the lock chain with the lock chain at
1134	 * del_timer_sync() by acquiring the lock_map around the fn()
1135	 * call here and in del_timer_sync().
1136	 */
1137	lock_map_acquire(&lockdep_map);
1138
1139	trace_timer_expire_entry(timer);
1140	fn(data);
1141	trace_timer_expire_exit(timer);
1142
1143	lock_map_release(&lockdep_map);
1144
1145	if (count != preempt_count()) {
1146		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
1147			  fn, count, preempt_count());
1148		/*
1149		 * Restore the preempt count. That gives us a decent
1150		 * chance to survive and extract information. If the
1151		 * callback kept a lock held, bad luck, but not worse
1152		 * than the BUG() we had.
1153		 */
1154		preempt_count_set(count);
1155	}
1156}
1157
1158#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
1159
1160/**
1161 * __run_timers - run all expired timers (if any) on this CPU.
1162 * @base: the timer vector to be processed.
1163 *
1164 * This function cascades all vectors and executes all expired timer
1165 * vectors.
1166 */
1167static inline void __run_timers(struct tvec_base *base)
1168{
1169	struct timer_list *timer;
1170
1171	spin_lock_irq(&base->lock);
1172	if (catchup_timer_jiffies(base)) {
1173		spin_unlock_irq(&base->lock);
1174		return;
1175	}
1176	while (time_after_eq(jiffies, base->timer_jiffies)) {
1177		struct list_head work_list;
1178		struct list_head *head = &work_list;
1179		int index = base->timer_jiffies & TVR_MASK;
1180
1181		/*
1182		 * Cascade timers:
1183		 */
1184		if (!index &&
1185			(!cascade(base, &base->tv2, INDEX(0))) &&
1186				(!cascade(base, &base->tv3, INDEX(1))) &&
1187					!cascade(base, &base->tv4, INDEX(2)))
1188			cascade(base, &base->tv5, INDEX(3));
1189		++base->timer_jiffies;
1190		list_replace_init(base->tv1.vec + index, head);
1191		while (!list_empty(head)) {
1192			void (*fn)(unsigned long);
1193			unsigned long data;
1194			bool irqsafe;
1195
1196			timer = list_first_entry(head, struct timer_list, entry);
1197			fn = timer->function;
1198			data = timer->data;
1199			irqsafe = tbase_get_irqsafe(timer->base);
1200
1201			timer_stats_account_timer(timer);
1202
1203			base->running_timer = timer;
1204			detach_expired_timer(timer, base);
1205
1206			if (irqsafe) {
1207				spin_unlock(&base->lock);
1208				call_timer_fn(timer, fn, data);
1209				spin_lock(&base->lock);
1210			} else {
1211				spin_unlock_irq(&base->lock);
1212				call_timer_fn(timer, fn, data);
1213				spin_lock_irq(&base->lock);
1214			}
1215		}
1216	}
1217	base->running_timer = NULL;
1218	spin_unlock_irq(&base->lock);
1219}
1220
1221#ifdef CONFIG_NO_HZ_COMMON
1222/*
1223 * Find out when the next timer event is due to happen. This
1224 * is used on S/390 to stop all activity when a CPU is idle.
1225 * This function needs to be called with interrupts disabled.
1226 */
1227static unsigned long __next_timer_interrupt(struct tvec_base *base)
1228{
1229	unsigned long timer_jiffies = base->timer_jiffies;
1230	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
1231	int index, slot, array, found = 0;
1232	struct timer_list *nte;
1233	struct tvec *varray[4];
1234
1235	/* Look for timer events in tv1. */
1236	index = slot = timer_jiffies & TVR_MASK;
1237	do {
1238		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
1239			if (tbase_get_deferrable(nte->base))
1240				continue;
1241
1242			found = 1;
1243			expires = nte->expires;
1244			/* Look at the cascade bucket(s)? */
1245			if (!index || slot < index)
1246				goto cascade;
1247			return expires;
1248		}
1249		slot = (slot + 1) & TVR_MASK;
1250	} while (slot != index);
1251
1252cascade:
1253	/* Calculate the next cascade event */
1254	if (index)
1255		timer_jiffies += TVR_SIZE - index;
1256	timer_jiffies >>= TVR_BITS;
1257
1258	/* Check tv2-tv5. */
1259	varray[0] = &base->tv2;
1260	varray[1] = &base->tv3;
1261	varray[2] = &base->tv4;
1262	varray[3] = &base->tv5;
1263
1264	for (array = 0; array < 4; array++) {
1265		struct tvec *varp = varray[array];
1266
1267		index = slot = timer_jiffies & TVN_MASK;
1268		do {
1269			list_for_each_entry(nte, varp->vec + slot, entry) {
1270				if (tbase_get_deferrable(nte->base))
1271					continue;
1272
1273				found = 1;
1274				if (time_before(nte->expires, expires))
1275					expires = nte->expires;
1276			}
1277			/*
1278			 * Do we still search for the first timer or are
1279			 * we looking up the cascade buckets ?
1280			 */
1281			if (found) {
1282				/* Look at the cascade bucket(s)? */
1283				if (!index || slot < index)
1284					break;
1285				return expires;
1286			}
1287			slot = (slot + 1) & TVN_MASK;
1288		} while (slot != index);
1289
1290		if (index)
1291			timer_jiffies += TVN_SIZE - index;
1292		timer_jiffies >>= TVN_BITS;
1293	}
1294	return expires;
1295}
1296
1297/*
1298 * Check, if the next hrtimer event is before the next timer wheel
1299 * event:
1300 */
1301static unsigned long cmp_next_hrtimer_event(unsigned long now,
1302					    unsigned long expires)
1303{
1304	ktime_t hr_delta = hrtimer_get_next_event();
1305	struct timespec tsdelta;
1306	unsigned long delta;
1307
1308	if (hr_delta.tv64 == KTIME_MAX)
1309		return expires;
1310
1311	/*
1312	 * Expired timer available, let it expire in the next tick
1313	 */
1314	if (hr_delta.tv64 <= 0)
1315		return now + 1;
1316
1317	tsdelta = ktime_to_timespec(hr_delta);
1318	delta = timespec_to_jiffies(&tsdelta);
1319
1320	/*
1321	 * Limit the delta to the max value, which is checked in
1322	 * tick_nohz_stop_sched_tick():
1323	 */
1324	if (delta > NEXT_TIMER_MAX_DELTA)
1325		delta = NEXT_TIMER_MAX_DELTA;
1326
1327	/*
1328	 * Take rounding errors in to account and make sure, that it
1329	 * expires in the next tick. Otherwise we go into an endless
1330	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
1331	 * the timer softirq
1332	 */
1333	if (delta < 1)
1334		delta = 1;
1335	now += delta;
1336	if (time_before(now, expires))
1337		return now;
1338	return expires;
1339}
1340
1341/**
1342 * get_next_timer_interrupt - return the jiffy of the next pending timer
1343 * @now: current time (in jiffies)
1344 */
1345unsigned long get_next_timer_interrupt(unsigned long now)
1346{
1347	struct tvec_base *base = __this_cpu_read(tvec_bases);
1348	unsigned long expires = now + NEXT_TIMER_MAX_DELTA;
1349
1350	/*
1351	 * Pretend that there is no timer pending if the cpu is offline.
1352	 * Possible pending timers will be migrated later to an active cpu.
1353	 */
1354	if (cpu_is_offline(smp_processor_id()))
1355		return expires;
1356
1357	spin_lock(&base->lock);
1358	if (base->active_timers) {
1359		if (time_before_eq(base->next_timer, base->timer_jiffies))
1360			base->next_timer = __next_timer_interrupt(base);
1361		expires = base->next_timer;
1362	}
1363	spin_unlock(&base->lock);
1364
1365	if (time_before_eq(expires, now))
1366		return now;
1367
1368	return cmp_next_hrtimer_event(now, expires);
1369}
1370#endif
1371
1372/*
1373 * Called from the timer interrupt handler to charge one tick to the current
1374 * process.  user_tick is 1 if the tick is user time, 0 for system.
1375 */
1376void update_process_times(int user_tick)
1377{
1378	struct task_struct *p = current;
1379	int cpu = smp_processor_id();
1380
1381	/* Note: this timer irq context must be accounted for as well. */
1382	account_process_tick(p, user_tick);
1383	run_local_timers();
1384	rcu_check_callbacks(cpu, user_tick);
1385#ifdef CONFIG_IRQ_WORK
1386	if (in_irq())
1387		irq_work_run();
1388#endif
1389	scheduler_tick();
1390	run_posix_cpu_timers(p);
1391}
1392
1393/*
1394 * This function runs timers and the timer-tq in bottom half context.
1395 */
1396static void run_timer_softirq(struct softirq_action *h)
1397{
1398	struct tvec_base *base = __this_cpu_read(tvec_bases);
1399
1400	hrtimer_run_pending();
1401
1402	if (time_after_eq(jiffies, base->timer_jiffies))
1403		__run_timers(base);
1404}
1405
1406/*
1407 * Called by the local, per-CPU timer interrupt on SMP.
1408 */
1409void run_local_timers(void)
1410{
1411	hrtimer_run_queues();
1412	raise_softirq(TIMER_SOFTIRQ);
1413}
1414
1415#ifdef __ARCH_WANT_SYS_ALARM
1416
1417/*
1418 * For backwards compatibility?  This can be done in libc so Alpha
1419 * and all newer ports shouldn't need it.
1420 */
1421SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1422{
1423	return alarm_setitimer(seconds);
1424}
1425
1426#endif
1427
1428static void process_timeout(unsigned long __data)
1429{
1430	wake_up_process((struct task_struct *)__data);
1431}
1432
1433/**
1434 * schedule_timeout - sleep until timeout
1435 * @timeout: timeout value in jiffies
1436 *
1437 * Make the current task sleep until @timeout jiffies have
1438 * elapsed. The routine will return immediately unless
1439 * the current task state has been set (see set_current_state()).
1440 *
1441 * You can set the task state as follows -
1442 *
1443 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1444 * pass before the routine returns. The routine will return 0
1445 *
1446 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1447 * delivered to the current task. In this case the remaining time
1448 * in jiffies will be returned, or 0 if the timer expired in time
1449 *
1450 * The current task state is guaranteed to be TASK_RUNNING when this
1451 * routine returns.
1452 *
1453 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1454 * the CPU away without a bound on the timeout. In this case the return
1455 * value will be %MAX_SCHEDULE_TIMEOUT.
1456 *
1457 * In all cases the return value is guaranteed to be non-negative.
1458 */
1459signed long __sched schedule_timeout(signed long timeout)
1460{
1461	struct timer_list timer;
1462	unsigned long expire;
1463
1464	switch (timeout)
1465	{
1466	case MAX_SCHEDULE_TIMEOUT:
1467		/*
1468		 * These two special cases are useful to be comfortable
1469		 * in the caller. Nothing more. We could take
1470		 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1471		 * but I'd like to return a valid offset (>=0) to allow
1472		 * the caller to do everything it wants with the retval.
1473		 */
1474		schedule();
1475		goto out;
1476	default:
1477		/*
1478		 * Another bit of PARANOID. Note that the retval will be
1479		 * 0 since no piece of the kernel is supposed to do a check
1480		 * for a negative retval of schedule_timeout() (since it
1481		 * should never happen anyway). You just have the printk()
1482		 * that will tell you if something has gone wrong, and where.
1483		 */
1484		if (timeout < 0) {
1485			printk(KERN_ERR "schedule_timeout: wrong timeout "
1486				"value %lx\n", timeout);
1487			dump_stack();
1488			current->state = TASK_RUNNING;
1489			goto out;
1490		}
1491	}
1492
1493	expire = timeout + jiffies;
1494
1495	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
1496	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
1497	schedule();
1498	del_singleshot_timer_sync(&timer);
1499
1500	/* Remove the timer from the object tracker */
1501	destroy_timer_on_stack(&timer);
1502
1503	timeout = expire - jiffies;
1504
1505 out:
1506	return timeout < 0 ? 0 : timeout;
1507}
1508EXPORT_SYMBOL(schedule_timeout);
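
/*
 * Editor's note (not part of timer.c): a minimal sketch of the
 * schedule_timeout() calling convention described above: set the task
 * state first, then sleep.  The return value is the time left in jiffies
 * if the task was woken early (by a signal or wake_up_process()).
 */
#if 0	/* illustrative only */
static signed long short_interruptible_wait(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(msecs_to_jiffies(100));	/* ~100ms */
}
#endif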
1509
1510/*
1511 * We can use __set_current_state() here because schedule_timeout() calls
1512 * schedule() unconditionally.
1513 */
1514signed long __sched schedule_timeout_interruptible(signed long timeout)
1515{
1516	__set_current_state(TASK_INTERRUPTIBLE);
1517	return schedule_timeout(timeout);
1518}
1519EXPORT_SYMBOL(schedule_timeout_interruptible);
1520
1521signed long __sched schedule_timeout_killable(signed long timeout)
1522{
1523	__set_current_state(TASK_KILLABLE);
1524	return schedule_timeout(timeout);
1525}
1526EXPORT_SYMBOL(schedule_timeout_killable);
1527
1528signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1529{
1530	__set_current_state(TASK_UNINTERRUPTIBLE);
1531	return schedule_timeout(timeout);
1532}
1533EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1534
1535static int init_timers_cpu(int cpu)
1536{
1537	int j;
1538	struct tvec_base *base;
1539	static char tvec_base_done[NR_CPUS];
1540
1541	if (!tvec_base_done[cpu]) {
1542		static char boot_done;
1543
1544		if (boot_done) {
1545			/*
1546			 * The APs use this path later in boot
1547			 */
1548			base = kzalloc_node(sizeof(*base), GFP_KERNEL,
1549					    cpu_to_node(cpu));
1550			if (!base)
1551				return -ENOMEM;
1552
1553			/* Make sure tvec_base has TIMER_FLAG_MASK bits free */
1554			if (WARN_ON(base != tbase_get_base(base))) {
1555				kfree(base);
1556				return -ENOMEM;
1557			}
1558			per_cpu(tvec_bases, cpu) = base;
1559		} else {
1560			/*
1561			 * This is for the boot CPU - we use compile-time
1562			 * static initialisation because per-cpu memory isn't
1563			 * ready yet and because the memory allocators are not
1564			 * initialised either.
1565			 */
1566			boot_done = 1;
1567			base = &boot_tvec_bases;
1568		}
1569		spin_lock_init(&base->lock);
1570		tvec_base_done[cpu] = 1;
1571	} else {
1572		base = per_cpu(tvec_bases, cpu);
1573	}
1574
1575
1576	for (j = 0; j < TVN_SIZE; j++) {
1577		INIT_LIST_HEAD(base->tv5.vec + j);
1578		INIT_LIST_HEAD(base->tv4.vec + j);
1579		INIT_LIST_HEAD(base->tv3.vec + j);
1580		INIT_LIST_HEAD(base->tv2.vec + j);
1581	}
1582	for (j = 0; j < TVR_SIZE; j++)
1583		INIT_LIST_HEAD(base->tv1.vec + j);
1584
1585	base->timer_jiffies = jiffies;
1586	base->next_timer = base->timer_jiffies;
1587	base->active_timers = 0;
1588	base->all_timers = 0;
1589	return 0;
1590}
1591
1592#ifdef CONFIG_HOTPLUG_CPU
1593static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
1594{
1595	struct timer_list *timer;
1596
1597	while (!list_empty(head)) {
1598		timer = list_first_entry(head, struct timer_list, entry);
1599		/* We ignore the accounting on the dying cpu */
1600		detach_timer(timer, false);
1601		timer_set_base(timer, new_base);
1602		internal_add_timer(new_base, timer);
1603	}
1604}
1605
1606static void migrate_timers(int cpu)
1607{
1608	struct tvec_base *old_base;
1609	struct tvec_base *new_base;
1610	int i;
1611
1612	BUG_ON(cpu_online(cpu));
1613	old_base = per_cpu(tvec_bases, cpu);
1614	new_base = get_cpu_var(tvec_bases);
1615	/*
1616	 * The caller is globally serialized and nobody else
1617	 * takes two locks at once, deadlock is not possible.
1618	 */
1619	spin_lock_irq(&new_base->lock);
1620	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1621
1622	BUG_ON(old_base->running_timer);
1623
1624	for (i = 0; i < TVR_SIZE; i++)
1625		migrate_timer_list(new_base, old_base->tv1.vec + i);
1626	for (i = 0; i < TVN_SIZE; i++) {
1627		migrate_timer_list(new_base, old_base->tv2.vec + i);
1628		migrate_timer_list(new_base, old_base->tv3.vec + i);
1629		migrate_timer_list(new_base, old_base->tv4.vec + i);
1630		migrate_timer_list(new_base, old_base->tv5.vec + i);
1631	}
1632
1633	spin_unlock(&old_base->lock);
1634	spin_unlock_irq(&new_base->lock);
1635	put_cpu_var(tvec_bases);
1636}
1637#endif /* CONFIG_HOTPLUG_CPU */
1638
1639static int timer_cpu_notify(struct notifier_block *self,
1640				unsigned long action, void *hcpu)
1641{
1642	long cpu = (long)hcpu;
1643	int err;
1644
1645	switch(action) {
1646	case CPU_UP_PREPARE:
1647	case CPU_UP_PREPARE_FROZEN:
1648		err = init_timers_cpu(cpu);
1649		if (err < 0)
1650			return notifier_from_errno(err);
1651		break;
1652#ifdef CONFIG_HOTPLUG_CPU
1653	case CPU_DEAD:
1654	case CPU_DEAD_FROZEN:
1655		migrate_timers(cpu);
1656		break;
1657#endif
1658	default:
1659		break;
1660	}
1661	return NOTIFY_OK;
1662}
1663
1664static struct notifier_block timers_nb = {
1665	.notifier_call	= timer_cpu_notify,
1666};
1667
1668
1669void __init init_timers(void)
1670{
1671	int err;
1672
1673	/* ensure there are enough low bits for flags in timer->base pointer */
1674	BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);
1675
1676	err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
1677			       (void *)(long)smp_processor_id());
1678	BUG_ON(err != NOTIFY_OK);
1679
1680	init_timer_stats();
1681	register_cpu_notifier(&timers_nb);
1682	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
1683}
1684
1685/**
1686 * msleep - sleep safely even with waitqueue interruptions
1687 * @msecs: Time in milliseconds to sleep for
1688 */
1689void msleep(unsigned int msecs)
1690{
1691	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1692
1693	while (timeout)
1694		timeout = schedule_timeout_uninterruptible(timeout);
1695}
1696
1697EXPORT_SYMBOL(msleep);
1698
1699/**
1700 * msleep_interruptible - sleep waiting for signals
1701 * @msecs: Time in milliseconds to sleep for
1702 */
1703unsigned long msleep_interruptible(unsigned int msecs)
1704{
1705	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1706
1707	while (timeout && !signal_pending(current))
1708		timeout = schedule_timeout_interruptible(timeout);
1709	return jiffies_to_msecs(timeout);
1710}
1711
1712EXPORT_SYMBOL(msleep_interruptible);
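
/*
 * Editor's note (not part of timer.c): a minimal sketch contrasting the
 * msleep() variants.  msleep() always sleeps the full period; the
 * interruptible variant returns non-zero (the milliseconds left) as soon
 * as a signal is pending, letting a killed task bail out of a poll loop.
 * my_device and my_device_ready() are assumptions.
 */
#if 0	/* illustrative only */
static int wait_until_ready(struct my_device *dev)
{
	while (!my_device_ready(dev)) {
		if (msleep_interruptible(20))	/* a signal arrived */
			return -ERESTARTSYS;
	}
	return 0;
}
#endif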
1713
1714static int __sched do_usleep_range(unsigned long min, unsigned long max)
1715{
1716	ktime_t kmin;
1717	unsigned long delta;
1718
1719	kmin = ktime_set(0, min * NSEC_PER_USEC);
1720	delta = (max - min) * NSEC_PER_USEC;
1721	return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
1722}
1723
1724/**
1725 * usleep_range - Drop in replacement for udelay where wakeup is flexible
1726 * @min: Minimum time in usecs to sleep
1727 * @max: Maximum time in usecs to sleep
1728 */
1729void usleep_range(unsigned long min, unsigned long max)
1730{
1731	__set_current_state(TASK_UNINTERRUPTIBLE);
1732	do_usleep_range(min, max);
1733}
1734EXPORT_SYMBOL(usleep_range);
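
/*
 * Editor's note (not part of timer.c): usleep_range() is backed by
 * hrtimers (see do_usleep_range() above), so it suits short process-context
 * delays where msleep() (jiffy resolution) is too coarse and udelay()
 * (busy-waiting) is too wasteful.  A minimal sketch with hypothetical
 * hardware timings:
 */
#if 0	/* illustrative only */
static void release_chip_reset(int reset_gpio)
{
	gpio_set_value(reset_gpio, 1);	/* deassert reset */
	/* hypothetical datasheet: wait at least 50us before first access;
	 * anything up to 200us is fine, which lets the wakeup be coalesced */
	usleep_range(50, 200);
}
#endif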