/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))

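/*
 * Worked example of the wheel geometry (illustrative, assuming
 * CONFIG_BASE_SMALL=0): tv1 has TVR_SIZE = 256 one-jiffy slots, and each
 * of tv2..tv5 has TVN_SIZE = 64 slots. One tv2 slot covers 256 jiffies,
 * one tv3 slot covers 256 * 64, and so on, so the whole wheel spans
 * TVR_BITS + 4 * TVN_BITS = 8 + 24 = 32 bits worth of jiffies and
 * MAX_TVAL = 2^32 - 1. A timeout of 10 * HZ = 2500 jiffies at HZ=250
 * lands in tv2, since 256 <= 2500 < 1 << (TVR_BITS + TVN_BITS) = 16384.
 */
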
struct tvec {
	struct hlist_head vec[TVN_SIZE];
};

struct tvec_root {
	struct hlist_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	unsigned long active_timers;
	unsigned long all_timers;
	int cpu;
	bool migration_enabled;
	bool nohz_active;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;


static DEFINE_PER_CPU(struct tvec_base, tvec_bases);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;

void timers_update_migration(bool update_nohz)
{
	bool on = sysctl_timer_migration && tick_nohz_active;
	unsigned int cpu;

	/* Avoid the loop, if nothing to update */
	if (this_cpu_read(tvec_bases.migration_enabled) == on)
		return;

	for_each_possible_cpu(cpu) {
		per_cpu(tvec_bases.migration_enabled, cpu) = on;
		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
		if (!update_nohz)
			continue;
		per_cpu(tvec_bases.nohz_active, cpu) = true;
		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
	}
}

int timer_migration_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
			    loff_t *ppos)
{
	static DEFINE_MUTEX(mutex);
	int ret;

	mutex_lock(&mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!ret && write)
		timers_update_migration(false);
	mutex_unlock(&mutex);
	return ret;
}

static inline struct tvec_base *get_target_base(struct tvec_base *base,
						int pinned)
{
	if (pinned || !base->migration_enabled)
		return this_cpu_ptr(&tvec_bases);
	return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
}
#else
static inline struct tvec_base *get_target_base(struct tvec_base *base,
						int pinned)
{
	return this_cpu_ptr(&tvec_bases);
}
#endif

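/*
 * Usage sketch: the knob above is exposed as a sysctl, so timer
 * migration can be toggled at runtime from userspace, e.g.:
 *
 *	echo 0 > /proc/sys/kernel/timer_migration
 *
 * which ends up in timer_migration_handler() and, on a write, reruns
 * timers_update_migration().
 */
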
static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

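/*
 * Usage sketch (illustrative, my_timer is a caller-owned timer): a
 * periodic ~5 second housekeeping timer that does not care about the
 * exact expiry can coalesce with other such timers:
 *
 *	mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
 */
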
/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);

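/*
 * Usage sketch (illustrative): a timeout that may fire up to one second
 * late without harm can advertise that, letting the wheel batch it with
 * neighbouring timers:
 *
 *	set_timer_slack(&my_timer, HZ);
 *	mod_timer(&my_timer, jiffies + 10 * HZ);
 */
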
static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct hlist_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than MAX_TVAL (on 64-bit
		 * architectures or with CONFIG_BASE_SMALL=1) then we
		 * use the maximum timeout.
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}

	hlist_add_head(&timer->entry, vec);
}

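/*
 * Worked example for the bucketing above (illustrative): with
 * base->timer_jiffies = 1000 and expires = 3500, idx = 2500, which is
 * >= TVR_SIZE (256) but < 1 << (TVR_BITS + TVN_BITS) (16384), so the
 * timer goes into tv2 at slot (3500 >> TVR_BITS) & TVN_MASK =
 * (3500 >> 8) & 63 = 13.
 */
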
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	/* Advance base->timer_jiffies, if the base is empty */
	if (!base->all_timers++)
		base->timer_jiffies = jiffies;

	__internal_add_timer(base, timer);
	/*
	 * Update base->active_timers and base->next_timer
	 */
	if (!(timer->flags & TIMER_DEFERRABLE)) {
		if (!base->active_timers++ ||
		    time_before(timer->expires, base->next_timer))
			base->next_timer = timer->expires;
	}

	/*
	 * Check whether the other CPU is in dynticks mode and needs
	 * to be triggered to reevaluate the timer wheel.
	 * We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to stop its tick can not
	 * evaluate the timer wheel.
	 *
	 * Spare the IPI for deferrable timers on idle targets though.
	 * The next busy ticks will take care of it. Except full dynticks
	 * require special care against races with idle_cpu(), let's deal
	 * with that later.
	 */
	if (base->nohz_active) {
		if (!(timer->flags & TIMER_DEFERRABLE) ||
		    tick_nohz_full_cpu(base->cpu))
			wake_up_nohz_cpu(base->cpu);
	}
}

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	void *site;

	/*
	 * start_site can be concurrently reset by
	 * timer_stats_timer_clear_start_info()
	 */
	site = READ_ONCE(timer->start_site);
	if (likely(!site))
		return;

	timer_stats_update_stats(timer, timer->start_pid, site,
				 timer->function, timer->start_comm,
				 timer->flags);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.pprev == NULL &&
		    timer->entry.next == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (timer->entry.next == TIMER_ENTRY_STATIC) {
			/*
			 * This is not really a fixup. The timer was
			 * statically initialized. We just make sure that it
			 * is tracked in the object tracker.
			 */
			debug_object_init(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires, timer->flags);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	timer->entry.pprev = NULL;
	timer->flags = flags | raw_smp_processor_id();
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct hlist_node *entry = &timer->entry;

	debug_deactivate(timer);

	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
	detach_timer(timer, true);
	if (!(timer->flags & TIMER_DEFERRABLE))
		base->active_timers--;
	base->all_timers--;
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
			     bool clear_pending)
{
	if (!timer_pending(timer))
		return 0;

	detach_timer(timer, clear_pending);
	if (!(timer->flags & TIMER_DEFERRABLE)) {
		base->active_timers--;
		if (timer->expires == base->next_timer)
			base->next_timer = base->timer_jiffies;
	}
	/* If this was the last timer, advance base->timer_jiffies */
	if (!--base->all_timers)
		base->timer_jiffies = jiffies;
	return 1;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked and removed from the list, the
 * TIMER_MIGRATING flag is set, FIXME
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
{
	for (;;) {
		u32 tf = timer->flags;
		struct tvec_base *base;

		if (!(tf & TIMER_MIGRATING)) {
			base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
			spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	debug_activate(timer, expires);

	new_base = get_target_base(base, pinned);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not finished yet. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->flags |= TIMER_MIGRATING;

			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			WRITE_ONCE(timer->flags,
				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = __fls(mask);

	mask = (1UL << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}

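/*
 * Worked example (illustrative): with timer->slack == -1, jiffies == 0
 * and expires == 1000, delta is 1000, so expires_limit = 1000 +
 * 1000/256 = 1003. Then mask = 1000 ^ 1003 = 3, __fls(3) = 1, the low
 * bit is cleared, and the timer is queued for jiffy 1002 - anywhere in
 * [1000, 1003] being acceptable.
 */
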
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);

/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline.  If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

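/*
 * Usage sketch (illustrative, with a hypothetical my_timeout_fn callback
 * and a caller-owned struct timer_list t): the classic one-shot pattern
 * is
 *
 *	setup_timer(&t, my_timeout_fn, (unsigned long)my_data);
 *	t.expires = jiffies + HZ;
 *	add_timer(&t);
 *
 * For an already initialized timer, mod_timer(&t, jiffies + HZ) does the
 * same in one step and is also safe if the timer is still pending.
 */
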
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
	struct tvec_base *base;
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);

	/*
	 * If @timer was on a different CPU, it should be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked.  See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		spin_unlock(&base->lock);
		base = new_base;
		spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}

	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

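/*
 * Usage sketch (illustrative): per-CPU housekeeping, e.g. arming a
 * watchdog on a specific CPU, assuming a hypothetical
 * DEFINE_PER_CPU(struct timer_list, wd_timer) set up elsewhere:
 *
 *	per_cpu(wd_timer, cpu).expires = jiffies + 2 * HZ;
 *	add_timer_on(&per_cpu(wd_timer, cpu), cpu);
 */
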
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to be deactivated
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 *   interrupt context while calling this function. Even if the lock has
 *   nothing to do with the timer in question.  Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                   <SOFTIRQ>
 *                                   call_timer_fn();
 *                                     base->running_timer = mytimer;
 *  spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                        spin_lock(somelock);
 *  del_timer_sync(mytimer);
 *   while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif

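/*
 * Usage sketch (illustrative): the canonical teardown sequence is to
 * stop whatever re-arms the timer first, then synchronously cancel it
 * before freeing the object that embeds it:
 *
 *	dev->shutting_down = true;
 *	del_timer_sync(&dev->poll_timer);
 *	kfree(dev);
 *
 * where the handler checks dev->shutting_down and refrains from
 * re-arming once it is set.
 */
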
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer;
	struct hlist_node *tmp;
	struct hlist_head tv_list;

	hlist_move_list(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		/* No accounting, while moving them */
		__internal_add_timer(base, timer);
	}

	return index;
}

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count_set(count);
	}
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);

	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct hlist_head work_list;
		struct hlist_head *head = &work_list;
		int index;

		if (!base->all_timers) {
			base->timer_jiffies = jiffies;
			break;
		}

		index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		hlist_move_list(base->tv1.vec + index, head);
		while (!hlist_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;
			bool irqsafe;

			timer = hlist_entry(head->first, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;
			irqsafe = timer->flags & TIMER_IRQSAFE;

			timer_stats_account_timer(timer);

			base->running_timer = timer;
			detach_expired_timer(timer, base);

			if (irqsafe) {
				spin_unlock(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock(&base->lock);
			} else {
				spin_unlock_irq(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock_irq(&base->lock);
			}
		}
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (nte->flags & TIMER_DEFERRABLE)
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			hlist_for_each_entry(nte, varp->vec + slot, entry) {
				if (nte->flags & TIMER_DEFERRABLE)
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
{
	u64 nextevt = hrtimer_get_next_event();

	/*
	 * If high resolution timers are enabled
	 * hrtimer_get_next_event() returns KTIME_MAX.
	 */
	if (expires <= nextevt)
		return expires;

	/*
	 * If the next timer is already expired, return the tick base
	 * time so the tick is fired immediately.
	 */
	if (nextevt <= basem)
		return basem;

	/*
	 * Round up to the next jiffie. High resolution timers are
	 * off, so the hrtimers are expired in the tick and we need to
	 * make sure that this tick really expires the timer to avoid
	 * a ping pong of the nohz stop code.
	 *
	 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
	 */
	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
}

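/*
 * Worked example for the rounding above (illustrative): with HZ=1000,
 * TICK_NSEC is 1000000. A nextevt of 2500000 then becomes
 * DIV_ROUND_UP_ULL(2500000, 1000000) * 1000000 = 3 * 1000000, i.e. the
 * expiry is pushed out to the next full tick boundary.
 */
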
/**
 * get_next_timer_interrupt - return the time (clock mono) of the next timer
 * @basej:	base time jiffies
 * @basem:	base time clock monotonic
 *
 * Returns the tick aligned clock monotonic time of the next pending
 * timer or KTIME_MAX if no timer is pending.
 */
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
	u64 expires = KTIME_MAX;
	unsigned long nextevt;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	spin_lock(&base->lock);
	if (base->active_timers) {
		if (time_before_eq(base->next_timer, base->timer_jiffies))
			base->next_timer = __next_timer_interrupt(base);
		nextevt = base->next_timer;
		if (time_before_eq(nextevt, basej))
			expires = basem;
		else
			expires = basem + (nextevt - basej) * TICK_NSEC;
	}
	spin_unlock(&base->lock);

	return cmp_next_hrtimer_event(basem, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(user_tick);
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_tick();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = this_cpu_ptr(&tvec_bases);

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

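/*
 * Usage sketch (illustrative): sleep for up to one second, returning
 * early if a signal is delivered:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * A nonzero return value means the task was woken with that many
 * jiffies still left on the clock.
 */
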
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/*
 * Like schedule_timeout_uninterruptible(), except this task will not contribute
 * to load average.
 */
signed long __sched schedule_timeout_idle(signed long timeout)
{
	__set_current_state(TASK_IDLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_idle);

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
{
	struct timer_list *timer;
	int cpu = new_base->cpu;

	while (!hlist_empty(head)) {
		timer = hlist_entry(head->first, struct timer_list, entry);
		/* We ignore the accounting on the dying cpu */
		detach_timer(timer, false);
		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
		internal_add_timer(new_base, timer);
	}
}

static void migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu_ptr(&tvec_bases, cpu);
	new_base = get_cpu_ptr(&tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	old_base->active_timers = 0;
	old_base->all_timers = 0;

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_ptr(&tvec_bases);
}

static int timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers((long)hcpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static inline void timer_register_cpu_notifier(void)
{
	cpu_notifier(timer_cpu_notify, 0);
}
#else
static inline void timer_register_cpu_notifier(void) { }
#endif /* CONFIG_HOTPLUG_CPU */

static void __init init_timer_cpu(int cpu)
{
	struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);

	base->cpu = cpu;
	spin_lock_init(&base->lock);

	base->timer_jiffies = jiffies;
	base->next_timer = base->timer_jiffies;
}

static void __init init_timer_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		init_timer_cpu(cpu);
}

void __init init_timers(void)
{
	init_timer_cpus();
	init_timer_stats();
	timer_register_cpu_notifier();
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);

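/*
 * Usage sketch (illustrative): sleep ~20ms but bail out early when a
 * signal arrives:
 *
 *	if (msleep_interruptible(20))
 *		return -EINTR;
 */
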
static void __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	u64 delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (u64)(max - min) * NSEC_PER_USEC;
	schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void __sched usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
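
/*
 * Usage sketch (illustrative): a driver waiting ~100us for hardware in
 * non-atomic context should prefer this over udelay(), since the min/max
 * window gives the hrtimer subsystem room to coalesce wakeups:
 *
 *	usleep_range(100, 200);
 */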