v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
   4 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
   5 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
   6 *
   7 *  High-resolution kernel timers
   8 *
   9 *  In contrast to the low-resolution timeout API, aka timer wheel,
  10 *  hrtimers provide finer resolution and accuracy depending on system
  11 *  configuration and capabilities.
  12 *
  13 *  Started by: Thomas Gleixner and Ingo Molnar
  14 *
  15 *  Credits:
  16 *	Based on the original timer wheel code
  17 *
  18 *	Help, testing, suggestions, bugfixes, improvements were
  19 *	provided by:
  20 *
  21 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
   22 *	et al.
  23 */
  24
  25#include <linux/cpu.h>
  26#include <linux/export.h>
  27#include <linux/percpu.h>
  28#include <linux/hrtimer.h>
  29#include <linux/notifier.h>
  30#include <linux/syscalls.h>
  31#include <linux/interrupt.h>
  32#include <linux/tick.h>
  33#include <linux/err.h>
  34#include <linux/debugobjects.h>
  35#include <linux/sched/signal.h>
  36#include <linux/sched/sysctl.h>
  37#include <linux/sched/rt.h>
  38#include <linux/sched/deadline.h>
  39#include <linux/sched/nohz.h>
  40#include <linux/sched/debug.h>
  41#include <linux/timer.h>
  42#include <linux/freezer.h>
  43#include <linux/compat.h>
  44
  45#include <linux/uaccess.h>
  46
  47#include <trace/events/timer.h>
  48
  49#include "tick-internal.h"
  50
  51/*
  52 * Masks for selecting the soft and hard context timers from
  53 * cpu_base->active
  54 */
  55#define MASK_SHIFT		(HRTIMER_BASE_MONOTONIC_SOFT)
  56#define HRTIMER_ACTIVE_HARD	((1U << MASK_SHIFT) - 1)
  57#define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
  58#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
  59
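/*
 * Editor's illustration, not part of the original file: assuming the enum
 * layout in <linux/hrtimer.h> where the four hard bases (MONOTONIC,
 * REALTIME, BOOTTIME, TAI) precede their _SOFT counterparts, MASK_SHIFT is
 * 4 and the masks above evaluate to:
 *
 *	HRTIMER_ACTIVE_HARD == 0x0f	(bits 0-3, hard interrupt bases)
 *	HRTIMER_ACTIVE_SOFT == 0xf0	(bits 4-7, softirq bases)
 *	HRTIMER_ACTIVE_ALL  == 0xff
 */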
  60/*
  61 * The timer bases:
  62 *
  63 * There are more clockids than hrtimer bases. Thus, we index
  64 * into the timer bases by the hrtimer_base_type enum. When trying
  65 * to reach a base using a clockid, hrtimer_clockid_to_base()
  66 * is used to convert from clockid to the proper hrtimer_base_type.
  67 */
  68DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
  69{
  70	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
  71	.clock_base =
  72	{
  73		{
  74			.index = HRTIMER_BASE_MONOTONIC,
  75			.clockid = CLOCK_MONOTONIC,
  76			.get_time = &ktime_get,
  77		},
  78		{
  79			.index = HRTIMER_BASE_REALTIME,
  80			.clockid = CLOCK_REALTIME,
  81			.get_time = &ktime_get_real,
  82		},
  83		{
  84			.index = HRTIMER_BASE_BOOTTIME,
  85			.clockid = CLOCK_BOOTTIME,
  86			.get_time = &ktime_get_boottime,
  87		},
  88		{
  89			.index = HRTIMER_BASE_TAI,
  90			.clockid = CLOCK_TAI,
  91			.get_time = &ktime_get_clocktai,
  92		},
  93		{
  94			.index = HRTIMER_BASE_MONOTONIC_SOFT,
  95			.clockid = CLOCK_MONOTONIC,
  96			.get_time = &ktime_get,
  97		},
  98		{
  99			.index = HRTIMER_BASE_REALTIME_SOFT,
 100			.clockid = CLOCK_REALTIME,
 101			.get_time = &ktime_get_real,
 102		},
 103		{
 104			.index = HRTIMER_BASE_BOOTTIME_SOFT,
 105			.clockid = CLOCK_BOOTTIME,
 106			.get_time = &ktime_get_boottime,
 107		},
 108		{
 109			.index = HRTIMER_BASE_TAI_SOFT,
 110			.clockid = CLOCK_TAI,
 111			.get_time = &ktime_get_clocktai,
 112		},
 113	}
 114};
 115
 116static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
 117	/* Make sure we catch unsupported clockids */
 118	[0 ... MAX_CLOCKS - 1]	= HRTIMER_MAX_CLOCK_BASES,
 119
 120	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
 121	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
 122	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
 123	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
 124};
 125
 126/*
 127 * Functions and macros which are different for UP/SMP systems are kept in a
 128 * single place
 129 */
 130#ifdef CONFIG_SMP
 131
 132/*
 133 * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
 134 * such that hrtimer_callback_running() can unconditionally dereference
 135 * timer->base->cpu_base
 136 */
 137static struct hrtimer_cpu_base migration_cpu_base = {
 138	.clock_base = { {
 139		.cpu_base = &migration_cpu_base,
 140		.seq      = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq,
 141						     &migration_cpu_base.lock),
 142	}, },
 143};
 144
 145#define migration_base	migration_cpu_base.clock_base[0]
 146
 147static inline bool is_migration_base(struct hrtimer_clock_base *base)
 148{
 149	return base == &migration_base;
 150}
 151
 152/*
 153 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 154 * means that all timers which are tied to this base via timer->base are
 155 * locked, and the base itself is locked too.
 156 *
 157 * So __run_timers/migrate_timers can safely modify all timers which could
 158 * be found on the lists/queues.
 159 *
 160 * When the timer's base is locked, and the timer removed from list, it is
 161 * possible to set timer->base = &migration_base and drop the lock: the timer
 162 * remains locked.
 163 */
 164static
 165struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 166					     unsigned long *flags)
 167{
 168	struct hrtimer_clock_base *base;
 169
 170	for (;;) {
 171		base = READ_ONCE(timer->base);
 172		if (likely(base != &migration_base)) {
 173			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 174			if (likely(base == timer->base))
 175				return base;
 176			/* The timer has migrated to another CPU: */
 177			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
 178		}
 179		cpu_relax();
 180	}
 181}
 182
 183/*
 184 * We do not migrate the timer when it is expiring before the next
 185 * event on the target cpu. When high resolution is enabled, we cannot
 186 * reprogram the target cpu hardware and we would cause it to fire
 187 * late. To keep it simple, we handle the high resolution enabled and
  188 *  disabled cases the same way.
 189 *
 190 * Called with cpu_base->lock of target cpu held.
 191 */
 192static int
 193hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
 194{
 195	ktime_t expires;
 196
 197	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
 198	return expires < new_base->cpu_base->expires_next;
 199}
 200
 201static inline
 202struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
 203					 int pinned)
 204{
 205#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 206	if (static_branch_likely(&timers_migration_enabled) && !pinned)
 207		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
 208#endif
 209	return base;
 210}
 211
 212/*
 213 * We switch the timer base to a power-optimized selected CPU target,
 214 * if:
 215 *	- NO_HZ_COMMON is enabled
 216 *	- timer migration is enabled
 217 *	- the timer callback is not running
 218 *	- the timer is not the first expiring timer on the new target
 219 *
 220 * If one of the above requirements is not fulfilled we move the timer
 221 * to the current CPU or leave it on the previously assigned CPU if
 222 * the timer callback is currently running.
 223 */
 224static inline struct hrtimer_clock_base *
 225switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 226		    int pinned)
 227{
 228	struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
 229	struct hrtimer_clock_base *new_base;
 230	int basenum = base->index;
 231
 232	this_cpu_base = this_cpu_ptr(&hrtimer_bases);
 233	new_cpu_base = get_target_base(this_cpu_base, pinned);
 234again:
 235	new_base = &new_cpu_base->clock_base[basenum];
 236
 237	if (base != new_base) {
 238		/*
 239		 * We are trying to move timer to new_base.
 240		 * However we can't change timer's base while it is running,
 241		 * so we keep it on the same CPU. No hassle vs. reprogramming
 242		 * the event source in the high resolution case. The softirq
 243		 * code will take care of this when the timer function has
 244		 * completed. There is no conflict as we hold the lock until
 245		 * the timer is enqueued.
 246		 */
 247		if (unlikely(hrtimer_callback_running(timer)))
 248			return base;
 249
 250		/* See the comment in lock_hrtimer_base() */
 251		WRITE_ONCE(timer->base, &migration_base);
 252		raw_spin_unlock(&base->cpu_base->lock);
 253		raw_spin_lock(&new_base->cpu_base->lock);
 254
 255		if (new_cpu_base != this_cpu_base &&
 256		    hrtimer_check_target(timer, new_base)) {
 257			raw_spin_unlock(&new_base->cpu_base->lock);
 258			raw_spin_lock(&base->cpu_base->lock);
 259			new_cpu_base = this_cpu_base;
 260			WRITE_ONCE(timer->base, base);
 261			goto again;
 262		}
 263		WRITE_ONCE(timer->base, new_base);
 264	} else {
 265		if (new_cpu_base != this_cpu_base &&
 266		    hrtimer_check_target(timer, new_base)) {
 267			new_cpu_base = this_cpu_base;
 268			goto again;
 269		}
 270	}
 271	return new_base;
 272}
 273
 274#else /* CONFIG_SMP */
 275
 276static inline bool is_migration_base(struct hrtimer_clock_base *base)
 277{
 278	return false;
 279}
 280
 281static inline struct hrtimer_clock_base *
 282lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 283{
 284	struct hrtimer_clock_base *base = timer->base;
 285
 286	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 287
 288	return base;
 289}
 290
 291# define switch_hrtimer_base(t, b, p)	(b)
 292
 293#endif	/* !CONFIG_SMP */
 294
 295/*
 296 * Functions for the union type storage format of ktime_t which are
 297 * too large for inlining:
 298 */
 299#if BITS_PER_LONG < 64
 300/*
 301 * Divide a ktime value by a nanosecond value
 302 */
 303s64 __ktime_divns(const ktime_t kt, s64 div)
 304{
 305	int sft = 0;
 306	s64 dclc;
 307	u64 tmp;
 308
 309	dclc = ktime_to_ns(kt);
 310	tmp = dclc < 0 ? -dclc : dclc;
 311
 312	/* Make sure the divisor is less than 2^32: */
 313	while (div >> 32) {
 314		sft++;
 315		div >>= 1;
 316	}
 317	tmp >>= sft;
 318	do_div(tmp, (u32) div);
 319	return dclc < 0 ? -tmp : tmp;
 320}
 321EXPORT_SYMBOL_GPL(__ktime_divns);
  322#endif /* BITS_PER_LONG < 64 */
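/*
 * Editor's worked example, not part of the original file: for
 * kt = 10,000,000,000 ns and div = 6,000,000,000, the loop shifts div once
 * (6e9 >> 32 is non-zero, 3e9 >> 32 is zero), so sft == 1. tmp is shifted
 * by the same amount (5,000,000,000) and do_div(5e9, 3e9) yields 1, which
 * matches the exact truncated quotient 10e9 / 6e9 == 1.
 */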
 323
 324/*
 325 * Add two ktime values and do a safety check for overflow:
 326 */
 327ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
 328{
 329	ktime_t res = ktime_add_unsafe(lhs, rhs);
 330
 331	/*
 332	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
 333	 * return to user space in a timespec:
 334	 */
 335	if (res < 0 || res < lhs || res < rhs)
 336		res = ktime_set(KTIME_SEC_MAX, 0);
 337
 338	return res;
 339}
 340
 341EXPORT_SYMBOL_GPL(ktime_add_safe);
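/*
 * Editor's example, not part of the original file: adding one second to
 * KTIME_MAX with ktime_add_unsafe() would wrap negative; ktime_add_safe()
 * instead clamps the result to ktime_set(KTIME_SEC_MAX, 0), which is why
 * relative expiry times are converted with it further down in this file.
 */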
 342
 343#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
 344
 345static const struct debug_obj_descr hrtimer_debug_descr;
 346
 347static void *hrtimer_debug_hint(void *addr)
 348{
 349	return ((struct hrtimer *) addr)->function;
 350}
 351
 352/*
 353 * fixup_init is called when:
 354 * - an active object is initialized
 355 */
 356static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
 357{
 358	struct hrtimer *timer = addr;
 359
 360	switch (state) {
 361	case ODEBUG_STATE_ACTIVE:
 362		hrtimer_cancel(timer);
 363		debug_object_init(timer, &hrtimer_debug_descr);
 364		return true;
 365	default:
 366		return false;
 367	}
 368}
 369
 370/*
 371 * fixup_activate is called when:
 372 * - an active object is activated
 373 * - an unknown non-static object is activated
 374 */
 375static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
 376{
 377	switch (state) {
 378	case ODEBUG_STATE_ACTIVE:
 379		WARN_ON(1);
 380		fallthrough;
 381	default:
 382		return false;
 383	}
 384}
 385
 386/*
 387 * fixup_free is called when:
 388 * - an active object is freed
 389 */
 390static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
 391{
 392	struct hrtimer *timer = addr;
 393
 394	switch (state) {
 395	case ODEBUG_STATE_ACTIVE:
 396		hrtimer_cancel(timer);
 397		debug_object_free(timer, &hrtimer_debug_descr);
 398		return true;
 399	default:
 400		return false;
 401	}
 402}
 403
 404static const struct debug_obj_descr hrtimer_debug_descr = {
 405	.name		= "hrtimer",
 406	.debug_hint	= hrtimer_debug_hint,
 407	.fixup_init	= hrtimer_fixup_init,
 408	.fixup_activate	= hrtimer_fixup_activate,
 409	.fixup_free	= hrtimer_fixup_free,
 410};
 411
 412static inline void debug_hrtimer_init(struct hrtimer *timer)
 413{
 414	debug_object_init(timer, &hrtimer_debug_descr);
 415}
 416
 417static inline void debug_hrtimer_activate(struct hrtimer *timer,
 418					  enum hrtimer_mode mode)
 419{
 420	debug_object_activate(timer, &hrtimer_debug_descr);
 421}
 422
 423static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
 424{
 425	debug_object_deactivate(timer, &hrtimer_debug_descr);
 426}
 427
 428static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 429			   enum hrtimer_mode mode);
 430
 431void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
 432			   enum hrtimer_mode mode)
 433{
 434	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
 435	__hrtimer_init(timer, clock_id, mode);
 436}
 437EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
 438
 439static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
 440				   clockid_t clock_id, enum hrtimer_mode mode);
 441
 442void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
 443				   clockid_t clock_id, enum hrtimer_mode mode)
 444{
 445	debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
 446	__hrtimer_init_sleeper(sl, clock_id, mode);
 447}
 448EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack);
 449
 450void destroy_hrtimer_on_stack(struct hrtimer *timer)
 451{
 452	debug_object_free(timer, &hrtimer_debug_descr);
 453}
 454EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
 455
 456#else
 457
 458static inline void debug_hrtimer_init(struct hrtimer *timer) { }
 459static inline void debug_hrtimer_activate(struct hrtimer *timer,
 460					  enum hrtimer_mode mode) { }
 461static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
 462#endif
 463
 464static inline void
 465debug_init(struct hrtimer *timer, clockid_t clockid,
 466	   enum hrtimer_mode mode)
 467{
 468	debug_hrtimer_init(timer);
 469	trace_hrtimer_init(timer, clockid, mode);
 470}
 471
 472static inline void debug_activate(struct hrtimer *timer,
 473				  enum hrtimer_mode mode)
 474{
 475	debug_hrtimer_activate(timer, mode);
 476	trace_hrtimer_start(timer, mode);
 477}
 478
 479static inline void debug_deactivate(struct hrtimer *timer)
 480{
 481	debug_hrtimer_deactivate(timer);
 482	trace_hrtimer_cancel(timer);
 483}
 484
 485static struct hrtimer_clock_base *
 486__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
 487{
 488	unsigned int idx;
 489
 490	if (!*active)
 491		return NULL;
 492
 493	idx = __ffs(*active);
 494	*active &= ~(1U << idx);
 495
 496	return &cpu_base->clock_base[idx];
 497}
 498
 499#define for_each_active_base(base, cpu_base, active)	\
 500	while ((base = __next_base((cpu_base), &(active))))
 501
 502static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
 503					 const struct hrtimer *exclude,
 504					 unsigned int active,
 505					 ktime_t expires_next)
 506{
 507	struct hrtimer_clock_base *base;
 508	ktime_t expires;
 509
 510	for_each_active_base(base, cpu_base, active) {
 511		struct timerqueue_node *next;
 512		struct hrtimer *timer;
 513
 514		next = timerqueue_getnext(&base->active);
 515		timer = container_of(next, struct hrtimer, node);
 516		if (timer == exclude) {
 517			/* Get to the next timer in the queue. */
 518			next = timerqueue_iterate_next(next);
 519			if (!next)
 520				continue;
 521
 522			timer = container_of(next, struct hrtimer, node);
 523		}
 524		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 525		if (expires < expires_next) {
 526			expires_next = expires;
 527
 528			/* Skip cpu_base update if a timer is being excluded. */
 529			if (exclude)
 530				continue;
 531
 532			if (timer->is_soft)
 533				cpu_base->softirq_next_timer = timer;
 534			else
 535				cpu_base->next_timer = timer;
 536		}
 537	}
 538	/*
 539	 * clock_was_set() might have changed base->offset of any of
 540	 * the clock bases so the result might be negative. Fix it up
 541	 * to prevent a false positive in clockevents_program_event().
 542	 */
 543	if (expires_next < 0)
 544		expires_next = 0;
 545	return expires_next;
 546}
 547
 548/*
 549 * Recomputes cpu_base::*next_timer and returns the earliest expires_next
 550 * but does not set cpu_base::*expires_next, that is done by
  551 * hrtimer[_force]_reprogram and hrtimer_interrupt only. If
  552 * cpu_base::*expires_next were updated right away, the reprogramming
  553 * logic would no longer work.
 554 *
 555 * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
  556 * those timers will get run whenever the softirq gets handled. At the end of
 557 * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
 558 *
 559 * Therefore softirq values are those from the HRTIMER_ACTIVE_SOFT clock bases.
 560 * The !softirq values are the minima across HRTIMER_ACTIVE_ALL, unless an actual
 561 * softirq is pending, in which case they're the minima of HRTIMER_ACTIVE_HARD.
 562 *
 563 * @active_mask must be one of:
 564 *  - HRTIMER_ACTIVE_ALL,
 565 *  - HRTIMER_ACTIVE_SOFT, or
 566 *  - HRTIMER_ACTIVE_HARD.
 567 */
 568static ktime_t
 569__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
 570{
 571	unsigned int active;
 572	struct hrtimer *next_timer = NULL;
 573	ktime_t expires_next = KTIME_MAX;
 574
 575	if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
 576		active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
 577		cpu_base->softirq_next_timer = NULL;
 578		expires_next = __hrtimer_next_event_base(cpu_base, NULL,
 579							 active, KTIME_MAX);
 580
 581		next_timer = cpu_base->softirq_next_timer;
 582	}
 583
 584	if (active_mask & HRTIMER_ACTIVE_HARD) {
 585		active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
 586		cpu_base->next_timer = next_timer;
 587		expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
 588							 expires_next);
 589	}
 590
 591	return expires_next;
 592}
 593
 594static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base)
 595{
 596	ktime_t expires_next, soft = KTIME_MAX;
 597
 598	/*
 599	 * If the soft interrupt has already been activated, ignore the
 600	 * soft bases. They will be handled in the already raised soft
 601	 * interrupt.
 602	 */
 603	if (!cpu_base->softirq_activated) {
 604		soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
 605		/*
 606		 * Update the soft expiry time. clock_settime() might have
 607		 * affected it.
 608		 */
 609		cpu_base->softirq_expires_next = soft;
 610	}
 611
 612	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
 613	/*
 614	 * If a softirq timer is expiring first, update cpu_base->next_timer
 615	 * and program the hardware with the soft expiry time.
 616	 */
 617	if (expires_next > soft) {
 618		cpu_base->next_timer = cpu_base->softirq_next_timer;
 619		expires_next = soft;
 620	}
 621
 622	return expires_next;
 623}
 624
 625static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
 626{
 627	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
 628	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
 629	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
 630
 631	ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
 632					    offs_real, offs_boot, offs_tai);
 633
 634	base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
 635	base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
 636	base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
 637
 638	return now;
 639}
 640
 641/*
 642 * Is the high resolution mode active ?
 643 */
 644static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
 645{
 646	return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
 647		cpu_base->hres_active : 0;
 648}
 649
 650static inline int hrtimer_hres_active(void)
 651{
 652	return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
 653}
 654
 655/*
  656 * Reprogram the event source, checking both queues for the
 657 * next event
 658 * Called with interrupts disabled and base->lock held
 659 */
 660static void
 661hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 662{
 663	ktime_t expires_next;
 664
 665	expires_next = hrtimer_update_next_event(cpu_base);
 666
 667	if (skip_equal && expires_next == cpu_base->expires_next)
 668		return;
 669
 670	cpu_base->expires_next = expires_next;
 671
 672	/*
 673	 * If hres is not active, hardware does not have to be
 674	 * reprogrammed yet.
 675	 *
 676	 * If a hang was detected in the last timer interrupt then we
 677	 * leave the hang delay active in the hardware. We want the
 678	 * system to make progress. That also prevents the following
 679	 * scenario:
 680	 * T1 expires 50ms from now
 681	 * T2 expires 5s from now
 682	 *
 683	 * T1 is removed, so this code is called and would reprogram
 684	 * the hardware to 5s from now. Any hrtimer_start after that
 685	 * will not reprogram the hardware due to hang_detected being
 686	 * set. So we'd effectively block all timers until the T2 event
 687	 * fires.
 688	 */
 689	if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
 690		return;
 691
 692	tick_program_event(cpu_base->expires_next, 1);
 693}
 694
 695/* High resolution timer related functions */
 696#ifdef CONFIG_HIGH_RES_TIMERS
 697
 698/*
 699 * High resolution timer enabled ?
 700 */
  701static bool hrtimer_hres_enabled __read_mostly = true;
 702unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
 703EXPORT_SYMBOL_GPL(hrtimer_resolution);
 704
 705/*
 706 * Enable / Disable high resolution mode
 707 */
 708static int __init setup_hrtimer_hres(char *str)
 709{
 710	return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
 711}
 712
 713__setup("highres=", setup_hrtimer_hres);
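/*
 * Editor's note, not part of the original file: high resolution mode can be
 * disabled from the kernel command line, e.g. "highres=off" or "highres=0";
 * any string accepted by kstrtobool() works.
 */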
 714
 715/*
  716 * hrtimer_is_hres_enabled - query whether the highres mode is enabled
 717 */
 718static inline int hrtimer_is_hres_enabled(void)
 719{
 720	return hrtimer_hres_enabled;
 721}
 722
 723/*
 724 * Retrigger next event is called after clock was set
 725 *
 726 * Called with interrupts disabled via on_each_cpu()
 727 */
 728static void retrigger_next_event(void *arg)
 729{
 730	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 731
 732	if (!__hrtimer_hres_active(base))
 733		return;
 734
 735	raw_spin_lock(&base->lock);
 736	hrtimer_update_base(base);
 737	hrtimer_force_reprogram(base, 0);
 738	raw_spin_unlock(&base->lock);
 739}
 740
 741/*
 742 * Switch to high resolution mode
 743 */
 744static void hrtimer_switch_to_hres(void)
 745{
 746	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 747
 748	if (tick_init_highres()) {
 749		pr_warn("Could not switch to high resolution mode on CPU %u\n",
 750			base->cpu);
 751		return;
 752	}
 753	base->hres_active = 1;
 754	hrtimer_resolution = HIGH_RES_NSEC;
 755
 756	tick_setup_sched_timer();
 757	/* "Retrigger" the interrupt to get things going */
 758	retrigger_next_event(NULL);
 759}
 760
 761#else
 762
 763static inline int hrtimer_is_hres_enabled(void) { return 0; }
 764static inline void hrtimer_switch_to_hres(void) { }
 765static inline void retrigger_next_event(void *arg) { }
 766
 767#endif /* CONFIG_HIGH_RES_TIMERS */
 768
 769/*
 770 * When a timer is enqueued and expires earlier than the already enqueued
  771 * timers, we have to check whether it expires earlier than the timer for
 772 * which the clock event device was armed.
 773 *
 774 * Called with interrupts disabled and base->cpu_base.lock held
 775 */
 776static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
 777{
 778	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 779	struct hrtimer_clock_base *base = timer->base;
 780	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 781
 782	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
 783
 784	/*
 785	 * CLOCK_REALTIME timer might be requested with an absolute
 786	 * expiry time which is less than base->offset. Set it to 0.
 787	 */
 788	if (expires < 0)
 789		expires = 0;
 790
 791	if (timer->is_soft) {
 792		/*
 793		 * soft hrtimer could be started on a remote CPU. In this
 794		 * case softirq_expires_next needs to be updated on the
 795		 * remote CPU. The soft hrtimer will not expire before the
 796		 * first hard hrtimer on the remote CPU -
 797		 * hrtimer_check_target() prevents this case.
 798		 */
 799		struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;
 800
 801		if (timer_cpu_base->softirq_activated)
 802			return;
 803
 804		if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
 805			return;
 806
 807		timer_cpu_base->softirq_next_timer = timer;
 808		timer_cpu_base->softirq_expires_next = expires;
 809
 810		if (!ktime_before(expires, timer_cpu_base->expires_next) ||
 811		    !reprogram)
 812			return;
 813	}
 814
 815	/*
 816	 * If the timer is not on the current cpu, we cannot reprogram
 817	 * the other cpus clock event device.
 818	 */
 819	if (base->cpu_base != cpu_base)
 820		return;
 821
 822	/*
 823	 * If the hrtimer interrupt is running, then it will
 824	 * reevaluate the clock bases and reprogram the clock event
 825	 * device. The callbacks are always executed in hard interrupt
 826	 * context so we don't need an extra check for a running
 827	 * callback.
 828	 */
 829	if (cpu_base->in_hrtirq)
 830		return;
 831
 832	if (expires >= cpu_base->expires_next)
 833		return;
 834
 835	/* Update the pointer to the next expiring timer */
 836	cpu_base->next_timer = timer;
 837	cpu_base->expires_next = expires;
 838
 839	/*
 840	 * If hres is not active, hardware does not have to be
 841	 * programmed yet.
 842	 *
 843	 * If a hang was detected in the last timer interrupt then we
 844	 * do not schedule a timer which is earlier than the expiry
 845	 * which we enforced in the hang detection. We want the system
 846	 * to make progress.
 847	 */
 848	if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
 849		return;
 850
 851	/*
 852	 * Program the timer hardware. We enforce the expiry for
 853	 * events which are already in the past.
 854	 */
 855	tick_program_event(expires, 1);
 856}
 857
 858/*
 859 * Clock realtime was set
 860 *
 861 * Change the offset of the realtime clock vs. the monotonic
 862 * clock.
 863 *
 864 * We might have to reprogram the high resolution timer interrupt. On
 865 * SMP we call the architecture specific code to retrigger _all_ high
 866 * resolution timer interrupts. On UP we just disable interrupts and
 867 * call the high resolution interrupt code.
 868 */
 869void clock_was_set(void)
 870{
 871#ifdef CONFIG_HIGH_RES_TIMERS
 872	/* Retrigger the CPU local events everywhere */
 873	on_each_cpu(retrigger_next_event, NULL, 1);
 874#endif
 875	timerfd_clock_was_set();
 876}
 877
 878static void clock_was_set_work(struct work_struct *work)
 879{
 880	clock_was_set();
 881}
 882
 883static DECLARE_WORK(hrtimer_work, clock_was_set_work);
 884
 885/*
 886 * Called from timekeeping and resume code to reprogram the hrtimer
 887 * interrupt device on all cpus and to notify timerfd.
 888 */
 889void clock_was_set_delayed(void)
 890{
 891	schedule_work(&hrtimer_work);
 892}
 893
 894/*
 895 * During resume we might have to reprogram the high resolution timer
 896 * interrupt on all online CPUs.  However, all other CPUs will be
  897 * stopped with interrupts disabled, so the clock_was_set() call
 898 * must be deferred.
 899 */
 900void hrtimers_resume(void)
 901{
 902	lockdep_assert_irqs_disabled();
 903	/* Retrigger on the local CPU */
 904	retrigger_next_event(NULL);
 905	/* And schedule a retrigger for all others */
 906	clock_was_set_delayed();
 907}
 908
 909/*
 910 * Counterpart to lock_hrtimer_base above:
 911 */
 912static inline
 913void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 914{
 915	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
 916}
 917
 918/**
 919 * hrtimer_forward - forward the timer expiry
 920 * @timer:	hrtimer to forward
 921 * @now:	forward past this time
 922 * @interval:	the interval to forward
 923 *
 924 * Forward the timer expiry so it will expire in the future.
 925 * Returns the number of overruns.
 926 *
 927 * Can be safely called from the callback function of @timer. If
 928 * called from other contexts @timer must neither be enqueued nor
 929 * running the callback and the caller needs to take care of
 930 * serialization.
 931 *
 932 * Note: This only updates the timer expiry value and does not requeue
 933 * the timer.
 934 */
 935u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 936{
 937	u64 orun = 1;
 938	ktime_t delta;
 939
 940	delta = ktime_sub(now, hrtimer_get_expires(timer));
 941
 942	if (delta < 0)
 943		return 0;
 944
 945	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
 946		return 0;
 947
 948	if (interval < hrtimer_resolution)
 949		interval = hrtimer_resolution;
 950
 951	if (unlikely(delta >= interval)) {
 952		s64 incr = ktime_to_ns(interval);
 953
 954		orun = ktime_divns(delta, incr);
 955		hrtimer_add_expires_ns(timer, incr * orun);
 956		if (hrtimer_get_expires_tv64(timer) > now)
 957			return orun;
 958		/*
 959		 * This (and the ktime_add() below) is the
 960		 * correction for exact:
 961		 */
 962		orun++;
 963	}
 964	hrtimer_add_expires(timer, interval);
 965
 966	return orun;
 967}
 968EXPORT_SYMBOL_GPL(hrtimer_forward);
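/*
 * Editor's sketch, not part of the original file: the typical use of
 * hrtimer_forward() is inside a periodic callback, via the
 * hrtimer_forward_now() wrapper from <linux/hrtimer.h>. The 100 ms period
 * below is an arbitrary example value.
 */
static enum hrtimer_restart example_periodic_cb(struct hrtimer *timer)
{
	/* Advance the expiry by whole periods past the current time. */
	hrtimer_forward_now(timer, ms_to_ktime(100));

	/* HRTIMER_RESTART requeues the timer at the forwarded expiry. */
	return HRTIMER_RESTART;
}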
 969
 970/*
 971 * enqueue_hrtimer - internal function to (re)start a timer
 972 *
 973 * The timer is inserted in expiry order. Insertion into the
 974 * red black tree is O(log(n)). Must hold the base lock.
 975 *
 976 * Returns 1 when the new timer is the leftmost timer in the tree.
 977 */
 978static int enqueue_hrtimer(struct hrtimer *timer,
 979			   struct hrtimer_clock_base *base,
 980			   enum hrtimer_mode mode)
 981{
 982	debug_activate(timer, mode);
 983
 984	base->cpu_base->active_bases |= 1 << base->index;
 985
 986	/* Pairs with the lockless read in hrtimer_is_queued() */
 987	WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
 988
 989	return timerqueue_add(&base->active, &timer->node);
 990}
 991
 992/*
 993 * __remove_hrtimer - internal function to remove a timer
 994 *
 995 * Caller must hold the base lock.
 996 *
 997 * High resolution timer mode reprograms the clock event device when the
 998 * timer is the one which expires next. The caller can disable this by setting
 999 * reprogram to zero. This is useful, when the context does a reprogramming
1000 * anyway (e.g. timer interrupt)
1001 */
1002static void __remove_hrtimer(struct hrtimer *timer,
1003			     struct hrtimer_clock_base *base,
1004			     u8 newstate, int reprogram)
1005{
1006	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
1007	u8 state = timer->state;
1008
1009	/* Pairs with the lockless read in hrtimer_is_queued() */
1010	WRITE_ONCE(timer->state, newstate);
1011	if (!(state & HRTIMER_STATE_ENQUEUED))
1012		return;
1013
1014	if (!timerqueue_del(&base->active, &timer->node))
1015		cpu_base->active_bases &= ~(1 << base->index);
1016
1017	/*
1018	 * Note: If reprogram is false we do not update
1019	 * cpu_base->next_timer. This happens when we remove the first
1020	 * timer on a remote cpu. No harm as we never dereference
 1021	 * cpu_base->next_timer. So the worst that can happen is
 1022	 * a superfluous call to hrtimer_force_reprogram() on the
1023	 * remote cpu later on if the same timer gets enqueued again.
1024	 */
1025	if (reprogram && timer == cpu_base->next_timer)
1026		hrtimer_force_reprogram(cpu_base, 1);
1027}
1028
1029/*
1030 * remove hrtimer, called with base lock held
1031 */
1032static inline int
1033remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
1034	       bool restart, bool keep_local)
1035{
1036	u8 state = timer->state;
1037
1038	if (state & HRTIMER_STATE_ENQUEUED) {
1039		bool reprogram;
1040
1041		/*
1042		 * Remove the timer and force reprogramming when high
1043		 * resolution mode is active and the timer is on the current
1044		 * CPU. If we remove a timer on another CPU, reprogramming is
1045		 * skipped. The interrupt event on this CPU is fired and
1046		 * reprogramming happens in the interrupt handler. This is a
1047		 * rare case and less expensive than a smp call.
1048		 */
1049		debug_deactivate(timer);
1050		reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
1051
1052		/*
1053		 * If the timer is not restarted then reprogramming is
1054		 * required if the timer is local. If it is local and about
1055		 * to be restarted, avoid programming it twice (on removal
1056		 * and a moment later when it's requeued).
1057		 */
1058		if (!restart)
1059			state = HRTIMER_STATE_INACTIVE;
1060		else
1061			reprogram &= !keep_local;
1062
1063		__remove_hrtimer(timer, base, state, reprogram);
1064		return 1;
1065	}
1066	return 0;
1067}
1068
1069static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
1070					    const enum hrtimer_mode mode)
1071{
1072#ifdef CONFIG_TIME_LOW_RES
1073	/*
1074	 * CONFIG_TIME_LOW_RES indicates that the system has no way to return
1075	 * granular time values. For relative timers we add hrtimer_resolution
 1076	 * (i.e. one jiffy) to prevent short timeouts.
1077	 */
1078	timer->is_rel = mode & HRTIMER_MODE_REL;
1079	if (timer->is_rel)
1080		tim = ktime_add_safe(tim, hrtimer_resolution);
1081#endif
1082	return tim;
1083}
1084
1085static void
1086hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
1087{
1088	ktime_t expires;
1089
1090	/*
1091	 * Find the next SOFT expiration.
1092	 */
1093	expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
1094
1095	/*
1096	 * reprogramming needs to be triggered, even if the next soft
 1097	 * hrtimer expires at the same time as the next hard
1098	 * hrtimer. cpu_base->softirq_expires_next needs to be updated!
1099	 */
1100	if (expires == KTIME_MAX)
1101		return;
1102
1103	/*
1104	 * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event()
1105	 * cpu_base->*expires_next is only set by hrtimer_reprogram()
1106	 */
1107	hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
1108}
1109
1110static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1111				    u64 delta_ns, const enum hrtimer_mode mode,
1112				    struct hrtimer_clock_base *base)
1113{
1114	struct hrtimer_clock_base *new_base;
1115	bool force_local, first;
1116
1117	/*
1118	 * If the timer is on the local cpu base and is the first expiring
1119	 * timer then this might end up reprogramming the hardware twice
 1120	 * (on removal and on enqueue). To avoid that by preventing the
1121	 * reprogram on removal, keep the timer local to the current CPU
1122	 * and enforce reprogramming after it is queued no matter whether
1123	 * it is the new first expiring timer again or not.
1124	 */
1125	force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
1126	force_local &= base->cpu_base->next_timer == timer;
1127
1128	/*
1129	 * Remove an active timer from the queue. In case it is not queued
1130	 * on the current CPU, make sure that remove_hrtimer() updates the
1131	 * remote data correctly.
1132	 *
1133	 * If it's on the current CPU and the first expiring timer, then
1134	 * skip reprogramming, keep the timer local and enforce
1135	 * reprogramming later if it was the first expiring timer.  This
1136	 * avoids programming the underlying clock event twice (once at
1137	 * removal and once after enqueue).
1138	 */
1139	remove_hrtimer(timer, base, true, force_local);
1140
1141	if (mode & HRTIMER_MODE_REL)
1142		tim = ktime_add_safe(tim, base->get_time());
1143
1144	tim = hrtimer_update_lowres(timer, tim, mode);
1145
1146	hrtimer_set_expires_range_ns(timer, tim, delta_ns);
1147
1148	/* Switch the timer base, if necessary: */
1149	if (!force_local) {
1150		new_base = switch_hrtimer_base(timer, base,
1151					       mode & HRTIMER_MODE_PINNED);
1152	} else {
1153		new_base = base;
1154	}
1155
1156	first = enqueue_hrtimer(timer, new_base, mode);
1157	if (!force_local)
1158		return first;
1159
1160	/*
1161	 * Timer was forced to stay on the current CPU to avoid
1162	 * reprogramming on removal and enqueue. Force reprogram the
1163	 * hardware by evaluating the new first expiring timer.
1164	 */
1165	hrtimer_force_reprogram(new_base->cpu_base, 1);
1166	return 0;
1167}
1168
1169/**
1170 * hrtimer_start_range_ns - (re)start an hrtimer
1171 * @timer:	the timer to be added
1172 * @tim:	expiry time
1173 * @delta_ns:	"slack" range for the timer
1174 * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
1175 *		relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
1176 *		softirq based mode is considered for debug purpose only!
1177 */
1178void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1179			    u64 delta_ns, const enum hrtimer_mode mode)
1180{
1181	struct hrtimer_clock_base *base;
1182	unsigned long flags;
1183
1184	/*
1185	 * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
1186	 * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
1187	 * expiry mode because unmarked timers are moved to softirq expiry.
1188	 */
1189	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1190		WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
1191	else
1192		WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);
1193
1194	base = lock_hrtimer_base(timer, &flags);
1195
1196	if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
1197		hrtimer_reprogram(timer, true);
1198
1199	unlock_hrtimer_base(timer, &flags);
1200}
1201EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
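/*
 * Editor's sketch, not part of the original file: starting a timer 100 ms
 * from now with a 1 ms slack window. Most callers use the hrtimer_start()
 * wrapper from <linux/hrtimer.h>, which is this call with a zero slack.
 */
static void example_start_with_slack(struct hrtimer *timer)
{
	hrtimer_start_range_ns(timer, ms_to_ktime(100), NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}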
1202
1203/**
1204 * hrtimer_try_to_cancel - try to deactivate a timer
1205 * @timer:	hrtimer to stop
1206 *
1207 * Returns:
1208 *
1209 *  *  0 when the timer was not active
1210 *  *  1 when the timer was active
1211 *  * -1 when the timer is currently executing the callback function and
1212 *    cannot be stopped
1213 */
1214int hrtimer_try_to_cancel(struct hrtimer *timer)
1215{
1216	struct hrtimer_clock_base *base;
1217	unsigned long flags;
1218	int ret = -1;
1219
1220	/*
1221	 * Check lockless first. If the timer is not active (neither
 1222	 * enqueued nor running the callback), nothing to do here.  The
1223	 * base lock does not serialize against a concurrent enqueue,
1224	 * so we can avoid taking it.
1225	 */
1226	if (!hrtimer_active(timer))
1227		return 0;
1228
1229	base = lock_hrtimer_base(timer, &flags);
1230
1231	if (!hrtimer_callback_running(timer))
1232		ret = remove_hrtimer(timer, base, false, false);
1233
1234	unlock_hrtimer_base(timer, &flags);
1235
1236	return ret;
1237
1238}
1239EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
1240
1241#ifdef CONFIG_PREEMPT_RT
1242static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base)
1243{
1244	spin_lock_init(&base->softirq_expiry_lock);
1245}
1246
1247static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base)
1248{
1249	spin_lock(&base->softirq_expiry_lock);
1250}
1251
1252static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
1253{
1254	spin_unlock(&base->softirq_expiry_lock);
1255}
1256
1257/*
1258 * The counterpart to hrtimer_cancel_wait_running().
1259 *
1260 * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
1261 * the timer callback to finish. Drop expiry_lock and reacquire it. That
1262 * allows the waiter to acquire the lock and make progress.
1263 */
1264static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
1265				      unsigned long flags)
1266{
1267	if (atomic_read(&cpu_base->timer_waiters)) {
1268		raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1269		spin_unlock(&cpu_base->softirq_expiry_lock);
1270		spin_lock(&cpu_base->softirq_expiry_lock);
1271		raw_spin_lock_irq(&cpu_base->lock);
1272	}
1273}
1274
1275/*
1276 * This function is called on PREEMPT_RT kernels when the fast path
1277 * deletion of a timer failed because the timer callback function was
1278 * running.
1279 *
1280 * This prevents priority inversion: if the soft irq thread is preempted
1281 * in the middle of a timer callback, then calling del_timer_sync() can
1282 * lead to two issues:
1283 *
1284 *  - If the caller is on a remote CPU then it has to spin wait for the timer
 1285 *    handler to complete. This can result in unbounded priority inversion.
1286 *
1287 *  - If the caller originates from the task which preempted the timer
1288 *    handler on the same CPU, then spin waiting for the timer handler to
1289 *    complete is never going to end.
1290 */
1291void hrtimer_cancel_wait_running(const struct hrtimer *timer)
1292{
1293	/* Lockless read. Prevent the compiler from reloading it below */
1294	struct hrtimer_clock_base *base = READ_ONCE(timer->base);
1295
1296	/*
1297	 * Just relax if the timer expires in hard interrupt context or if
1298	 * it is currently on the migration base.
1299	 */
1300	if (!timer->is_soft || is_migration_base(base)) {
1301		cpu_relax();
1302		return;
1303	}
1304
1305	/*
1306	 * Mark the base as contended and grab the expiry lock, which is
1307	 * held by the softirq across the timer callback. Drop the lock
1308	 * immediately so the softirq can expire the next timer. In theory
1309	 * the timer could already be running again, but that's more than
1310	 * unlikely and just causes another wait loop.
1311	 */
1312	atomic_inc(&base->cpu_base->timer_waiters);
1313	spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
1314	atomic_dec(&base->cpu_base->timer_waiters);
1315	spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
1316}
1317#else
1318static inline void
1319hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { }
1320static inline void
1321hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { }
1322static inline void
1323hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { }
1324static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base,
1325					     unsigned long flags) { }
1326#endif
1327
1328/**
1329 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
1330 * @timer:	the timer to be cancelled
1331 *
1332 * Returns:
1333 *  0 when the timer was not active
1334 *  1 when the timer was active
1335 */
1336int hrtimer_cancel(struct hrtimer *timer)
1337{
1338	int ret;
1339
1340	do {
1341		ret = hrtimer_try_to_cancel(timer);
1342
1343		if (ret < 0)
1344			hrtimer_cancel_wait_running(timer);
1345	} while (ret < 0);
1346	return ret;
1347}
1348EXPORT_SYMBOL_GPL(hrtimer_cancel);
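/*
 * Editor's note, not part of the original file: on teardown, use
 * hrtimer_cancel() rather than hrtimer_try_to_cancel() before freeing the
 * object embedding the timer, so that a concurrently running callback
 * cannot touch freed memory.
 */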
1349
1350/**
1351 * __hrtimer_get_remaining - get remaining time for the timer
1352 * @timer:	the timer to read
1353 * @adjust:	adjust relative timers when CONFIG_TIME_LOW_RES=y
1354 */
1355ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
1356{
1357	unsigned long flags;
1358	ktime_t rem;
1359
1360	lock_hrtimer_base(timer, &flags);
1361	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
1362		rem = hrtimer_expires_remaining_adjusted(timer);
1363	else
1364		rem = hrtimer_expires_remaining(timer);
1365	unlock_hrtimer_base(timer, &flags);
1366
1367	return rem;
1368}
1369EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
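/*
 * Editor's note, not part of the original file: callers normally use the
 * hrtimer_get_remaining() wrapper from <linux/hrtimer.h>, which passes
 * adjust == false; a negative result means the timer has already expired.
 */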
1370
1371#ifdef CONFIG_NO_HZ_COMMON
1372/**
1373 * hrtimer_get_next_event - get the time until next expiry event
1374 *
1375 * Returns the next expiry time or KTIME_MAX if no timer is pending.
1376 */
1377u64 hrtimer_get_next_event(void)
1378{
1379	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1380	u64 expires = KTIME_MAX;
1381	unsigned long flags;
1382
1383	raw_spin_lock_irqsave(&cpu_base->lock, flags);
1384
1385	if (!__hrtimer_hres_active(cpu_base))
1386		expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
1387
1388	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1389
1390	return expires;
1391}
1392
1393/**
1394 * hrtimer_next_event_without - time until next expiry event w/o one timer
1395 * @exclude:	timer to exclude
1396 *
1397 * Returns the next expiry time over all timers except for the @exclude one or
1398 * KTIME_MAX if none of them is pending.
1399 */
1400u64 hrtimer_next_event_without(const struct hrtimer *exclude)
1401{
1402	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1403	u64 expires = KTIME_MAX;
1404	unsigned long flags;
1405
1406	raw_spin_lock_irqsave(&cpu_base->lock, flags);
1407
1408	if (__hrtimer_hres_active(cpu_base)) {
1409		unsigned int active;
1410
1411		if (!cpu_base->softirq_activated) {
1412			active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
1413			expires = __hrtimer_next_event_base(cpu_base, exclude,
1414							    active, KTIME_MAX);
1415		}
1416		active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
1417		expires = __hrtimer_next_event_base(cpu_base, exclude, active,
1418						    expires);
1419	}
1420
1421	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1422
1423	return expires;
1424}
1425#endif
1426
1427static inline int hrtimer_clockid_to_base(clockid_t clock_id)
1428{
1429	if (likely(clock_id < MAX_CLOCKS)) {
1430		int base = hrtimer_clock_to_base_table[clock_id];
1431
1432		if (likely(base != HRTIMER_MAX_CLOCK_BASES))
1433			return base;
1434	}
1435	WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
1436	return HRTIMER_BASE_MONOTONIC;
1437}
1438
1439static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1440			   enum hrtimer_mode mode)
1441{
1442	bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
1443	struct hrtimer_cpu_base *cpu_base;
1444	int base;
1445
1446	/*
1447	 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
1448	 * marked for hard interrupt expiry mode are moved into soft
1449	 * interrupt context for latency reasons and because the callbacks
1450	 * can invoke functions which might sleep on RT, e.g. spin_lock().
1451	 */
1452	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD))
1453		softtimer = true;
1454
1455	memset(timer, 0, sizeof(struct hrtimer));
1456
1457	cpu_base = raw_cpu_ptr(&hrtimer_bases);
1458
1459	/*
1460	 * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
 1461	 * clock modifications, so they need to become CLOCK_MONOTONIC to
1462	 * ensure POSIX compliance.
1463	 */
1464	if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
1465		clock_id = CLOCK_MONOTONIC;
1466
1467	base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
1468	base += hrtimer_clockid_to_base(clock_id);
1469	timer->is_soft = softtimer;
1470	timer->is_hard = !!(mode & HRTIMER_MODE_HARD);
1471	timer->base = &cpu_base->clock_base[base];
1472	timerqueue_init(&timer->node);
1473}
1474
1475/**
1476 * hrtimer_init - initialize a timer to the given clock
1477 * @timer:	the timer to be initialized
1478 * @clock_id:	the clock to be used
1479 * @mode:       The modes which are relevant for initialization:
1480 *              HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
1481 *              HRTIMER_MODE_REL_SOFT
1482 *
1483 *              The PINNED variants of the above can be handed in,
1484 *              but the PINNED bit is ignored as pinning happens
1485 *              when the hrtimer is started
1486 */
1487void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1488		  enum hrtimer_mode mode)
1489{
1490	debug_init(timer, clock_id, mode);
1491	__hrtimer_init(timer, clock_id, mode);
1492}
1493EXPORT_SYMBOL_GPL(hrtimer_init);
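/*
 * Editor's sketch, not part of the original file: a minimal init/start/
 * cancel pattern, reusing the hypothetical example_periodic_cb() shown
 * after hrtimer_forward() above.
 */
static struct hrtimer example_timer;

static void example_setup(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_timer.function = example_periodic_cb;
	hrtimer_start(&example_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}

static void example_teardown(void)
{
	hrtimer_cancel(&example_timer);
}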
1494
1495/*
1496 * A timer is active, when it is enqueued into the rbtree or the
1497 * callback function is running or it's in the state of being migrated
1498 * to another cpu.
1499 *
1500 * It is important for this function to not return a false negative.
1501 */
1502bool hrtimer_active(const struct hrtimer *timer)
1503{
1504	struct hrtimer_clock_base *base;
1505	unsigned int seq;
1506
1507	do {
1508		base = READ_ONCE(timer->base);
1509		seq = raw_read_seqcount_begin(&base->seq);
1510
1511		if (timer->state != HRTIMER_STATE_INACTIVE ||
1512		    base->running == timer)
1513			return true;
1514
1515	} while (read_seqcount_retry(&base->seq, seq) ||
1516		 base != READ_ONCE(timer->base));
1517
1518	return false;
1519}
1520EXPORT_SYMBOL_GPL(hrtimer_active);
1521
1522/*
1523 * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
1524 * distinct sections:
1525 *
1526 *  - queued:	the timer is queued
 1527 *  - callback:	the timer is being run
1528 *  - post:	the timer is inactive or (re)queued
1529 *
1530 * On the read side we ensure we observe timer->state and cpu_base->running
1531 * from the same section, if anything changed while we looked at it, we retry.
1532 * This includes timer->base changing because sequence numbers alone are
1533 * insufficient for that.
1534 *
1535 * The sequence numbers are required because otherwise we could still observe
1536 * a false negative if the read side got smeared over multiple consecutive
1537 * __run_hrtimer() invocations.
1538 */
1539
1540static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
1541			  struct hrtimer_clock_base *base,
1542			  struct hrtimer *timer, ktime_t *now,
1543			  unsigned long flags) __must_hold(&cpu_base->lock)
1544{
1545	enum hrtimer_restart (*fn)(struct hrtimer *);
1546	bool expires_in_hardirq;
1547	int restart;
1548
1549	lockdep_assert_held(&cpu_base->lock);
1550
1551	debug_deactivate(timer);
1552	base->running = timer;
1553
1554	/*
1555	 * Separate the ->running assignment from the ->state assignment.
1556	 *
1557	 * As with a regular write barrier, this ensures the read side in
1558	 * hrtimer_active() cannot observe base->running == NULL &&
1559	 * timer->state == INACTIVE.
1560	 */
1561	raw_write_seqcount_barrier(&base->seq);
1562
1563	__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
1564	fn = timer->function;
1565
1566	/*
1567	 * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
1568	 * timer is restarted with a period then it becomes an absolute
 1569	 * timer. If it's not restarted, it does not matter.
1570	 */
1571	if (IS_ENABLED(CONFIG_TIME_LOW_RES))
1572		timer->is_rel = false;
1573
1574	/*
1575	 * The timer is marked as running in the CPU base, so it is
1576	 * protected against migration to a different CPU even if the lock
1577	 * is dropped.
1578	 */
1579	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1580	trace_hrtimer_expire_entry(timer, now);
1581	expires_in_hardirq = lockdep_hrtimer_enter(timer);
1582
1583	restart = fn(timer);
1584
1585	lockdep_hrtimer_exit(expires_in_hardirq);
1586	trace_hrtimer_expire_exit(timer);
1587	raw_spin_lock_irq(&cpu_base->lock);
1588
1589	/*
1590	 * Note: We clear the running state after enqueue_hrtimer and
1591	 * we do not reprogram the event hardware. Happens either in
1592	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
1593	 *
1594	 * Note: Because we dropped the cpu_base->lock above,
1595	 * hrtimer_start_range_ns() can have popped in and enqueued the timer
1596	 * for us already.
1597	 */
1598	if (restart != HRTIMER_NORESTART &&
1599	    !(timer->state & HRTIMER_STATE_ENQUEUED))
1600		enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
1601
1602	/*
1603	 * Separate the ->running assignment from the ->state assignment.
1604	 *
1605	 * As with a regular write barrier, this ensures the read side in
1606	 * hrtimer_active() cannot observe base->running.timer == NULL &&
1607	 * timer->state == INACTIVE.
1608	 */
1609	raw_write_seqcount_barrier(&base->seq);
1610
1611	WARN_ON_ONCE(base->running != timer);
1612	base->running = NULL;
1613}
1614
1615static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
1616				 unsigned long flags, unsigned int active_mask)
1617{
1618	struct hrtimer_clock_base *base;
1619	unsigned int active = cpu_base->active_bases & active_mask;
1620
1621	for_each_active_base(base, cpu_base, active) {
1622		struct timerqueue_node *node;
1623		ktime_t basenow;
1624
1625		basenow = ktime_add(now, base->offset);
1626
1627		while ((node = timerqueue_getnext(&base->active))) {
1628			struct hrtimer *timer;
1629
1630			timer = container_of(node, struct hrtimer, node);
1631
1632			/*
1633			 * The immediate goal for using the softexpires is
1634			 * minimizing wakeups, not running timers at the
1635			 * earliest interrupt after their soft expiration.
1636			 * This allows us to avoid using a Priority Search
1637			 * Tree, which can answer a stabbing query for
1638			 * overlapping intervals and instead use the simple
1639			 * BST we already have.
1640			 * We don't add extra wakeups by delaying timers that
1641			 * are right-of a not yet expired timer, because that
1642			 * timer will have to trigger a wakeup anyway.
1643			 */
1644			if (basenow < hrtimer_get_softexpires_tv64(timer))
1645				break;
1646
1647			__run_hrtimer(cpu_base, base, timer, &basenow, flags);
1648			if (active_mask == HRTIMER_ACTIVE_SOFT)
1649				hrtimer_sync_wait_running(cpu_base, flags);
1650		}
1651	}
1652}
1653
1654static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
1655{
1656	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1657	unsigned long flags;
1658	ktime_t now;
1659
1660	hrtimer_cpu_base_lock_expiry(cpu_base);
1661	raw_spin_lock_irqsave(&cpu_base->lock, flags);
1662
1663	now = hrtimer_update_base(cpu_base);
1664	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);
1665
1666	cpu_base->softirq_activated = 0;
1667	hrtimer_update_softirq_timer(cpu_base, true);
1668
1669	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1670	hrtimer_cpu_base_unlock_expiry(cpu_base);
1671}
1672
1673#ifdef CONFIG_HIGH_RES_TIMERS
1674
1675/*
1676 * High resolution timer interrupt
1677 * Called with interrupts disabled
1678 */
1679void hrtimer_interrupt(struct clock_event_device *dev)
1680{
1681	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1682	ktime_t expires_next, now, entry_time, delta;
1683	unsigned long flags;
1684	int retries = 0;
1685
1686	BUG_ON(!cpu_base->hres_active);
1687	cpu_base->nr_events++;
1688	dev->next_event = KTIME_MAX;
1689
1690	raw_spin_lock_irqsave(&cpu_base->lock, flags);
1691	entry_time = now = hrtimer_update_base(cpu_base);
1692retry:
1693	cpu_base->in_hrtirq = 1;
1694	/*
1695	 * We set expires_next to KTIME_MAX here with cpu_base->lock
 1696	 * held to prevent a timer from being enqueued in our queue via
1697	 * the migration code. This does not affect enqueueing of
1698	 * timers which run their callback and need to be requeued on
1699	 * this CPU.
1700	 */
1701	cpu_base->expires_next = KTIME_MAX;
1702
1703	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1704		cpu_base->softirq_expires_next = KTIME_MAX;
1705		cpu_base->softirq_activated = 1;
1706		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1707	}
1708
1709	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
1710
1711	/* Reevaluate the clock bases for the [soft] next expiry */
1712	expires_next = hrtimer_update_next_event(cpu_base);
1713	/*
1714	 * Store the new expiry value so the migration code can verify
1715	 * against it.
1716	 */
1717	cpu_base->expires_next = expires_next;
1718	cpu_base->in_hrtirq = 0;
1719	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1720
1721	/* Reprogramming necessary ? */
1722	if (!tick_program_event(expires_next, 0)) {
1723		cpu_base->hang_detected = 0;
1724		return;
1725	}
1726
1727	/*
1728	 * The next timer was already expired due to:
1729	 * - tracing
1730	 * - long lasting callbacks
1731	 * - being scheduled away when running in a VM
1732	 *
 1733	 * We need to prevent looping forever in the hrtimer
1734	 * interrupt routine. We give it 3 attempts to avoid
1735	 * overreacting on some spurious event.
1736	 *
1737	 * Acquire base lock for updating the offsets and retrieving
1738	 * the current time.
1739	 */
1740	raw_spin_lock_irqsave(&cpu_base->lock, flags);
1741	now = hrtimer_update_base(cpu_base);
1742	cpu_base->nr_retries++;
1743	if (++retries < 3)
1744		goto retry;
1745	/*
1746	 * Give the system a chance to do something else than looping
1747	 * here. We stored the entry time, so we know exactly how long
1748	 * we spent here. We schedule the next event this amount of
1749	 * time away.
1750	 */
1751	cpu_base->nr_hangs++;
1752	cpu_base->hang_detected = 1;
1753	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1754
1755	delta = ktime_sub(now, entry_time);
1756	if ((unsigned int)delta > cpu_base->max_hang_time)
1757		cpu_base->max_hang_time = (unsigned int) delta;
1758	/*
1759	 * Limit it to a sensible value as we enforce a longer
1760	 * delay. Give the CPU at least 100ms to catch up.
1761	 */
1762	if (delta > 100 * NSEC_PER_MSEC)
1763		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
1764	else
1765		expires_next = ktime_add(now, delta);
1766	tick_program_event(expires_next, 1);
1767	pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
1768}
1769
1770/* called with interrupts disabled */
1771static inline void __hrtimer_peek_ahead_timers(void)
1772{
1773	struct tick_device *td;
1774
1775	if (!hrtimer_hres_active())
1776		return;
1777
1778	td = this_cpu_ptr(&tick_cpu_device);
1779	if (td && td->evtdev)
1780		hrtimer_interrupt(td->evtdev);
1781}
1782
1783#else /* CONFIG_HIGH_RES_TIMERS */
1784
1785static inline void __hrtimer_peek_ahead_timers(void) { }
1786
1787#endif	/* !CONFIG_HIGH_RES_TIMERS */
1788
1789/*
1790 * Called from run_local_timers in hardirq context every jiffy
1791 */
1792void hrtimer_run_queues(void)
1793{
1794	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1795	unsigned long flags;
1796	ktime_t now;
1797
1798	if (__hrtimer_hres_active(cpu_base))
1799		return;
1800
1801	/*
1802	 * This _is_ ugly: We have to check periodically whether we
1803	 * can switch to highres and/or nohz mode. The clocksource
1804	 * switch happens with xtime_lock held. Notification from
1805	 * there only sets the check bit in the tick_oneshot code,
1806	 * otherwise we might deadlock vs. xtime_lock.
1807	 */
1808	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
1809		hrtimer_switch_to_hres();
1810		return;
1811	}
1812
1813	raw_spin_lock_irqsave(&cpu_base->lock, flags);
1814	now = hrtimer_update_base(cpu_base);
1815
1816	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1817		cpu_base->softirq_expires_next = KTIME_MAX;
1818		cpu_base->softirq_activated = 1;
1819		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1820	}
1821
1822	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
1823	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1824}
1825
1826/*
1827 * Sleep related functions:
1828 */
1829static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
1830{
1831	struct hrtimer_sleeper *t =
1832		container_of(timer, struct hrtimer_sleeper, timer);
1833	struct task_struct *task = t->task;
1834
1835	t->task = NULL;
1836	if (task)
1837		wake_up_process(task);
1838
1839	return HRTIMER_NORESTART;
1840}
1841
1842/**
1843 * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer
1844 * @sl:		sleeper to be started
1845 * @mode:	timer mode abs/rel
1846 *
1847 * Wrapper around hrtimer_start_expires() for hrtimer_sleeper-based timers
1848 * to allow PREEMPT_RT to tweak the delivery mode (soft/hardirq context).
1849 */
1850void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
1851				   enum hrtimer_mode mode)
1852{
1853	/*
1854	 * Make the enqueue delivery mode check work on RT. If the sleeper
1855	 * was initialized for hard interrupt delivery, force the mode bit.
1856	 * This is a special case for hrtimer_sleepers because
1857	 * hrtimer_init_sleeper() determines the delivery mode on RT so the
1858	 * fiddling with this decision is avoided at the call sites.
1859	 */
1860	if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
1861		mode |= HRTIMER_MODE_HARD;
1862
1863	hrtimer_start_expires(&sl->timer, mode);
1864}
1865EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires);
1866
1867static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
1868				   clockid_t clock_id, enum hrtimer_mode mode)
1869{
1870	/*
1871	 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
1872	 * marked for hard interrupt expiry mode are moved into soft
1873	 * interrupt context either for latency reasons or because the
1874	 * hrtimer callback takes regular spinlocks or invokes other
1875	 * functions which are not suitable for hard interrupt context on
1876	 * PREEMPT_RT.
1877	 *
1878	 * The hrtimer_sleeper callback is RT compatible in hard interrupt
1879	 * context, but there is a latency concern: Untrusted userspace can
1880	 * spawn many threads which arm timers for the same expiry time on
1881	 * the same CPU. That causes a latency spike due to the wakeup of
1882	 * a gazillion threads.
1883	 *
1884	 * OTOH, privileged real-time user space applications rely on the
1885	 * low latency of hard interrupt wakeups. If the current task is in
1886	 * a real-time scheduling class, mark the mode for hard interrupt
1887	 * expiry.
1888	 */
1889	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1890		if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT))
1891			mode |= HRTIMER_MODE_HARD;
1892	}
1893
1894	__hrtimer_init(&sl->timer, clock_id, mode);
1895	sl->timer.function = hrtimer_wakeup;
1896	sl->task = current;
1897}
1898
1899/**
1900 * hrtimer_init_sleeper - initialize sleeper to the given clock
1901 * @sl:		sleeper to be initialized
1902 * @clock_id:	the clock to be used
1903 * @mode:	timer mode abs/rel
1904 */
1905void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
1906			  enum hrtimer_mode mode)
1907{
1908	debug_init(&sl->timer, clock_id, mode);
1909	__hrtimer_init_sleeper(sl, clock_id, mode);
1910
1911}
1912EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
1913
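/*
 * Illustrative sketch, not an in-tree helper: a minimal relative sleep
 * built on the sleeper API above, mirroring the pattern that
 * do_nanosleep() below uses. The name example_sleep_ns() is hypothetical.
 */
static int __maybe_unused example_sleep_ns(u64 delay_ns)
{
	struct hrtimer_sleeper to;
	int ret;

	hrtimer_init_sleeper_on_stack(&to, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_set_expires(&to.timer, ns_to_ktime(delay_ns));

	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_sleeper_start_expires(&to, HRTIMER_MODE_REL);
	if (likely(to.task))
		schedule();

	hrtimer_cancel(&to.timer);
	__set_current_state(TASK_RUNNING);

	/* hrtimer_wakeup() clears to.task when the timer actually fired */
	ret = !to.task ? 0 : -EINTR;
	destroy_hrtimer_on_stack(&to.timer);
	return ret;
}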
1914int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
1915{
1916	switch(restart->nanosleep.type) {
1917#ifdef CONFIG_COMPAT_32BIT_TIME
1918	case TT_COMPAT:
1919		if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp))
1920			return -EFAULT;
1921		break;
1922#endif
1923	case TT_NATIVE:
1924		if (put_timespec64(ts, restart->nanosleep.rmtp))
1925			return -EFAULT;
1926		break;
1927	default:
1928		BUG();
1929	}
1930	return -ERESTART_RESTARTBLOCK;
1931}
1932
1933static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
1934{
1935	struct restart_block *restart;
1936
1937	do {
1938		set_current_state(TASK_INTERRUPTIBLE);
1939		hrtimer_sleeper_start_expires(t, mode);
1940
1941		if (likely(t->task))
1942			freezable_schedule();
1943
1944		hrtimer_cancel(&t->timer);
1945		mode = HRTIMER_MODE_ABS;
1946
1947	} while (t->task && !signal_pending(current));
1948
1949	__set_current_state(TASK_RUNNING);
1950
1951	if (!t->task)
1952		return 0;
1953
1954	restart = &current->restart_block;
1955	if (restart->nanosleep.type != TT_NONE) {
1956		ktime_t rem = hrtimer_expires_remaining(&t->timer);
1957		struct timespec64 rmt;
1958
1959		if (rem <= 0)
1960			return 0;
1961		rmt = ktime_to_timespec64(rem);
1962
1963		return nanosleep_copyout(restart, &rmt);
1964	}
1965	return -ERESTART_RESTARTBLOCK;
1966}
1967
1968static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
1969{
1970	struct hrtimer_sleeper t;
1971	int ret;
1972
1973	hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
1974				      HRTIMER_MODE_ABS);
1975	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
1976	ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
1977	destroy_hrtimer_on_stack(&t.timer);
1978	return ret;
1979}
1980
1981long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
1982		       const clockid_t clockid)
1983{
1984	struct restart_block *restart;
1985	struct hrtimer_sleeper t;
1986	int ret = 0;
1987	u64 slack;
1988
1989	slack = current->timer_slack_ns;
1990	if (dl_task(current) || rt_task(current))
1991		slack = 0;
1992
1993	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
1994	hrtimer_set_expires_range_ns(&t.timer, rqtp, slack);
1995	ret = do_nanosleep(&t, mode);
1996	if (ret != -ERESTART_RESTARTBLOCK)
1997		goto out;
1998
1999	/* Absolute timers do not update the rmtp value and restart: */
2000	if (mode == HRTIMER_MODE_ABS) {
2001		ret = -ERESTARTNOHAND;
2002		goto out;
2003	}
2004
2005	restart = &current->restart_block;
2006	restart->nanosleep.clockid = t.timer.base->clockid;
2007	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
2008	set_restart_fn(restart, hrtimer_nanosleep_restart);
2009out:
2010	destroy_hrtimer_on_stack(&t.timer);
2011	return ret;
2012}
2013
2014#ifdef CONFIG_64BIT
2015
2016SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
2017		struct __kernel_timespec __user *, rmtp)
2018{
2019	struct timespec64 tu;
2020
2021	if (get_timespec64(&tu, rqtp))
2022		return -EFAULT;
2023
2024	if (!timespec64_valid(&tu))
2025		return -EINVAL;
2026
2027	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
2028	current->restart_block.nanosleep.rmtp = rmtp;
2029	return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
2030				 CLOCK_MONOTONIC);
2031}
2032
2033#endif
2034
2035#ifdef CONFIG_COMPAT_32BIT_TIME
2036
2037SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
2038		       struct old_timespec32 __user *, rmtp)
2039{
2040	struct timespec64 tu;
2041
2042	if (get_old_timespec32(&tu, rqtp))
2043		return -EFAULT;
2044
2045	if (!timespec64_valid(&tu))
2046		return -EINVAL;
2047
2048	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
2049	current->restart_block.nanosleep.compat_rmtp = rmtp;
2050	return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
2051				 CLOCK_MONOTONIC);
2052}
2053#endif
2054
2055/*
2056 * Functions related to boot-time initialization:
2057 */
2058int hrtimers_prepare_cpu(unsigned int cpu)
2059{
2060	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
2061	int i;
2062
2063	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
2064		struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i];
2065
2066		clock_b->cpu_base = cpu_base;
2067		seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock);
2068		timerqueue_init_head(&clock_b->active);
2069	}
2070
2071	cpu_base->cpu = cpu;
2072	cpu_base->active_bases = 0;
2073	cpu_base->hres_active = 0;
2074	cpu_base->hang_detected = 0;
2075	cpu_base->next_timer = NULL;
2076	cpu_base->softirq_next_timer = NULL;
2077	cpu_base->expires_next = KTIME_MAX;
2078	cpu_base->softirq_expires_next = KTIME_MAX;
2079	hrtimer_cpu_base_init_expiry_lock(cpu_base);
2080	return 0;
2081}
2082
2083#ifdef CONFIG_HOTPLUG_CPU
2084
2085static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
2086				struct hrtimer_clock_base *new_base)
2087{
2088	struct hrtimer *timer;
2089	struct timerqueue_node *node;
2090
2091	while ((node = timerqueue_getnext(&old_base->active))) {
2092		timer = container_of(node, struct hrtimer, node);
2093		BUG_ON(hrtimer_callback_running(timer));
2094		debug_deactivate(timer);
2095
2096		/*
2097		 * Mark it as ENQUEUED, not INACTIVE, otherwise the
2098		 * timer could be seen as !active and just vanish away
2099		 * under us on another CPU
2100		 */
2101		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
2102		timer->base = new_base;
2103		/*
2104		 * Enqueue the timers on the new cpu. This does not
2105		 * reprogram the event device in case the timer
2106		 * expires before the earliest timer on this CPU, but we run
2107		 * hrtimer_interrupt after we have migrated everything to
2108		 * sort out already expired timers and reprogram the
2109		 * event device.
2110		 */
2111		enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
2112	}
2113}
2114
2115int hrtimers_dead_cpu(unsigned int scpu)
2116{
2117	struct hrtimer_cpu_base *old_base, *new_base;
2118	int i;
2119
2120	BUG_ON(cpu_online(scpu));
2121	tick_cancel_sched_timer(scpu);
2122
2123	/*
2124	 * This BH disable ensures that raise_softirq_irqoff() does
2125	 * not wake up ksoftirqd (and acquire the pi-lock) while
2126	 * holding the cpu_base lock
2127	 */
2128	local_bh_disable();
2129	local_irq_disable();
2130	old_base = &per_cpu(hrtimer_bases, scpu);
2131	new_base = this_cpu_ptr(&hrtimer_bases);
2132	/*
2133	 * The caller is globally serialized and nobody else
2134	 * takes two locks at once, so deadlock is not possible.
2135	 */
2136	raw_spin_lock(&new_base->lock);
2137	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
2138
2139	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
2140		migrate_hrtimer_list(&old_base->clock_base[i],
2141				     &new_base->clock_base[i]);
2142	}
2143
2144	/*
2145	 * The migration might have changed the first expiring softirq
2146	 * timer on this CPU. Update it.
2147	 */
2148	hrtimer_update_softirq_timer(new_base, false);
2149
2150	raw_spin_unlock(&old_base->lock);
2151	raw_spin_unlock(&new_base->lock);
2152
2153	/* Check if we got expired work to do */
2154	__hrtimer_peek_ahead_timers();
2155	local_irq_enable();
2156	local_bh_enable();
2157	return 0;
2158}
2159
2160#endif /* CONFIG_HOTPLUG_CPU */
2161
2162void __init hrtimers_init(void)
2163{
2164	hrtimers_prepare_cpu(smp_processor_id());
2165	open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
2166}
2167
2168/**
2169 * schedule_hrtimeout_range_clock - sleep until timeout
2170 * @expires:	timeout value (ktime_t)
2171 * @delta:	slack in expires timeout (ktime_t)
2172 * @mode:	timer mode
2173 * @clock_id:	timer clock to be used
2174 */
2175int __sched
2176schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
2177			       const enum hrtimer_mode mode, clockid_t clock_id)
2178{
2179	struct hrtimer_sleeper t;
2180
2181	/*
2182	 * Optimize when a zero timeout value is given. It does not
2183	 * matter whether this is an absolute or a relative time.
2184	 */
2185	if (expires && *expires == 0) {
2186		__set_current_state(TASK_RUNNING);
2187		return 0;
2188	}
2189
2190	/*
2191	 * A NULL parameter means "infinite"
2192	 */
2193	if (!expires) {
2194		schedule();
2195		return -EINTR;
2196	}
2197
2198	hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
2199	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
2200	hrtimer_sleeper_start_expires(&t, mode);
2201
2202	if (likely(t.task))
2203		schedule();
2204
2205	hrtimer_cancel(&t.timer);
2206	destroy_hrtimer_on_stack(&t.timer);
2207
2208	__set_current_state(TASK_RUNNING);
2209
2210	return !t.task ? 0 : -EINTR;
2211}
2212
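/*
 * Illustrative sketch, not an in-tree helper: waiting for an absolute
 * CLOCK_REALTIME deadline with one millisecond of slack via the
 * clock-selectable variant above. The name wait_until_realtime() is
 * hypothetical.
 */
static int __maybe_unused wait_until_realtime(ktime_t deadline)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* May fire up to 1ms after 'deadline', but never before it */
	return schedule_hrtimeout_range_clock(&deadline, NSEC_PER_MSEC,
					      HRTIMER_MODE_ABS,
					      CLOCK_REALTIME);
}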
2213/**
2214 * schedule_hrtimeout_range - sleep until timeout
2215 * @expires:	timeout value (ktime_t)
2216 * @delta:	slack in expires timeout (ktime_t)
2217 * @mode:	timer mode
2218 *
2219 * Make the current task sleep until the given expiry time has
2220 * elapsed. The routine will return immediately unless
2221 * the current task state has been set (see set_current_state()).
2222 *
2223 * The @delta argument gives the kernel the freedom to schedule the
2224 * actual wakeup to a time that is both power and performance friendly.
2225 * The kernel gives the normal best effort behavior for "@expires+@delta",
2226 * and may decide to fire the timer earlier, but no earlier than @expires.
2227 *
2228 * You can set the task state as follows -
2229 *
2230 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
2231 * pass before the routine returns unless the current task is explicitly
2232 * woken up (e.g. by wake_up_process()).
2233 *
2234 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2235 * delivered to the current task or the current task is explicitly woken
2236 * up.
2237 *
2238 * The current task state is guaranteed to be TASK_RUNNING when this
2239 * routine returns.
2240 *
2241 * Returns 0 when the timer has expired. If the task was woken before the
2242 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
2243 * by an explicit wakeup, it returns -EINTR.
2244 */
2245int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
2246				     const enum hrtimer_mode mode)
2247{
2248	return schedule_hrtimeout_range_clock(expires, delta, mode,
2249					      CLOCK_MONOTONIC);
2250}
2251EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
2252
2253/**
2254 * schedule_hrtimeout - sleep until timeout
2255 * @expires:	timeout value (ktime_t)
2256 * @mode:	timer mode
2257 *
2258 * Make the current task sleep until the given expiry time has
2259 * elapsed. The routine will return immediately unless
2260 * the current task state has been set (see set_current_state()).
2261 *
2262 * You can set the task state as follows -
2263 *
2264 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
2265 * pass before the routine returns unless the current task is explicitly
2266 * woken up (e.g. by wake_up_process()).
2267 *
2268 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2269 * delivered to the current task or the current task is explicitly woken
2270 * up.
2271 *
2272 * The current task state is guaranteed to be TASK_RUNNING when this
2273 * routine returns.
2274 *
2275 * Returns 0 when the timer has expired. If the task was woken before the
2276 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
2277 * by an explicit wakeup, it returns -EINTR.
2278 */
2279int __sched schedule_hrtimeout(ktime_t *expires,
2280			       const enum hrtimer_mode mode)
2281{
2282	return schedule_hrtimeout_range(expires, 0, mode);
2283}
2284EXPORT_SYMBOL_GPL(schedule_hrtimeout);
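/*
 * Illustrative sketch, not an in-tree helper: the caller pattern the
 * kernel-doc above requires: the task state must be set before the
 * call, and a NULL expiry pointer sleeps until an explicit wakeup or a
 * signal. The name example_interruptible_sleep() is hypothetical.
 */
static int __maybe_unused example_interruptible_sleep(ktime_t *timeout)
{
	int ret;

	set_current_state(TASK_INTERRUPTIBLE);
	ret = schedule_hrtimeout(timeout, HRTIMER_MODE_REL);

	/* ret == 0: the timeout expired; ret == -EINTR: woken up early */
	return ret;
}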
v4.6
 
   1/*
   2 *  linux/kernel/hrtimer.c
   3 *
   4 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
   5 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
   6 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
   7 *
   8 *  High-resolution kernel timers
   9 *
  10 *  In contrast to the low-resolution timeout API implemented in
  11 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
  12 *  depending on system configuration and capabilities.
  13 *
  14 *  These timers are currently used for:
  15 *   - itimers
  16 *   - POSIX timers
  17 *   - nanosleep
  18 *   - precise in-kernel timing
  19 *
  20 *  Started by: Thomas Gleixner and Ingo Molnar
  21 *
  22 *  Credits:
  23 *	based on kernel/timer.c
  24 *
  25 *	Help, testing, suggestions, bugfixes, improvements were
  26 *	provided by:
  27 *
  28 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
  29 *	et. al.
  30 *
  31 *  For licencing details see kernel-base/COPYING
  32 */
  33
  34#include <linux/cpu.h>
  35#include <linux/export.h>
  36#include <linux/percpu.h>
  37#include <linux/hrtimer.h>
  38#include <linux/notifier.h>
  39#include <linux/syscalls.h>
  40#include <linux/kallsyms.h>
  41#include <linux/interrupt.h>
  42#include <linux/tick.h>
  43#include <linux/seq_file.h>
  44#include <linux/err.h>
  45#include <linux/debugobjects.h>
  46#include <linux/sched.h>
  47#include <linux/sched/sysctl.h>
  48#include <linux/sched/rt.h>
  49#include <linux/sched/deadline.h>
 
 
  50#include <linux/timer.h>
  51#include <linux/freezer.h>
 
  52
  53#include <asm/uaccess.h>
  54
  55#include <trace/events/timer.h>
  56
  57#include "tick-internal.h"
  58
  59/*
 
 
 
 
 
 
 
 
 
  60 * The timer bases:
  61 *
  62 * There are more clockids than hrtimer bases. Thus, we index
  63 * into the timer bases by the hrtimer_base_type enum. When trying
  64 * to reach a base using a clockid, hrtimer_clockid_to_base()
  65 * is used to convert from clockid to the proper hrtimer_base_type.
  66 */
  67DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
  68{
  69	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
  70	.seq = SEQCNT_ZERO(hrtimer_bases.seq),
  71	.clock_base =
  72	{
  73		{
  74			.index = HRTIMER_BASE_MONOTONIC,
  75			.clockid = CLOCK_MONOTONIC,
  76			.get_time = &ktime_get,
  77		},
  78		{
  79			.index = HRTIMER_BASE_REALTIME,
  80			.clockid = CLOCK_REALTIME,
  81			.get_time = &ktime_get_real,
  82		},
  83		{
  84			.index = HRTIMER_BASE_BOOTTIME,
  85			.clockid = CLOCK_BOOTTIME,
  86			.get_time = &ktime_get_boottime,
  87		},
  88		{
  89			.index = HRTIMER_BASE_TAI,
  90			.clockid = CLOCK_TAI,
  91			.get_time = &ktime_get_clocktai,
  92		},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  93	}
  94};
  95
  96static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
 
 
 
  97	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
  98	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
  99	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
 100	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
 101};
 102
 103static inline int hrtimer_clockid_to_base(clockid_t clock_id)
 104{
 105	return hrtimer_clock_to_base_table[clock_id];
 106}
 107
 108/*
 109 * Functions and macros which are different for UP/SMP systems are kept in a
 110 * single place
 111 */
 112#ifdef CONFIG_SMP
 113
 114/*
 115 * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
 116 * such that hrtimer_callback_running() can unconditionally dereference
 117 * timer->base->cpu_base
 118 */
 119static struct hrtimer_cpu_base migration_cpu_base = {
 120	.seq = SEQCNT_ZERO(migration_cpu_base),
 121	.clock_base = { { .cpu_base = &migration_cpu_base, }, },
 
 
 
 122};
 123
 124#define migration_base	migration_cpu_base.clock_base[0]
 125
 
 
 
 
 
 126/*
 127 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 128 * means that all timers which are tied to this base via timer->base are
 129 * locked, and the base itself is locked too.
 130 *
 131 * So __run_timers/migrate_timers can safely modify all timers which could
 132 * be found on the lists/queues.
 133 *
 134 * When the timer's base is locked, and the timer removed from list, it is
 135 * possible to set timer->base = &migration_base and drop the lock: the timer
 136 * remains locked.
 137 */
 138static
 139struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 140					     unsigned long *flags)
 141{
 142	struct hrtimer_clock_base *base;
 143
 144	for (;;) {
 145		base = timer->base;
 146		if (likely(base != &migration_base)) {
 147			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 148			if (likely(base == timer->base))
 149				return base;
 150			/* The timer has migrated to another CPU: */
 151			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
 152		}
 153		cpu_relax();
 154	}
 155}
 156
 157/*
 158 * With HIGHRES=y we do not migrate the timer when it is expiring
 159 * before the next event on the target cpu because we cannot reprogram
 160 * the target cpu hardware and we would cause it to fire late.
 
 
 161 *
 162 * Called with cpu_base->lock of target cpu held.
 163 */
 164static int
 165hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
 166{
 167#ifdef CONFIG_HIGH_RES_TIMERS
 168	ktime_t expires;
 169
 170	if (!new_base->cpu_base->hres_active)
 171		return 0;
 172
 173	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
 174	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
 175#else
 176	return 0;
 177#endif
 178}
 179
 180#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 181static inline
 182struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
 183					 int pinned)
 184{
 185	if (pinned || !base->migration_enabled)
 186		return base;
 187	return &per_cpu(hrtimer_bases, get_nohz_timer_target());
 188}
 189#else
 190static inline
 191struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
 192					 int pinned)
 193{
 
 
 
 
 194	return base;
 195}
 196#endif
 197
 198/*
 199 * We switch the timer base to a power-optimized selected CPU target,
 200 * if:
 201 *	- NO_HZ_COMMON is enabled
 202 *	- timer migration is enabled
 203 *	- the timer callback is not running
 204 *	- the timer is not the first expiring timer on the new target
 205 *
 206 * If one of the above requirements is not fulfilled we move the timer
 207 * to the current CPU or leave it on the previously assigned CPU if
 208 * the timer callback is currently running.
 209 */
 210static inline struct hrtimer_clock_base *
 211switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 212		    int pinned)
 213{
 214	struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
 215	struct hrtimer_clock_base *new_base;
 216	int basenum = base->index;
 217
 218	this_cpu_base = this_cpu_ptr(&hrtimer_bases);
 219	new_cpu_base = get_target_base(this_cpu_base, pinned);
 220again:
 221	new_base = &new_cpu_base->clock_base[basenum];
 222
 223	if (base != new_base) {
 224		/*
 225		 * We are trying to move timer to new_base.
 226		 * However we can't change timer's base while it is running,
 227		 * so we keep it on the same CPU. No hassle vs. reprogramming
 228		 * the event source in the high resolution case. The softirq
 229		 * code will take care of this when the timer function has
 230		 * completed. There is no conflict as we hold the lock until
 231		 * the timer is enqueued.
 232		 */
 233		if (unlikely(hrtimer_callback_running(timer)))
 234			return base;
 235
 236		/* See the comment in lock_hrtimer_base() */
 237		timer->base = &migration_base;
 238		raw_spin_unlock(&base->cpu_base->lock);
 239		raw_spin_lock(&new_base->cpu_base->lock);
 240
 241		if (new_cpu_base != this_cpu_base &&
 242		    hrtimer_check_target(timer, new_base)) {
 243			raw_spin_unlock(&new_base->cpu_base->lock);
 244			raw_spin_lock(&base->cpu_base->lock);
 245			new_cpu_base = this_cpu_base;
 246			timer->base = base;
 247			goto again;
 248		}
 249		timer->base = new_base;
 250	} else {
 251		if (new_cpu_base != this_cpu_base &&
 252		    hrtimer_check_target(timer, new_base)) {
 253			new_cpu_base = this_cpu_base;
 254			goto again;
 255		}
 256	}
 257	return new_base;
 258}
 259
 260#else /* CONFIG_SMP */
 261
 
 
 
 
 
 262static inline struct hrtimer_clock_base *
 263lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 264{
 265	struct hrtimer_clock_base *base = timer->base;
 266
 267	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 268
 269	return base;
 270}
 271
 272# define switch_hrtimer_base(t, b, p)	(b)
 273
 274#endif	/* !CONFIG_SMP */
 275
 276/*
 277 * Functions for the union type storage format of ktime_t which are
 278 * too large for inlining:
 279 */
 280#if BITS_PER_LONG < 64
 281/*
 282 * Divide a ktime value by a nanosecond value
 283 */
 284s64 __ktime_divns(const ktime_t kt, s64 div)
 285{
 286	int sft = 0;
 287	s64 dclc;
 288	u64 tmp;
 289
 290	dclc = ktime_to_ns(kt);
 291	tmp = dclc < 0 ? -dclc : dclc;
 292
 293	/* Make sure the divisor is less than 2^32: */
 294	while (div >> 32) {
 295		sft++;
 296		div >>= 1;
 297	}
 298	tmp >>= sft;
 299	do_div(tmp, (unsigned long) div);
 300	return dclc < 0 ? -tmp : tmp;
 301}
 302EXPORT_SYMBOL_GPL(__ktime_divns);
 303#endif /* BITS_PER_LONG >= 64 */
 304
 305/*
 306 * Add two ktime values and do a safety check for overflow:
 307 */
 308ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
 309{
 310	ktime_t res = ktime_add(lhs, rhs);
 311
 312	/*
 313	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
 314	 * return to user space in a timespec:
 315	 */
 316	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
 317		res = ktime_set(KTIME_SEC_MAX, 0);
 318
 319	return res;
 320}
 321
 322EXPORT_SYMBOL_GPL(ktime_add_safe);
 323
 324#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
 325
 326static struct debug_obj_descr hrtimer_debug_descr;
 327
 328static void *hrtimer_debug_hint(void *addr)
 329{
 330	return ((struct hrtimer *) addr)->function;
 331}
 332
 333/*
 334 * fixup_init is called when:
 335 * - an active object is initialized
 336 */
 337static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
 338{
 339	struct hrtimer *timer = addr;
 340
 341	switch (state) {
 342	case ODEBUG_STATE_ACTIVE:
 343		hrtimer_cancel(timer);
 344		debug_object_init(timer, &hrtimer_debug_descr);
 345		return 1;
 346	default:
 347		return 0;
 348	}
 349}
 350
 351/*
 352 * fixup_activate is called when:
 353 * - an active object is activated
 354 * - an unknown object is activated (might be a statically initialized object)
 355 */
 356static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
 357{
 358	switch (state) {
 359
 360	case ODEBUG_STATE_NOTAVAILABLE:
 361		WARN_ON_ONCE(1);
 362		return 0;
 363
 364	case ODEBUG_STATE_ACTIVE:
 365		WARN_ON(1);
 366
 367	default:
 368		return 0;
 369	}
 370}
 371
 372/*
 373 * fixup_free is called when:
 374 * - an active object is freed
 375 */
 376static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
 377{
 378	struct hrtimer *timer = addr;
 379
 380	switch (state) {
 381	case ODEBUG_STATE_ACTIVE:
 382		hrtimer_cancel(timer);
 383		debug_object_free(timer, &hrtimer_debug_descr);
 384		return 1;
 385	default:
 386		return 0;
 387	}
 388}
 389
 390static struct debug_obj_descr hrtimer_debug_descr = {
 391	.name		= "hrtimer",
 392	.debug_hint	= hrtimer_debug_hint,
 393	.fixup_init	= hrtimer_fixup_init,
 394	.fixup_activate	= hrtimer_fixup_activate,
 395	.fixup_free	= hrtimer_fixup_free,
 396};
 397
 398static inline void debug_hrtimer_init(struct hrtimer *timer)
 399{
 400	debug_object_init(timer, &hrtimer_debug_descr);
 401}
 402
 403static inline void debug_hrtimer_activate(struct hrtimer *timer)
 
 404{
 405	debug_object_activate(timer, &hrtimer_debug_descr);
 406}
 407
 408static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
 409{
 410	debug_object_deactivate(timer, &hrtimer_debug_descr);
 411}
 412
 413static inline void debug_hrtimer_free(struct hrtimer *timer)
 414{
 415	debug_object_free(timer, &hrtimer_debug_descr);
 416}
 417
 418static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 419			   enum hrtimer_mode mode);
 420
 421void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
 422			   enum hrtimer_mode mode)
 423{
 424	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
 425	__hrtimer_init(timer, clock_id, mode);
 426}
 427EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
 428
 
 
 
 
 
 
 
 
 
 
 
 429void destroy_hrtimer_on_stack(struct hrtimer *timer)
 430{
 431	debug_object_free(timer, &hrtimer_debug_descr);
 432}
 
 433
 434#else
 
 435static inline void debug_hrtimer_init(struct hrtimer *timer) { }
 436static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
 
 437static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
 438#endif
 439
 440static inline void
 441debug_init(struct hrtimer *timer, clockid_t clockid,
 442	   enum hrtimer_mode mode)
 443{
 444	debug_hrtimer_init(timer);
 445	trace_hrtimer_init(timer, clockid, mode);
 446}
 447
 448static inline void debug_activate(struct hrtimer *timer)
 
 449{
 450	debug_hrtimer_activate(timer);
 451	trace_hrtimer_start(timer);
 452}
 453
 454static inline void debug_deactivate(struct hrtimer *timer)
 455{
 456	debug_hrtimer_deactivate(timer);
 457	trace_hrtimer_cancel(timer);
 458}
 459
 460#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
 461static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
 462					     struct hrtimer *timer)
 463{
 464#ifdef CONFIG_HIGH_RES_TIMERS
 465	cpu_base->next_timer = timer;
 466#endif
 
 
 
 
 
 
 467}
 468
 469static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 
 
 
 
 
 
 470{
 471	struct hrtimer_clock_base *base = cpu_base->clock_base;
 472	ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
 473	unsigned int active = cpu_base->active_bases;
 474
 475	hrtimer_update_next_timer(cpu_base, NULL);
 476	for (; active; base++, active >>= 1) {
 477		struct timerqueue_node *next;
 478		struct hrtimer *timer;
 479
 480		if (!(active & 0x01))
 481			continue;
 482
 483		next = timerqueue_getnext(&base->active);
 484		timer = container_of(next, struct hrtimer, node);
 
 
 
 
 
 
 
 
 485		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 486		if (expires.tv64 < expires_next.tv64) {
 487			expires_next = expires;
 488			hrtimer_update_next_timer(cpu_base, timer);
 
 
 
 
 
 
 
 
 489		}
 490	}
 491	/*
 492	 * clock_was_set() might have changed base->offset of any of
 493	 * the clock bases so the result might be negative. Fix it up
 494	 * to prevent a false positive in clockevents_program_event().
 495	 */
 496	if (expires_next.tv64 < 0)
 497		expires_next.tv64 = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 498	return expires_next;
 499}
 500#endif
 501
 502static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
 503{
 504	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
 505	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
 506	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
 507
 508	return ktime_get_update_offsets_now(&base->clock_was_set_seq,
 509					    offs_real, offs_boot, offs_tai);
 510}
 511
 512/* High resolution timer related functions */
 513#ifdef CONFIG_HIGH_RES_TIMERS
 
 514
 515/*
 516 * High resolution timer enabled ?
 517 */
 518static bool hrtimer_hres_enabled __read_mostly  = true;
 519unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
 520EXPORT_SYMBOL_GPL(hrtimer_resolution);
 521
 522/*
 523 * Enable / Disable high resolution mode
 524 */
 525static int __init setup_hrtimer_hres(char *str)
 526{
 527	return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
 528}
 529
 530__setup("highres=", setup_hrtimer_hres);
 531
 532/*
 533 * hrtimer_high_res_enabled - query, if the highres mode is enabled
 534 */
 535static inline int hrtimer_is_hres_enabled(void)
 536{
 537	return hrtimer_hres_enabled;
 538}
 539
 540/*
 541 * Is the high resolution mode active ?
 542 */
 543static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
 544{
 545	return cpu_base->hres_active;
 
 546}
 547
 548static inline int hrtimer_hres_active(void)
 549{
 550	return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
 551}
 552
 553/*
 554 * Reprogram the event source with checking both queues for the
 555 * next event
 556 * Called with interrupts disabled and base->lock held
 557 */
 558static void
 559hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 560{
 561	ktime_t expires_next;
 562
 563	if (!cpu_base->hres_active)
 564		return;
 565
 566	expires_next = __hrtimer_get_next_event(cpu_base);
 567
 568	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
 569		return;
 570
 571	cpu_base->expires_next.tv64 = expires_next.tv64;
 572
 573	/*
 
 
 
 574	 * If a hang was detected in the last timer interrupt then we
 575	 * leave the hang delay active in the hardware. We want the
 576	 * system to make progress. That also prevents the following
 577	 * scenario:
 578	 * T1 expires 50ms from now
 579	 * T2 expires 5s from now
 580	 *
 581	 * T1 is removed, so this code is called and would reprogram
 582	 * the hardware to 5s from now. Any hrtimer_start after that
 583	 * will not reprogram the hardware due to hang_detected being
 584	 * set. So we'd effectivly block all timers until the T2 event
 585	 * fires.
 586	 */
 587	if (cpu_base->hang_detected)
 588		return;
 589
 590	tick_program_event(cpu_base->expires_next, 1);
 591}
 592
 
 
 
 
 
 
 
 
 
 
 593/*
 594 * When a timer is enqueued and expires earlier than the already enqueued
 595 * timers, we have to check, whether it expires earlier than the timer for
 596 * which the clock event device was armed.
 597 *
 598 * Called with interrupts disabled and base->cpu_base.lock held
 599 */
 600static void hrtimer_reprogram(struct hrtimer *timer,
 601			      struct hrtimer_clock_base *base)
 602{
 603	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 604	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 605
 606	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
 607
 608	/*
 609	 * If the timer is not on the current cpu, we cannot reprogram
 610	 * the other cpus clock event device.
 611	 */
 612	if (base->cpu_base != cpu_base)
 613		return;
 614
 615	/*
 616	 * If the hrtimer interrupt is running, then it will
 617	 * reevaluate the clock bases and reprogram the clock event
 618	 * device. The callbacks are always executed in hard interrupt
 619	 * context so we don't need an extra check for a running
 620	 * callback.
 621	 */
 622	if (cpu_base->in_hrtirq)
 623		return;
 624
 625	/*
 626	 * CLOCK_REALTIME timer might be requested with an absolute
 627	 * expiry time which is less than base->offset. Set it to 0.
 628	 */
 629	if (expires.tv64 < 0)
 630		expires.tv64 = 0;
 631
 632	if (expires.tv64 >= cpu_base->expires_next.tv64)
 633		return;
 634
 635	/* Update the pointer to the next expiring timer */
 636	cpu_base->next_timer = timer;
 637
 638	/*
 639	 * If a hang was detected in the last timer interrupt then we
 640	 * do not schedule a timer which is earlier than the expiry
 641	 * which we enforced in the hang detection. We want the system
 642	 * to make progress.
 643	 */
 644	if (cpu_base->hang_detected)
 645		return;
 646
 647	/*
 648	 * Program the timer hardware. We enforce the expiry for
 649	 * events which are already in the past.
 650	 */
 651	cpu_base->expires_next = expires;
 652	tick_program_event(expires, 1);
 653}
 654
 655/*
 656 * Initialize the high resolution related parts of cpu_base
 657 */
 658static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
 659{
 660	base->expires_next.tv64 = KTIME_MAX;
 661	base->hres_active = 0;
 662}
 663
 664/*
 665 * Retrigger next event is called after clock was set
 666 *
 667 * Called with interrupts disabled via on_each_cpu()
 668 */
 669static void retrigger_next_event(void *arg)
 670{
 671	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 672
 673	if (!base->hres_active)
 674		return;
 675
 676	raw_spin_lock(&base->lock);
 677	hrtimer_update_base(base);
 678	hrtimer_force_reprogram(base, 0);
 679	raw_spin_unlock(&base->lock);
 680}
 681
 682/*
 683 * Switch to high resolution mode
 684 */
 685static void hrtimer_switch_to_hres(void)
 686{
 687	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 688
 689	if (tick_init_highres()) {
 690		printk(KERN_WARNING "Could not switch to high resolution "
 691				    "mode on CPU %d\n", base->cpu);
 692		return;
 693	}
 694	base->hres_active = 1;
 695	hrtimer_resolution = HIGH_RES_NSEC;
 696
 697	tick_setup_sched_timer();
 698	/* "Retrigger" the interrupt to get things going */
 699	retrigger_next_event(NULL);
 700}
 701
 702static void clock_was_set_work(struct work_struct *work)
 703{
 704	clock_was_set();
 705}
 
 706
 707static DECLARE_WORK(hrtimer_work, clock_was_set_work);
 708
 709/*
 710 * Called from timekeeping and resume code to reprogramm the hrtimer
 711 * interrupt device on all cpus.
 
 
 
 712 */
 713void clock_was_set_delayed(void)
 714{
 715	schedule_work(&hrtimer_work);
 716}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 717
 718#else
 
 
 
 
 
 
 
 
 
 
 719
 720static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *b) { return 0; }
 721static inline int hrtimer_hres_active(void) { return 0; }
 722static inline int hrtimer_is_hres_enabled(void) { return 0; }
 723static inline void hrtimer_switch_to_hres(void) { }
 724static inline void
 725hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
 726static inline int hrtimer_reprogram(struct hrtimer *timer,
 727				    struct hrtimer_clock_base *base)
 728{
 729	return 0;
 730}
 731static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
 732static inline void retrigger_next_event(void *arg) { }
 733
 734#endif /* CONFIG_HIGH_RES_TIMERS */
 735
 736/*
 737 * Clock realtime was set
 738 *
 739 * Change the offset of the realtime clock vs. the monotonic
 740 * clock.
 741 *
 742 * We might have to reprogram the high resolution timer interrupt. On
 743 * SMP we call the architecture specific code to retrigger _all_ high
 744 * resolution timer interrupts. On UP we just disable interrupts and
 745 * call the high resolution interrupt code.
 746 */
 747void clock_was_set(void)
 748{
 749#ifdef CONFIG_HIGH_RES_TIMERS
 750	/* Retrigger the CPU local events everywhere */
 751	on_each_cpu(retrigger_next_event, NULL, 1);
 752#endif
 753	timerfd_clock_was_set();
 754}
 755
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 756/*
 757 * During resume we might have to reprogram the high resolution timer
 758 * interrupt on all online CPUs.  However, all other CPUs will be
 759 * stopped with IRQs interrupts disabled so the clock_was_set() call
 760 * must be deferred.
 761 */
 762void hrtimers_resume(void)
 763{
 764	WARN_ONCE(!irqs_disabled(),
 765		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");
 766
 767	/* Retrigger on the local CPU */
 768	retrigger_next_event(NULL);
 769	/* And schedule a retrigger for all others */
 770	clock_was_set_delayed();
 771}
 772
 773static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
 774{
 775#ifdef CONFIG_TIMER_STATS
 776	if (timer->start_site)
 777		return;
 778	timer->start_site = __builtin_return_address(0);
 779	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
 780	timer->start_pid = current->pid;
 781#endif
 782}
 783
 784static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
 785{
 786#ifdef CONFIG_TIMER_STATS
 787	timer->start_site = NULL;
 788#endif
 789}
 790
 791static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
 792{
 793#ifdef CONFIG_TIMER_STATS
 794	if (likely(!timer_stats_active))
 795		return;
 796	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
 797				 timer->function, timer->start_comm, 0);
 798#endif
 799}
 800
 801/*
 802 * Counterpart to lock_hrtimer_base above:
 803 */
 804static inline
 805void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 806{
 807	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
 808}
 809
 810/**
 811 * hrtimer_forward - forward the timer expiry
 812 * @timer:	hrtimer to forward
 813 * @now:	forward past this time
 814 * @interval:	the interval to forward
 815 *
 816 * Forward the timer expiry so it will expire in the future.
 817 * Returns the number of overruns.
 818 *
 819 * Can be safely called from the callback function of @timer. If
 820 * called from other contexts @timer must neither be enqueued nor
 821 * running the callback and the caller needs to take care of
 822 * serialization.
 823 *
 824 * Note: This only updates the timer expiry value and does not requeue
 825 * the timer.
 826 */
 827u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 828{
 829	u64 orun = 1;
 830	ktime_t delta;
 831
 832	delta = ktime_sub(now, hrtimer_get_expires(timer));
 833
 834	if (delta.tv64 < 0)
 835		return 0;
 836
 837	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
 838		return 0;
 839
 840	if (interval.tv64 < hrtimer_resolution)
 841		interval.tv64 = hrtimer_resolution;
 842
 843	if (unlikely(delta.tv64 >= interval.tv64)) {
 844		s64 incr = ktime_to_ns(interval);
 845
 846		orun = ktime_divns(delta, incr);
 847		hrtimer_add_expires_ns(timer, incr * orun);
 848		if (hrtimer_get_expires_tv64(timer) > now.tv64)
 849			return orun;
 850		/*
 851		 * This (and the ktime_add() below) is the
 852		 * correction for exact:
 853		 */
 854		orun++;
 855	}
 856	hrtimer_add_expires(timer, interval);
 857
 858	return orun;
 859}
 860EXPORT_SYMBOL_GPL(hrtimer_forward);
 861
 862/*
 863 * enqueue_hrtimer - internal function to (re)start a timer
 864 *
 865 * The timer is inserted in expiry order. Insertion into the
 866 * red black tree is O(log(n)). Must hold the base lock.
 867 *
 868 * Returns 1 when the new timer is the leftmost timer in the tree.
 869 */
 870static int enqueue_hrtimer(struct hrtimer *timer,
 871			   struct hrtimer_clock_base *base)
 
 872{
 873	debug_activate(timer);
 874
 875	base->cpu_base->active_bases |= 1 << base->index;
 876
 877	timer->state = HRTIMER_STATE_ENQUEUED;
 
 878
 879	return timerqueue_add(&base->active, &timer->node);
 880}
 881
 882/*
 883 * __remove_hrtimer - internal function to remove a timer
 884 *
 885 * Caller must hold the base lock.
 886 *
 887 * High resolution timer mode reprograms the clock event device when the
 888 * timer is the one which expires next. The caller can disable this by setting
 889 * reprogram to zero. This is useful, when the context does a reprogramming
 890 * anyway (e.g. timer interrupt)
 891 */
 892static void __remove_hrtimer(struct hrtimer *timer,
 893			     struct hrtimer_clock_base *base,
 894			     u8 newstate, int reprogram)
 895{
 896	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
 897	u8 state = timer->state;
 898
 899	timer->state = newstate;
 
 900	if (!(state & HRTIMER_STATE_ENQUEUED))
 901		return;
 902
 903	if (!timerqueue_del(&base->active, &timer->node))
 904		cpu_base->active_bases &= ~(1 << base->index);
 905
 906#ifdef CONFIG_HIGH_RES_TIMERS
 907	/*
 908	 * Note: If reprogram is false we do not update
 909	 * cpu_base->next_timer. This happens when we remove the first
 910	 * timer on a remote cpu. No harm as we never dereference
 911	 * cpu_base->next_timer. So the worst thing what can happen is
 912	 * an superflous call to hrtimer_force_reprogram() on the
 913	 * remote cpu later on if the same timer gets enqueued again.
 914	 */
 915	if (reprogram && timer == cpu_base->next_timer)
 916		hrtimer_force_reprogram(cpu_base, 1);
 917#endif
 918}
 919
 920/*
 921 * remove hrtimer, called with base lock held
 922 */
 923static inline int
 924remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
 
 925{
 926	if (hrtimer_is_queued(timer)) {
 927		u8 state = timer->state;
 928		int reprogram;
 
 929
 930		/*
 931		 * Remove the timer and force reprogramming when high
 932		 * resolution mode is active and the timer is on the current
 933		 * CPU. If we remove a timer on another CPU, reprogramming is
 934		 * skipped. The interrupt event on this CPU is fired and
 935		 * reprogramming happens in the interrupt handler. This is a
 936		 * rare case and less expensive than a smp call.
 937		 */
 938		debug_deactivate(timer);
 939		timer_stats_hrtimer_clear_start_info(timer);
 940		reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
 941
 
 
 
 
 
 
 942		if (!restart)
 943			state = HRTIMER_STATE_INACTIVE;
 
 
 944
 945		__remove_hrtimer(timer, base, state, reprogram);
 946		return 1;
 947	}
 948	return 0;
 949}
 950
 951static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
 952					    const enum hrtimer_mode mode)
 953{
 954#ifdef CONFIG_TIME_LOW_RES
 955	/*
 956	 * CONFIG_TIME_LOW_RES indicates that the system has no way to return
 957	 * granular time values. For relative timers we add hrtimer_resolution
 958	 * (i.e. one jiffie) to prevent short timeouts.
 959	 */
 960	timer->is_rel = mode & HRTIMER_MODE_REL;
 961	if (timer->is_rel)
 962		tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
 963#endif
 964	return tim;
 965}
 966
 967/**
 968 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 969 * @timer:	the timer to be added
 970 * @tim:	expiry time
 971 * @delta_ns:	"slack" range for the timer
 972 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 973 *		relative (HRTIMER_MODE_REL)
 974 */
 975void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 976			    u64 delta_ns, const enum hrtimer_mode mode)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 977{
 978	struct hrtimer_clock_base *base, *new_base;
 979	unsigned long flags;
 980	int leftmost;
 981
 982	base = lock_hrtimer_base(timer, &flags);
 
 
 
 
 
 
 
 
 
 983
 984	/* Remove an active timer from the queue: */
 985	remove_hrtimer(timer, base, true);
 
 
 
 
 
 
 
 
 
 
 986
 987	if (mode & HRTIMER_MODE_REL)
 988		tim = ktime_add_safe(tim, base->get_time());
 989
 990	tim = hrtimer_update_lowres(timer, tim, mode);
 991
 992	hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 993
 994	/* Switch the timer base, if necessary: */
 995	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
 
 
 
 
 
 996
 997	timer_stats_hrtimer_set_start_info(timer);
 
 
 998
 999	leftmost = enqueue_hrtimer(timer, new_base);
1000	if (!leftmost)
1001		goto unlock;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1002
1003	if (!hrtimer_is_hres_active(timer)) {
1004		/*
1005		 * Kick to reschedule the next tick to handle the new timer
1006		 * on dynticks target.
1007		 */
1008		if (new_base->cpu_base->nohz_active)
1009			wake_up_nohz_cpu(new_base->cpu_base->cpu);
1010	} else {
1011		hrtimer_reprogram(timer, new_base);
1012	}
1013unlock:
1014	unlock_hrtimer_base(timer, &flags);
1015}
1016EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
1017
1018/**
1019 * hrtimer_try_to_cancel - try to deactivate a timer
1020 * @timer:	hrtimer to stop
1021 *
1022 * Returns:
1023 *  0 when the timer was not active
1024 *  1 when the timer was active
1025 * -1 when the timer is currently excuting the callback function and
 
1026 *    cannot be stopped
1027 */
1028int hrtimer_try_to_cancel(struct hrtimer *timer)
1029{
1030	struct hrtimer_clock_base *base;
1031	unsigned long flags;
1032	int ret = -1;
1033
1034	/*
1035	 * Check lockless first. If the timer is not active (neither
1036	 * enqueued nor running the callback, nothing to do here.  The
1037	 * base lock does not serialize against a concurrent enqueue,
1038	 * so we can avoid taking it.
1039	 */
1040	if (!hrtimer_active(timer))
1041		return 0;
1042
1043	base = lock_hrtimer_base(timer, &flags);
1044
1045	if (!hrtimer_callback_running(timer))
1046		ret = remove_hrtimer(timer, base, false);
1047
1048	unlock_hrtimer_base(timer, &flags);
1049
1050	return ret;
1051
1052}
1053EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
1054
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1055/**
1056 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
1057 * @timer:	the timer to be cancelled
1058 *
1059 * Returns:
1060 *  0 when the timer was not active
1061 *  1 when the timer was active
1062 */
1063int hrtimer_cancel(struct hrtimer *timer)
1064{
1065	for (;;) {
1066		int ret = hrtimer_try_to_cancel(timer);
 
 
1067
1068		if (ret >= 0)
1069			return ret;
1070		cpu_relax();
1071	}
1072}
1073EXPORT_SYMBOL_GPL(hrtimer_cancel);
1074
1075/**
1076 * hrtimer_get_remaining - get remaining time for the timer
1077 * @timer:	the timer to read
1078 * @adjust:	adjust relative timers when CONFIG_TIME_LOW_RES=y
1079 */
1080ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
1081{
1082	unsigned long flags;
1083	ktime_t rem;
1084
1085	lock_hrtimer_base(timer, &flags);
1086	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
1087		rem = hrtimer_expires_remaining_adjusted(timer);
1088	else
1089		rem = hrtimer_expires_remaining(timer);
1090	unlock_hrtimer_base(timer, &flags);
1091
1092	return rem;
1093}
1094EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
1095
1096#ifdef CONFIG_NO_HZ_COMMON
1097/**
1098 * hrtimer_get_next_event - get the time until next expiry event
1099 *
1100 * Returns the next expiry time or KTIME_MAX if no timer is pending.
1101 */
1102u64 hrtimer_get_next_event(void)
1103{
1104	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1105	u64 expires = KTIME_MAX;
1106	unsigned long flags;
1107
1108	raw_spin_lock_irqsave(&cpu_base->lock, flags);
1109
1110	if (!__hrtimer_hres_active(cpu_base))
1111		expires = __hrtimer_get_next_event(cpu_base).tv64;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1112
1113	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1114
1115	return expires;
1116}
1117#endif
1118
 
 
 
 
 
 
 
 
 
 
 
 
1119static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1120			   enum hrtimer_mode mode)
1121{
 
1122	struct hrtimer_cpu_base *cpu_base;
1123	int base;
1124
 
 
 
 
 
 
 
 
 
1125	memset(timer, 0, sizeof(struct hrtimer));
1126
1127	cpu_base = raw_cpu_ptr(&hrtimer_bases);
1128
1129	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
 
 
 
 
 
1130		clock_id = CLOCK_MONOTONIC;
1131
1132	base = hrtimer_clockid_to_base(clock_id);
 
 
 
1133	timer->base = &cpu_base->clock_base[base];
1134	timerqueue_init(&timer->node);
1135
1136#ifdef CONFIG_TIMER_STATS
1137	timer->start_site = NULL;
1138	timer->start_pid = -1;
1139	memset(timer->start_comm, 0, TASK_COMM_LEN);
1140#endif
1141}
1142
1143/**
1144 * hrtimer_init - initialize a timer to the given clock
1145 * @timer:	the timer to be initialized
1146 * @clock_id:	the clock to be used
1147 * @mode:	timer mode abs/rel
 
 
 
 
 
 
1148 */
1149void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1150		  enum hrtimer_mode mode)
1151{
1152	debug_init(timer, clock_id, mode);
1153	__hrtimer_init(timer, clock_id, mode);
1154}
1155EXPORT_SYMBOL_GPL(hrtimer_init);
1156
1157/*
1158 * A timer is active, when it is enqueued into the rbtree or the
1159 * callback function is running or it's in the state of being migrated
1160 * to another cpu.
1161 *
1162 * It is important for this function to not return a false negative.
1163 */
1164bool hrtimer_active(const struct hrtimer *timer)
1165{
1166	struct hrtimer_cpu_base *cpu_base;
1167	unsigned int seq;
1168
1169	do {
1170		cpu_base = READ_ONCE(timer->base->cpu_base);
1171		seq = raw_read_seqcount_begin(&cpu_base->seq);
1172
1173		if (timer->state != HRTIMER_STATE_INACTIVE ||
1174		    cpu_base->running == timer)
1175			return true;
1176
1177	} while (read_seqcount_retry(&cpu_base->seq, seq) ||
1178		 cpu_base != READ_ONCE(timer->base->cpu_base));
1179
1180	return false;
1181}
1182EXPORT_SYMBOL_GPL(hrtimer_active);
1183
1184/*
1185 * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
1186 * distinct sections:
1187 *
1188 *  - queued:	the timer is queued
1189 *  - callback:	the timer is being ran
1190 *  - post:	the timer is inactive or (re)queued
1191 *
1192 * On the read side we ensure we observe timer->state and cpu_base->running
1193 * from the same section, if anything changed while we looked at it, we retry.
1194 * This includes timer->base changing because sequence numbers alone are
1195 * insufficient for that.
1196 *
1197 * The sequence numbers are required because otherwise we could still observe
1198 * a false negative if the read side got smeared over multiple consequtive
1199 * __run_hrtimer() invocations.
1200 */
1201
1202static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
1203			  struct hrtimer_clock_base *base,
1204			  struct hrtimer *timer, ktime_t *now)
 
1205{
1206	enum hrtimer_restart (*fn)(struct hrtimer *);
 
1207	int restart;
1208
1209	lockdep_assert_held(&cpu_base->lock);
1210
1211	debug_deactivate(timer);
1212	cpu_base->running = timer;
1213
1214	/*
1215	 * Separate the ->running assignment from the ->state assignment.
1216	 *
1217	 * As with a regular write barrier, this ensures the read side in
1218	 * hrtimer_active() cannot observe cpu_base->running == NULL &&
1219	 * timer->state == INACTIVE.
1220	 */
1221	raw_write_seqcount_barrier(&cpu_base->seq);
1222
1223	__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
1224	timer_stats_account_hrtimer(timer);
1225	fn = timer->function;
1226
1227	/*
1228	 * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
1229	 * timer is restarted with a period then it becomes an absolute
1230	 * timer. If its not restarted it does not matter.
1231	 */
1232	if (IS_ENABLED(CONFIG_TIME_LOW_RES))
1233		timer->is_rel = false;
1234
1235	/*
1236	 * Because we run timers from hardirq context, there is no chance
1237	 * they get migrated to another cpu, therefore its safe to unlock
1238	 * the timer base.
1239	 */
1240	raw_spin_unlock(&cpu_base->lock);
1241	trace_hrtimer_expire_entry(timer, now);
 
 
1242	restart = fn(timer);
 
 
1243	trace_hrtimer_expire_exit(timer);
1244	raw_spin_lock(&cpu_base->lock);
1245
1246	/*
1247	 * Note: We clear the running state after enqueue_hrtimer and
1248	 * we do not reprogramm the event hardware. Happens either in
1249	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
1250	 *
1251	 * Note: Because we dropped the cpu_base->lock above,
1252	 * hrtimer_start_range_ns() can have popped in and enqueued the timer
1253	 * for us already.
1254	 */
1255	if (restart != HRTIMER_NORESTART &&
1256	    !(timer->state & HRTIMER_STATE_ENQUEUED))
1257		enqueue_hrtimer(timer, base);
1258
1259	/*
1260	 * Separate the ->running assignment from the ->state assignment.
1261	 *
1262	 * As with a regular write barrier, this ensures the read side in
1263	 * hrtimer_active() cannot observe cpu_base->running == NULL &&
1264	 * timer->state == INACTIVE.
1265	 */
1266	raw_write_seqcount_barrier(&cpu_base->seq);
1267
1268	WARN_ON_ONCE(cpu_base->running != timer);
1269	cpu_base->running = NULL;
1270}
1271
1272static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
1273{
1274	struct hrtimer_clock_base *base = cpu_base->clock_base;
1275	unsigned int active = cpu_base->active_bases;
1276
1277	for (; active; base++, active >>= 1) {
1278		struct timerqueue_node *node;
1279		ktime_t basenow;
1280
1281		if (!(active & 0x01))
1282			continue;
1283
1284		basenow = ktime_add(now, base->offset);
1285
1286		while ((node = timerqueue_getnext(&base->active))) {
1287			struct hrtimer *timer;
1288
1289			timer = container_of(node, struct hrtimer, node);
1290
1291			/*
1292			 * The immediate goal for using the softexpires is
1293			 * minimizing wakeups, not running timers at the
1294			 * earliest interrupt after their soft expiration.
1295			 * This allows us to avoid using a Priority Search
1296			 * Tree, which can answer a stabbing query for
1297			 * overlapping intervals and instead use the simple
1298			 * BST we already have.
1299			 * We don't add extra wakeups by delaying timers that
1300			 * are to the right of a not yet expired timer, because that
1301			 * timer will have to trigger a wakeup anyway.
1302			 */
1303			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
1304				break;
1305
1306			__run_hrtimer(cpu_base, base, timer, &basenow);
1307		}
1308	}
1309}
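
/*
 * Note on the expiry window: hrtimer_set_expires_range_ns() stores the
 * requested time as the soft expiry and requested time + slack as the
 * hard expiry, which is also the timerqueue ordering key. The loop
 * above therefore runs a timer anywhere inside its
 * [softexpires, expires] window and stops at the first timer whose
 * soft expiry is still in the future.
 */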
1310
1311#ifdef CONFIG_HIGH_RES_TIMERS
1312
1313/*
1314 * High resolution timer interrupt
1315 * Called with interrupts disabled
1316 */
1317void hrtimer_interrupt(struct clock_event_device *dev)
1318{
1319	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1320	ktime_t expires_next, now, entry_time, delta;
1321	int retries = 0;
1322
1323	BUG_ON(!cpu_base->hres_active);
1324	cpu_base->nr_events++;
1325	dev->next_event.tv64 = KTIME_MAX;
1326
1327	raw_spin_lock(&cpu_base->lock);
1328	entry_time = now = hrtimer_update_base(cpu_base);
1329retry:
1330	cpu_base->in_hrtirq = 1;
1331	/*
1332	 * We set expires_next to KTIME_MAX here with cpu_base->lock
1333	 * held to prevent a timer from being enqueued into our queue via
1334	 * the migration code. This does not affect enqueueing of
1335	 * timers which run their callback and need to be requeued on
1336	 * this CPU.
1337	 */
1338	cpu_base->expires_next.tv64 = KTIME_MAX;
1339
1340	__hrtimer_run_queues(cpu_base, now);
1341
1342	/* Reevaluate the clock bases for the next expiry */
1343	expires_next = __hrtimer_get_next_event(cpu_base);
1344	/*
1345	 * Store the new expiry value so the migration code can verify
1346	 * against it.
1347	 */
1348	cpu_base->expires_next = expires_next;
1349	cpu_base->in_hrtirq = 0;
1350	raw_spin_unlock(&cpu_base->lock);
1351
1352	/* Reprogramming necessary ? */
1353	if (!tick_program_event(expires_next, 0)) {
1354		cpu_base->hang_detected = 0;
1355		return;
1356	}
1357
1358	/*
1359	 * The next timer was already expired due to:
1360	 * - tracing
1361	 * - long-lasting callbacks
1362	 * - being scheduled away when running in a VM
1363	 *
1364	 * We need to prevent looping forever in the hrtimer
1365	 * interrupt routine. We give it 3 attempts to avoid
1366	 * overreacting to some spurious event.
1367	 *
1368	 * Acquire base lock for updating the offsets and retrieving
1369	 * the current time.
1370	 */
1371	raw_spin_lock(&cpu_base->lock);
1372	now = hrtimer_update_base(cpu_base);
1373	cpu_base->nr_retries++;
1374	if (++retries < 3)
1375		goto retry;
1376	/*
1377	 * Give the system a chance to do something other than looping
1378	 * here. We stored the entry time, so we know exactly how long
1379	 * we spent here. We schedule the next event this amount of
1380	 * time away.
1381	 */
1382	cpu_base->nr_hangs++;
1383	cpu_base->hang_detected = 1;
1384	raw_spin_unlock(&cpu_base->lock);
1385	delta = ktime_sub(now, entry_time);
1386	if ((unsigned int)delta.tv64 > cpu_base->max_hang_time)
1387		cpu_base->max_hang_time = (unsigned int) delta.tv64;
1388	/*
1389	 * Limit it to a sensible value as we enforce a longer
1390	 * delay. Give the CPU at least 100ms to catch up.
1391	 */
1392	if (delta.tv64 > 100 * NSEC_PER_MSEC)
1393		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
1394	else
1395		expires_next = ktime_add(now, delta);
1396	tick_program_event(expires_next, 1);
1397	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
1398		    ktime_to_ns(delta));
1399}
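
/*
 * Worked example of the hang handling above (numbers illustrative):
 * if the three retries took 250ms in total, delta exceeds the 100ms
 * cap and the next event is programmed for now + 100ms; for a 30ms
 * overrun it would be programmed for now + 30ms instead.
 */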
1400
1401/*
1402 * local version of hrtimer_peek_ahead_timers() called with interrupts
1403 * disabled.
1404 */
1405static inline void __hrtimer_peek_ahead_timers(void)
1406{
1407	struct tick_device *td;
1408
1409	if (!hrtimer_hres_active())
1410		return;
1411
1412	td = this_cpu_ptr(&tick_cpu_device);
1413	if (td && td->evtdev)
1414		hrtimer_interrupt(td->evtdev);
1415}
1416
1417#else /* CONFIG_HIGH_RES_TIMERS */
1418
1419static inline void __hrtimer_peek_ahead_timers(void) { }
1420
1421#endif	/* !CONFIG_HIGH_RES_TIMERS */
1422
1423/*
1424 * Called from run_local_timers in hardirq context every jiffy
1425 */
1426void hrtimer_run_queues(void)
1427{
1428	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1429	ktime_t now;
1430
1431	if (__hrtimer_hres_active(cpu_base))
1432		return;
1433
1434	/*
1435	 * This _is_ ugly: We have to check periodically whether we
1436	 * can switch to highres and/or nohz mode. The clocksource
1437	 * switch happens with xtime_lock held. Notification from
1438	 * there only sets the check bit in the tick_oneshot code,
1439	 * otherwise we might deadlock vs. xtime_lock.
1440	 */
1441	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
1442		hrtimer_switch_to_hres();
1443		return;
1444	}
1445
1446	raw_spin_lock(&cpu_base->lock);
1447	now = hrtimer_update_base(cpu_base);
1448	__hrtimer_run_queues(cpu_base, now);
1449	raw_spin_unlock(&cpu_base->lock);
1450}
1451
1452/*
1453 * Sleep related functions:
1454 */
1455static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
1456{
1457	struct hrtimer_sleeper *t =
1458		container_of(timer, struct hrtimer_sleeper, timer);
1459	struct task_struct *task = t->task;
1460
1461	t->task = NULL;
1462	if (task)
1463		wake_up_process(task);
1464
1465	return HRTIMER_NORESTART;
1466}
1467
1468void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
1469{
1470	sl->timer.function = hrtimer_wakeup;
1471	sl->task = task;
1472}
1473EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
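
/*
 * Typical on-stack usage (illustrative sketch, 'timeout' being a
 * hypothetical ktime_t; see do_nanosleep() below for the real thing):
 *
 *	struct hrtimer_sleeper t;
 *
 *	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	hrtimer_set_expires(&t.timer, timeout);
 *	hrtimer_init_sleeper(&t, current);
 *	hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
 *	...
 *	hrtimer_cancel(&t.timer);
 *	destroy_hrtimer_on_stack(&t.timer);
 */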
1474
1475static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
1476{
1477	hrtimer_init_sleeper(t, current);
1478
1479	do {
1480		set_current_state(TASK_INTERRUPTIBLE);
1481		hrtimer_start_expires(&t->timer, mode);
1482
1483		if (likely(t->task))
1484			freezable_schedule();
1485
1486		hrtimer_cancel(&t->timer);
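		/*
		 * The stored expiry is absolute after the first pass, so
		 * any further iterations (e.g. after a spurious wakeup)
		 * must restart the timer in absolute mode.
		 */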
1487		mode = HRTIMER_MODE_ABS;
1488
1489	} while (t->task && !signal_pending(current));
1490
1491	__set_current_state(TASK_RUNNING);
1492
1493	return t->task == NULL;
1494}
1495
1496static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
1497{
1498	struct timespec rmt;
1499	ktime_t rem;
1500
1501	rem = hrtimer_expires_remaining(timer);
1502	if (rem.tv64 <= 0)
1503		return 0;
1504	rmt = ktime_to_timespec(rem);
1505
1506	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
1507		return -EFAULT;
1508
1509	return 1;
1510}
1511
1512long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
1513{
1514	struct hrtimer_sleeper t;
1515	struct timespec __user  *rmtp;
1516	int ret = 0;
1517
1518	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
1519				HRTIMER_MODE_ABS);
1520	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
1521
1522	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
1523		goto out;
1524
1525	rmtp = restart->nanosleep.rmtp;
1526	if (rmtp) {
1527		ret = update_rmtp(&t.timer, rmtp);
1528		if (ret <= 0)
1529			goto out;
1530	}
1531
1532	/* The other values in restart are already filled in */
1533	ret = -ERESTART_RESTARTBLOCK;
1534out:
1535	destroy_hrtimer_on_stack(&t.timer);
1536	return ret;
1537}
1538
1539long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
1540		       const enum hrtimer_mode mode, const clockid_t clockid)
1541{
1542	struct restart_block *restart;
1543	struct hrtimer_sleeper t;
1544	int ret = 0;
1545	u64 slack;
1546
1547	slack = current->timer_slack_ns;
1548	if (dl_task(current) || rt_task(current))
1549		slack = 0;
1550
1551	hrtimer_init_on_stack(&t.timer, clockid, mode);
1552	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
1553	if (do_nanosleep(&t, mode))
1554		goto out;
1555
1556	/* Absolute timers do not update the rmtp value and restart: */
1557	if (mode == HRTIMER_MODE_ABS) {
1558		ret = -ERESTARTNOHAND;
1559		goto out;
1560	}
1561
1562	if (rmtp) {
1563		ret = update_rmtp(&t.timer, rmtp);
1564		if (ret <= 0)
1565			goto out;
1566	}
1567
1568	restart = &current->restart_block;
1569	restart->fn = hrtimer_nanosleep_restart;
1570	restart->nanosleep.clockid = t.timer.base->clockid;
1571	restart->nanosleep.rmtp = rmtp;
1572	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
1573
1574	ret = -ERESTART_RESTARTBLOCK;
1575out:
1576	destroy_hrtimer_on_stack(&t.timer);
1577	return ret;
1578}
1579
1580SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
1581		struct timespec __user *, rmtp)
1582{
1583	struct timespec tu;
1584
1585	if (copy_from_user(&tu, rqtp, sizeof(tu)))
1586		return -EFAULT;
1587
1588	if (!timespec_valid(&tu))
1589		return -EINVAL;
1590
1591	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
1592}
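
/*
 * Summary of the interrupted-sleep path (illustrative): when a signal
 * interrupts a relative nanosleep, hrtimer_nanosleep() stores the
 * remaining time in the user supplied rmtp buffer (if any) via
 * update_rmtp() and returns -ERESTART_RESTARTBLOCK, so the syscall is
 * transparently restarted via hrtimer_nanosleep_restart() with an
 * absolute expiry unless a signal handler is actually invoked.
 */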
1593
1594/*
1595 * Functions related to boot-time initialization:
1596 */
1597static void init_hrtimers_cpu(int cpu)
1598{
1599	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
1600	int i;
1601
1602	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1603		cpu_base->clock_base[i].cpu_base = cpu_base;
1604		timerqueue_init_head(&cpu_base->clock_base[i].active);
1605	}
1606
1607	cpu_base->cpu = cpu;
1608	hrtimer_init_hres(cpu_base);
1609}
1610
1611#ifdef CONFIG_HOTPLUG_CPU
1612
1613static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
1614				struct hrtimer_clock_base *new_base)
1615{
1616	struct hrtimer *timer;
1617	struct timerqueue_node *node;
1618
1619	while ((node = timerqueue_getnext(&old_base->active))) {
1620		timer = container_of(node, struct hrtimer, node);
1621		BUG_ON(hrtimer_callback_running(timer));
1622		debug_deactivate(timer);
1623
1624		/*
1625		 * Mark it as ENQUEUED, not INACTIVE, otherwise the
1626		 * timer could be seen as !active and just vanish away
1627		 * under us on another CPU.
1628		 */
1629		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
1630		timer->base = new_base;
1631		/*
1632		 * Enqueue the timers on the new CPU. This does not
1633		 * reprogram the event device in case the timer
1634		 * expires before the earliest timer on this CPU, but we run
1635		 * hrtimer_interrupt() after we have migrated everything to
1636		 * sort out already expired timers and reprogram the
1637		 * event device.
1638		 */
1639		enqueue_hrtimer(timer, new_base);
1640	}
1641}
1642
1643static void migrate_hrtimers(int scpu)
1644{
1645	struct hrtimer_cpu_base *old_base, *new_base;
1646	int i;
1647
1648	BUG_ON(cpu_online(scpu));
1649	tick_cancel_sched_timer(scpu);
1650
1651	local_irq_disable();
1652	old_base = &per_cpu(hrtimer_bases, scpu);
1653	new_base = this_cpu_ptr(&hrtimer_bases);
1654	/*
1655	 * The caller is globally serialized and nobody else
1656	 * takes two locks at once, so deadlock is not possible.
1657	 */
1658	raw_spin_lock(&new_base->lock);
1659	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1660
1661	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1662		migrate_hrtimer_list(&old_base->clock_base[i],
1663				     &new_base->clock_base[i]);
1664	}
1665
1666	raw_spin_unlock(&old_base->lock);
1667	raw_spin_unlock(&new_base->lock);
1668
1669	/* Check if we have expired work to do */
1670	__hrtimer_peek_ahead_timers();
1671	local_irq_enable();
1672}
1673
1674#endif /* CONFIG_HOTPLUG_CPU */
1675
1676static int hrtimer_cpu_notify(struct notifier_block *self,
1677					unsigned long action, void *hcpu)
1678{
1679	int scpu = (long)hcpu;
1680
1681	switch (action) {
1682
1683	case CPU_UP_PREPARE:
1684	case CPU_UP_PREPARE_FROZEN:
1685		init_hrtimers_cpu(scpu);
1686		break;
1687
1688#ifdef CONFIG_HOTPLUG_CPU
1689	case CPU_DEAD:
1690	case CPU_DEAD_FROZEN:
1691		migrate_hrtimers(scpu);
1692		break;
1693#endif
1694
1695	default:
1696		break;
1697	}
1698
1699	return NOTIFY_OK;
1700}
1701
1702static struct notifier_block hrtimers_nb = {
1703	.notifier_call = hrtimer_cpu_notify,
1704};
1705
1706void __init hrtimers_init(void)
1707{
1708	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
1709			  (void *)(long)smp_processor_id());
1710	register_cpu_notifier(&hrtimers_nb);
1711}
1712
1713/**
1714 * schedule_hrtimeout_range_clock - sleep until timeout
1715 * @expires:	timeout value (ktime_t)
1716 * @delta:	slack in expires timeout (ktime_t)
1717 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1718 * @clock:	timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
1719 */
1720int __sched
1721schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
1722			       const enum hrtimer_mode mode, int clock)
1723{
1724	struct hrtimer_sleeper t;
1725
1726	/*
1727	 * Optimize when a zero timeout value is given. It does not
1728	 * matter whether this is an absolute or a relative time.
1729	 */
1730	if (expires && !expires->tv64) {
1731		__set_current_state(TASK_RUNNING);
1732		return 0;
1733	}
1734
1735	/*
1736	 * A NULL parameter means "infinite"
1737	 */
1738	if (!expires) {
1739		schedule();
1740		return -EINTR;
1741	}
1742
1743	hrtimer_init_on_stack(&t.timer, clock, mode);
1744	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
1745
1746	hrtimer_init_sleeper(&t, current);
1747
1748	hrtimer_start_expires(&t.timer, mode);
1749
1750	if (likely(t.task))
1751		schedule();
1752
1753	hrtimer_cancel(&t.timer);
1754	destroy_hrtimer_on_stack(&t.timer);
1755
1756	__set_current_state(TASK_RUNNING);
1757
1758	return !t.task ? 0 : -EINTR;
1759}
1760
1761/**
1762 * schedule_hrtimeout_range - sleep until timeout
1763 * @expires:	timeout value (ktime_t)
1764 * @delta:	slack in expires timeout (ktime_t)
1765 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1766 *
1767 * Make the current task sleep until the given expiry time has
1768 * elapsed. The routine will return immediately unless
1769 * the current task state has been set (see set_current_state()).
1770 *
1771 * The @delta argument gives the kernel the freedom to schedule the
1772 * actual wakeup to a time that is both power and performance friendly.
1773 * The kernel gives the normal best-effort behavior for "@expires+@delta",
1774 * but may decide to fire the timer earlier, though never earlier than @expires.
1775 *
1776 * You can set the task state as follows -
1777 *
1778 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
1779 * pass before the routine returns.
1780 *
1781 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1782 * delivered to the current task.
1783 *
1784 * The current task state is guaranteed to be TASK_RUNNING when this
1785 * routine returns.
1786 *
1787 * Returns 0 when the timer has expired, otherwise -EINTR.
1788 */
1789int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
1790				     const enum hrtimer_mode mode)
1791{
1792	return schedule_hrtimeout_range_clock(expires, delta, mode,
1793					      CLOCK_MONOTONIC);
1794}
1795EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
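
/*
 * Illustrative usage (values hypothetical): sleep for roughly 20ms
 * with 1ms of slack and detect an early wakeup by a signal:
 *
 *	ktime_t to = ms_to_ktime(20);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (schedule_hrtimeout_range(&to, NSEC_PER_MSEC, HRTIMER_MODE_REL))
 *		return -EINTR;
 */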
1796
1797/**
1798 * schedule_hrtimeout - sleep until timeout
1799 * @expires:	timeout value (ktime_t)
1800 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1801 *
1802 * Make the current task sleep until the given expiry time has
1803 * elapsed. The routine will return immediately unless
1804 * the current task state has been set (see set_current_state()).
1805 *
1806 * You can set the task state as follows -
1807 *
1808 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
1809 * pass before the routine returns.
1810 *
1811 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1812 * delivered to the current task.
1813 *
1814 * The current task state is guaranteed to be TASK_RUNNING when this
1815 * routine returns.
1816 *
1817 * Returns 0 when the timer has expired, otherwise -EINTR.
1818 */
1819int __sched schedule_hrtimeout(ktime_t *expires,
1820			       const enum hrtimer_mode mode)
1821{
1822	return schedule_hrtimeout_range(expires, 0, mode);
1823}
1824EXPORT_SYMBOL_GPL(schedule_hrtimeout);