v6.9.4
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * This file contains the functions which manage clocksource drivers.
   4 *
   5 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   9
  10#include <linux/device.h>
  11#include <linux/clocksource.h>
  12#include <linux/init.h>
  13#include <linux/module.h>
  14#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
  15#include <linux/tick.h>
  16#include <linux/kthread.h>
  17#include <linux/prandom.h>
  18#include <linux/cpu.h>
  19
  20#include "tick-internal.h"
  21#include "timekeeping_internal.h"
  22
  23/**
  24 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
  25 * @mult:	pointer to mult variable
  26 * @shift:	pointer to shift variable
  27 * @from:	frequency to convert from
  28 * @to:		frequency to convert to
  29 * @maxsec:	guaranteed runtime conversion range in seconds
  30 *
  31 * The function evaluates the shift/mult pair for the scaled math
  32 * operations of clocksources and clockevents.
  33 *
  34 * @to and @from are frequency values in HZ. For clock sources @to is
  35 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
  36 * events @to is the counter frequency and @from is NSEC_PER_SEC.
  37 *
  38 * The @maxsec conversion range argument controls the time frame in
  39 * seconds which must be covered by the runtime conversion with the
  40 * calculated mult and shift factors. This guarantees that no 64bit
  41 * overflow happens when the input value of the conversion is
  42 * multiplied with the calculated mult factor. Larger ranges may
  43 * reduce the conversion accuracy by choosing smaller mult and shift
  44 * factors.
  45 */
  46void
  47clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
  48{
  49	u64 tmp;
  50	u32 sft, sftacc = 32;
  51
  52	/*
  53	 * Calculate the shift factor which is limiting the conversion
  54	 * range:
  55	 */
  56	tmp = ((u64)maxsec * from) >> 32;
  57	while (tmp) {
  58		tmp >>= 1;
  59		sftacc--;
  60	}
  61
  62	/*
  63	 * Find the conversion shift/mult pair which has the best
  64	 * accuracy and fits the maxsec conversion range:
  65	 */
  66	for (sft = 32; sft > 0; sft--) {
  67		tmp = (u64) to << sft;
  68		tmp += from / 2;
  69		do_div(tmp, from);
  70		if ((tmp >> sftacc) == 0)
  71			break;
  72	}
  73	*mult = tmp;
  74	*shift = sft;
  75}
  76EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
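/*
 * For example, a hypothetical driver with a 19.2 MHz free-running counter
 * could derive its mult/shift pair as sketched below (illustrative only;
 * the 600 s range mirrors the cap applied by
 * __clocksource_update_freq_scale() later in this file):
 */
#if 0	/* illustrative sketch, not part of the kernel source */
	u32 mult, shift;

	clocks_calc_mult_shift(&mult, &shift, 19200000, NSEC_PER_SEC, 600);
	/* Converting one second worth of cycles back yields ~NSEC_PER_SEC: */
	pr_info("ns = %lld\n", clocksource_cyc2ns(19200000, mult, shift));
#endif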
  77
  78/*[Clocksource internal variables]---------
  79 * curr_clocksource:
  80 *	currently selected clocksource.
  81 * suspend_clocksource:
  82 *	used to calculate the suspend time.
  83 * clocksource_list:
  84 *	linked list with the registered clocksources
  85 * clocksource_mutex:
  86 *	protects manipulations to curr_clocksource and the clocksource_list
  87 * override_name:
  88 *	Name of the user-specified clocksource.
  89 */
  90static struct clocksource *curr_clocksource;
  91static struct clocksource *suspend_clocksource;
  92static LIST_HEAD(clocksource_list);
  93static DEFINE_MUTEX(clocksource_mutex);
  94static char override_name[CS_NAME_LEN];
  95static int finished_booting;
  96static u64 suspend_start;
  97
  98/*
  99 * Interval: 0.5sec.
 100 */
 101#define WATCHDOG_INTERVAL (HZ >> 1)
 102#define WATCHDOG_INTERVAL_MAX_NS ((2 * WATCHDOG_INTERVAL) * (NSEC_PER_SEC / HZ))
 103
 104/*
 105 * Threshold: 0.0312s, when doubled: 0.0625s.
 106 * Also a default for cs->uncertainty_margin when registering clocks.
 107 */
 108#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 5)
 109
 110/*
 111 * Maximum permissible delay between two readouts of the watchdog
 112 * clocksource surrounding a read of the clocksource being validated.
 113 * This delay could be due to SMIs, NMIs, or to VCPU preemptions.  Used as
 114 * a lower bound for cs->uncertainty_margin values when registering clocks.
 115 *
 116 * The default of 500 parts per million is based on NTP's limits.
 117 * If a clocksource is good enough for NTP, it is good enough for us!
 118 */
 119#ifdef CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
 120#define MAX_SKEW_USEC	CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
 121#else
 122#define MAX_SKEW_USEC	(125 * WATCHDOG_INTERVAL / HZ)
 123#endif
 124
 125#define WATCHDOG_MAX_SKEW (MAX_SKEW_USEC * NSEC_PER_USEC)
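/*
 * Worked numbers for the macros above, assuming HZ == 250 and no Kconfig
 * override of the skew limit:
 *
 *	WATCHDOG_INTERVAL        = 250 >> 1             = 125 jiffies (0.5 s)
 *	WATCHDOG_INTERVAL_MAX_NS = 250 * 4,000,000 ns   = 1,000,000,000 ns (1 s)
 *	MAX_SKEW_USEC            = 125 * 125 / 250      = 62 us
 *	WATCHDOG_MAX_SKEW        = 62 * NSEC_PER_USEC   = 62,000 ns
 */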
 126
 127#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 128static void clocksource_watchdog_work(struct work_struct *work);
 129static void clocksource_select(void);
 130
 131static LIST_HEAD(watchdog_list);
 132static struct clocksource *watchdog;
 133static struct timer_list watchdog_timer;
 134static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
 135static DEFINE_SPINLOCK(watchdog_lock);
 136static int watchdog_running;
 137static atomic_t watchdog_reset_pending;
 138static int64_t watchdog_max_interval;
 139
 140static inline void clocksource_watchdog_lock(unsigned long *flags)
 141{
 142	spin_lock_irqsave(&watchdog_lock, *flags);
 143}
 144
 145static inline void clocksource_watchdog_unlock(unsigned long *flags)
 146{
 147	spin_unlock_irqrestore(&watchdog_lock, *flags);
 148}
 149
 150static int clocksource_watchdog_kthread(void *data);
 151static void __clocksource_change_rating(struct clocksource *cs, int rating);
 152
 153static void clocksource_watchdog_work(struct work_struct *work)
 154{
 155	/*
 156	 * We cannot directly run clocksource_watchdog_kthread() here, because
 157	 * clocksource_select() calls timekeeping_notify() which uses
 158 * stop_machine(). One cannot use stop_machine() from a workqueue() due
 159 * to lock inversions w.r.t. CPU hotplug.
 160	 *
 161	 * Also, we only ever run this work once or twice during the lifetime
 162	 * of the kernel, so there is no point in creating a more permanent
 163	 * kthread for this.
 164	 *
 165	 * If kthread_run fails the next watchdog scan over the
 166	 * watchdog_list will find the unstable clock again.
 167	 */
 168	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
 169}
 170
 171static void __clocksource_unstable(struct clocksource *cs)
 172{
 173	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 174	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 175
 176	/*
 177	 * If the clocksource is registered clocksource_watchdog_kthread() will
 178	 * re-rate and re-select.
 179	 */
 180	if (list_empty(&cs->list)) {
 181		cs->rating = 0;
 182		return;
 183	}
 184
 185	if (cs->mark_unstable)
 186		cs->mark_unstable(cs);
 187
 188	/* kick clocksource_watchdog_kthread() */
 189	if (finished_booting)
 190		schedule_work(&watchdog_work);
 191}
 192
 193/**
 194 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 195 * @cs:		clocksource to be marked unstable
 196 *
 197 * This function is called by the x86 TSC code to mark clocksources as unstable;
 198 * it defers demotion and re-selection to a kthread.
 199 */
 200void clocksource_mark_unstable(struct clocksource *cs)
 201{
 202	unsigned long flags;
 203
 204	spin_lock_irqsave(&watchdog_lock, flags);
 205	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
 206		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
 207			list_add(&cs->wd_list, &watchdog_list);
 208		__clocksource_unstable(cs);
 209	}
 210	spin_unlock_irqrestore(&watchdog_lock, flags);
 211}
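/*
 * Illustrative call site (hypothetical names; the real callers live in
 * arch code such as the x86 TSC machinery):
 */
#if 0
	extern struct clocksource foo_cs;	/* registered earlier */

	if (foo_counter_misbehaved())		/* made-up predicate */
		clocksource_mark_unstable(&foo_cs);
#endif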
 212
 213static int verify_n_cpus = 8;
 214module_param(verify_n_cpus, int, 0644);
 215
 216enum wd_read_status {
 217	WD_READ_SUCCESS,
 218	WD_READ_UNSTABLE,
 219	WD_READ_SKIP
 220};
 221
 222static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
 223{
 224	unsigned int nretries, max_retries;
 225	u64 wd_end, wd_end2, wd_delta;
 226	int64_t wd_delay, wd_seq_delay;
 227
 228	max_retries = clocksource_get_max_watchdog_retry();
 229	for (nretries = 0; nretries <= max_retries; nretries++) {
 230		local_irq_disable();
 231		*wdnow = watchdog->read(watchdog);
 232		*csnow = cs->read(cs);
 233		wd_end = watchdog->read(watchdog);
 234		wd_end2 = watchdog->read(watchdog);
 235		local_irq_enable();
 236
 237		wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
 238		wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
 239					      watchdog->shift);
 240		if (wd_delay <= WATCHDOG_MAX_SKEW) {
 241			if (nretries > 1 || nretries >= max_retries) {
 242				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
 243					smp_processor_id(), watchdog->name, nretries);
 244			}
 245			return WD_READ_SUCCESS;
 246		}
 247
 248		/*
 249		 * Now compute the delay between consecutive watchdog reads to see
 250		 * if there is too much external interference causing significant
 251		 * delay in reading both the clocksource and the watchdog.
 252		 *
 253		 * If consecutive WD read-back delay > WATCHDOG_MAX_SKEW/2,
 254		 * report system busy, reinit the watchdog and skip the current
 255		 * watchdog test.
 256		 */
 257		wd_delta = clocksource_delta(wd_end2, wd_end, watchdog->mask);
 258		wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift);
 259		if (wd_seq_delay > WATCHDOG_MAX_SKEW/2)
 260			goto skip_test;
 261	}
 262
 263	pr_warn("timekeeping watchdog on CPU%d: wd-%s-wd excessive read-back delay of %lldns vs. limit of %ldns, wd-wd read-back delay only %lldns, attempt %d, marking %s unstable\n",
 264		smp_processor_id(), cs->name, wd_delay, WATCHDOG_MAX_SKEW, wd_seq_delay, nretries, cs->name);
 265	return WD_READ_UNSTABLE;
 266
 267skip_test:
 268	pr_info("timekeeping watchdog on CPU%d: %s wd-wd read-back delay of %lldns\n",
 269		smp_processor_id(), watchdog->name, wd_seq_delay);
 270	pr_info("wd-%s-wd read-back delay of %lldns, clock-skew test skipped!\n",
 271		cs->name, wd_delay);
 272	return WD_READ_SKIP;
 273}
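/*
 * Worked example for the logic above (illustrative numbers, using the
 * 62,000 ns WATCHDOG_MAX_SKEW from the HZ == 250 case): if the two
 * watchdog reads surrounding the clocksource read span 100,000 ns,
 * wd_delay exceeds the limit and the read is retried. If, in addition,
 * the back-to-back reads wd_end -> wd_end2 span more than 31,000 ns
 * (WATCHDOG_MAX_SKEW / 2), the system is deemed too busy and the skew
 * test is abandoned via WD_READ_SKIP.
 */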
 274
 275static u64 csnow_mid;
 276static cpumask_t cpus_ahead;
 277static cpumask_t cpus_behind;
 278static cpumask_t cpus_chosen;
 279
 280static void clocksource_verify_choose_cpus(void)
 281{
 282	int cpu, i, n = verify_n_cpus;
 283
 284	if (n < 0) {
 285		/* Check all of the CPUs. */
 286		cpumask_copy(&cpus_chosen, cpu_online_mask);
 287		cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
 288		return;
 289	}
 290
 291	/* If no checking desired, or no other CPU to check, leave. */
 292	cpumask_clear(&cpus_chosen);
 293	if (n == 0 || num_online_cpus() <= 1)
 294		return;
 295
 296	/* Make sure to select at least one CPU other than the current CPU. */
 297	cpu = cpumask_first(cpu_online_mask);
 298	if (cpu == smp_processor_id())
 299		cpu = cpumask_next(cpu, cpu_online_mask);
 300	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
 301		return;
 302	cpumask_set_cpu(cpu, &cpus_chosen);
 303
 304	/* Force a sane value for the boot parameter. */
 305	if (n > nr_cpu_ids)
 306		n = nr_cpu_ids;
 307
 308	/*
 309	 * Randomly select the specified number of CPUs.  If the same
 310	 * CPU is selected multiple times, that CPU is checked only once,
 311	 * and no replacement CPU is selected.  This gracefully handles
 312	 * situations where verify_n_cpus is greater than the number of
 313	 * CPUs that are currently online.
 314	 */
 315	for (i = 1; i < n; i++) {
 316		cpu = get_random_u32_below(nr_cpu_ids);
 317		cpu = cpumask_next(cpu - 1, cpu_online_mask);
 318		if (cpu >= nr_cpu_ids)
 319			cpu = cpumask_first(cpu_online_mask);
 320		if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
 321			cpumask_set_cpu(cpu, &cpus_chosen);
 322	}
 323
 324	/* Don't verify ourselves. */
 325	cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
 326}
 327
 328static void clocksource_verify_one_cpu(void *csin)
 329{
 330	struct clocksource *cs = (struct clocksource *)csin;
 331
 332	csnow_mid = cs->read(cs);
 333}
 334
 335void clocksource_verify_percpu(struct clocksource *cs)
 336{
 337	int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
 338	u64 csnow_begin, csnow_end;
 339	int cpu, testcpu;
 340	s64 delta;
 341
 342	if (verify_n_cpus == 0)
 343		return;
 344	cpumask_clear(&cpus_ahead);
 345	cpumask_clear(&cpus_behind);
 346	cpus_read_lock();
 347	preempt_disable();
 348	clocksource_verify_choose_cpus();
 349	if (cpumask_empty(&cpus_chosen)) {
 350		preempt_enable();
 351		cpus_read_unlock();
 352		pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
 353		return;
 354	}
 355	testcpu = smp_processor_id();
 356	pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
 357	for_each_cpu(cpu, &cpus_chosen) {
 358		if (cpu == testcpu)
 359			continue;
 360		csnow_begin = cs->read(cs);
 361		smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
 362		csnow_end = cs->read(cs);
 363		delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
 364		if (delta < 0)
 365			cpumask_set_cpu(cpu, &cpus_behind);
 366		delta = (csnow_end - csnow_mid) & cs->mask;
 367		if (delta < 0)
 368			cpumask_set_cpu(cpu, &cpus_ahead);
 369		delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
 370		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
 371		if (cs_nsec > cs_nsec_max)
 372			cs_nsec_max = cs_nsec;
 373		if (cs_nsec < cs_nsec_min)
 374			cs_nsec_min = cs_nsec;
 375	}
 376	preempt_enable();
 377	cpus_read_unlock();
 378	if (!cpumask_empty(&cpus_ahead))
 379		pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
 380			cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
 381	if (!cpumask_empty(&cpus_behind))
 382		pr_warn("        CPUs %*pbl behind CPU %d for clocksource %s.\n",
 383			cpumask_pr_args(&cpus_behind), testcpu, cs->name);
 384	if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
 385		pr_warn("        CPU %d check durations %lldns - %lldns for clocksource %s.\n",
 386			testcpu, cs_nsec_min, cs_nsec_max, cs->name);
 387}
 388EXPORT_SYMBOL_GPL(clocksource_verify_percpu);
 389
 390static inline void clocksource_reset_watchdog(void)
 391{
 392	struct clocksource *cs;
 393
 394	list_for_each_entry(cs, &watchdog_list, wd_list)
 395		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
 396}
 397
 398
 399static void clocksource_watchdog(struct timer_list *unused)
 400{
 401	u64 csnow, wdnow, cslast, wdlast, delta;
 402	int64_t wd_nsec, cs_nsec, interval;
 403	int next_cpu, reset_pending;
 404	struct clocksource *cs;
 405	enum wd_read_status read_ret;
 406	unsigned long extra_wait = 0;
 407	u32 md;
 408
 409	spin_lock(&watchdog_lock);
 410	if (!watchdog_running)
 411		goto out;
 412
 413	reset_pending = atomic_read(&watchdog_reset_pending);
 414
 415	list_for_each_entry(cs, &watchdog_list, wd_list) {
 416
 417		/* Clocksource already marked unstable? */
 418		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
 419			if (finished_booting)
 420				schedule_work(&watchdog_work);
 421			continue;
 422		}
 423
 424		read_ret = cs_watchdog_read(cs, &csnow, &wdnow);
 425
 426		if (read_ret == WD_READ_UNSTABLE) {
 427			/* Clock readout unreliable, so give it up. */
 428			__clocksource_unstable(cs);
 429			continue;
 430		}
 431
 432		/*
 433		 * When WD_READ_SKIP is returned, it means the system is likely
 434		 * under very heavy load, where the latency of reading the
 435		 * watchdog/clocksource is large and affects the accuracy of the
 436		 * watchdog check. So give the system some breathing room and
 437		 * suspend the watchdog check for 5 minutes.
 438		 */
 439		if (read_ret == WD_READ_SKIP) {
 440			/*
 441			 * As the watchdog timer will be suspended, and
 442			 * cs->last could remain unchanged for 5 minutes, reset
 443			 * the counters.
 444			 */
 445			clocksource_reset_watchdog();
 446			extra_wait = HZ * 300;
 447			break;
 448		}
 449
 450		/* Clocksource initialized? */
 451		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
 452		    atomic_read(&watchdog_reset_pending)) {
 453			cs->flags |= CLOCK_SOURCE_WATCHDOG;
 454			cs->wd_last = wdnow;
 455			cs->cs_last = csnow;
 456			continue;
 457		}
 458
 459		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
 460		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
 461					     watchdog->shift);
 462
 463		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
 464		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
 465		wdlast = cs->wd_last; /* save these in case we print them */
 466		cslast = cs->cs_last;
 467		cs->cs_last = csnow;
 468		cs->wd_last = wdnow;
 469
 470		if (atomic_read(&watchdog_reset_pending))
 471			continue;
 472
 473		/*
 474		 * The processing of timer softirqs can get delayed (usually
 475		 * on account of ksoftirqd not getting to run in a timely
 476		 * manner), which causes the watchdog interval to stretch.
 477		 * Skew detection may fail for longer watchdog intervals
 478		 * on account of fixed margins being used.
 479		 * Some clocksources, e.g. acpi_pm, cannot tolerate
 480		 * watchdog intervals longer than a few seconds.
 481		 */
 482		interval = max(cs_nsec, wd_nsec);
 483		if (unlikely(interval > WATCHDOG_INTERVAL_MAX_NS)) {
 484			if (system_state > SYSTEM_SCHEDULING &&
 485			    interval > 2 * watchdog_max_interval) {
 486				watchdog_max_interval = interval;
 487				pr_warn("Long readout interval, skipping watchdog check: cs_nsec: %lld wd_nsec: %lld\n",
 488					cs_nsec, wd_nsec);
 489			}
 490			watchdog_timer.expires = jiffies;
 491			continue;
 492		}
 493
 494		/* Check the deviation from the watchdog clocksource. */
 495		md = cs->uncertainty_margin + watchdog->uncertainty_margin;
 496		if (abs(cs_nsec - wd_nsec) > md) {
 497			s64 cs_wd_msec;
 498			s64 wd_msec;
 499			u32 wd_rem;
 500
 501			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
 502				smp_processor_id(), cs->name);
 503			pr_warn("                      '%s' wd_nsec: %lld wd_now: %llx wd_last: %llx mask: %llx\n",
 504				watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);
 505			pr_warn("                      '%s' cs_nsec: %lld cs_now: %llx cs_last: %llx mask: %llx\n",
 506				cs->name, cs_nsec, csnow, cslast, cs->mask);
 507			cs_wd_msec = div_s64_rem(cs_nsec - wd_nsec, 1000 * 1000, &wd_rem);
 508			wd_msec = div_s64_rem(wd_nsec, 1000 * 1000, &wd_rem);
 509			pr_warn("                      Clocksource '%s' skewed %lld ns (%lld ms) over watchdog '%s' interval of %lld ns (%lld ms)\n",
 510				cs->name, cs_nsec - wd_nsec, cs_wd_msec, watchdog->name, wd_nsec, wd_msec);
 511			if (curr_clocksource == cs)
 512				pr_warn("                      '%s' is current clocksource.\n", cs->name);
 513			else if (curr_clocksource)
 514				pr_warn("                      '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
 515			else
 516				pr_warn("                      No current clocksource.\n");
 517			__clocksource_unstable(cs);
 518			continue;
 519		}
 520
 521		if (cs == curr_clocksource && cs->tick_stable)
 522			cs->tick_stable(cs);
 523
 524		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
 525		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
 526		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
 527			/* Mark it valid for high-res. */
 528			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 529
 530			/*
 531			 * clocksource_done_booting() will sort it if
 532			 * finished_booting is not set yet.
 533			 */
 534			if (!finished_booting)
 535				continue;
 536
 537			/*
 538			 * If this is not the current clocksource let
 539			 * the watchdog thread reselect it. Due to the
 540			 * change to high res this clocksource might
 541			 * be preferred now. If it is the current
 542			 * clocksource let the tick code know about
 543			 * that change.
 544			 */
 545			if (cs != curr_clocksource) {
 546				cs->flags |= CLOCK_SOURCE_RESELECT;
 547				schedule_work(&watchdog_work);
 548			} else {
 549				tick_clock_notify();
 550			}
 551		}
 552	}
 553
 554	/*
 555	 * We only clear watchdog_reset_pending when we have done a
 556	 * full cycle through all clocksources.
 557	 */
 558	if (reset_pending)
 559		atomic_dec(&watchdog_reset_pending);
 560
 561	/*
 562	 * Cycle through CPUs to check if the CPUs stay synchronized
 563	 * to each other.
 564	 */
 565	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
 566	if (next_cpu >= nr_cpu_ids)
 567		next_cpu = cpumask_first(cpu_online_mask);
 568
 569	/*
 570	 * Arm the timer if not already pending: could race with a concurrent
 571	 * clocksource_stop_watchdog()/clocksource_start_watchdog() pair.
 572	 */
 573	if (!timer_pending(&watchdog_timer)) {
 574		watchdog_timer.expires += WATCHDOG_INTERVAL + extra_wait;
 575		add_timer_on(&watchdog_timer, next_cpu);
 576	}
 577out:
 578	spin_unlock(&watchdog_lock);
 579}
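/*
 * Worked example for the skew check above (illustrative numbers): with
 * both uncertainty margins at the 2 * WATCHDOG_MAX_SKEW floor of
 * 124,000 ns each, md = 248,000 ns. A clocksource measuring 500.3 ms
 * against a 500.0 ms watchdog interval (300,000 ns of skew, i.e.
 * 600 ppm) would therefore be marked unstable.
 */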
 580
 581static inline void clocksource_start_watchdog(void)
 582{
 583	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
 584		return;
 585	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
 586	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
 587	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
 588	watchdog_running = 1;
 589}
 590
 591static inline void clocksource_stop_watchdog(void)
 592{
 593	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
 594		return;
 595	del_timer(&watchdog_timer);
 596	watchdog_running = 0;
 597}
 598
 599static void clocksource_resume_watchdog(void)
 600{
 601	atomic_inc(&watchdog_reset_pending);
 602}
 603
 604static void clocksource_enqueue_watchdog(struct clocksource *cs)
 605{
 606	INIT_LIST_HEAD(&cs->wd_list);
 607
 608	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
 609		/* cs is a clocksource to be watched. */
 610		list_add(&cs->wd_list, &watchdog_list);
 611		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
 612	} else {
 613		/* cs is a watchdog. */
 614		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 615			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 616	}
 617}
 618
 619static void clocksource_select_watchdog(bool fallback)
 620{
 621	struct clocksource *cs, *old_wd;
 622	unsigned long flags;
 623
 624	spin_lock_irqsave(&watchdog_lock, flags);
 625	/* save current watchdog */
 626	old_wd = watchdog;
 627	if (fallback)
 628		watchdog = NULL;
 629
 630	list_for_each_entry(cs, &clocksource_list, list) {
 631		/* cs is a clocksource to be watched. */
 632		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
 633			continue;
 634
 635		/* Skip current if we were requested for a fallback. */
 636		if (fallback && cs == old_wd)
 637			continue;
 638
 639		/* Pick the best watchdog. */
 640		if (!watchdog || cs->rating > watchdog->rating)
 641			watchdog = cs;
 642	}
 643	/* If we failed to find a fallback restore the old one. */
 644	if (!watchdog)
 645		watchdog = old_wd;
 646
 647	/* If we changed the watchdog we need to reset cycles. */
 648	if (watchdog != old_wd)
 649		clocksource_reset_watchdog();
 650
 651	/* Check if the watchdog timer needs to be started. */
 652	clocksource_start_watchdog();
 653	spin_unlock_irqrestore(&watchdog_lock, flags);
 654}
 655
 656static void clocksource_dequeue_watchdog(struct clocksource *cs)
 657{
 658	if (cs != watchdog) {
 659		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
 660			/* cs is a watched clocksource. */
 661			list_del_init(&cs->wd_list);
 662			/* Check if the watchdog timer needs to be stopped. */
 663			clocksource_stop_watchdog();
 664		}
 665	}
 666}
 667
 668static int __clocksource_watchdog_kthread(void)
 669{
 670	struct clocksource *cs, *tmp;
 671	unsigned long flags;
 672	int select = 0;
 673
 674	/* Do any required per-CPU skew verification. */
 675	if (curr_clocksource &&
 676	    curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
 677	    curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
 678		clocksource_verify_percpu(curr_clocksource);
 679
 680	spin_lock_irqsave(&watchdog_lock, flags);
 681	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
 682		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
 683			list_del_init(&cs->wd_list);
 684			__clocksource_change_rating(cs, 0);
 685			select = 1;
 686		}
 687		if (cs->flags & CLOCK_SOURCE_RESELECT) {
 688			cs->flags &= ~CLOCK_SOURCE_RESELECT;
 689			select = 1;
 690		}
 691	}
 692	/* Check if the watchdog timer needs to be stopped. */
 693	clocksource_stop_watchdog();
 694	spin_unlock_irqrestore(&watchdog_lock, flags);
 695
 696	return select;
 697}
 698
 699static int clocksource_watchdog_kthread(void *data)
 700{
 701	mutex_lock(&clocksource_mutex);
 702	if (__clocksource_watchdog_kthread())
 703		clocksource_select();
 704	mutex_unlock(&clocksource_mutex);
 705	return 0;
 706}
 707
 708static bool clocksource_is_watchdog(struct clocksource *cs)
 709{
 710	return cs == watchdog;
 711}
 712
 713#else /* CONFIG_CLOCKSOURCE_WATCHDOG */
 714
 715static void clocksource_enqueue_watchdog(struct clocksource *cs)
 716{
 717	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 718		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 719}
 720
 721static void clocksource_select_watchdog(bool fallback) { }
 722static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 723static inline void clocksource_resume_watchdog(void) { }
 724static inline int __clocksource_watchdog_kthread(void) { return 0; }
 725static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 726void clocksource_mark_unstable(struct clocksource *cs) { }
 727
 728static inline void clocksource_watchdog_lock(unsigned long *flags) { }
 729static inline void clocksource_watchdog_unlock(unsigned long *flags) { }
 730
 731#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 732
 733static bool clocksource_is_suspend(struct clocksource *cs)
 734{
 735	return cs == suspend_clocksource;
 736}
 737
 738static void __clocksource_suspend_select(struct clocksource *cs)
 739{
 740	/*
 741	 * Skip the clocksource which will be stopped in suspend state.
 742	 */
 743	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
 744		return;
 745
 746	/*
 747	 * The nonstop clocksource can be selected as the suspend clocksource to
 748	 * calculate the suspend time, so it should not supply suspend/resume
 749	 * interfaces to suspend the nonstop clocksource when the system suspends.
 750	 */
 751	if (cs->suspend || cs->resume) {
 752		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
 753			cs->name);
 754	}
 755
 756	/* Pick the best rating. */
 757	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
 758		suspend_clocksource = cs;
 759}
 760
 761/**
 762 * clocksource_suspend_select - Select the best clocksource for suspend timing
 763 * @fallback:	whether to select a fallback clocksource
 764 */
 765static void clocksource_suspend_select(bool fallback)
 766{
 767	struct clocksource *cs, *old_suspend;
 768
 769	old_suspend = suspend_clocksource;
 770	if (fallback)
 771		suspend_clocksource = NULL;
 772
 773	list_for_each_entry(cs, &clocksource_list, list) {
 774		/* Skip current if we were requested for a fallback. */
 775		if (fallback && cs == old_suspend)
 776			continue;
 777
 778		__clocksource_suspend_select(cs);
 779	}
 780}
 781
 782/**
 783 * clocksource_start_suspend_timing - Start measuring the suspend timing
 784 * @cs:			current clocksource from timekeeping
 785 * @start_cycles:	current cycles from timekeeping
 786 *
 787 * This function will save the start cycle values of suspend timer to calculate
 788 * the suspend time when resuming system.
 789 *
 790 * This function is called late in the suspend process from timekeeping_suspend(),
 791 * which means processes are frozen, and non-boot CPUs and interrupts are
 792 * disabled. It is therefore possible to start the suspend timer without taking the
 793 * clocksource mutex.
 794 */
 795void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
 796{
 797	if (!suspend_clocksource)
 798		return;
 799
 800	/*
 801	 * If current clocksource is the suspend timer, we should use the
 802	 * tkr_mono.cycle_last value as suspend_start to avoid same reading
 803	 * from suspend timer.
 804	 */
 805	if (clocksource_is_suspend(cs)) {
 806		suspend_start = start_cycles;
 807		return;
 808	}
 809
 810	if (suspend_clocksource->enable &&
 811	    suspend_clocksource->enable(suspend_clocksource)) {
 812		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
 813		return;
 814	}
 815
 816	suspend_start = suspend_clocksource->read(suspend_clocksource);
 817}
 818
 819/**
 820 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 821 * @cs:		current clocksource from timekeeping
 822 * @cycle_now:	current cycles from timekeeping
 823 *
 824 * This function will calculate the suspend time from suspend timer.
 825 *
 826 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 827 *
 828 * This function is called early in the resume process from timekeeping_resume(),
 829 * which means there is only one CPU, no processes are running and interrupts
 830 * are disabled. It is therefore possible to stop the suspend timer without
 831 * taking the clocksource mutex.
 832 */
 833u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
 834{
 835	u64 now, delta, nsec = 0;
 836
 837	if (!suspend_clocksource)
 838		return 0;
 839
 840	/*
 841	 * If current clocksource is the suspend timer, we should use the
 842	 * tkr_mono.cycle_last value from timekeeping as current cycle to
 843	 * avoid same reading from suspend timer.
 844	 */
 845	if (clocksource_is_suspend(cs))
 846		now = cycle_now;
 847	else
 848		now = suspend_clocksource->read(suspend_clocksource);
 849
 850	if (now > suspend_start) {
 851		delta = clocksource_delta(now, suspend_start,
 852					  suspend_clocksource->mask);
 853		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
 854				       suspend_clocksource->shift);
 855	}
 856
 857	/*
 858	 * Disable the suspend timer to save power if current clocksource is
 859	 * not the suspend timer.
 860	 */
 861	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
 862		suspend_clocksource->disable(suspend_clocksource);
 863
 864	return nsec;
 865}
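/*
 * For instance (illustrative numbers): with a 32.768 kHz always-on
 * suspend counter, a resume read 163,840 cycles past suspend_start
 * gives delta = 163,840, and mul_u64_u32_shr(delta, mult, shift)
 * yields roughly 5,000,000,000 ns, i.e. ~5 s spent suspended.
 */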
 866
 867/**
 868 * clocksource_suspend - suspend the clocksource(s)
 869 */
 870void clocksource_suspend(void)
 871{
 872	struct clocksource *cs;
 873
 874	list_for_each_entry_reverse(cs, &clocksource_list, list)
 875		if (cs->suspend)
 876			cs->suspend(cs);
 877}
 878
 879/**
 880 * clocksource_resume - resume the clocksource(s)
 881 */
 882void clocksource_resume(void)
 883{
 884	struct clocksource *cs;
 885
 886	list_for_each_entry(cs, &clocksource_list, list)
 887		if (cs->resume)
 888			cs->resume(cs);
 889
 890	clocksource_resume_watchdog();
 891}
 892
 893/**
 894 * clocksource_touch_watchdog - Update watchdog
 895 *
 896 * Update the watchdog after exception contexts such as kgdb so as not
 897 * to incorrectly trip the watchdog. This might fail when the kernel
 898 * was stopped in code which holds watchdog_lock.
 899 */
 900void clocksource_touch_watchdog(void)
 901{
 902	clocksource_resume_watchdog();
 903}
 904
 905/**
 906 * clocksource_max_adjustment - Returns the maximum adjustment amount
 907 * @cs:         Pointer to clocksource
 908 *
 909 */
 910static u32 clocksource_max_adjustment(struct clocksource *cs)
 911{
 912	u64 ret;
 913	/*
 914	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
 915	 */
 916	ret = (u64)cs->mult * 11;
 917	do_div(ret, 100);
 918	return (u32)ret;
 919}
 920
 921/**
 922 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 923 * @mult:	cycle to nanosecond multiplier
 924 * @shift:	cycle to nanosecond divisor (power of two)
 925 * @maxadj:	maximum adjustment value to mult (~11%)
 926 * @mask:	bitmask for two's complement subtraction of non 64 bit counters
 927 * @max_cyc:	maximum cycle value before potential overflow (does not include
 928 *		any safety margin)
 929 *
 930 * NOTE: This function includes a safety margin of 50%, in other words, we
 931 * return half the number of nanoseconds the hardware counter can technically
 932 * cover. This is done so that we can potentially detect problems caused by
 933 * delayed timers or bad hardware, which might result in time intervals that
 934 * are larger than what the math used can handle without overflows.
 935 */
 936u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
 937{
 938	u64 max_nsecs, max_cycles;
 939
 940	/*
 941	 * Calculate the maximum number of cycles that we can pass to the
 942	 * cyc2ns() function without overflowing a 64-bit result.
 943	 */
 944	max_cycles = ULLONG_MAX;
 945	do_div(max_cycles, mult+maxadj);
 946
 947	/*
 948	 * The actual maximum number of cycles we can defer the clocksource is
 949	 * determined by the minimum of max_cycles and mask.
 950	 * Note: Here we subtract the maxadj to make sure we don't sleep for
 951	 * too long if there's a large negative adjustment.
 952	 */
 953	max_cycles = min(max_cycles, mask);
 954	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
 955
 956	/* return the max_cycles value as well if requested */
 957	if (max_cyc)
 958		*max_cyc = max_cycles;
 959
 960	/* Return 50% of the actual maximum, so we can detect bad values */
 961	max_nsecs >>= 1;
 962
 963	return max_nsecs;
 964}
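/*
 * Worked example (illustrative numbers): for the 19.2 MHz clocksource
 * sketched earlier (mult ~= 873,813,333, shift = 24), maxadj is about
 * 96,119,466 (11%), so:
 *
 *	max_cycles = ULLONG_MAX / (mult + maxadj) ~= 1.9e10 cycles
 *
 * i.e. roughly 990 s at 19.2 MHz. After converting with mult - maxadj
 * and applying the 50% safety margin, max_nsecs comes out around
 * 440 s worth of nanoseconds.
 */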
 965
 966/**
 967 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 968 * @cs:         Pointer to clocksource to be updated
 969 *
 970 */
 971static inline void clocksource_update_max_deferment(struct clocksource *cs)
 972{
 973	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
 974						cs->maxadj, cs->mask,
 975						&cs->max_cycles);
 976}
 977
 978static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
 979{
 980	struct clocksource *cs;
 981
 982	if (!finished_booting || list_empty(&clocksource_list))
 983		return NULL;
 984
 985	/*
 986	 * We pick the clocksource with the highest rating. If oneshot
 987	 * mode is active, we pick the highres valid clocksource with
 988	 * the best rating.
 989	 */
 990	list_for_each_entry(cs, &clocksource_list, list) {
 991		if (skipcur && cs == curr_clocksource)
 992			continue;
 993		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
 994			continue;
 995		return cs;
 996	}
 997	return NULL;
 998}
 999
1000static void __clocksource_select(bool skipcur)
1001{
1002	bool oneshot = tick_oneshot_mode_active();
1003	struct clocksource *best, *cs;
1004
1005	/* Find the best suitable clocksource */
1006	best = clocksource_find_best(oneshot, skipcur);
1007	if (!best)
1008		return;
1009
1010	if (!strlen(override_name))
1011		goto found;
1012
1013	/* Check for the override clocksource. */
1014	list_for_each_entry(cs, &clocksource_list, list) {
1015		if (skipcur && cs == curr_clocksource)
1016			continue;
1017		if (strcmp(cs->name, override_name) != 0)
1018			continue;
1019		/*
1020		 * Check to make sure we don't switch to a non-highres
1021		 * capable clocksource if the tick code is in oneshot
1022		 * mode (highres or nohz)
1023		 */
1024		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
1025			/* Override clocksource cannot be used. */
1026			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
1027				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
1028					cs->name);
1029				override_name[0] = 0;
1030			} else {
1031				/*
1032				 * The override cannot currently be verified.
1033				 * Defer and let the watchdog check it.
1034				 */
1035				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
1036					cs->name);
1037			}
1038		} else
1039			/* Override clocksource can be used. */
1040			best = cs;
1041		break;
1042	}
1043
1044found:
1045	if (curr_clocksource != best && !timekeeping_notify(best)) {
1046		pr_info("Switched to clocksource %s\n", best->name);
1047		curr_clocksource = best;
1048	}
1049}
1050
1051/**
1052 * clocksource_select - Select the best clocksource available
1053 *
1054 * Private function. Must hold clocksource_mutex when called.
1055 *
1056 * Select the clocksource with the best rating, or the clocksource,
1057 * which is selected by userspace override.
1058 */
1059static void clocksource_select(void)
1060{
1061	__clocksource_select(false);
1062}
1063
1064static void clocksource_select_fallback(void)
1065{
1066	__clocksource_select(true);
1067}
1068
1069/*
1070 * clocksource_done_booting - Called near the end of core bootup
1071 *
1072 * Hack to avoid lots of clocksource churn at boot time.
1073 * We use fs_initcall because we want this to start before
1074 * device_initcall but after subsys_initcall.
1075 */
1076static int __init clocksource_done_booting(void)
1077{
1078	mutex_lock(&clocksource_mutex);
1079	curr_clocksource = clocksource_default_clock();
1080	finished_booting = 1;
1081	/*
1082	 * Run the watchdog first to eliminate unstable clock sources
1083	 */
1084	__clocksource_watchdog_kthread();
1085	clocksource_select();
1086	mutex_unlock(&clocksource_mutex);
1087	return 0;
1088}
1089fs_initcall(clocksource_done_booting);
1090
1091/*
1092 * Enqueue the clocksource sorted by rating
1093 */
1094static void clocksource_enqueue(struct clocksource *cs)
1095{
1096	struct list_head *entry = &clocksource_list;
1097	struct clocksource *tmp;
1098
1099	list_for_each_entry(tmp, &clocksource_list, list) {
1100		/* Keep track of the place where to insert */
1101		if (tmp->rating < cs->rating)
1102			break;
1103		entry = &tmp->list;
1104	}
1105	list_add(&cs->list, entry);
1106}
1107
1108/**
1109 * __clocksource_update_freq_scale - Used to update the clocksource with a new frequency
1110 * @cs:		clocksource to be registered
1111 * @scale:	Scale factor multiplied against freq to get clocksource hz
1112 * @freq:	clocksource frequency (cycles per second) divided by scale
1113 *
1114 * This should only be called from the clocksource->enable() method.
1115 *
1116 * This *SHOULD NOT* be called directly! Please use the
1117 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
1118 * functions.
1119 */
1120void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
1121{
1122	u64 sec;
1123
1124	/*
1125	 * Default clocksources are *special* and self-define their mult/shift.
1126	 * But, you're not special, so you should specify a freq value.
1127	 */
1128	if (freq) {
1129		/*
1130		 * Calc the maximum number of seconds which we can run before
1131		 * wrapping around. For clocksources which have a mask > 32-bit
1132		 * we need to limit the max sleep time to have a good
1133		 * conversion precision. 10 minutes is still a reasonable
1134		 * amount. That results in a shift value of 24 for a
1135		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
1136		 * ~ 0.06ppm granularity for NTP.
1137		 */
1138		sec = cs->mask;
1139		do_div(sec, freq);
1140		do_div(sec, scale);
1141		if (!sec)
1142			sec = 1;
1143		else if (sec > 600 && cs->mask > UINT_MAX)
1144			sec = 600;
1145
1146		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
1147				       NSEC_PER_SEC / scale, sec * scale);
1148	}
1149
1150	/*
1151	 * If the uncertainty margin is not specified, calculate it.
1152	 * If both scale and freq are non-zero, calculate the clock
1153	 * period, but bound below at 2*WATCHDOG_MAX_SKEW.  However,
1154	 * if either of scale or freq is zero, be very conservative and
1155	 * take the tens-of-milliseconds WATCHDOG_THRESHOLD value for the
1156	 * uncertainty margin.  Allow stupidly small uncertainty margins
1157	 * to be specified by the caller for testing purposes, but warn
1158	 * to discourage production use of this capability.
1159	 */
1160	if (scale && freq && !cs->uncertainty_margin) {
1161		cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
1162		if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW)
1163			cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW;
1164	} else if (!cs->uncertainty_margin) {
1165		cs->uncertainty_margin = WATCHDOG_THRESHOLD;
1166	}
1167	WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW);
1168
1169	/*
1170	 * Ensure clocksources that have large 'mult' values don't overflow
1171	 * when adjusted.
1172	 */
1173	cs->maxadj = clocksource_max_adjustment(cs);
1174	while (freq && ((cs->mult + cs->maxadj < cs->mult)
1175		|| (cs->mult - cs->maxadj > cs->mult))) {
1176		cs->mult >>= 1;
1177		cs->shift--;
1178		cs->maxadj = clocksource_max_adjustment(cs);
1179	}
1180
1181	/*
1182	 * Only warn for *special* clocksources that self-define
1183	 * their mult/shift values and don't specify a freq.
1184	 */
1185	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
1186		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
1187		cs->name);
1188
1189	clocksource_update_max_deferment(cs);
1190
1191	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
1192		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
1193}
1194EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
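/*
 * Worked example (illustrative): a 19.2 MHz clocksource with a 56-bit
 * mask gives sec = mask / freq ~= 3.75e9 s, clamped to 600 s because
 * mask > UINT_MAX. Its nominal uncertainty margin,
 * NSEC_PER_SEC / freq ~= 52 ns, is far below the 2 * WATCHDOG_MAX_SKEW
 * floor and is therefore raised to 124,000 ns (assuming the 62 us
 * skew limit derived earlier).
 */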
1195
1196/**
1197 * __clocksource_register_scale - Used to install new clocksources
1198 * @cs:		clocksource to be registered
1199 * @scale:	Scale factor multiplied against freq to get clocksource hz
1200 * @freq:	clocksource frequency (cycles per second) divided by scale
1201 *
1202 * Returns -EBUSY if registration fails, zero otherwise.
1203 *
1204 * This *SHOULD NOT* be called directly! Please use the
1205 * clocksource_register_hz() or clocksource_register_khz() helper functions.
1206 */
1207int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
1208{
1209	unsigned long flags;
1210
1211	clocksource_arch_init(cs);
1212
1213	if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX))
1214		cs->id = CSID_GENERIC;
1215	if (cs->vdso_clock_mode < 0 ||
1216	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
1217		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
1218			cs->name, cs->vdso_clock_mode);
1219		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
1220	}
1221
1222	/* Initialize mult/shift and max_idle_ns */
1223	__clocksource_update_freq_scale(cs, scale, freq);
1224
1225	/* Add clocksource to the clocksource list */
1226	mutex_lock(&clocksource_mutex);
1227
1228	clocksource_watchdog_lock(&flags);
1229	clocksource_enqueue(cs);
1230	clocksource_enqueue_watchdog(cs);
1231	clocksource_watchdog_unlock(&flags);
1232
1233	clocksource_select();
1234	clocksource_select_watchdog(false);
1235	__clocksource_suspend_select(cs);
1236	mutex_unlock(&clocksource_mutex);
1237	return 0;
1238}
1239EXPORT_SYMBOL_GPL(__clocksource_register_scale);
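/*
 * A minimal, hypothetical driver registration built on the helper
 * wrappers (foo_* names and the MMIO counter are made up):
 */
#if 0
static void __iomem *foo_counter_base;	/* mapped during probe */

static u64 foo_read(struct clocksource *cs)
{
	return readq(foo_counter_base);	/* free-running HW counter */
}

static struct clocksource foo_cs = {
	.name	= "foo",
	.rating	= 300,
	.read	= foo_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init foo_cs_init(void)
{
	/* clocksource_register_hz() wraps __clocksource_register_scale(cs, 1, hz) */
	return clocksource_register_hz(&foo_cs, 19200000);
}
#endif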
1240
1241static void __clocksource_change_rating(struct clocksource *cs, int rating)
1242{
1243	list_del(&cs->list);
1244	cs->rating = rating;
1245	clocksource_enqueue(cs);
1246}
1247
1248/**
1249 * clocksource_change_rating - Change the rating of a registered clocksource
1250 * @cs:		clocksource to be changed
1251 * @rating:	new rating
1252 */
1253void clocksource_change_rating(struct clocksource *cs, int rating)
1254{
1255	unsigned long flags;
1256
1257	mutex_lock(&clocksource_mutex);
1258	clocksource_watchdog_lock(&flags);
1259	__clocksource_change_rating(cs, rating);
1260	clocksource_watchdog_unlock(&flags);
1261
1262	clocksource_select();
1263	clocksource_select_watchdog(false);
1264	clocksource_suspend_select(false);
1265	mutex_unlock(&clocksource_mutex);
1266}
1267EXPORT_SYMBOL(clocksource_change_rating);
1268
1269/*
1270 * Unbind clocksource @cs. Called with clocksource_mutex held
1271 */
1272static int clocksource_unbind(struct clocksource *cs)
1273{
1274	unsigned long flags;
1275
1276	if (clocksource_is_watchdog(cs)) {
1277		/* Select and try to install a replacement watchdog. */
1278		clocksource_select_watchdog(true);
1279		if (clocksource_is_watchdog(cs))
1280			return -EBUSY;
1281	}
1282
1283	if (cs == curr_clocksource) {
1284		/* Select and try to install a replacement clock source */
1285		clocksource_select_fallback();
1286		if (curr_clocksource == cs)
1287			return -EBUSY;
1288	}
1289
1290	if (clocksource_is_suspend(cs)) {
1291		/*
1292		 * Select and try to install a replacement suspend clocksource.
1293		 * If no replacement suspend clocksource, we will just let the
1294		 * clocksource go and have no suspend clocksource.
1295		 */
1296		clocksource_suspend_select(true);
1297	}
1298
1299	clocksource_watchdog_lock(&flags);
1300	clocksource_dequeue_watchdog(cs);
1301	list_del_init(&cs->list);
1302	clocksource_watchdog_unlock(&flags);
1303
1304	return 0;
1305}
1306
1307/**
1308 * clocksource_unregister - remove a registered clocksource
1309 * @cs:	clocksource to be unregistered
1310 */
1311int clocksource_unregister(struct clocksource *cs)
1312{
1313	int ret = 0;
1314
1315	mutex_lock(&clocksource_mutex);
1316	if (!list_empty(&cs->list))
1317		ret = clocksource_unbind(cs);
1318	mutex_unlock(&clocksource_mutex);
1319	return ret;
1320}
1321EXPORT_SYMBOL(clocksource_unregister);
1322
1323#ifdef CONFIG_SYSFS
1324/**
1325 * current_clocksource_show - sysfs interface for current clocksource
1326 * @dev:	unused
1327 * @attr:	unused
1328 * @buf:	char buffer to be filled with clocksource list
1329 *
1330 * Provides sysfs interface for listing current clocksource.
1331 */
1332static ssize_t current_clocksource_show(struct device *dev,
1333					struct device_attribute *attr,
1334					char *buf)
1335{
1336	ssize_t count = 0;
1337
1338	mutex_lock(&clocksource_mutex);
1339	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
1340	mutex_unlock(&clocksource_mutex);
1341
1342	return count;
1343}
1344
1345ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
1346{
1347	size_t ret = cnt;
1348
1349	/* strings from sysfs write are not 0 terminated! */
1350	if (!cnt || cnt >= CS_NAME_LEN)
1351		return -EINVAL;
1352
1353	/* strip off \n: */
1354	if (buf[cnt-1] == '\n')
1355		cnt--;
1356	if (cnt > 0)
1357		memcpy(dst, buf, cnt);
1358	dst[cnt] = 0;
1359	return ret;
1360}
1361
1362/**
1363 * current_clocksource_store - interface for manually overriding clocksource
1364 * @dev:	unused
1365 * @attr:	unused
1366 * @buf:	name of override clocksource
1367 * @count:	length of buffer
1368 *
1369 * Takes input from sysfs interface for manually overriding the default
1370 * clocksource selection.
1371 */
1372static ssize_t current_clocksource_store(struct device *dev,
1373					 struct device_attribute *attr,
1374					 const char *buf, size_t count)
1375{
1376	ssize_t ret;
1377
1378	mutex_lock(&clocksource_mutex);
1379
1380	ret = sysfs_get_uname(buf, override_name, count);
1381	if (ret >= 0)
1382		clocksource_select();
1383
1384	mutex_unlock(&clocksource_mutex);
1385
1386	return ret;
1387}
1388static DEVICE_ATTR_RW(current_clocksource);
1389
1390/**
1391 * unbind_clocksource_store - interface for manually unbinding clocksource
1392 * @dev:	unused
1393 * @attr:	unused
1394 * @buf:	unused
1395 * @count:	length of buffer
1396 *
1397 * Takes input from sysfs interface for manually unbinding a clocksource.
1398 */
1399static ssize_t unbind_clocksource_store(struct device *dev,
1400					struct device_attribute *attr,
1401					const char *buf, size_t count)
1402{
1403	struct clocksource *cs;
1404	char name[CS_NAME_LEN];
1405	ssize_t ret;
1406
1407	ret = sysfs_get_uname(buf, name, count);
1408	if (ret < 0)
1409		return ret;
1410
1411	ret = -ENODEV;
1412	mutex_lock(&clocksource_mutex);
1413	list_for_each_entry(cs, &clocksource_list, list) {
1414		if (strcmp(cs->name, name))
1415			continue;
1416		ret = clocksource_unbind(cs);
1417		break;
1418	}
1419	mutex_unlock(&clocksource_mutex);
1420
1421	return ret ? ret : count;
1422}
1423static DEVICE_ATTR_WO(unbind_clocksource);
1424
1425/**
1426 * available_clocksource_show - sysfs interface for listing clocksource
1427 * @dev:	unused
1428 * @attr:	unused
1429 * @buf:	char buffer to be filled with clocksource list
1430 *
1431 * Provides sysfs interface for listing registered clocksources
1432 */
1433static ssize_t available_clocksource_show(struct device *dev,
1434					  struct device_attribute *attr,
1435					  char *buf)
1436{
1437	struct clocksource *src;
1438	ssize_t count = 0;
1439
1440	mutex_lock(&clocksource_mutex);
1441	list_for_each_entry(src, &clocksource_list, list) {
1442		/*
1443		 * Don't show non-HRES clocksource if the tick code is
1444		 * in one shot mode (highres=on or nohz=on)
1445		 */
1446		if (!tick_oneshot_mode_active() ||
1447		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
1448			count += snprintf(buf + count,
1449				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
1450				  "%s ", src->name);
1451	}
1452	mutex_unlock(&clocksource_mutex);
1453
1454	count += snprintf(buf + count,
1455			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
1456
1457	return count;
1458}
1459static DEVICE_ATTR_RO(available_clocksource);
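/*
 * These attributes appear under /sys/devices/system/clocksource/clocksource0/
 * (see init_clocksource_sysfs() below). An illustrative shell session:
 *
 *	$ cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	tsc hpet acpi_pm
 *	$ echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	$ echo hpet > /sys/devices/system/clocksource/clocksource0/unbind_clocksource
 */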
1460
1461static struct attribute *clocksource_attrs[] = {
1462	&dev_attr_current_clocksource.attr,
1463	&dev_attr_unbind_clocksource.attr,
1464	&dev_attr_available_clocksource.attr,
1465	NULL
1466};
1467ATTRIBUTE_GROUPS(clocksource);
1468
1469static const struct bus_type clocksource_subsys = {
1470	.name = "clocksource",
1471	.dev_name = "clocksource",
1472};
1473
1474static struct device device_clocksource = {
1475	.id	= 0,
1476	.bus	= &clocksource_subsys,
1477	.groups	= clocksource_groups,
1478};
1479
1480static int __init init_clocksource_sysfs(void)
1481{
1482	int error = subsys_system_register(&clocksource_subsys, NULL);
1483
1484	if (!error)
1485		error = device_register(&device_clocksource);
1486
1487	return error;
1488}
1489
1490device_initcall(init_clocksource_sysfs);
1491#endif /* CONFIG_SYSFS */
1492
1493/**
1494 * boot_override_clocksource - boot clock override
1495 * @str:	override name
1496 *
1497 * Takes a clocksource= boot argument and uses it
1498 * as the clocksource override name.
1499 */
1500static int __init boot_override_clocksource(char* str)
1501{
1502	mutex_lock(&clocksource_mutex);
1503	if (str)
1504		strscpy(override_name, str, sizeof(override_name));
1505	mutex_unlock(&clocksource_mutex);
1506	return 1;
1507}
1508
1509__setup("clocksource=", boot_override_clocksource);
1510
1511/**
1512 * boot_override_clock - Compatibility layer for deprecated boot option
1513 * @str:	override name
1514 *
1515 * DEPRECATED! Takes a clock= boot argument and uses it
1516 * as the clocksource override name
1517 */
1518static int __init boot_override_clock(char* str)
1519{
1520	if (!strcmp(str, "pmtmr")) {
1521		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
1522		return boot_override_clocksource("acpi_pm");
1523	}
1524	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
1525	return boot_override_clocksource(str);
1526}
1527
1528__setup("clock=", boot_override_clock);
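/*
 * Both setups parse the kernel command line, e.g. (illustrative):
 *
 *	clocksource=hpet	stores "hpet" in override_name for clocksource_select()
 *	clock=pmtmr		deprecated; rewritten to clocksource=acpi_pm above
 */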
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * This file contains the functions which manage clocksource drivers.
   4 *
   5 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   9
  10#include <linux/device.h>
  11#include <linux/clocksource.h>
  12#include <linux/init.h>
  13#include <linux/module.h>
  14#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
  15#include <linux/tick.h>
  16#include <linux/kthread.h>
  17#include <linux/prandom.h>
  18#include <linux/cpu.h>
  19
  20#include "tick-internal.h"
  21#include "timekeeping_internal.h"
  22
  23/**
  24 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
  25 * @mult:	pointer to mult variable
  26 * @shift:	pointer to shift variable
  27 * @from:	frequency to convert from
  28 * @to:		frequency to convert to
  29 * @maxsec:	guaranteed runtime conversion range in seconds
  30 *
  31 * The function evaluates the shift/mult pair for the scaled math
  32 * operations of clocksources and clockevents.
  33 *
  34 * @to and @from are frequency values in HZ. For clock sources @to is
  35 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
  36 * event @to is the counter frequency and @from is NSEC_PER_SEC.
  37 *
  38 * The @maxsec conversion range argument controls the time frame in
  39 * seconds which must be covered by the runtime conversion with the
  40 * calculated mult and shift factors. This guarantees that no 64bit
  41 * overflow happens when the input value of the conversion is
  42 * multiplied with the calculated mult factor. Larger ranges may
  43 * reduce the conversion accuracy by choosing smaller mult and shift
  44 * factors.
  45 */
  46void
  47clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
  48{
  49	u64 tmp;
  50	u32 sft, sftacc= 32;
  51
  52	/*
  53	 * Calculate the shift factor which is limiting the conversion
  54	 * range:
  55	 */
  56	tmp = ((u64)maxsec * from) >> 32;
  57	while (tmp) {
  58		tmp >>=1;
  59		sftacc--;
  60	}
  61
  62	/*
  63	 * Find the conversion shift/mult pair which has the best
  64	 * accuracy and fits the maxsec conversion range:
  65	 */
  66	for (sft = 32; sft > 0; sft--) {
  67		tmp = (u64) to << sft;
  68		tmp += from / 2;
  69		do_div(tmp, from);
  70		if ((tmp >> sftacc) == 0)
  71			break;
  72	}
  73	*mult = tmp;
  74	*shift = sft;
  75}
  76EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
  77
  78/*[Clocksource internal variables]---------
  79 * curr_clocksource:
  80 *	currently selected clocksource.
  81 * suspend_clocksource:
  82 *	used to calculate the suspend time.
  83 * clocksource_list:
  84 *	linked list with the registered clocksources
  85 * clocksource_mutex:
  86 *	protects manipulations to curr_clocksource and the clocksource_list
  87 * override_name:
  88 *	Name of the user-specified clocksource.
  89 */
  90static struct clocksource *curr_clocksource;
  91static struct clocksource *suspend_clocksource;
  92static LIST_HEAD(clocksource_list);
  93static DEFINE_MUTEX(clocksource_mutex);
  94static char override_name[CS_NAME_LEN];
  95static int finished_booting;
  96static u64 suspend_start;
  97
  98/*
 
 
 
 
 
 
  99 * Threshold: 0.0312s, when doubled: 0.0625s.
 100 * Also a default for cs->uncertainty_margin when registering clocks.
 101 */
 102#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 5)
 103
 104/*
 105 * Maximum permissible delay between two readouts of the watchdog
 106 * clocksource surrounding a read of the clocksource being validated.
 107 * This delay could be due to SMIs, NMIs, or to VCPU preemptions.  Used as
 108 * a lower bound for cs->uncertainty_margin values when registering clocks.
 
 
 
 109 */
 110#define WATCHDOG_MAX_SKEW (50 * NSEC_PER_USEC)
 
 
 
 
 
 
 111
 112#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 113static void clocksource_watchdog_work(struct work_struct *work);
 114static void clocksource_select(void);
 115
 116static LIST_HEAD(watchdog_list);
 117static struct clocksource *watchdog;
 118static struct timer_list watchdog_timer;
 119static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
 120static DEFINE_SPINLOCK(watchdog_lock);
 121static int watchdog_running;
 122static atomic_t watchdog_reset_pending;
 
 123
 124static inline void clocksource_watchdog_lock(unsigned long *flags)
 125{
 126	spin_lock_irqsave(&watchdog_lock, *flags);
 127}
 128
 129static inline void clocksource_watchdog_unlock(unsigned long *flags)
 130{
 131	spin_unlock_irqrestore(&watchdog_lock, *flags);
 132}
 133
 134static int clocksource_watchdog_kthread(void *data);
 135static void __clocksource_change_rating(struct clocksource *cs, int rating);
 136
 137/*
 138 * Interval: 0.5sec.
 139 */
 140#define WATCHDOG_INTERVAL (HZ >> 1)
 141
 142static void clocksource_watchdog_work(struct work_struct *work)
 143{
 144	/*
 145	 * We cannot directly run clocksource_watchdog_kthread() here, because
 146	 * clocksource_select() calls timekeeping_notify() which uses
 147	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
 148	 * lock inversions wrt CPU hotplug.
 149	 *
 150	 * Also, we only ever run this work once or twice during the lifetime
 151	 * of the kernel, so there is no point in creating a more permanent
 152	 * kthread for this.
 153	 *
 154	 * If kthread_run fails the next watchdog scan over the
 155	 * watchdog_list will find the unstable clock again.
 156	 */
 157	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
 158}
 159
 160static void __clocksource_unstable(struct clocksource *cs)
 161{
 162	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 163	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 164
 165	/*
 166	 * If the clocksource is registered clocksource_watchdog_kthread() will
 167	 * re-rate and re-select.
 168	 */
 169	if (list_empty(&cs->list)) {
 170		cs->rating = 0;
 171		return;
 172	}
 173
 174	if (cs->mark_unstable)
 175		cs->mark_unstable(cs);
 176
 177	/* kick clocksource_watchdog_kthread() */
 178	if (finished_booting)
 179		schedule_work(&watchdog_work);
 180}
 181
 182/**
 183 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 184 * @cs:		clocksource to be marked unstable
 185 *
 186 * This function is called by the x86 TSC code to mark clocksources as unstable;
 187 * it defers demotion and re-selection to a kthread.
 188 */
 189void clocksource_mark_unstable(struct clocksource *cs)
 190{
 191	unsigned long flags;
 192
 193	spin_lock_irqsave(&watchdog_lock, flags);
 194	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
 195		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
 196			list_add(&cs->wd_list, &watchdog_list);
 197		__clocksource_unstable(cs);
 198	}
 199	spin_unlock_irqrestore(&watchdog_lock, flags);
 200}
 201
 202ulong max_cswd_read_retries = 3;
 203module_param(max_cswd_read_retries, ulong, 0644);
 204EXPORT_SYMBOL_GPL(max_cswd_read_retries);
 205static int verify_n_cpus = 8;
 206module_param(verify_n_cpus, int, 0644);
 207
 208static bool cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
 209{
 210	unsigned int nretries;
 211	u64 wd_end, wd_delta;
 212	int64_t wd_delay;
 213
 214	for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) {
 215		local_irq_disable();
 216		*wdnow = watchdog->read(watchdog);
 217		*csnow = cs->read(cs);
 218		wd_end = watchdog->read(watchdog);
 219		local_irq_enable();
 220
 221		wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
 222		wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
 223					      watchdog->shift);
 224		if (wd_delay <= WATCHDOG_MAX_SKEW) {
 225			if (nretries > 1 || nretries >= max_cswd_read_retries) {
 226				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
 227					smp_processor_id(), watchdog->name, nretries);
 228			}
 229			return true;
 230		}
 231	}
 232
 233	pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d, marking unstable\n",
 234		smp_processor_id(), watchdog->name, wd_delay, nretries);
 235	return false;
 236}
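/*
 * Illustrative walk-through of cs_watchdog_read() (hypothetical
 * numbers): suppose the first watchdog read corresponds to t0 =
 * 1,000,000 ns and the read-back after cs->read() to t1 = 1,070,000 ns.
 * The read sandwich took 70,000 ns > WATCHDOG_MAX_SKEW (50,000 ns), so
 * *csnow cannot be trusted to within the margin and the reads are
 * retried. Only if every attempt up to max_cswd_read_retries is that
 * slow (e.g. a storm of SMIs or VCPU preemption) does the function
 * return false and the clocksource get marked unstable.
 */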
 237
 238static u64 csnow_mid;
 239static cpumask_t cpus_ahead;
 240static cpumask_t cpus_behind;
 241static cpumask_t cpus_chosen;
 242
 243static void clocksource_verify_choose_cpus(void)
 244{
 245	int cpu, i, n = verify_n_cpus;
 246
 247	if (n < 0) {
 248		/* Check all of the CPUs. */
 249		cpumask_copy(&cpus_chosen, cpu_online_mask);
 250		cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
 251		return;
 252	}
 253
 254	/* If no checking desired, or no other CPU to check, leave. */
 255	cpumask_clear(&cpus_chosen);
 256	if (n == 0 || num_online_cpus() <= 1)
 257		return;
 258
 259	/* Make sure to select at least one CPU other than the current CPU. */
 260	cpu = cpumask_next(-1, cpu_online_mask);
 261	if (cpu == smp_processor_id())
 262		cpu = cpumask_next(cpu, cpu_online_mask);
 263	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
 264		return;
 265	cpumask_set_cpu(cpu, &cpus_chosen);
 266
 267	/* Force a sane value for the boot parameter. */
 268	if (n > nr_cpu_ids)
 269		n = nr_cpu_ids;
 270
 271	/*
 272	 * Randomly select the specified number of CPUs.  If the same
 273	 * CPU is selected multiple times, that CPU is checked only once,
 274	 * and no replacement CPU is selected.  This gracefully handles
 275	 * situations where verify_n_cpus is greater than the number of
 276	 * CPUs that are currently online.
 277	 */
 278	for (i = 1; i < n; i++) {
 279		cpu = prandom_u32() % nr_cpu_ids;
 280		cpu = cpumask_next(cpu - 1, cpu_online_mask);
 281		if (cpu >= nr_cpu_ids)
 282			cpu = cpumask_next(-1, cpu_online_mask);
 283		if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
 284			cpumask_set_cpu(cpu, &cpus_chosen);
 285	}
 286
 287	/* Don't verify ourselves. */
 288	cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
 289}
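/*
 * Example (hypothetical): with CPUs 0-7 online, the test running on
 * CPU 0 and verify_n_cpus = 3, the code above first forces CPU 1 into
 * cpus_chosen, then makes two random picks, e.g. prandom_u32() %
 * nr_cpu_ids = 5, which cpumask_next(4, cpu_online_mask) maps to
 * CPU 5. Duplicate picks leave the mask unchanged, so between one and
 * three CPUs end up being verified.
 */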
 290
 291static void clocksource_verify_one_cpu(void *csin)
 292{
 293	struct clocksource *cs = (struct clocksource *)csin;
 294
 295	csnow_mid = cs->read(cs);
 296}
 297
 298void clocksource_verify_percpu(struct clocksource *cs)
 299{
 300	int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
 301	u64 csnow_begin, csnow_end;
 302	int cpu, testcpu;
 303	s64 delta;
 304
 305	if (verify_n_cpus == 0)
 306		return;
 307	cpumask_clear(&cpus_ahead);
 308	cpumask_clear(&cpus_behind);
 309	get_online_cpus();
 310	preempt_disable();
 311	clocksource_verify_choose_cpus();
 312	if (cpumask_weight(&cpus_chosen) == 0) {
 313		preempt_enable();
 314		put_online_cpus();
 315		pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
 316		return;
 317	}
 318	testcpu = smp_processor_id();
 319	pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
 320	for_each_cpu(cpu, &cpus_chosen) {
 321		if (cpu == testcpu)
 322			continue;
 323		csnow_begin = cs->read(cs);
 324		smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
 325		csnow_end = cs->read(cs);
 326		delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
 327		if (delta < 0)
 328			cpumask_set_cpu(cpu, &cpus_behind);
 329		delta = (csnow_end - csnow_mid) & cs->mask;
 330		if (delta < 0)
 331			cpumask_set_cpu(cpu, &cpus_ahead);
 332		delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
 333		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
 334		if (cs_nsec > cs_nsec_max)
 335			cs_nsec_max = cs_nsec;
 336		if (cs_nsec < cs_nsec_min)
 337			cs_nsec_min = cs_nsec;
 338	}
 339	preempt_enable();
 340	put_online_cpus();
 341	if (!cpumask_empty(&cpus_ahead))
 342		pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
 343			cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
 344	if (!cpumask_empty(&cpus_behind))
 345		pr_warn("        CPUs %*pbl behind CPU %d for clocksource %s.\n",
 346			cpumask_pr_args(&cpus_behind), testcpu, cs->name);
 347	if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
 348		pr_warn("        CPU %d check durations %lldns - %lldns for clocksource %s.\n",
 349			testcpu, cs_nsec_min, cs_nsec_max, cs->name);
 350}
 351EXPORT_SYMBOL_GPL(clocksource_verify_percpu);
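/*
 * How the ahead/behind test above works: csnow_begin and csnow_end are
 * read on the test CPU around a synchronous read of the same
 * clocksource on the remote CPU (csnow_mid). A well-behaved clock
 * satisfies csnow_begin <= csnow_mid <= csnow_end. If csnow_mid -
 * csnow_begin is negative after masking, the remote CPU reported a
 * time before the test CPU's first read, i.e. its view of the clock is
 * behind; the symmetric test on csnow_end - csnow_mid catches CPUs
 * running ahead.
 */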
 352
 353static void clocksource_watchdog(struct timer_list *unused)
 354{
 355	u64 csnow, wdnow, cslast, wdlast, delta;
 356	int next_cpu, reset_pending;
 357	int64_t wd_nsec, cs_nsec;
 358	struct clocksource *cs;
 359	u32 md;
 360
 361	spin_lock(&watchdog_lock);
 362	if (!watchdog_running)
 363		goto out;
 364
 365	reset_pending = atomic_read(&watchdog_reset_pending);
 366
 367	list_for_each_entry(cs, &watchdog_list, wd_list) {
 368
 369		/* Clocksource already marked unstable? */
 370		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
 371			if (finished_booting)
 372				schedule_work(&watchdog_work);
 373			continue;
 374		}
 375
 376		if (!cs_watchdog_read(cs, &csnow, &wdnow)) {
 377			/* Clock readout unreliable, so give it up. */
 378			__clocksource_unstable(cs);
 379			continue;
 380		}
 381
  382		/* Clocksource initialized? */
 383		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
 384		    atomic_read(&watchdog_reset_pending)) {
 385			cs->flags |= CLOCK_SOURCE_WATCHDOG;
 386			cs->wd_last = wdnow;
 387			cs->cs_last = csnow;
 388			continue;
 389		}
 390
 391		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
 392		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
 393					     watchdog->shift);
 394
 395		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
 396		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
 397		wdlast = cs->wd_last; /* save these in case we print them */
 398		cslast = cs->cs_last;
 399		cs->cs_last = csnow;
 400		cs->wd_last = wdnow;
 401
 402		if (atomic_read(&watchdog_reset_pending))
 403			continue;
 404
 405		/* Check the deviation from the watchdog clocksource. */
 406		md = cs->uncertainty_margin + watchdog->uncertainty_margin;
 407		if (abs(cs_nsec - wd_nsec) > md) {
 408			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
 409				smp_processor_id(), cs->name);
 410			pr_warn("                      '%s' wd_nsec: %lld wd_now: %llx wd_last: %llx mask: %llx\n",
 411				watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);
 412			pr_warn("                      '%s' cs_nsec: %lld cs_now: %llx cs_last: %llx mask: %llx\n",
 413				cs->name, cs_nsec, csnow, cslast, cs->mask);
 414			if (curr_clocksource == cs)
 415				pr_warn("                      '%s' is current clocksource.\n", cs->name);
 416			else if (curr_clocksource)
 417				pr_warn("                      '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
 418			else
 419				pr_warn("                      No current clocksource.\n");
 420			__clocksource_unstable(cs);
 421			continue;
 422		}
 423
 424		if (cs == curr_clocksource && cs->tick_stable)
 425			cs->tick_stable(cs);
 426
 427		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
 428		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
 429		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
 430			/* Mark it valid for high-res. */
 431			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 432
 433			/*
 434			 * clocksource_done_booting() will sort it if
 435			 * finished_booting is not set yet.
 436			 */
 437			if (!finished_booting)
 438				continue;
 439
 440			/*
  441			 * If this is not the current clocksource, let
  442			 * the watchdog thread reselect it. Due to the
  443			 * change to high res, this clocksource might
  444			 * be preferred now. If it is the current
  445			 * clocksource, let the tick code know about
  446			 * that change.
 447			 */
 448			if (cs != curr_clocksource) {
 449				cs->flags |= CLOCK_SOURCE_RESELECT;
 450				schedule_work(&watchdog_work);
 451			} else {
 452				tick_clock_notify();
 453			}
 454		}
 455	}
 456
 457	/*
  458	 * Only clear watchdog_reset_pending when we have done a
  459	 * full cycle through all clocksources.
 460	 */
 461	if (reset_pending)
 462		atomic_dec(&watchdog_reset_pending);
 463
 464	/*
 465	 * Cycle through CPUs to check if the CPUs stay synchronized
 466	 * to each other.
 467	 */
 468	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
 469	if (next_cpu >= nr_cpu_ids)
 470		next_cpu = cpumask_first(cpu_online_mask);
 471
 472	/*
  473	 * Arm the timer if not already pending: this could race with a
  474	 * concurrent clocksource_stop_watchdog()/clocksource_start_watchdog() pair.
 475	 */
 476	if (!timer_pending(&watchdog_timer)) {
 477		watchdog_timer.expires += WATCHDOG_INTERVAL;
 478		add_timer_on(&watchdog_timer, next_cpu);
 479	}
 480out:
 481	spin_unlock(&watchdog_lock);
 482}
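/*
 * Numeric sketch of the skew check above (hypothetical values): over
 * one interval the watchdog advances wd_nsec = 500,000,000 ns while
 * the watched clocksource advances cs_nsec = 500,400,000 ns. With
 * cs->uncertainty_margin = 100,000 ns (2 * WATCHDOG_MAX_SKEW) and
 * watchdog->uncertainty_margin = 31,250,000 ns (WATCHDOG_THRESHOLD),
 * md is ~31.35 ms; the 400,000 ns deviation stays below it and the
 * clocksource survives. A deviation above md marks it unstable.
 */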
 483
 484static inline void clocksource_start_watchdog(void)
 485{
 486	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
 487		return;
 488	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
 489	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
 490	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
 491	watchdog_running = 1;
 492}
 493
 494static inline void clocksource_stop_watchdog(void)
 495{
 496	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
 497		return;
 498	del_timer(&watchdog_timer);
 499	watchdog_running = 0;
 500}
 501
 502static inline void clocksource_reset_watchdog(void)
 503{
 504	struct clocksource *cs;
 505
 506	list_for_each_entry(cs, &watchdog_list, wd_list)
 507		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
 508}
 509
 510static void clocksource_resume_watchdog(void)
 511{
 512	atomic_inc(&watchdog_reset_pending);
 513}
 514
 515static void clocksource_enqueue_watchdog(struct clocksource *cs)
 516{
 517	INIT_LIST_HEAD(&cs->wd_list);
 518
 519	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
 520		/* cs is a clocksource to be watched. */
 521		list_add(&cs->wd_list, &watchdog_list);
 522		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
 523	} else {
 524		/* cs is a watchdog. */
 525		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 526			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 527	}
 528}
 529
 530static void clocksource_select_watchdog(bool fallback)
 531{
 532	struct clocksource *cs, *old_wd;
 533	unsigned long flags;
 534
 535	spin_lock_irqsave(&watchdog_lock, flags);
 536	/* save current watchdog */
 537	old_wd = watchdog;
 538	if (fallback)
 539		watchdog = NULL;
 540
 541	list_for_each_entry(cs, &clocksource_list, list) {
 542		/* cs is a clocksource to be watched. */
 543		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
 544			continue;
 545
 546		/* Skip current if we were requested for a fallback. */
 547		if (fallback && cs == old_wd)
 548			continue;
 549
 550		/* Pick the best watchdog. */
 551		if (!watchdog || cs->rating > watchdog->rating)
 552			watchdog = cs;
 553	}
  554	/* If we failed to find a fallback, restore the old one. */
 555	if (!watchdog)
 556		watchdog = old_wd;
 557
 558	/* If we changed the watchdog we need to reset cycles. */
 559	if (watchdog != old_wd)
 560		clocksource_reset_watchdog();
 561
 562	/* Check if the watchdog timer needs to be started. */
 563	clocksource_start_watchdog();
 564	spin_unlock_irqrestore(&watchdog_lock, flags);
 565}
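/*
 * Example (hypothetical x86-like setup): the TSC registers with
 * CLOCK_SOURCE_MUST_VERIFY and is therefore never chosen by the loop
 * above, while hpet (rating 250) and acpi_pm (rating 200) are
 * eligible; hpet wins on rating and becomes the watchdog the TSC is
 * checked against.
 */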
 566
 567static void clocksource_dequeue_watchdog(struct clocksource *cs)
 568{
 569	if (cs != watchdog) {
 570		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
 571			/* cs is a watched clocksource. */
 572			list_del_init(&cs->wd_list);
 573			/* Check if the watchdog timer needs to be stopped. */
 574			clocksource_stop_watchdog();
 575		}
 576	}
 577}
 578
 579static int __clocksource_watchdog_kthread(void)
 580{
 581	struct clocksource *cs, *tmp;
 582	unsigned long flags;
 583	int select = 0;
 584
 585	/* Do any required per-CPU skew verification. */
 586	if (curr_clocksource &&
 587	    curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
 588	    curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
 589		clocksource_verify_percpu(curr_clocksource);
 590
 591	spin_lock_irqsave(&watchdog_lock, flags);
 592	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
 593		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
 594			list_del_init(&cs->wd_list);
 595			__clocksource_change_rating(cs, 0);
 596			select = 1;
 597		}
 598		if (cs->flags & CLOCK_SOURCE_RESELECT) {
 599			cs->flags &= ~CLOCK_SOURCE_RESELECT;
 600			select = 1;
 601		}
 602	}
 603	/* Check if the watchdog timer needs to be stopped. */
 604	clocksource_stop_watchdog();
 605	spin_unlock_irqrestore(&watchdog_lock, flags);
 606
 607	return select;
 608}
 609
 610static int clocksource_watchdog_kthread(void *data)
 611{
 612	mutex_lock(&clocksource_mutex);
 613	if (__clocksource_watchdog_kthread())
 614		clocksource_select();
 615	mutex_unlock(&clocksource_mutex);
 616	return 0;
 617}
 618
 619static bool clocksource_is_watchdog(struct clocksource *cs)
 620{
 621	return cs == watchdog;
 622}
 623
 624#else /* CONFIG_CLOCKSOURCE_WATCHDOG */
 625
 626static void clocksource_enqueue_watchdog(struct clocksource *cs)
 627{
 628	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 629		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 630}
 631
 632static void clocksource_select_watchdog(bool fallback) { }
 633static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 634static inline void clocksource_resume_watchdog(void) { }
 635static inline int __clocksource_watchdog_kthread(void) { return 0; }
 636static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 637void clocksource_mark_unstable(struct clocksource *cs) { }
 638
 639static inline void clocksource_watchdog_lock(unsigned long *flags) { }
 640static inline void clocksource_watchdog_unlock(unsigned long *flags) { }
 641
 642#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 643
 644static bool clocksource_is_suspend(struct clocksource *cs)
 645{
 646	return cs == suspend_clocksource;
 647}
 648
 649static void __clocksource_suspend_select(struct clocksource *cs)
 650{
 651	/*
 652	 * Skip the clocksource which will be stopped in suspend state.
 653	 */
 654	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
 655		return;
 656
 657	/*
  658	 * A nonstop clocksource can be selected as the suspend clocksource to
  659	 * calculate the suspend time, so it should not supply suspend/resume
  660	 * interfaces that would stop it when the system suspends.
 661	 */
 662	if (cs->suspend || cs->resume) {
 663		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
 664			cs->name);
 665	}
 666
 667	/* Pick the best rating. */
 668	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
 669		suspend_clocksource = cs;
 670}
 671
 672/**
 673 * clocksource_suspend_select - Select the best clocksource for suspend timing
  674 * @fallback:	whether to select a fallback clocksource
 675 */
 676static void clocksource_suspend_select(bool fallback)
 677{
 678	struct clocksource *cs, *old_suspend;
 679
 680	old_suspend = suspend_clocksource;
 681	if (fallback)
 682		suspend_clocksource = NULL;
 683
 684	list_for_each_entry(cs, &clocksource_list, list) {
 685		/* Skip current if we were requested for a fallback. */
 686		if (fallback && cs == old_suspend)
 687			continue;
 688
 689		__clocksource_suspend_select(cs);
 690	}
 691}
 692
 693/**
 694 * clocksource_start_suspend_timing - Start measuring the suspend timing
 695 * @cs:			current clocksource from timekeeping
 696 * @start_cycles:	current cycles from timekeeping
 697 *
  698 * This function saves the start cycle value of the suspend timer, which is
  699 * used to calculate the suspend time when the system resumes.
 700 *
  701 * This function is called late in the suspend process from timekeeping_suspend(),
  702 * which means processes are frozen and non-boot CPUs and interrupts are
  703 * disabled. It is therefore possible to start the suspend timer without taking
  704 * the clocksource mutex.
 705 */
 706void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
 707{
 708	if (!suspend_clocksource)
 709		return;
 710
 711	/*
  712	 * If the current clocksource is the suspend timer, use the
  713	 * tkr_mono.cycle_last value as suspend_start to avoid reading
  714	 * the suspend timer twice.
 715	 */
 716	if (clocksource_is_suspend(cs)) {
 717		suspend_start = start_cycles;
 718		return;
 719	}
 720
 721	if (suspend_clocksource->enable &&
 722	    suspend_clocksource->enable(suspend_clocksource)) {
 723		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
 724		return;
 725	}
 726
 727	suspend_start = suspend_clocksource->read(suspend_clocksource);
 728}
 729
 730/**
 731 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 732 * @cs:		current clocksource from timekeeping
 733 * @cycle_now:	current cycles from timekeeping
 734 *
  735 * This function calculates the suspend time from the suspend timer.
 736 *
 737 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 738 *
  739 * This function is called early in the resume process from timekeeping_resume(),
  740 * which means only one CPU is online, no processes are running and interrupts
  741 * are disabled. It is therefore possible to stop the suspend timer without
 742 * taking the clocksource mutex.
 743 */
 744u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
 745{
 746	u64 now, delta, nsec = 0;
 747
 748	if (!suspend_clocksource)
 749		return 0;
 750
 751	/*
  752	 * If the current clocksource is the suspend timer, use the
  753	 * tkr_mono.cycle_last value from timekeeping as the current cycle to
  754	 * avoid reading the suspend timer twice.
 755	 */
 756	if (clocksource_is_suspend(cs))
 757		now = cycle_now;
 758	else
 759		now = suspend_clocksource->read(suspend_clocksource);
 760
 761	if (now > suspend_start) {
 762		delta = clocksource_delta(now, suspend_start,
 763					  suspend_clocksource->mask);
 764		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
 765				       suspend_clocksource->shift);
 766	}
 767
 768	/*
 769	 * Disable the suspend timer to save power if current clocksource is
 770	 * not the suspend timer.
 771	 */
 772	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
 773		suspend_clocksource->disable(suspend_clocksource);
 774
 775	return nsec;
 776}
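/*
 * Worked example for the conversion above (hypothetical 32.768 kHz
 * suspend counter, one cycle ~= 30,518 ns): with mult/shift chosen by
 * clocks_calc_mult_shift() for that frequency, a delta of 3,276,800
 * cycles yields nsec = mul_u64_u32_shr(delta, mult, shift) ~= 1e11 ns,
 * i.e. roughly 100 seconds of suspend time.
 */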
 777
 778/**
 779 * clocksource_suspend - suspend the clocksource(s)
 780 */
 781void clocksource_suspend(void)
 782{
 783	struct clocksource *cs;
 784
 785	list_for_each_entry_reverse(cs, &clocksource_list, list)
 786		if (cs->suspend)
 787			cs->suspend(cs);
 788}
 789
 790/**
 791 * clocksource_resume - resume the clocksource(s)
 792 */
 793void clocksource_resume(void)
 794{
 795	struct clocksource *cs;
 796
 797	list_for_each_entry(cs, &clocksource_list, list)
 798		if (cs->resume)
 799			cs->resume(cs);
 800
 801	clocksource_resume_watchdog();
 802}
 803
 804/**
 805 * clocksource_touch_watchdog - Update watchdog
 806 *
 807 * Update the watchdog after exception contexts such as kgdb so as not
 808 * to incorrectly trip the watchdog. This might fail when the kernel
 809 * was stopped in code which holds watchdog_lock.
 810 */
 811void clocksource_touch_watchdog(void)
 812{
 813	clocksource_resume_watchdog();
 814}
 815
 816/**
  817 * clocksource_max_adjustment - Returns max adjustment amount
 818 * @cs:         Pointer to clocksource
 819 *
 820 */
 821static u32 clocksource_max_adjustment(struct clocksource *cs)
 822{
 823	u64 ret;
 824	/*
  825	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
 826	 */
 827	ret = (u64)cs->mult * 11;
 828	do_div(ret,100);
 829	return (u32)ret;
 830}
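/*
 * Example: for mult = 4,194,304 (2^22), maxadj = 4,194,304 * 11 / 100
 * = 461,373, i.e. frequency corrections applied by NTP are clamped to
 * about +/- 11% of the nominal cycle-to-nanosecond ratio.
 */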
 831
 832/**
 833 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 834 * @mult:	cycle to nanosecond multiplier
 835 * @shift:	cycle to nanosecond divisor (power of two)
 836 * @maxadj:	maximum adjustment value to mult (~11%)
 837 * @mask:	bitmask for two's complement subtraction of non 64 bit counters
 838 * @max_cyc:	maximum cycle value before potential overflow (does not include
 839 *		any safety margin)
 840 *
 841 * NOTE: This function includes a safety margin of 50%, in other words, we
 842 * return half the number of nanoseconds the hardware counter can technically
 843 * cover. This is done so that we can potentially detect problems caused by
 844 * delayed timers or bad hardware, which might result in time intervals that
 845 * are larger than what the math used can handle without overflows.
 846 */
 847u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
 848{
 849	u64 max_nsecs, max_cycles;
 850
 851	/*
 852	 * Calculate the maximum number of cycles that we can pass to the
 853	 * cyc2ns() function without overflowing a 64-bit result.
 854	 */
 855	max_cycles = ULLONG_MAX;
 856	do_div(max_cycles, mult+maxadj);
 857
 858	/*
  859	 * The actual maximum number of cycles that we can defer the clocksource
  860	 * for is determined by the minimum of max_cycles and mask.
 861	 * Note: Here we subtract the maxadj to make sure we don't sleep for
 862	 * too long if there's a large negative adjustment.
 863	 */
 864	max_cycles = min(max_cycles, mask);
 865	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
 866
 867	/* return the max_cycles value as well if requested */
 868	if (max_cyc)
 869		*max_cyc = max_cycles;
 870
 871	/* Return 50% of the actual maximum, so we can detect bad values */
 872	max_nsecs >>= 1;
 873
 874	return max_nsecs;
 875}
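/*
 * Worked example (hypothetical 1 GHz counter with mult = 2^22 and
 * shift = 22, so one cycle is one nanosecond): maxadj ~= 461,373 (11%
 * of mult), max_cycles = ULLONG_MAX / (mult + maxadj) ~= 3.96e12
 * cycles, which is below a 64-bit mask and therefore limiting.
 * Converting with (mult - maxadj) gives ~3.5e12 ns, and after the 50%
 * safety margin the reported maximum deferment is ~1.76e12 ns, i.e.
 * roughly 29 minutes.
 */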
 876
 877/**
 878 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 879 * @cs:         Pointer to clocksource to be updated
 880 *
 881 */
 882static inline void clocksource_update_max_deferment(struct clocksource *cs)
 883{
 884	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
 885						cs->maxadj, cs->mask,
 886						&cs->max_cycles);
 887}
 888
 889static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
 890{
 891	struct clocksource *cs;
 892
 893	if (!finished_booting || list_empty(&clocksource_list))
 894		return NULL;
 895
 896	/*
 897	 * We pick the clocksource with the highest rating. If oneshot
 898	 * mode is active, we pick the highres valid clocksource with
 899	 * the best rating.
 900	 */
 901	list_for_each_entry(cs, &clocksource_list, list) {
 902		if (skipcur && cs == curr_clocksource)
 903			continue;
 904		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
 905			continue;
 906		return cs;
 907	}
 908	return NULL;
 909}
 910
 911static void __clocksource_select(bool skipcur)
 912{
 913	bool oneshot = tick_oneshot_mode_active();
 914	struct clocksource *best, *cs;
 915
 916	/* Find the best suitable clocksource */
 917	best = clocksource_find_best(oneshot, skipcur);
 918	if (!best)
 919		return;
 920
 921	if (!strlen(override_name))
 922		goto found;
 923
 924	/* Check for the override clocksource. */
 925	list_for_each_entry(cs, &clocksource_list, list) {
 926		if (skipcur && cs == curr_clocksource)
 927			continue;
 928		if (strcmp(cs->name, override_name) != 0)
 929			continue;
 930		/*
 931		 * Check to make sure we don't switch to a non-highres
 932		 * capable clocksource if the tick code is in oneshot
 933		 * mode (highres or nohz)
 934		 */
 935		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
 936			/* Override clocksource cannot be used. */
 937			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
 938				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
 939					cs->name);
 940				override_name[0] = 0;
 941			} else {
 942				/*
 943				 * The override cannot be currently verified.
 944				 * Deferring to let the watchdog check.
 945				 */
 946				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
 947					cs->name);
 948			}
 949		} else
 950			/* Override clocksource can be used. */
 951			best = cs;
 952		break;
 953	}
 954
 955found:
 956	if (curr_clocksource != best && !timekeeping_notify(best)) {
 957		pr_info("Switched to clocksource %s\n", best->name);
 958		curr_clocksource = best;
 959	}
 960}
 961
 962/**
 963 * clocksource_select - Select the best clocksource available
 964 *
 965 * Private function. Must hold clocksource_mutex when called.
 966 *
 967 * Select the clocksource with the best rating, or the clocksource,
 968 * which is selected by userspace override.
 969 */
 970static void clocksource_select(void)
 971{
 972	__clocksource_select(false);
 973}
 974
 975static void clocksource_select_fallback(void)
 976{
 977	__clocksource_select(true);
 978}
 979
 980/*
 981 * clocksource_done_booting - Called near the end of core bootup
 982 *
 983 * Hack to avoid lots of clocksource churn at boot time.
 984 * We use fs_initcall because we want this to start before
 985 * device_initcall but after subsys_initcall.
 986 */
 987static int __init clocksource_done_booting(void)
 988{
 989	mutex_lock(&clocksource_mutex);
 990	curr_clocksource = clocksource_default_clock();
 991	finished_booting = 1;
 992	/*
 993	 * Run the watchdog first to eliminate unstable clock sources
 994	 */
 995	__clocksource_watchdog_kthread();
 996	clocksource_select();
 997	mutex_unlock(&clocksource_mutex);
 998	return 0;
 999}
1000fs_initcall(clocksource_done_booting);
1001
1002/*
1003 * Enqueue the clocksource sorted by rating
1004 */
1005static void clocksource_enqueue(struct clocksource *cs)
1006{
1007	struct list_head *entry = &clocksource_list;
1008	struct clocksource *tmp;
1009
1010	list_for_each_entry(tmp, &clocksource_list, list) {
 1011		/* Keep track of the place where to insert */
1012		if (tmp->rating < cs->rating)
1013			break;
1014		entry = &tmp->list;
1015	}
1016	list_add(&cs->list, entry);
1017}
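/*
 * Example: with a list holding ratings [400, 300, 100], registering a
 * clocksource rated 350 walks past the 400 entry, breaks at the 300
 * entry and inserts between the two, keeping the list sorted so that
 * clocksource_find_best() can simply return the first usable entry.
 */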
1018
1019/**
 1020 * __clocksource_update_freq_scale - Used to update a clocksource with a new freq
1021 * @cs:		clocksource to be registered
1022 * @scale:	Scale factor multiplied against freq to get clocksource hz
1023 * @freq:	clocksource frequency (cycles per second) divided by scale
1024 *
1025 * This should only be called from the clocksource->enable() method.
1026 *
1027 * This *SHOULD NOT* be called directly! Please use the
1028 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
1029 * functions.
1030 */
1031void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
1032{
1033	u64 sec;
1034
1035	/*
1036	 * Default clocksources are *special* and self-define their mult/shift.
1037	 * But, you're not special, so you should specify a freq value.
1038	 */
1039	if (freq) {
1040		/*
1041		 * Calc the maximum number of seconds which we can run before
1042		 * wrapping around. For clocksources which have a mask > 32-bit
1043		 * we need to limit the max sleep time to have a good
1044		 * conversion precision. 10 minutes is still a reasonable
1045		 * amount. That results in a shift value of 24 for a
1046		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
1047		 * ~ 0.06ppm granularity for NTP.
1048		 */
1049		sec = cs->mask;
1050		do_div(sec, freq);
1051		do_div(sec, scale);
1052		if (!sec)
1053			sec = 1;
1054		else if (sec > 600 && cs->mask > UINT_MAX)
1055			sec = 600;
1056
1057		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
1058				       NSEC_PER_SEC / scale, sec * scale);
1059	}
1060
1061	/*
1062	 * If the uncertainty margin is not specified, calculate it.
1063	 * If both scale and freq are non-zero, calculate the clock
1064	 * period, but bound below at 2*WATCHDOG_MAX_SKEW.  However,
1065	 * if either of scale or freq is zero, be very conservative and
1066	 * take the tens-of-milliseconds WATCHDOG_THRESHOLD value for the
1067	 * uncertainty margin.  Allow stupidly small uncertainty margins
1068	 * to be specified by the caller for testing purposes, but warn
1069	 * to discourage production use of this capability.
1070	 */
1071	if (scale && freq && !cs->uncertainty_margin) {
1072		cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
1073		if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW)
1074			cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW;
1075	} else if (!cs->uncertainty_margin) {
1076		cs->uncertainty_margin = WATCHDOG_THRESHOLD;
1077	}
1078	WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW);
1079
1080	/*
1081	 * Ensure clocksources that have large 'mult' values don't overflow
1082	 * when adjusted.
1083	 */
1084	cs->maxadj = clocksource_max_adjustment(cs);
1085	while (freq && ((cs->mult + cs->maxadj < cs->mult)
1086		|| (cs->mult - cs->maxadj > cs->mult))) {
1087		cs->mult >>= 1;
1088		cs->shift--;
1089		cs->maxadj = clocksource_max_adjustment(cs);
1090	}
1091
1092	/*
1093	 * Only warn for *special* clocksources that self-define
1094	 * their mult/shift values and don't specify a freq.
1095	 */
1096	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
1097		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
1098		cs->name);
1099
1100	clocksource_update_max_deferment(cs);
1101
1102	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
1103		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
1104}
1105EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
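/*
 * Worked example (hypothetical 56-bit counter registered via
 * clocksource_register_hz(cs, 19200000)): sec = (2^56 - 1) / 19.2e6 is
 * far above 600, so it is capped at 600 and clocks_calc_mult_shift()
 * solves ns = (cycles * mult) >> shift for the ~52.083 ns cycle period
 * over a 10 minute range. The computed uncertainty margin,
 * NSEC_PER_SEC / 19,200,000 ~= 52 ns, is below 2 * WATCHDOG_MAX_SKEW
 * and is therefore raised to 100 us.
 */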
1106
1107/**
1108 * __clocksource_register_scale - Used to install new clocksources
1109 * @cs:		clocksource to be registered
1110 * @scale:	Scale factor multiplied against freq to get clocksource hz
1111 * @freq:	clocksource frequency (cycles per second) divided by scale
1112 *
1113 * Returns -EBUSY if registration fails, zero otherwise.
1114 *
1115 * This *SHOULD NOT* be called directly! Please use the
 1116 * clocksource_register_hz() or clocksource_register_khz() helper functions.
1117 */
1118int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
1119{
1120	unsigned long flags;
1121
1122	clocksource_arch_init(cs);
1123
1124	if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX))
1125		cs->id = CSID_GENERIC;
1126	if (cs->vdso_clock_mode < 0 ||
1127	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
1128		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
1129			cs->name, cs->vdso_clock_mode);
1130		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
1131	}
1132
1133	/* Initialize mult/shift and max_idle_ns */
1134	__clocksource_update_freq_scale(cs, scale, freq);
1135
1136	/* Add clocksource to the clocksource list */
1137	mutex_lock(&clocksource_mutex);
1138
1139	clocksource_watchdog_lock(&flags);
1140	clocksource_enqueue(cs);
1141	clocksource_enqueue_watchdog(cs);
1142	clocksource_watchdog_unlock(&flags);
1143
1144	clocksource_select();
1145	clocksource_select_watchdog(false);
1146	__clocksource_suspend_select(cs);
1147	mutex_unlock(&clocksource_mutex);
1148	return 0;
1149}
1150EXPORT_SYMBOL_GPL(__clocksource_register_scale);
1151
1152static void __clocksource_change_rating(struct clocksource *cs, int rating)
1153{
1154	list_del(&cs->list);
1155	cs->rating = rating;
1156	clocksource_enqueue(cs);
1157}
1158
1159/**
1160 * clocksource_change_rating - Change the rating of a registered clocksource
1161 * @cs:		clocksource to be changed
1162 * @rating:	new rating
1163 */
1164void clocksource_change_rating(struct clocksource *cs, int rating)
1165{
1166	unsigned long flags;
1167
1168	mutex_lock(&clocksource_mutex);
1169	clocksource_watchdog_lock(&flags);
1170	__clocksource_change_rating(cs, rating);
1171	clocksource_watchdog_unlock(&flags);
1172
1173	clocksource_select();
1174	clocksource_select_watchdog(false);
1175	clocksource_suspend_select(false);
1176	mutex_unlock(&clocksource_mutex);
1177}
1178EXPORT_SYMBOL(clocksource_change_rating);
1179
1180/*
1181 * Unbind clocksource @cs. Called with clocksource_mutex held
1182 */
1183static int clocksource_unbind(struct clocksource *cs)
1184{
1185	unsigned long flags;
1186
1187	if (clocksource_is_watchdog(cs)) {
1188		/* Select and try to install a replacement watchdog. */
1189		clocksource_select_watchdog(true);
1190		if (clocksource_is_watchdog(cs))
1191			return -EBUSY;
1192	}
1193
1194	if (cs == curr_clocksource) {
1195		/* Select and try to install a replacement clock source */
1196		clocksource_select_fallback();
1197		if (curr_clocksource == cs)
1198			return -EBUSY;
1199	}
1200
1201	if (clocksource_is_suspend(cs)) {
1202		/*
1203		 * Select and try to install a replacement suspend clocksource.
1204		 * If no replacement suspend clocksource, we will just let the
1205		 * clocksource go and have no suspend clocksource.
1206		 */
1207		clocksource_suspend_select(true);
1208	}
1209
1210	clocksource_watchdog_lock(&flags);
1211	clocksource_dequeue_watchdog(cs);
1212	list_del_init(&cs->list);
1213	clocksource_watchdog_unlock(&flags);
1214
1215	return 0;
1216}
1217
1218/**
1219 * clocksource_unregister - remove a registered clocksource
1220 * @cs:	clocksource to be unregistered
1221 */
1222int clocksource_unregister(struct clocksource *cs)
1223{
1224	int ret = 0;
1225
1226	mutex_lock(&clocksource_mutex);
1227	if (!list_empty(&cs->list))
1228		ret = clocksource_unbind(cs);
1229	mutex_unlock(&clocksource_mutex);
1230	return ret;
1231}
1232EXPORT_SYMBOL(clocksource_unregister);
1233
1234#ifdef CONFIG_SYSFS
1235/**
1236 * current_clocksource_show - sysfs interface for current clocksource
1237 * @dev:	unused
1238 * @attr:	unused
 1239 * @buf:	char buffer to be filled with the current clocksource name
 1240 *
 1241 * Provides sysfs interface for showing the current clocksource.
1242 */
1243static ssize_t current_clocksource_show(struct device *dev,
1244					struct device_attribute *attr,
1245					char *buf)
1246{
1247	ssize_t count = 0;
1248
1249	mutex_lock(&clocksource_mutex);
1250	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
1251	mutex_unlock(&clocksource_mutex);
1252
1253	return count;
1254}
1255
1256ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
1257{
1258	size_t ret = cnt;
1259
1260	/* strings from sysfs write are not 0 terminated! */
1261	if (!cnt || cnt >= CS_NAME_LEN)
1262		return -EINVAL;
1263
 1264	/* strip off trailing \n: */
1265	if (buf[cnt-1] == '\n')
1266		cnt--;
1267	if (cnt > 0)
1268		memcpy(dst, buf, cnt);
1269	dst[cnt] = 0;
1270	return ret;
1271}
1272
1273/**
1274 * current_clocksource_store - interface for manually overriding clocksource
1275 * @dev:	unused
1276 * @attr:	unused
1277 * @buf:	name of override clocksource
1278 * @count:	length of buffer
1279 *
1280 * Takes input from sysfs interface for manually overriding the default
1281 * clocksource selection.
1282 */
1283static ssize_t current_clocksource_store(struct device *dev,
1284					 struct device_attribute *attr,
1285					 const char *buf, size_t count)
1286{
1287	ssize_t ret;
1288
1289	mutex_lock(&clocksource_mutex);
1290
1291	ret = sysfs_get_uname(buf, override_name, count);
1292	if (ret >= 0)
1293		clocksource_select();
1294
1295	mutex_unlock(&clocksource_mutex);
1296
1297	return ret;
1298}
1299static DEVICE_ATTR_RW(current_clocksource);
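/*
 * Typical use of the attribute above from user space (illustrative
 * output):
 *
 *   $ cat /sys/devices/system/clocksource/clocksource0/current_clocksource
 *   tsc
 *   $ echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *
 * The write lands in current_clocksource_store() and triggers
 * clocksource_select().
 */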
1300
1301/**
1302 * unbind_clocksource_store - interface for manually unbinding clocksource
1303 * @dev:	unused
1304 * @attr:	unused
 1305 * @buf:	name of the clocksource to unbind
1306 * @count:	length of buffer
1307 *
1308 * Takes input from sysfs interface for manually unbinding a clocksource.
1309 */
1310static ssize_t unbind_clocksource_store(struct device *dev,
1311					struct device_attribute *attr,
1312					const char *buf, size_t count)
1313{
1314	struct clocksource *cs;
1315	char name[CS_NAME_LEN];
1316	ssize_t ret;
1317
1318	ret = sysfs_get_uname(buf, name, count);
1319	if (ret < 0)
1320		return ret;
1321
1322	ret = -ENODEV;
1323	mutex_lock(&clocksource_mutex);
1324	list_for_each_entry(cs, &clocksource_list, list) {
1325		if (strcmp(cs->name, name))
1326			continue;
1327		ret = clocksource_unbind(cs);
1328		break;
1329	}
1330	mutex_unlock(&clocksource_mutex);
1331
1332	return ret ? ret : count;
1333}
1334static DEVICE_ATTR_WO(unbind_clocksource);
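/*
 * Example (illustrative): unbinding removes a clocksource entirely
 * rather than merely overriding the selection:
 *
 *   $ echo acpi_pm > /sys/devices/system/clocksource/clocksource0/unbind_clocksource
 *
 * The write ends up in clocksource_unbind(), which fails with -EBUSY
 * if no replacement can be installed.
 */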
1335
1336/**
1337 * available_clocksource_show - sysfs interface for listing clocksource
1338 * @dev:	unused
1339 * @attr:	unused
1340 * @buf:	char buffer to be filled with clocksource list
1341 *
1342 * Provides sysfs interface for listing registered clocksources
1343 */
1344static ssize_t available_clocksource_show(struct device *dev,
1345					  struct device_attribute *attr,
1346					  char *buf)
1347{
1348	struct clocksource *src;
1349	ssize_t count = 0;
1350
1351	mutex_lock(&clocksource_mutex);
1352	list_for_each_entry(src, &clocksource_list, list) {
1353		/*
1354		 * Don't show non-HRES clocksource if the tick code is
1355		 * in one shot mode (highres=on or nohz=on)
1356		 */
1357		if (!tick_oneshot_mode_active() ||
1358		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
1359			count += snprintf(buf + count,
1360				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
1361				  "%s ", src->name);
1362	}
1363	mutex_unlock(&clocksource_mutex);
1364
1365	count += snprintf(buf + count,
1366			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
1367
1368	return count;
1369}
1370static DEVICE_ATTR_RO(available_clocksource);
1371
1372static struct attribute *clocksource_attrs[] = {
1373	&dev_attr_current_clocksource.attr,
1374	&dev_attr_unbind_clocksource.attr,
1375	&dev_attr_available_clocksource.attr,
1376	NULL
1377};
1378ATTRIBUTE_GROUPS(clocksource);
1379
1380static struct bus_type clocksource_subsys = {
1381	.name = "clocksource",
1382	.dev_name = "clocksource",
1383};
1384
1385static struct device device_clocksource = {
1386	.id	= 0,
1387	.bus	= &clocksource_subsys,
1388	.groups	= clocksource_groups,
1389};
1390
1391static int __init init_clocksource_sysfs(void)
1392{
1393	int error = subsys_system_register(&clocksource_subsys, NULL);
1394
1395	if (!error)
1396		error = device_register(&device_clocksource);
1397
1398	return error;
1399}
1400
1401device_initcall(init_clocksource_sysfs);
1402#endif /* CONFIG_SYSFS */
1403
1404/**
1405 * boot_override_clocksource - boot clock override
1406 * @str:	override name
1407 *
1408 * Takes a clocksource= boot argument and uses it
1409 * as the clocksource override name.
1410 */
1411static int __init boot_override_clocksource(char* str)
1412{
1413	mutex_lock(&clocksource_mutex);
1414	if (str)
1415		strlcpy(override_name, str, sizeof(override_name));
1416	mutex_unlock(&clocksource_mutex);
1417	return 1;
1418}
1419
1420__setup("clocksource=", boot_override_clocksource);
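/*
 * Example: booting with "clocksource=hpet" on the kernel command line
 * stores "hpet" in override_name here; __clocksource_select() then
 * prefers it over higher-rated clocksources, provided it is usable in
 * the current tick mode.
 */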
1421
1422/**
1423 * boot_override_clock - Compatibility layer for deprecated boot option
1424 * @str:	override name
1425 *
1426 * DEPRECATED! Takes a clock= boot argument and uses it
1427 * as the clocksource override name
1428 */
1429static int __init boot_override_clock(char* str)
1430{
1431	if (!strcmp(str, "pmtmr")) {
1432		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
1433		return boot_override_clocksource("acpi_pm");
1434	}
1435	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
1436	return boot_override_clocksource(str);
1437}
1438
1439__setup("clock=", boot_override_clock);