   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  Kernel timekeeping code and accessor functions. Based on code from
   4 *  timer.c, moved in commit 8524070b7982.
   5 */
   6#include <linux/timekeeper_internal.h>
   7#include <linux/module.h>
   8#include <linux/interrupt.h>
   9#include <linux/percpu.h>
  10#include <linux/init.h>
  11#include <linux/mm.h>
  12#include <linux/nmi.h>
  13#include <linux/sched.h>
  14#include <linux/sched/loadavg.h>
  15#include <linux/sched/clock.h>
  16#include <linux/syscore_ops.h>
  17#include <linux/clocksource.h>
  18#include <linux/jiffies.h>
  19#include <linux/time.h>
  20#include <linux/timex.h>
  21#include <linux/tick.h>
  22#include <linux/stop_machine.h>
  23#include <linux/pvclock_gtod.h>
  24#include <linux/compiler.h>
  25#include <linux/audit.h>
  26#include <linux/random.h>
  27
  28#include "tick-internal.h"
  29#include "ntp_internal.h"
  30#include "timekeeping_internal.h"
  31
  32#define TK_CLEAR_NTP		(1 << 0)
  33#define TK_MIRROR		(1 << 1)
  34#define TK_CLOCK_WAS_SET	(1 << 2)
  35
  36enum timekeeping_adv_mode {
  37	/* Update timekeeper when a tick has passed */
  38	TK_ADV_TICK,
  39
  40	/* Update timekeeper on a direct frequency change */
  41	TK_ADV_FREQ
  42};
  43
  44DEFINE_RAW_SPINLOCK(timekeeper_lock);
  45
  46/*
  47 * The most important data for readout fits into a single 64 byte
  48 * cache line.
  49 */
  50static struct {
  51	seqcount_raw_spinlock_t	seq;
  52	struct timekeeper	timekeeper;
  53} tk_core ____cacheline_aligned = {
  54	.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
  55};
  56
  57static struct timekeeper shadow_timekeeper;
  58
  59/* Flag indicating whether timekeeping is suspended */
  60int __read_mostly timekeeping_suspended;
  61
  62/**
  63 * struct tk_fast - NMI safe timekeeper
  64 * @seq:	Sequence counter for protecting updates. The lowest bit
  65 *		is the index for the tk_read_base array
  66 * @base:	tk_read_base array. Access is indexed by the lowest bit of
  67 *		@seq.
  68 *
  69 * See @update_fast_timekeeper() below.
  70 */
  71struct tk_fast {
  72	seqcount_latch_t	seq;
  73	struct tk_read_base	base[2];
  74};
  75
  76/* Suspend-time cycles value for halted fast timekeeper. */
  77static u64 cycles_at_suspend;
  78
  79static u64 dummy_clock_read(struct clocksource *cs)
  80{
  81	if (timekeeping_suspended)
  82		return cycles_at_suspend;
  83	return local_clock();
  84}
  85
  86static struct clocksource dummy_clock = {
  87	.read = dummy_clock_read,
  88};
  89
  90/*
  91 * Boot time initialization which allows local_clock() to be utilized
  92 * during early boot when clocksources are not available. local_clock()
  93 * returns nanoseconds already so no conversion is required, hence mult=1
  94 * and shift=0. When the first proper clocksource is installed, the
  95 * fast time keepers are updated with the correct values.
  96 */
  97#define FAST_TK_INIT						\
  98	{							\
  99		.clock		= &dummy_clock,			\
 100		.mask		= CLOCKSOURCE_MASK(64),		\
 101		.mult		= 1,				\
 102		.shift		= 0,				\
 103	}
 104
 105static struct tk_fast tk_fast_mono ____cacheline_aligned = {
 106	.seq     = SEQCNT_LATCH_ZERO(tk_fast_mono.seq),
 107	.base[0] = FAST_TK_INIT,
 108	.base[1] = FAST_TK_INIT,
 109};
 110
 111static struct tk_fast tk_fast_raw  ____cacheline_aligned = {
 112	.seq     = SEQCNT_LATCH_ZERO(tk_fast_raw.seq),
 113	.base[0] = FAST_TK_INIT,
 114	.base[1] = FAST_TK_INIT,
 115};
 116
 117static inline void tk_normalize_xtime(struct timekeeper *tk)
 118{
 119	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
 120		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
 121		tk->xtime_sec++;
 122	}
 123	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
 124		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
 125		tk->raw_sec++;
 126	}
 127}
 128
 129static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
 130{
 131	struct timespec64 ts;
 132
 133	ts.tv_sec = tk->xtime_sec;
 134	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
 135	return ts;
 136}
 137
 138static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
 139{
 140	tk->xtime_sec = ts->tv_sec;
 141	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
 142}
 143
 144static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
 145{
 146	tk->xtime_sec += ts->tv_sec;
 147	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
 148	tk_normalize_xtime(tk);
 149}
 150
 151static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
 152{
 153	struct timespec64 tmp;
 154
 155	/*
 156	 * Verify consistency of: offset_real = -wall_to_monotonic
 157	 * before modifying anything
 158	 */
 159	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
 160					-tk->wall_to_monotonic.tv_nsec);
 161	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
 162	tk->wall_to_monotonic = wtm;
 163	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
 164	tk->offs_real = timespec64_to_ktime(tmp);
 165	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
 166}
 167
 168static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 169{
 170	tk->offs_boot = ktime_add(tk->offs_boot, delta);
 171	/*
 172	 * Timespec representation for VDSO update to avoid 64bit division
 173	 * on every update.
 174	 */
 175	tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
 176}
 177
 178/*
 179 * tk_clock_read - atomic clocksource read() helper
 180 *
 181 * This helper is necessary in the read paths because, while the
 182 * seqcount ensures we don't return a bad value while structures are updated,
 183 * it doesn't protect from potential crashes. There is the possibility that
 184 * the tkr's clocksource may change between the read reference, and the
 185 * clock reference passed to the read function.  This can cause crashes if
 186 * the wrong clocksource is passed to the wrong read function.
 187 * This isn't necessary when holding the timekeeper_lock or doing
 188 * a read of the fast-timekeeper tkrs (which is protected by its own locking
 189 * and update logic).
 190 */
 191static inline u64 tk_clock_read(const struct tk_read_base *tkr)
 192{
 193	struct clocksource *clock = READ_ONCE(tkr->clock);
 194
 195	return clock->read(clock);
 196}
 197
 198#ifdef CONFIG_DEBUG_TIMEKEEPING
 199#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
 200
 201static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
 202{
 203
 204	u64 max_cycles = tk->tkr_mono.clock->max_cycles;
 205	const char *name = tk->tkr_mono.clock->name;
 206
 207	if (offset > max_cycles) {
 208		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
 209				offset, name, max_cycles);
 210		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
 211	} else {
 212		if (offset > (max_cycles >> 1)) {
 213			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
 214					offset, name, max_cycles >> 1);
 215			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
 216		}
 217	}
 218
 219	if (tk->underflow_seen) {
 220		if (jiffies - tk->last_warning > WARNING_FREQ) {
 221			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
 222			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
 223			printk_deferred("         Your kernel is probably still fine.\n");
 224			tk->last_warning = jiffies;
 225		}
 226		tk->underflow_seen = 0;
 227	}
 228
 229	if (tk->overflow_seen) {
 230		if (jiffies - tk->last_warning > WARNING_FREQ) {
 231			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
 232			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
 233			printk_deferred("         Your kernel is probably still fine.\n");
 234			tk->last_warning = jiffies;
 235		}
 236		tk->overflow_seen = 0;
 237	}
 238}
 239
 240static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 241{
 242	struct timekeeper *tk = &tk_core.timekeeper;
 243	u64 now, last, mask, max, delta;
 244	unsigned int seq;
 245
 246	/*
 247	 * Since we're called holding a seqcount, the data may shift
 248	 * under us while we're doing the calculation. This can cause
 249	 * false positives, since we'd note a problem but throw the
 250	 * results away. So nest another seqcount here to atomically
 251	 * grab the points we are checking with.
 252	 */
 253	do {
 254		seq = read_seqcount_begin(&tk_core.seq);
 255		now = tk_clock_read(tkr);
 256		last = tkr->cycle_last;
 257		mask = tkr->mask;
 258		max = tkr->clock->max_cycles;
 259	} while (read_seqcount_retry(&tk_core.seq, seq));
 260
 261	delta = clocksource_delta(now, last, mask);
 262
 263	/*
 264	 * Try to catch underflows by checking if we are seeing small
 265	 * mask-relative negative values.
 266	 */
 267	if (unlikely((~delta & mask) < (mask >> 3))) {
 268		tk->underflow_seen = 1;
 269		delta = 0;
 270	}
 271
 272	/* Cap the delta value to max_cycles to avoid mult overflows */
 273	if (unlikely(delta > max)) {
 274		tk->overflow_seen = 1;
 275		delta = tkr->clock->max_cycles;
 276	}
 277
 278	return delta;
 279}
 280#else
 281static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
 282{
 283}
 284static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 285{
 286	u64 cycle_now, delta;
 287
 288	/* read clocksource */
 289	cycle_now = tk_clock_read(tkr);
 290
 291	/* calculate the delta since the last update_wall_time */
 292	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
 293
 294	return delta;
 295}
 296#endif
 297
 298/**
 299 * tk_setup_internals - Set up internals to use clocksource clock.
 300 *
 301 * @tk:		The target timekeeper to setup.
 302 * @clock:		Pointer to clocksource.
 303 *
 304 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 305 * pair and interval request.
 306 *
 307 * Unless you're the timekeeping code, you should not be using this!
 308 */
 309static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 310{
 311	u64 interval;
 312	u64 tmp, ntpinterval;
 313	struct clocksource *old_clock;
 314
 315	++tk->cs_was_changed_seq;
 316	old_clock = tk->tkr_mono.clock;
 317	tk->tkr_mono.clock = clock;
 318	tk->tkr_mono.mask = clock->mask;
 319	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
 320
 321	tk->tkr_raw.clock = clock;
 322	tk->tkr_raw.mask = clock->mask;
 323	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
 324
 325	/* Do the ns -> cycle conversion first, using original mult */
 326	tmp = NTP_INTERVAL_LENGTH;
 327	tmp <<= clock->shift;
 328	ntpinterval = tmp;
 329	tmp += clock->mult/2;
 330	do_div(tmp, clock->mult);
 331	if (tmp == 0)
 332		tmp = 1;
 333
 334	interval = (u64) tmp;
 335	tk->cycle_interval = interval;
 336
 337	/* Go back from cycles -> shifted ns */
 338	tk->xtime_interval = interval * clock->mult;
 339	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
 340	tk->raw_interval = interval * clock->mult;
 341
 342	 /* if changing clocks, convert xtime_nsec shift units */
 343	if (old_clock) {
 344		int shift_change = clock->shift - old_clock->shift;
 345		if (shift_change < 0) {
 346			tk->tkr_mono.xtime_nsec >>= -shift_change;
 347			tk->tkr_raw.xtime_nsec >>= -shift_change;
 348		} else {
 349			tk->tkr_mono.xtime_nsec <<= shift_change;
 350			tk->tkr_raw.xtime_nsec <<= shift_change;
 351		}
 352	}
 353
 354	tk->tkr_mono.shift = clock->shift;
 355	tk->tkr_raw.shift = clock->shift;
 356
 357	tk->ntp_error = 0;
 358	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
 359	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
 360
 361	/*
 362	 * The timekeeper keeps its own mult values for the currently
 363	 * active clocksource. These values will be adjusted via NTP
 364	 * to counteract clock drifting.
 365	 */
 366	tk->tkr_mono.mult = clock->mult;
 367	tk->tkr_raw.mult = clock->mult;
 368	tk->ntp_err_mult = 0;
 369	tk->skip_second_overflow = 0;
 370}
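
/*
 * Worked example (editorial illustration, values assumed, not part of
 * the kernel source): take a hypothetical 100 MHz clocksource, i.e.
 * 10 ns per cycle, with shift = 22 and therefore mult = 10 << 22.
 * With HZ = 1000 the NTP interval length is roughly 1,000,000 ns, so
 * the ns -> cycle conversion above yields
 *
 *	cycle_interval = (1000000 << 22) / (10 << 22) = 100000 cycles
 *
 * and the way back gives xtime_interval = 100000 * (10 << 22), i.e.
 * exactly 1,000,000 ns in shifted units. When the division is not
 * exact, xtime_remainder absorbs the rounding error so that NTP can
 * account for it.
 */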
 371
 372/* Timekeeper helper functions. */
 373
 374static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
 375{
 376	u64 nsec;
 377
 378	nsec = delta * tkr->mult + tkr->xtime_nsec;
 379	nsec >>= tkr->shift;
 380
 381	return nsec;
 382}
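
/*
 * Editorial sketch (not part of the kernel source): the mult/shift
 * arithmetic above in isolation, with the hypothetical 100 MHz
 * clocksource from the example above (10 ns per cycle, shift = 22,
 * mult = 10 << 22 = 41943040). A delta of 3 cycles with no pending
 * shifted remainder gives (3 * 41943040 + 0) >> 22 = 30 ns.
 */
static inline u64 __maybe_unused example_delta_to_ns(u64 delta, u32 mult,
						     u32 shift, u64 xtime_nsec)
{
	/* Same formula as timekeeping_delta_to_ns() */
	return (delta * mult + xtime_nsec) >> shift;
}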
 383
 384static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
 385{
 386	u64 delta;
 387
 388	delta = timekeeping_get_delta(tkr);
 389	return timekeeping_delta_to_ns(tkr, delta);
 390}
 391
 392static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
 393{
 394	u64 delta;
 395
 396	/* calculate the delta since the last update_wall_time */
 397	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
 398	return timekeeping_delta_to_ns(tkr, delta);
 399}
 400
 401/**
 402 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 403 * @tkr: Timekeeping readout base from which we take the update
 404 * @tkf: Pointer to NMI safe timekeeper
 405 *
 406 * We want to use this from any context including NMI and tracing /
 407 * instrumenting the timekeeping code itself.
 408 *
 409 * Employ the latch technique; see @raw_write_seqcount_latch.
 410 *
 411 * So if an NMI hits the update of base[0] then it will use base[1]
 412 * which is still consistent. In the worst case this can result in a
 413 * slightly wrong timestamp (a few nanoseconds). See
 414 * @ktime_get_mono_fast_ns.
 415 */
 416static void update_fast_timekeeper(const struct tk_read_base *tkr,
 417				   struct tk_fast *tkf)
 418{
 419	struct tk_read_base *base = tkf->base;
 420
 421	/* Force readers off to base[1] */
 422	raw_write_seqcount_latch(&tkf->seq);
 423
 424	/* Update base[0] */
 425	memcpy(base, tkr, sizeof(*base));
 426
 427	/* Force readers back to base[0] */
 428	raw_write_seqcount_latch(&tkf->seq);
 429
 430	/* Update base[1] */
 431	memcpy(base + 1, base, sizeof(*base));
 432}
 433
 434static __always_inline u64 fast_tk_get_delta_ns(struct tk_read_base *tkr)
 435{
 436	u64 delta, cycles = tk_clock_read(tkr);
 437
 438	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
 439	return timekeeping_delta_to_ns(tkr, delta);
 440}
 441
 442static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 443{
 444	struct tk_read_base *tkr;
 445	unsigned int seq;
 446	u64 now;
 447
 448	do {
 449		seq = raw_read_seqcount_latch(&tkf->seq);
 450		tkr = tkf->base + (seq & 0x01);
 451		now = ktime_to_ns(tkr->base);
 452		now += fast_tk_get_delta_ns(tkr);
 453	} while (read_seqcount_latch_retry(&tkf->seq, seq));
 454
 455	return now;
 456}
 457
 458/**
 459 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 460 *
 461 * This timestamp is not guaranteed to be monotonic across an update.
 462 * The timestamp is calculated by:
 463 *
 464 *	now = base_mono + clock_delta * slope
 465 *
 466 * So if the update lowers the slope, readers who are forced to the
 467 * not yet updated second array are still using the old steeper slope.
 468 *
 469 * tmono
 470 * ^
 471 * |    o  n
 472 * |   o n
 473 * |  u
 474 * | o
 475 * |o
 476 * |12345678---> reader order
 477 *
 478 * o = old slope
 479 * u = update
 480 * n = new slope
 481 *
 482 * So reader 6 will observe time going backwards versus reader 5.
 483 *
 484 * While other CPUs are likely to be able to observe that, the only way
 485 * for a CPU local observation is when an NMI hits in the middle of
 486 * the update. Timestamps taken from that NMI context might be ahead
 487 * of the following timestamps. Callers need to be aware of that and
 488 * deal with it.
 489 */
 490u64 notrace ktime_get_mono_fast_ns(void)
 491{
 492	return __ktime_get_fast_ns(&tk_fast_mono);
 493}
 494EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
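
/*
 * Usage sketch (editorial illustration, hypothetical caller): because
 * the latch scheme above never makes readers wait for a writer, this
 * accessor is usable from NMI context, e.g. to timestamp samples in a
 * perf-style NMI handler. Callers must tolerate the small
 * non-monotonicity across updates documented above.
 */
static u64 __maybe_unused example_nmi_timestamp(void)
{
	/* Safe in NMI context; never spins on the seqcount */
	return ktime_get_mono_fast_ns();
}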
 495
 496/**
 497 * ktime_get_raw_fast_ns - Fast NMI safe access to clock monotonic raw
 498 *
 499 * Contrary to ktime_get_mono_fast_ns() this is always correct because the
 500 * conversion factor is not affected by NTP/PTP correction.
 501 */
 502u64 notrace ktime_get_raw_fast_ns(void)
 503{
 504	return __ktime_get_fast_ns(&tk_fast_raw);
 505}
 506EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
 507
 508/**
 509 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
 510 *
 511 * To keep it NMI safe since we're accessing from tracing, we're not using a
 512 * separate timekeeper with updates to monotonic clock and boot offset
 513 * protected with seqcounts. This has the following minor side effects:
 514 *
 515 * (1) It's possible that a timestamp is taken after the boot offset is updated
 516 * but before the timekeeper is updated. If this happens, the new boot offset
 517 * is added to the old timekeeping making the clock appear to update slightly
 518 * earlier:
 519 *    CPU 0                                        CPU 1
 520 *    timekeeping_inject_sleeptime64()
 521 *    __timekeeping_inject_sleeptime(tk, delta);
 522 *                                                 timestamp();
 523 *    timekeeping_update(tk, TK_CLEAR_NTP...);
 524 *
 525 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
 526 * partially updated.  Since the tk->offs_boot update is a rare event, this
 527 * should be a rare occurrence which postprocessing should be able to handle.
 528 *
 529 * The caveats vs. timestamp ordering as documented for ktime_get_fast_ns()
 530 * apply as well.
 531 */
 532u64 notrace ktime_get_boot_fast_ns(void)
 533{
 534	struct timekeeper *tk = &tk_core.timekeeper;
 535
 536	return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_boot)));
 537}
 538EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
 539
 540/**
 541 * ktime_get_tai_fast_ns - NMI safe and fast access to tai clock.
 542 *
 543 * The same limitations as described for ktime_get_boot_fast_ns() apply. The
 544 * mono time and the TAI offset are not read atomically which may yield wrong
 545 * readouts. However, an update of the TAI offset is a rare event, e.g. caused
 546 * by settime or adjtimex with an offset. The user of this function has to deal
 547 * with the possibility of wrong timestamps in post processing.
 548 */
 549u64 notrace ktime_get_tai_fast_ns(void)
 550{
 551	struct timekeeper *tk = &tk_core.timekeeper;
 552
 553	return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_tai)));
 554}
 555EXPORT_SYMBOL_GPL(ktime_get_tai_fast_ns);
 556
 557static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
 558{
 559	struct tk_read_base *tkr;
 560	u64 basem, baser, delta;
 561	unsigned int seq;
 562
 563	do {
 564		seq = raw_read_seqcount_latch(&tkf->seq);
 565		tkr = tkf->base + (seq & 0x01);
 566		basem = ktime_to_ns(tkr->base);
 567		baser = ktime_to_ns(tkr->base_real);
 568		delta = fast_tk_get_delta_ns(tkr);
 569	} while (read_seqcount_latch_retry(&tkf->seq, seq));
 570
 571	if (mono)
 572		*mono = basem + delta;
 573	return baser + delta;
 574}
 575
 576/**
 577 * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
 578 *
 579 * See ktime_get_fast_ns() for documentation of the time stamp ordering.
 580 */
 581u64 ktime_get_real_fast_ns(void)
 582{
 583	return __ktime_get_real_fast(&tk_fast_mono, NULL);
 584}
 585EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
 586
 587/**
 588 * ktime_get_fast_timestamps: - NMI safe timestamps
 589 * @snapshot:	Pointer to timestamp storage
 590 *
 591 * Stores clock monotonic, boottime and realtime timestamps.
 592 *
 593 * Boot time is a racy access on 32bit systems if the sleep time injection
 594 * happens late during resume and not in timekeeping_resume(). That could
 595 * be avoided by expanding struct tk_read_base with boot offset for 32bit
 596 * and adding more overhead to the update. As this is a hard-to-observe,
 597 * once-per-resume event which can be filtered with reasonable effort using
 598 * the accurate mono/real timestamps, it's probably not worth the trouble.
 599 *
 600 * Aside from that, it might be possible on 32 and 64 bit to observe the
 601 * following when the sleep time injection happens late:
 602 *
 603 * CPU 0				CPU 1
 604 * timekeeping_resume()
 605 * ktime_get_fast_timestamps()
 606 *	mono, real = __ktime_get_real_fast()
 607 *					inject_sleep_time()
 608 *					   update boot offset
 609 *	boot = mono + bootoffset;
 610 *
 611 * That means that boot time already has the sleep time adjustment, but
 612 * real time does not. On the next readout both are in sync again.
 613 *
 614 * Preventing this for 64bit is not really feasible without destroying the
 615 * careful cache layout of the timekeeper because the sequence count and
 616 * struct tk_read_base would then need two cache lines instead of one.
 617 *
 618 * Access to the time keeper clock source is disabled across the innermost
 619 * steps of suspend/resume. The accessors still work, but the timestamps
 620 * are frozen until time keeping is resumed which happens very early.
 621 *
 622 * For regular suspend/resume there is no observable difference vs. sched
 623 * clock, but it might affect some of the nasty low level debug printks.
 624 *
 625 * OTOH, access to sched clock is not guaranteed across suspend/resume on
 626 * all systems either so it depends on the hardware in use.
 627 *
 628 * If that turns out to be a real problem then this could be mitigated by
 629 * using sched clock in a similar way as during early boot. But it's not as
 630 * trivial as on early boot because it needs some careful protection
 631 * against the clock monotonic timestamp jumping backwards on resume.
 632 */
 633void ktime_get_fast_timestamps(struct ktime_timestamps *snapshot)
 634{
 635	struct timekeeper *tk = &tk_core.timekeeper;
 636
 637	snapshot->real = __ktime_get_real_fast(&tk_fast_mono, &snapshot->mono);
 638	snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot));
 639}
 640
 641/**
 642 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 643 * @tk: Timekeeper to snapshot.
 644 *
 645 * It generally is unsafe to access the clocksource after timekeeping has been
 646 * suspended, so take a snapshot of the readout base of @tk and use it as the
 647 * fast timekeeper's readout base while suspended.  It will return the same
 648 * number of cycles every time until timekeeping is resumed at which time the
 649 * proper readout base for the fast timekeeper will be restored automatically.
 650 */
 651static void halt_fast_timekeeper(const struct timekeeper *tk)
 652{
 653	static struct tk_read_base tkr_dummy;
 654	const struct tk_read_base *tkr = &tk->tkr_mono;
 655
 656	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
 657	cycles_at_suspend = tk_clock_read(tkr);
 658	tkr_dummy.clock = &dummy_clock;
 659	tkr_dummy.base_real = tkr->base + tk->offs_real;
 660	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
 661
 662	tkr = &tk->tkr_raw;
 663	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
 664	tkr_dummy.clock = &dummy_clock;
 665	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 666}
 667
 668static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
 669
 670static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
 671{
 672	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
 673}
 674
 675/**
 676 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 677 * @nb: Pointer to the notifier block to register
 678 */
 679int pvclock_gtod_register_notifier(struct notifier_block *nb)
 680{
 681	struct timekeeper *tk = &tk_core.timekeeper;
 682	unsigned long flags;
 683	int ret;
 684
 685	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 686	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
 687	update_pvclock_gtod(tk, true);
 688	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 689
 690	return ret;
 691}
 692EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
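
/*
 * Usage sketch (editorial illustration, modeled on how hypervisor code
 * consumes this chain; all names here are hypothetical): the callback
 * runs under timekeeper_lock on every timekeeping update, with the
 * timekeeper as the data pointer and the TK_CLOCK_WAS_SET information
 * as the action value.
 */
static int example_gtod_notify(struct notifier_block *nb,
			       unsigned long was_set, void *priv)
{
	struct timekeeper *tk = priv;

	/* e.g. republish tk->tkr_mono base/mult/shift to guest memory */
	pr_debug("gtod update: xtime_sec=%lld was_set=%lu\n",
		 (long long)tk->xtime_sec, was_set);
	return NOTIFY_OK;
}

static struct notifier_block __maybe_unused example_gtod_nb = {
	.notifier_call	= example_gtod_notify,
};

/* Registration: pvclock_gtod_register_notifier(&example_gtod_nb); */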
 693
 694/**
 695 * pvclock_gtod_unregister_notifier - unregister a pvclock
 696 * timedata update listener
 697 * @nb: Pointer to the notifier block to unregister
 698 */
 699int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
 700{
 701	unsigned long flags;
 702	int ret;
 703
 704	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 705	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
 706	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 707
 708	return ret;
 709}
 710EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
 711
 712/*
 713 * tk_update_leap_state - helper to update the next_leap_ktime
 714 */
 715static inline void tk_update_leap_state(struct timekeeper *tk)
 716{
 717	tk->next_leap_ktime = ntp_get_next_leap();
 718	if (tk->next_leap_ktime != KTIME_MAX)
 719		/* Convert to monotonic time */
 720		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
 721}
 722
 723/*
 724 * Update the ktime_t based scalar nsec members of the timekeeper
 725 */
 726static inline void tk_update_ktime_data(struct timekeeper *tk)
 727{
 728	u64 seconds;
 729	u32 nsec;
 730
 731	/*
 732	 * The xtime based monotonic readout is:
 733	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
 734	 * The ktime based monotonic readout is:
 735	 *	nsec = base_mono + now();
 736	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
 737	 */
 738	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
 739	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
 740	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 741
 742	/*
 743	 * The sum of the nanoseconds portions of xtime and
 744	 * wall_to_monotonic can be greater/equal one second. Take
 745	 * this into account before updating tk->ktime_sec.
 746	 */
 747	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
 748	if (nsec >= NSEC_PER_SEC)
 749		seconds++;
 750	tk->ktime_sec = seconds;
 751
 752	/* Update the monotonic raw base */
 753	tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
 754}
 755
 756/* must hold timekeeper_lock */
 757static void timekeeping_update(struct timekeeper *tk, unsigned int action)
 758{
 759	if (action & TK_CLEAR_NTP) {
 760		tk->ntp_error = 0;
 761		ntp_clear();
 762	}
 763
 764	tk_update_leap_state(tk);
 765	tk_update_ktime_data(tk);
 766
 767	update_vsyscall(tk);
 768	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
 769
 770	tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
 771	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
 772	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);
 773
 774	if (action & TK_CLOCK_WAS_SET)
 775		tk->clock_was_set_seq++;
 776	/*
 777	 * The mirroring of the data to the shadow-timekeeper needs
 778	 * to happen last here to ensure we don't over-write the
 779	 * timekeeper structure on the next update with stale data
 780	 */
 781	if (action & TK_MIRROR)
 782		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
 783		       sizeof(tk_core.timekeeper));
 784}
 785
 786/**
 787 * timekeeping_forward_now - update clock to the current time
 788 * @tk:		Pointer to the timekeeper to update
 789 *
 790 * Forward the current clock to update its state since the last call to
 791 * update_wall_time(). This is useful before significant clock changes,
 792 * as it avoids having to deal with this time offset explicitly.
 793 */
 794static void timekeeping_forward_now(struct timekeeper *tk)
 795{
 796	u64 cycle_now, delta;
 797
 798	cycle_now = tk_clock_read(&tk->tkr_mono);
 799	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 800	tk->tkr_mono.cycle_last = cycle_now;
 801	tk->tkr_raw.cycle_last  = cycle_now;
 802
 803	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
 804	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
 805
 806	tk_normalize_xtime(tk);
 807}
 808
 809/**
 810 * ktime_get_real_ts64 - Returns the time of day in a timespec64.
 811 * @ts:		pointer to the timespec to be set
 812 *
 813 * Returns the time of day in a timespec64 (WARN if suspended).
 814 */
 815void ktime_get_real_ts64(struct timespec64 *ts)
 816{
 817	struct timekeeper *tk = &tk_core.timekeeper;
 818	unsigned int seq;
 819	u64 nsecs;
 820
 821	WARN_ON(timekeeping_suspended);
 822
 823	do {
 824		seq = read_seqcount_begin(&tk_core.seq);
 825
 826		ts->tv_sec = tk->xtime_sec;
 827		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 828
 829	} while (read_seqcount_retry(&tk_core.seq, seq));
 830
 831	ts->tv_nsec = 0;
 832	timespec64_add_ns(ts, nsecs);
 833}
 834EXPORT_SYMBOL(ktime_get_real_ts64);
 835
 836ktime_t ktime_get(void)
 837{
 838	struct timekeeper *tk = &tk_core.timekeeper;
 839	unsigned int seq;
 840	ktime_t base;
 841	u64 nsecs;
 842
 843	WARN_ON(timekeeping_suspended);
 844
 845	do {
 846		seq = read_seqcount_begin(&tk_core.seq);
 847		base = tk->tkr_mono.base;
 848		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 849
 850	} while (read_seqcount_retry(&tk_core.seq, seq));
 851
 852	return ktime_add_ns(base, nsecs);
 853}
 854EXPORT_SYMBOL_GPL(ktime_get);
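
/*
 * Usage sketch (editorial illustration): the canonical pattern for
 * measuring an elapsed interval with the NTP-corrected monotonic clock.
 */
static u64 __maybe_unused example_elapsed_ns(void (*fn)(void))
{
	ktime_t start = ktime_get();

	fn();
	return ktime_to_ns(ktime_sub(ktime_get(), start));
}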
 855
 856u32 ktime_get_resolution_ns(void)
 857{
 858	struct timekeeper *tk = &tk_core.timekeeper;
 859	unsigned int seq;
 860	u32 nsecs;
 861
 862	WARN_ON(timekeeping_suspended);
 863
 864	do {
 865		seq = read_seqcount_begin(&tk_core.seq);
 866		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
 867	} while (read_seqcount_retry(&tk_core.seq, seq));
 868
 869	return nsecs;
 870}
 871EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
 872
 873static ktime_t *offsets[TK_OFFS_MAX] = {
 874	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
 875	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
 876	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
 877};
 878
 879ktime_t ktime_get_with_offset(enum tk_offsets offs)
 880{
 881	struct timekeeper *tk = &tk_core.timekeeper;
 882	unsigned int seq;
 883	ktime_t base, *offset = offsets[offs];
 884	u64 nsecs;
 885
 886	WARN_ON(timekeeping_suspended);
 887
 888	do {
 889		seq = read_seqcount_begin(&tk_core.seq);
 890		base = ktime_add(tk->tkr_mono.base, *offset);
 891		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 892
 893	} while (read_seqcount_retry(&tk_core.seq, seq));
 894
 895	return ktime_add_ns(base, nsecs);
 896
 897}
 898EXPORT_SYMBOL_GPL(ktime_get_with_offset);
 899
 900ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
 901{
 902	struct timekeeper *tk = &tk_core.timekeeper;
 903	unsigned int seq;
 904	ktime_t base, *offset = offsets[offs];
 905	u64 nsecs;
 906
 907	WARN_ON(timekeeping_suspended);
 908
 909	do {
 910		seq = read_seqcount_begin(&tk_core.seq);
 911		base = ktime_add(tk->tkr_mono.base, *offset);
 912		nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
 913
 914	} while (read_seqcount_retry(&tk_core.seq, seq));
 915
 916	return ktime_add_ns(base, nsecs);
 917}
 918EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
 919
 920/**
 921 * ktime_mono_to_any() - convert monotonic time to any other time
 922 * @tmono:	time to convert.
 923 * @offs:	which offset to use
 924 */
 925ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
 926{
 927	ktime_t *offset = offsets[offs];
 928	unsigned int seq;
 929	ktime_t tconv;
 930
 931	do {
 932		seq = read_seqcount_begin(&tk_core.seq);
 933		tconv = ktime_add(tmono, *offset);
 934	} while (read_seqcount_retry(&tk_core.seq, seq));
 935
 936	return tconv;
 937}
 938EXPORT_SYMBOL_GPL(ktime_mono_to_any);
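
/*
 * Usage sketch (editorial illustration): converting a saved
 * CLOCK_MONOTONIC timestamp into the boottime and realtime domains
 * without re-reading the clock.
 */
static void __maybe_unused example_mono_to_other_domains(ktime_t mono)
{
	ktime_t boot = ktime_mono_to_any(mono, TK_OFFS_BOOT);
	ktime_t real = ktime_mono_to_any(mono, TK_OFFS_REAL);

	pr_debug("boot=%lld real=%lld\n", ktime_to_ns(boot), ktime_to_ns(real));
}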
 939
 940/**
 941 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 942 */
 943ktime_t ktime_get_raw(void)
 944{
 945	struct timekeeper *tk = &tk_core.timekeeper;
 946	unsigned int seq;
 947	ktime_t base;
 948	u64 nsecs;
 949
 950	do {
 951		seq = read_seqcount_begin(&tk_core.seq);
 952		base = tk->tkr_raw.base;
 953		nsecs = timekeeping_get_ns(&tk->tkr_raw);
 954
 955	} while (read_seqcount_retry(&tk_core.seq, seq));
 956
 957	return ktime_add_ns(base, nsecs);
 958}
 959EXPORT_SYMBOL_GPL(ktime_get_raw);
 960
 961/**
 962 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 963 * @ts:		pointer to timespec variable
 964 *
 965 * The function calculates the monotonic clock from the realtime
 966 * clock and the wall_to_monotonic offset and stores the result
 967 * in normalized timespec64 format in the variable pointed to by @ts.
 968 */
 969void ktime_get_ts64(struct timespec64 *ts)
 970{
 971	struct timekeeper *tk = &tk_core.timekeeper;
 972	struct timespec64 tomono;
 973	unsigned int seq;
 974	u64 nsec;
 975
 976	WARN_ON(timekeeping_suspended);
 977
 978	do {
 979		seq = read_seqcount_begin(&tk_core.seq);
 980		ts->tv_sec = tk->xtime_sec;
 981		nsec = timekeeping_get_ns(&tk->tkr_mono);
 982		tomono = tk->wall_to_monotonic;
 983
 984	} while (read_seqcount_retry(&tk_core.seq, seq));
 985
 986	ts->tv_sec += tomono.tv_sec;
 987	ts->tv_nsec = 0;
 988	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
 989}
 990EXPORT_SYMBOL_GPL(ktime_get_ts64);
 991
 992/**
 993 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 994 *
 995 * Returns the seconds portion of CLOCK_MONOTONIC with a single
 996 * non-serialized read. tk->ktime_sec is of type 'unsigned long' so this
 997 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 998 * covers ~136 years of uptime which should be enough to prevent
 999 * premature wrap arounds.
1000 */
1001time64_t ktime_get_seconds(void)
1002{
1003	struct timekeeper *tk = &tk_core.timekeeper;
1004
1005	WARN_ON(timekeeping_suspended);
1006	return tk->ktime_sec;
1007}
1008EXPORT_SYMBOL_GPL(ktime_get_seconds);
1009
1010/**
1011 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
1012 *
1013 * Returns the wall clock seconds since 1970.
1014 *
1015 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
1016 * 32bit systems the access must be protected with the sequence
1017 * counter to provide "atomic" access to the 64bit tk->xtime_sec
1018 * value.
1019 */
1020time64_t ktime_get_real_seconds(void)
1021{
1022	struct timekeeper *tk = &tk_core.timekeeper;
1023	time64_t seconds;
1024	unsigned int seq;
1025
1026	if (IS_ENABLED(CONFIG_64BIT))
1027		return tk->xtime_sec;
1028
1029	do {
1030		seq = read_seqcount_begin(&tk_core.seq);
1031		seconds = tk->xtime_sec;
1032
1033	} while (read_seqcount_retry(&tk_core.seq, seq));
1034
1035	return seconds;
1036}
1037EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
1038
1039/**
1040 * __ktime_get_real_seconds - The same as ktime_get_real_seconds,
1041 * but without the sequence counter protection. This internal function
1042 * is only called when the timekeeping lock is already held.
1043 */
1044noinstr time64_t __ktime_get_real_seconds(void)
1045{
1046	struct timekeeper *tk = &tk_core.timekeeper;
1047
1048	return tk->xtime_sec;
1049}
1050
1051/**
1052 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
1053 * @systime_snapshot:	pointer to struct receiving the system time snapshot
1054 */
1055void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
1056{
1057	struct timekeeper *tk = &tk_core.timekeeper;
1058	unsigned int seq;
1059	ktime_t base_raw;
1060	ktime_t base_real;
1061	u64 nsec_raw;
1062	u64 nsec_real;
1063	u64 now;
1064
1065	WARN_ON_ONCE(timekeeping_suspended);
1066
1067	do {
1068		seq = read_seqcount_begin(&tk_core.seq);
1069		now = tk_clock_read(&tk->tkr_mono);
1070		systime_snapshot->cs_id = tk->tkr_mono.clock->id;
1071		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
1072		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
1073		base_real = ktime_add(tk->tkr_mono.base,
1074				      tk_core.timekeeper.offs_real);
1075		base_raw = tk->tkr_raw.base;
1076		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
1077		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
1078	} while (read_seqcount_retry(&tk_core.seq, seq));
1079
1080	systime_snapshot->cycles = now;
1081	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
1082	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
1083}
1084EXPORT_SYMBOL_GPL(ktime_get_snapshot);
1085
1086/* Scale base by mult/div checking for overflow */
1087static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
1088{
1089	u64 tmp, rem;
1090
1091	tmp = div64_u64_rem(*base, div, &rem);
1092
1093	if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
1094	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
1095		return -EOVERFLOW;
1096	tmp *= mult;
1097
1098	rem = div64_u64(rem * mult, div);
1099	*base = tmp + rem;
1100	return 0;
1101}
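
/*
 * Worked example (editorial illustration): scaling *base = 1000 by
 * mult/div = 3/7 splits the division first: tmp = 1000 / 7 = 142 with
 * rem = 6. The result is 142 * 3 + (6 * 3) / 7 = 426 + 2 = 428, which
 * matches 3000 / 7 = 428 without ever forming the full base * mult
 * product that could overflow 64 bits.
 */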
1102
1103/**
1104 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
1105 * @history:			Snapshot representing start of history
1106 * @partial_history_cycles:	Cycle offset into history (fractional part)
1107 * @total_history_cycles:	Total history length in cycles
1108 * @discontinuity:		True indicates clock was set on history period
1109 * @ts:				Cross timestamp that should be adjusted using
1110 *	partial/total ratio
1111 *
1112 * Helper function used by get_device_system_crosststamp() to correct the
1113 * crosstimestamp corresponding to the start of the current interval to the
1114 * system counter value (timestamp point) provided by the driver. The
1115 * total_history_* quantities are the total history starting at the provided
1116 * reference point and ending at the start of the current interval. The cycle
1117 * count between the driver timestamp point and the start of the current
1118 * interval is partial_history_cycles.
1119 */
1120static int adjust_historical_crosststamp(struct system_time_snapshot *history,
1121					 u64 partial_history_cycles,
1122					 u64 total_history_cycles,
1123					 bool discontinuity,
1124					 struct system_device_crosststamp *ts)
1125{
1126	struct timekeeper *tk = &tk_core.timekeeper;
1127	u64 corr_raw, corr_real;
1128	bool interp_forward;
1129	int ret;
1130
1131	if (total_history_cycles == 0 || partial_history_cycles == 0)
1132		return 0;
1133
1134	/* Interpolate shortest distance from beginning or end of history */
1135	interp_forward = partial_history_cycles > total_history_cycles / 2;
1136	partial_history_cycles = interp_forward ?
1137		total_history_cycles - partial_history_cycles :
1138		partial_history_cycles;
1139
1140	/*
1141	 * Scale the monotonic raw time delta by:
1142	 *	partial_history_cycles / total_history_cycles
1143	 */
1144	corr_raw = (u64)ktime_to_ns(
1145		ktime_sub(ts->sys_monoraw, history->raw));
1146	ret = scale64_check_overflow(partial_history_cycles,
1147				     total_history_cycles, &corr_raw);
1148	if (ret)
1149		return ret;
1150
1151	/*
1152	 * If there is a discontinuity in the history, scale monotonic raw
1153	 *	correction by:
1154	 *	mult(real)/mult(raw) yielding the realtime correction
1155	 * Otherwise, calculate the realtime correction similar to monotonic
1156	 *	raw calculation
1157	 */
1158	if (discontinuity) {
1159		corr_real = mul_u64_u32_div
1160			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
1161	} else {
1162		corr_real = (u64)ktime_to_ns(
1163			ktime_sub(ts->sys_realtime, history->real));
1164		ret = scale64_check_overflow(partial_history_cycles,
1165					     total_history_cycles, &corr_real);
1166		if (ret)
1167			return ret;
1168	}
1169
1170	/* Fixup monotonic raw and real time time values */
1171	if (interp_forward) {
1172		ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
1173		ts->sys_realtime = ktime_add_ns(history->real, corr_real);
1174	} else {
1175		ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
1176		ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
1177	}
1178
1179	return 0;
1180}
1181
1182/*
1183 * cycle_between - true if test occurs chronologically between before and after
1184 */
1185static bool cycle_between(u64 before, u64 test, u64 after)
1186{
1187	if (test > before && test < after)
1188		return true;
1189	if (test < before && before > after)
1190		return true;
1191	return false;
1192}
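
/*
 * Examples (editorial illustration): without wraparound,
 * cycle_between(100, 150, 200) is true via the first test. With a
 * counter that has wrapped, cycle_between(0xFFF0, 0x0005, 0x0010) is
 * true via the second test, since "before" (0xFFF0) is numerically
 * larger than "after" (0x0010) and "test" already wrapped past zero.
 */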
1193
1194/**
1195 * get_device_system_crosststamp - Synchronously capture system/device timestamp
1196 * @get_time_fn:	Callback to get simultaneous device time and
1197 *	system counter from the device driver
1198 * @ctx:		Context passed to get_time_fn()
1199 * @history_begin:	Historical reference point used to interpolate system
1200 *	time when counter provided by the driver is before the current interval
1201 * @xtstamp:		Receives simultaneously captured system and device time
1202 *
1203 * Reads a timestamp from a device and correlates it to system time
1204 */
1205int get_device_system_crosststamp(int (*get_time_fn)
1206				  (ktime_t *device_time,
1207				   struct system_counterval_t *sys_counterval,
1208				   void *ctx),
1209				  void *ctx,
1210				  struct system_time_snapshot *history_begin,
1211				  struct system_device_crosststamp *xtstamp)
1212{
1213	struct system_counterval_t system_counterval;
1214	struct timekeeper *tk = &tk_core.timekeeper;
1215	u64 cycles, now, interval_start;
1216	unsigned int clock_was_set_seq = 0;
1217	ktime_t base_real, base_raw;
1218	u64 nsec_real, nsec_raw;
1219	u8 cs_was_changed_seq;
1220	unsigned int seq;
1221	bool do_interp;
1222	int ret;
1223
1224	do {
1225		seq = read_seqcount_begin(&tk_core.seq);
1226		/*
1227		 * Try to synchronously capture device time and a system
1228		 * counter value calling back into the device driver
1229		 */
1230		ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
1231		if (ret)
1232			return ret;
1233
1234		/*
1235		 * Verify that the clocksource associated with the captured
1236		 * system counter value is the same as the currently installed
1237		 * timekeeper clocksource
1238		 */
1239		if (tk->tkr_mono.clock != system_counterval.cs)
1240			return -ENODEV;
1241		cycles = system_counterval.cycles;
1242
1243		/*
1244		 * Check whether the system counter value provided by the
1245		 * device driver is on the current timekeeping interval.
1246		 */
1247		now = tk_clock_read(&tk->tkr_mono);
1248		interval_start = tk->tkr_mono.cycle_last;
1249		if (!cycle_between(interval_start, cycles, now)) {
1250			clock_was_set_seq = tk->clock_was_set_seq;
1251			cs_was_changed_seq = tk->cs_was_changed_seq;
1252			cycles = interval_start;
1253			do_interp = true;
1254		} else {
1255			do_interp = false;
1256		}
1257
1258		base_real = ktime_add(tk->tkr_mono.base,
1259				      tk_core.timekeeper.offs_real);
1260		base_raw = tk->tkr_raw.base;
1261
1262		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
1263						     system_counterval.cycles);
1264		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
1265						    system_counterval.cycles);
1266	} while (read_seqcount_retry(&tk_core.seq, seq));
1267
1268	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
1269	xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);
1270
1271	/*
1272	 * Interpolate if necessary, adjusting back from the start of the
1273	 * current interval
1274	 */
1275	if (do_interp) {
1276		u64 partial_history_cycles, total_history_cycles;
1277		bool discontinuity;
1278
1279		/*
1280		 * Check that the counter value occurs after the provided
1281		 * history reference and that the history doesn't cross a
1282		 * clocksource change
1283		 */
1284		if (!history_begin ||
1285		    !cycle_between(history_begin->cycles,
1286				   system_counterval.cycles, cycles) ||
1287		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
1288			return -EINVAL;
1289		partial_history_cycles = cycles - system_counterval.cycles;
1290		total_history_cycles = cycles - history_begin->cycles;
1291		discontinuity =
1292			history_begin->clock_was_set_seq != clock_was_set_seq;
1293
1294		ret = adjust_historical_crosststamp(history_begin,
1295						    partial_history_cycles,
1296						    total_history_cycles,
1297						    discontinuity, xtstamp);
1298		if (ret)
1299			return ret;
1300	}
1301
1302	return 0;
1303}
1304EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
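
/*
 * Usage sketch (editorial illustration, hypothetical driver; all names
 * below are assumed): a device that latches its own clock and the
 * system counter atomically in hardware reports both values through
 * the callback. The clocksource recorded in sys_counterval must be the
 * timekeeper's current one, or the crosststamp is rejected with
 * -ENODEV above.
 */
struct example_latch {
	ktime_t				device_time;
	struct system_counterval_t	sys;
};

static int __maybe_unused example_get_time_fn(ktime_t *device_time,
					      struct system_counterval_t *sys_counterval,
					      void *ctx)
{
	struct example_latch *latch = ctx;

	*device_time = latch->device_time;
	*sys_counterval = latch->sys;
	return 0;
}

/*
 * struct system_device_crosststamp xt;
 *
 * get_device_system_crosststamp(example_get_time_fn, &latch, NULL, &xt);
 */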
1305
1306/**
1307 * do_settimeofday64 - Sets the time of day.
1308 * @ts:     pointer to the timespec64 variable containing the new time
1309 *
1310 * Sets the time of day to the new time, updates NTP and notifies hrtimers
1311 */
1312int do_settimeofday64(const struct timespec64 *ts)
1313{
1314	struct timekeeper *tk = &tk_core.timekeeper;
1315	struct timespec64 ts_delta, xt;
1316	unsigned long flags;
1317	int ret = 0;
1318
1319	if (!timespec64_valid_settod(ts))
1320		return -EINVAL;
1321
1322	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1323	write_seqcount_begin(&tk_core.seq);
1324
1325	timekeeping_forward_now(tk);
1326
1327	xt = tk_xtime(tk);
1328	ts_delta = timespec64_sub(*ts, xt);
1329
1330	if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
1331		ret = -EINVAL;
1332		goto out;
1333	}
1334
1335	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
1336
1337	tk_set_xtime(tk, ts);
1338out:
1339	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1340
1341	write_seqcount_end(&tk_core.seq);
1342	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1343
1344	/* Signal hrtimers about time change */
1345	clock_was_set(CLOCK_SET_WALL);
1346
1347	if (!ret) {
1348		audit_tk_injoffset(ts_delta);
1349		add_device_randomness(ts, sizeof(*ts));
1350	}
1351
1352	return ret;
1353}
1354EXPORT_SYMBOL(do_settimeofday64);
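
/*
 * Usage sketch (editorial illustration): stepping the clock to a value
 * obtained elsewhere, e.g. from firmware. The timespec must pass
 * timespec64_valid_settod() and, roughly speaking, must not point to a
 * time before the system booted, otherwise -EINVAL is returned above.
 */
static int __maybe_unused example_step_clock(time64_t sec)
{
	struct timespec64 ts = { .tv_sec = sec, .tv_nsec = 0 };

	return do_settimeofday64(&ts);
}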
1355
1356/**
1357 * timekeeping_inject_offset - Adds or subtracts from the current time.
1358 * @ts:		Pointer to the timespec variable containing the offset
1359 *
1360 * Adds or subtracts an offset value from the current time.
1361 */
1362static int timekeeping_inject_offset(const struct timespec64 *ts)
1363{
1364	struct timekeeper *tk = &tk_core.timekeeper;
1365	unsigned long flags;
1366	struct timespec64 tmp;
1367	int ret = 0;
1368
1369	if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
1370		return -EINVAL;
1371
1372	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1373	write_seqcount_begin(&tk_core.seq);
1374
1375	timekeeping_forward_now(tk);
1376
1377	/* Make sure the proposed value is valid */
1378	tmp = timespec64_add(tk_xtime(tk), *ts);
1379	if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
1380	    !timespec64_valid_settod(&tmp)) {
1381		ret = -EINVAL;
1382		goto error;
1383	}
1384
1385	tk_xtime_add(tk, ts);
1386	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));
1387
1388error: /* even if we error out, we forwarded the time, so call update */
1389	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1390
1391	write_seqcount_end(&tk_core.seq);
1392	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1393
1394	/* Signal hrtimers about time change */
1395	clock_was_set(CLOCK_SET_WALL);
1396
1397	return ret;
1398}
1399
1400/*
1401 * Indicates if there is an offset between the system clock and the hardware
1402 * clock/persistent clock/rtc.
1403 */
1404int persistent_clock_is_local;
1405
1406/*
1407 * Adjust the time obtained from the CMOS to be UTC time instead of
1408 * local time.
1409 *
1410 * This is ugly, but preferable to the alternatives.  Otherwise we
1411 * would either need to write a program to do it in /etc/rc (and risk
1412 * confusion if the program gets run more than once; it would also be
1413 * hard to make the program warp the clock precisely n hours)  or
1414 * compile the timezone information into the kernel.  Bad, bad....
1415 *
1416 *						- TYT, 1992-01-01
1417 *
1418 * The best thing to do is to keep the CMOS clock in universal time (UTC)
1419 * as real UNIX machines always do. This avoids all headaches about
1420 * daylight saving times and warping kernel clocks.
1421 */
1422void timekeeping_warp_clock(void)
1423{
1424	if (sys_tz.tz_minuteswest != 0) {
1425		struct timespec64 adjust;
1426
1427		persistent_clock_is_local = 1;
1428		adjust.tv_sec = sys_tz.tz_minuteswest * 60;
1429		adjust.tv_nsec = 0;
1430		timekeeping_inject_offset(&adjust);
1431	}
1432}
1433
1434/*
1435 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
1436 */
1437static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
1438{
1439	tk->tai_offset = tai_offset;
1440	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
1441}
1442
1443/*
1444 * change_clocksource - Swaps clocksources if a new one is available
1445 *
1446 * Accumulates current time interval and initializes new clocksource
1447 */
1448static int change_clocksource(void *data)
1449{
1450	struct timekeeper *tk = &tk_core.timekeeper;
1451	struct clocksource *new, *old = NULL;
1452	unsigned long flags;
1453	bool change = false;
1454
1455	new = (struct clocksource *) data;
1456
1457	/*
1458	 * If the cs is in module, get a module reference. Succeeds
1459	 * for built-in code (owner == NULL) as well.
1460	 */
1461	if (try_module_get(new->owner)) {
1462		if (!new->enable || new->enable(new) == 0)
1463			change = true;
1464		else
1465			module_put(new->owner);
1466	}
1467
1468	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1469	write_seqcount_begin(&tk_core.seq);
1470
1471	timekeeping_forward_now(tk);
1472
1473	if (change) {
1474		old = tk->tkr_mono.clock;
1475		tk_setup_internals(tk, new);
1476	}
1477
1478	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1479
1480	write_seqcount_end(&tk_core.seq);
1481	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1482
1483	if (old) {
1484		if (old->disable)
1485			old->disable(old);
1486
1487		module_put(old->owner);
1488	}
1489
1490	return 0;
1491}
1492
1493/**
1494 * timekeeping_notify - Install a new clock source
1495 * @clock:		pointer to the clock source
1496 *
1497 * This function is called from clocksource.c after a new, better clock
1498 * source has been registered. The caller holds the clocksource_mutex.
1499 */
1500int timekeeping_notify(struct clocksource *clock)
1501{
1502	struct timekeeper *tk = &tk_core.timekeeper;
1503
1504	if (tk->tkr_mono.clock == clock)
1505		return 0;
1506	stop_machine(change_clocksource, clock, NULL);
1507	tick_clock_notify();
1508	return tk->tkr_mono.clock == clock ? 0 : -1;
1509}
1510
1511/**
1512 * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec
1513 * @ts:		pointer to the timespec64 to be set
1514 *
1515 * Returns the raw monotonic time (completely un-modified by ntp)
1516 */
1517void ktime_get_raw_ts64(struct timespec64 *ts)
1518{
1519	struct timekeeper *tk = &tk_core.timekeeper;
1520	unsigned int seq;
1521	u64 nsecs;
1522
1523	do {
1524		seq = read_seqcount_begin(&tk_core.seq);
1525		ts->tv_sec = tk->raw_sec;
1526		nsecs = timekeeping_get_ns(&tk->tkr_raw);
1527
1528	} while (read_seqcount_retry(&tk_core.seq, seq));
1529
1530	ts->tv_nsec = 0;
1531	timespec64_add_ns(ts, nsecs);
1532}
1533EXPORT_SYMBOL(ktime_get_raw_ts64);
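
/*
 * Usage sketch (editorial illustration): sampling the NTP-corrected and
 * raw monotonic clocks side by side. The two reads are not atomic, and
 * the clocks differ by their initial offsets; the difference then
 * drifts over time as NTP slews the corrected clock while the raw
 * clock stays untouched.
 */
static s64 __maybe_unused example_mono_minus_raw_ns(void)
{
	struct timespec64 mono, raw;

	ktime_get_ts64(&mono);
	ktime_get_raw_ts64(&raw);
	return timespec64_to_ns(&mono) - timespec64_to_ns(&raw);
}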
1534
1535
1536/**
1537 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
1538 */
1539int timekeeping_valid_for_hres(void)
1540{
1541	struct timekeeper *tk = &tk_core.timekeeper;
1542	unsigned int seq;
1543	int ret;
1544
1545	do {
1546		seq = read_seqcount_begin(&tk_core.seq);
1547
1548		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
1549
1550	} while (read_seqcount_retry(&tk_core.seq, seq));
1551
1552	return ret;
1553}
1554
1555/**
1556 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
1557 */
1558u64 timekeeping_max_deferment(void)
1559{
1560	struct timekeeper *tk = &tk_core.timekeeper;
1561	unsigned int seq;
1562	u64 ret;
1563
1564	do {
1565		seq = read_seqcount_begin(&tk_core.seq);
1566
1567		ret = tk->tkr_mono.clock->max_idle_ns;
1568
1569	} while (read_seqcount_retry(&tk_core.seq, seq));
1570
1571	return ret;
1572}
1573
1574/**
1575 * read_persistent_clock64 -  Return time from the persistent clock.
1576 * @ts: Pointer to the storage for the readout value
1577 *
1578 * Weak dummy function for arches that do not yet support it.
1579 * Reads the time from the battery backed persistent clock.
1580 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
1581 *
1582 *  XXX - Do be sure to remove it once all arches implement it.
1583 */
1584void __weak read_persistent_clock64(struct timespec64 *ts)
1585{
1586	ts->tv_sec = 0;
1587	ts->tv_nsec = 0;
1588}
1589
1590/**
1591 * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
1592 *                                        from the boot.
1593 * @wall_time:	  current time as returned by persistent clock
1594 * @boot_offset:  offset that is defined as wall_time - boot_time
1595 *
1596 * Weak dummy function for arches that do not yet support it.
1597 *
1598 * The default function calculates offset based on the current value of
1599 * local_clock(). This way architectures that support sched_clock() but don't
1600 * support dedicated boot time clock will provide the best estimate of the
1601 * boot time.
1602 */
1603void __weak __init
1604read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
1605				     struct timespec64 *boot_offset)
1606{
1607	read_persistent_clock64(wall_time);
1608	*boot_offset = ns_to_timespec64(local_clock());
1609}
1610
1611/*
1612 * Flag reflecting whether timekeeping_resume() has injected sleeptime.
1613 *
1614 * The flag starts off false and is only set when a suspend reaches
1615 * timekeeping_suspend(). timekeeping_resume() clears it again when the
1616 * timekeeper clocksource did not stop across suspend and has been
1617 * used to update the sleep time. If the timekeeper clocksource has stopped,
1618 * the flag stays true and is used by the RTC resume code to decide
1619 * whether sleeptime must be injected; if so, the flag is cleared afterwards.
1620 *
1621 * If a suspend fails before reaching timekeeping_resume() then the flag
1622 * stays false and prevents erroneous sleeptime injection.
1623 */
1624static bool suspend_timing_needed;
1625
1626/* Flag for if there is a persistent clock on this platform */
1627static bool persistent_clock_exists;
1628
1629/*
1630 * timekeeping_init - Initializes the clocksource and common timekeeping values
1631 */
1632void __init timekeeping_init(void)
1633{
1634	struct timespec64 wall_time, boot_offset, wall_to_mono;
1635	struct timekeeper *tk = &tk_core.timekeeper;
1636	struct clocksource *clock;
1637	unsigned long flags;
1638
1639	read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
1640	if (timespec64_valid_settod(&wall_time) &&
1641	    timespec64_to_ns(&wall_time) > 0) {
1642		persistent_clock_exists = true;
1643	} else if (timespec64_to_ns(&wall_time) != 0) {
1644		pr_warn("Persistent clock returned invalid value");
1645		wall_time = (struct timespec64){0};
1646	}
1647
1648	if (timespec64_compare(&wall_time, &boot_offset) < 0)
1649		boot_offset = (struct timespec64){0};
1650
1651	/*
1652	 * We want to set wall_to_mono, so that the following is true:
1653	 * wall time + wall_to_mono = boot time
1654	 */
1655	wall_to_mono = timespec64_sub(boot_offset, wall_time);
1656
1657	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1658	write_seqcount_begin(&tk_core.seq);
1659	ntp_init();
1660
1661	clock = clocksource_default_clock();
1662	if (clock->enable)
1663		clock->enable(clock);
1664	tk_setup_internals(tk, clock);
1665
1666	tk_set_xtime(tk, &wall_time);
1667	tk->raw_sec = 0;
1668
1669	tk_set_wall_to_mono(tk, wall_to_mono);
1670
1671	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1672
1673	write_seqcount_end(&tk_core.seq);
1674	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1675}
1676
1677/* time in seconds when suspend began for persistent clock */
1678static struct timespec64 timekeeping_suspend_time;
1679
1680/**
1681 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1682 * @tk:		Pointer to the timekeeper to be updated
1683 * @delta:	Pointer to the delta value in timespec64 format
1684 *
1685 * Takes a timespec offset measuring a suspend interval and properly
1686 * adds the sleep offset to the timekeeping variables.
1687 */
1688static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1689					   const struct timespec64 *delta)
1690{
1691	if (!timespec64_valid_strict(delta)) {
1692		printk_deferred(KERN_WARNING
1693				"__timekeeping_inject_sleeptime: Invalid "
1694				"sleep delta value!\n");
1695		return;
1696	}
1697	tk_xtime_add(tk, delta);
1698	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1699	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1700	tk_debug_account_sleep_time(delta);
1701}
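/*
 * Worked example (editorial illustration): for a 300s suspend,
 * delta = 300s and the three updates above amount to:
 *
 *	CLOCK_REALTIME:  xtime        += 300s  (wall clock catches up)
 *	CLOCK_MONOTONIC: wall_to_mono -= 300s  (mono = wall + wtm stays
 *						unchanged across suspend)
 *	CLOCK_BOOTTIME:  offs_boot    += 300s  (boot clock counts the
 *						time spent suspended)
 */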
1702
1703#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
1704/*
1705 * We have three kinds of time sources to use for sleep time
1706 * injection, the preference order is:
1707 * 1) non-stop clocksource
1708 * 2) persistent clock (ie: RTC accessible when irqs are off)
1709 * 3) RTC
1710 *
1711 * 1) and 2) are used by timekeeping, 3) by the RTC subsystem.
1712 * If the system has neither 1) nor 2), 3) is used as the
1713 * fallback.
1714 *
1715 * If timekeeping has injected sleeptime via either 1) or 2),
1716 * 3) becomes unnecessary, so in that case we don't need to call
1717 * rtc_resume(); this is what timekeeping_rtc_skipresume()
1718 * indicates.
1719 */
1720bool timekeeping_rtc_skipresume(void)
1721{
1722	return !suspend_timing_needed;
1723}
1724
1725/*
1726 * Whether 1) can be used is only known in timekeeping_resume(),
1727 * which is invoked after rtc_suspend(), so rtc_suspend() cannot
1728 * be skipped safely just because the system has 1).
1729 *
1730 * But if the system has 2), 2) will definitely be used, so in
1731 * that case we don't need to call rtc_suspend(), and this is what
1732 * timekeeping_rtc_skipsuspend() means.
1733 */
1734bool timekeeping_rtc_skipsuspend(void)
1735{
1736	return persistent_clock_exists;
1737}
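/*
 * Decision summary (editorial illustration derived from the two
 * helpers above):
 *
 *	nonstop clocksource | persistent clock | rtc_suspend | rtc_resume
 *	--------------------+------------------+-------------+-----------
 *	        yes         |        no        |    runs     |  skipped
 *	         -          |       yes        |  skipped    |  skipped
 *	        no          |        no        |    runs     |    runs
 *
 * rtc_suspend() must run in the first row because the nonstop
 * capability is only confirmed at resume time, after rtc_suspend()
 * has already been called.
 */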
1738
1739/**
1740 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1741 * @delta: pointer to a timespec64 delta value
1742 *
1743 * This hook is for architectures that cannot support read_persistent_clock64
1744 * because their RTC/persistent clock is only accessible when irqs are
1745 * enabled, and they also don't have an effective nonstop clocksource.
1746 *
1747 * This function should only be called by rtc_resume(), and allows
1748 * a suspend offset to be injected into the timekeeping values.
1749 */
1750void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
1751{
1752	struct timekeeper *tk = &tk_core.timekeeper;
1753	unsigned long flags;
1754
1755	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1756	write_seqcount_begin(&tk_core.seq);
1757
1758	suspend_timing_needed = false;
1759
1760	timekeeping_forward_now(tk);
1761
1762	__timekeeping_inject_sleeptime(tk, delta);
1763
1764	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1765
1766	write_seqcount_end(&tk_core.seq);
1767	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1768
1769	/* Signal hrtimers about time change */
1770	clock_was_set(CLOCK_SET_WALL | CLOCK_SET_BOOT);
1771}
1772#endif
1773
1774/**
1775 * timekeeping_resume - Resumes the generic timekeeping subsystem.
1776 */
1777void timekeeping_resume(void)
1778{
1779	struct timekeeper *tk = &tk_core.timekeeper;
1780	struct clocksource *clock = tk->tkr_mono.clock;
1781	unsigned long flags;
1782	struct timespec64 ts_new, ts_delta;
1783	u64 cycle_now, nsec;
1784	bool inject_sleeptime = false;
1785
1786	read_persistent_clock64(&ts_new);
1787
1788	clockevents_resume();
1789	clocksource_resume();
1790
1791	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1792	write_seqcount_begin(&tk_core.seq);
1793
1794	/*
1795	 * After the system resumes, we need to calculate the suspended time
1796	 * and compensate the OS time for it. There are 3 sources that can be
1797	 * used: Nonstop clocksource during suspend, persistent clock and rtc
1798	 * device.
1799	 *
1800	 * One specific platform may have 1 or 2 or all of them, and the
1801	 * preference will be:
1802	 *	suspend-nonstop clocksource -> persistent clock -> rtc
1803	 * The less preferred source will only be tried if there is no better
1804	 * usable source. The rtc part is handled separately in rtc core code.
1805	 */
1806	cycle_now = tk_clock_read(&tk->tkr_mono);
1807	nsec = clocksource_stop_suspend_timing(clock, cycle_now);
1808	if (nsec > 0) {
1809		ts_delta = ns_to_timespec64(nsec);
1810		inject_sleeptime = true;
1811	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1812		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1813		inject_sleeptime = true;
1814	}
1815
1816	if (inject_sleeptime) {
1817		suspend_timing_needed = false;
1818		__timekeeping_inject_sleeptime(tk, &ts_delta);
1819	}
1820
1821	/* Re-base the last cycle value */
1822	tk->tkr_mono.cycle_last = cycle_now;
1823	tk->tkr_raw.cycle_last  = cycle_now;
1824
1825	tk->ntp_error = 0;
1826	timekeeping_suspended = 0;
1827	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1828	write_seqcount_end(&tk_core.seq);
1829	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1830
1831	touch_softlockup_watchdog();
1832
1833	/* Resume the clockevent device(s) and hrtimers */
1834	tick_resume();
1835	/* Notify timerfd as resume is equivalent to clock_was_set() */
1836	timerfd_resume();
1837}
1838
1839int timekeeping_suspend(void)
1840{
1841	struct timekeeper *tk = &tk_core.timekeeper;
1842	unsigned long flags;
1843	struct timespec64		delta, delta_delta;
1844	static struct timespec64	old_delta;
1845	struct clocksource *curr_clock;
1846	u64 cycle_now;
1847
1848	read_persistent_clock64(&timekeeping_suspend_time);
1849
1850	/*
1851	 * On some systems the persistent clock cannot be detected at
1852	 * timekeeping_init by its return value, so if we see a valid
1853	 * value returned, update the persistent_clock_exists flag.
1854	 */
1855	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1856		persistent_clock_exists = true;
1857
1858	suspend_timing_needed = true;
1859
1860	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1861	write_seqcount_begin(&tk_core.seq);
1862	timekeeping_forward_now(tk);
1863	timekeeping_suspended = 1;
1864
1865	/*
1866	 * Since we've called forward_now, cycle_last stores the value
1867	 * just read from the current clocksource. Save this to potentially
1868	 * use in suspend timing.
1869	 */
1870	curr_clock = tk->tkr_mono.clock;
1871	cycle_now = tk->tkr_mono.cycle_last;
1872	clocksource_start_suspend_timing(curr_clock, cycle_now);
1873
1874	if (persistent_clock_exists) {
1875		/*
1876		 * To avoid drift caused by repeated suspend/resumes,
1877		 * each of which can add ~1 second of drift error,
1878		 * try to compensate so the difference between system time
1879		 * and persistent_clock time stays close to constant.
1880		 */
1881		delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1882		delta_delta = timespec64_sub(delta, old_delta);
1883		if (abs(delta_delta.tv_sec) >= 2) {
1884			/*
1885			 * if delta_delta is too large, assume time correction
1886			 * has occurred and set old_delta to the current delta.
1887			 */
1888			old_delta = delta;
1889		} else {
1890			/* Otherwise, adjust timekeeping_suspend_time to compensate */
1891			timekeeping_suspend_time =
1892				timespec64_add(timekeeping_suspend_time, delta_delta);
1893		}
1894	}
1895
1896	timekeeping_update(tk, TK_MIRROR);
1897	halt_fast_timekeeper(tk);
1898	write_seqcount_end(&tk_core.seq);
1899	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1900
1901	tick_suspend();
1902	clocksource_suspend();
1903	clockevents_suspend();
1904
1905	return 0;
1906}
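/*
 * Worked example of the compensation above (editorial illustration):
 * suppose (system time - persistent clock) grows by 0.4s over one
 * suspend cycle due to the persistent clock's coarse granularity.
 * Then delta_delta = 0.4s < 2s, so timekeeping_suspend_time is nudged
 * forward by 0.4s and the sleep length injected at resume
 * (ts_new - timekeeping_suspend_time) shrinks by the same amount,
 * cancelling the rounding error instead of accumulating it across
 * repeated suspend/resume cycles.
 */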
1907
1908/* sysfs resume/suspend bits for timekeeping */
1909static struct syscore_ops timekeeping_syscore_ops = {
1910	.resume		= timekeeping_resume,
1911	.suspend	= timekeeping_suspend,
1912};
1913
1914static int __init timekeeping_init_ops(void)
1915{
1916	register_syscore_ops(&timekeeping_syscore_ops);
1917	return 0;
1918}
1919device_initcall(timekeeping_init_ops);
1920
1921/*
1922 * Apply a multiplier adjustment to the timekeeper
1923 */
1924static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1925							 s64 offset,
1926							 s32 mult_adj)
1927{
1928	s64 interval = tk->cycle_interval;
1929
1930	if (mult_adj == 0) {
1931		return;
1932	} else if (mult_adj == -1) {
1933		interval = -interval;
1934		offset = -offset;
1935	} else if (mult_adj != 1) {
1936		interval *= mult_adj;
1937		offset *= mult_adj;
1938	}
1939
1940	/*
1941	 * So the following can be confusing.
1942	 *
1943	 * To keep things simple, let's assume mult_adj == 1 for now.
1944	 *
1945	 * When mult_adj != 1, remember that the interval and offset values
1946	 * have been appropriately scaled so the math is the same.
1947	 *
1948	 * The basic idea here is that we're increasing the multiplier
1949	 * by one, this causes the xtime_interval to be incremented by
1950	 * one cycle_interval. This is because:
1951	 *	xtime_interval = cycle_interval * mult
1952	 * So if mult is being incremented by one:
1953	 *	xtime_interval = cycle_interval * (mult + 1)
1954	 * It's the same as:
1955	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
1956	 * Which can be shortened to:
1957	 *	xtime_interval += cycle_interval
1958	 *
1959	 * So offset stores the non-accumulated cycles. Thus the current
1960	 * time (in shifted nanoseconds) is:
1961	 *	now = (offset * adj) + xtime_nsec
1962	 * Now, even though we're adjusting the clock frequency, we have
1963	 * to keep time consistent. In other words, we can't jump back
1964	 * in time, and we also want to avoid jumping forward in time.
1965	 *
1966	 * So given the same offset value, we need the time to be the same
1967	 * both before and after the freq adjustment.
1968	 *	now = (offset * adj_1) + xtime_nsec_1
1969	 *	now = (offset * adj_2) + xtime_nsec_2
1970	 * So:
1971	 *	(offset * adj_1) + xtime_nsec_1 =
1972	 *		(offset * adj_2) + xtime_nsec_2
1973	 * And we know:
1974	 *	adj_2 = adj_1 + 1
1975	 * So:
1976	 *	(offset * adj_1) + xtime_nsec_1 =
1977	 *		(offset * (adj_1+1)) + xtime_nsec_2
1978	 *	(offset * adj_1) + xtime_nsec_1 =
1979	 *		(offset * adj_1) + offset + xtime_nsec_2
1980	 * Canceling the sides:
1981	 *	xtime_nsec_1 = offset + xtime_nsec_2
1982	 * Which gives us:
1983	 *	xtime_nsec_2 = xtime_nsec_1 - offset
1984	 * Which simplifies to:
1985	 *	xtime_nsec -= offset
1986	 */
1987	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
1988		/* NTP adjustment caused clocksource mult overflow */
1989		WARN_ON_ONCE(1);
1990		return;
1991	}
1992
1993	tk->tkr_mono.mult += mult_adj;
1994	tk->xtime_interval += interval;
1995	tk->tkr_mono.xtime_nsec -= offset;
1996}
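/*
 * Worked example (editorial illustration) of the invariant derived
 * above, with small numbers in shifted units: let offset = 1000
 * cycles, mult = 4 and xtime_nsec = 8192. Before the adjustment:
 *
 *	now = 1000 * 4 + 8192 = 12192
 *
 * After mult_adj = +1 and the xtime_nsec -= offset correction:
 *
 *	now = 1000 * 5 + (8192 - 1000) = 12192
 *
 * The readout at this instant is unchanged; only the slope applied to
 * future cycles differs.
 */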
1997
1998/*
1999 * Adjust the timekeeper's multiplier to the correct frequency
2000 * and also to reduce the accumulated error value.
2001 */
2002static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
2003{
2004	u32 mult;
2005
2006	/*
2007	 * Determine the multiplier from the current NTP tick length.
2008	 * Avoid expensive division when the tick length doesn't change.
2009	 */
2010	if (likely(tk->ntp_tick == ntp_tick_length())) {
2011		mult = tk->tkr_mono.mult - tk->ntp_err_mult;
2012	} else {
2013		tk->ntp_tick = ntp_tick_length();
2014		mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
2015				 tk->xtime_remainder, tk->cycle_interval);
2016	}
2017
2018	/*
2019	 * If the clock is behind the NTP time, increase the multiplier by 1
2020	 * to catch up with it. If it's ahead and there was a remainder in the
2021	 * tick division, the clock will slow down. Otherwise it will stay
2022	 * ahead until the tick length changes to a non-divisible value.
2023	 */
2024	tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
2025	mult += tk->ntp_err_mult;
2026
2027	timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);
2028
2029	if (unlikely(tk->tkr_mono.clock->maxadj &&
2030		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
2031			> tk->tkr_mono.clock->maxadj))) {
2032		printk_once(KERN_WARNING
2033			"Adjusting %s more than 11%% (%ld vs %ld)\n",
2034			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
2035			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
2036	}
2037
2038	/*
2039	 * It may be possible that when we entered this function, xtime_nsec
2040	 * was very small.  Further, if we're slightly speeding the clocksource
2041	 * in the code above, it's possible the required corrective factor to
2042	 * xtime_nsec could cause it to underflow.
2043	 *
2044	 * Now, since we have already accumulated the second and the NTP
2045	 * subsystem has been notified via second_overflow(), we need to skip
2046	 * the next update.
2047	 */
2048	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
2049		tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
2050							tk->tkr_mono.shift;
2051		tk->xtime_sec--;
2052		tk->skip_second_overflow = 1;
2053	}
2054}
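/*
 * Worked example (editorial illustration, hypothetical numbers): with
 * cycle_interval = 10000 cycles and an NTP tick length equivalent to
 * 40000000 shifted ns per interval after the shift and remainder
 * handling above, the division yields mult = 40000000 / 10000 = 4000.
 * If ntp_error is positive, ntp_err_mult adds one more, so the clock
 * runs marginally fast until it has caught up with NTP time.
 */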
2055
2056/*
2057 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
2058 *
2059 * Helper function that accumulates nsecs greater than a second
2060 * from the xtime_nsec field into the xtime_sec field.
2061 * It also calls into the NTP code to handle leapsecond processing.
2062 */
2063static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
2064{
2065	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
2066	unsigned int clock_set = 0;
2067
2068	while (tk->tkr_mono.xtime_nsec >= nsecps) {
2069		int leap;
2070
2071		tk->tkr_mono.xtime_nsec -= nsecps;
2072		tk->xtime_sec++;
2073
2074		/*
2075		 * Skip NTP update if this second was accumulated before,
2076		 * i.e. xtime_nsec underflowed in timekeeping_adjust()
2077		 */
2078		if (unlikely(tk->skip_second_overflow)) {
2079			tk->skip_second_overflow = 0;
2080			continue;
2081		}
2082
2083		/* Figure out if it's a leap sec and apply if needed */
2084		leap = second_overflow(tk->xtime_sec);
2085		if (unlikely(leap)) {
2086			struct timespec64 ts;
2087
2088			tk->xtime_sec += leap;
2089
2090			ts.tv_sec = leap;
2091			ts.tv_nsec = 0;
2092			tk_set_wall_to_mono(tk,
2093				timespec64_sub(tk->wall_to_monotonic, ts));
2094
2095			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
2096
2097			clock_set = TK_CLOCK_WAS_SET;
2098		}
2099	}
2100	return clock_set;
2101}
2102
2103/*
2104 * logarithmic_accumulation - shifted accumulation of cycles
2105 *
2106 * This function accumulates a shifted interval of cycles into
2107 * a shifted interval of nanoseconds, allowing for an O(log)
2108 * accumulation loop.
2109 *
2110 * Returns the unconsumed cycles.
2111 */
2112static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
2113				    u32 shift, unsigned int *clock_set)
2114{
2115	u64 interval = tk->cycle_interval << shift;
2116	u64 snsec_per_sec;
2117
2118	/* If the offset is smaller than a shifted interval, do nothing */
2119	if (offset < interval)
2120		return offset;
2121
2122	/* Accumulate one shifted interval */
2123	offset -= interval;
2124	tk->tkr_mono.cycle_last += interval;
2125	tk->tkr_raw.cycle_last  += interval;
2126
2127	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
2128	*clock_set |= accumulate_nsecs_to_secs(tk);
2129
2130	/* Accumulate raw time */
2131	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
2132	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
2133	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
2134		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
2135		tk->raw_sec++;
2136	}
2137
2138	/* Accumulate error between NTP and clock interval */
2139	tk->ntp_error += tk->ntp_tick << shift;
2140	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
2141						(tk->ntp_error_shift + shift);
2142
2143	return offset;
2144}
2145
2146/*
2147 * timekeeping_advance - Updates the timekeeper to the current time and
2148 * current NTP tick length
2149 */
2150static bool timekeeping_advance(enum timekeeping_adv_mode mode)
2151{
2152	struct timekeeper *real_tk = &tk_core.timekeeper;
2153	struct timekeeper *tk = &shadow_timekeeper;
2154	u64 offset;
2155	int shift = 0, maxshift;
2156	unsigned int clock_set = 0;
2157	unsigned long flags;
2158
2159	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2160
2161	/* Make sure we're fully resumed: */
2162	if (unlikely(timekeeping_suspended))
2163		goto out;
2164
2165	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
2166				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
2167
2168	/* Check if there's really nothing to do */
2169	if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
2170		goto out;
2171
2172	/* Do some additional sanity checking */
2173	timekeeping_check_update(tk, offset);
2174
2175	/*
2176	 * With NO_HZ we may have to accumulate many cycle_intervals
2177	 * (think "ticks") worth of time at once. To do this efficiently,
2178	 * we calculate the largest doubling multiple of cycle_intervals
2179	 * that is smaller than the offset.  We then accumulate that
2180	 * chunk in one go, and then try to consume the next smaller
2181	 * doubled multiple.
2182	 */
2183	shift = ilog2(offset) - ilog2(tk->cycle_interval);
2184	shift = max(0, shift);
2185	/* Bound shift to one less than what overflows tick_length */
2186	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
2187	shift = min(shift, maxshift);
2188	while (offset >= tk->cycle_interval) {
2189		offset = logarithmic_accumulation(tk, offset, shift,
2190							&clock_set);
2191		if (offset < tk->cycle_interval<<shift)
2192			shift--;
2193	}
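	/*
	 * Worked example (editorial illustration): with cycle_interval =
	 * 1000 cycles and offset = 7300 pending, the loop starts at
	 * shift = ilog2(7300) - ilog2(1000) = 12 - 9 = 3. The 8000-cycle
	 * chunk does not fit, so it consumes chunks of 4000, 2000 and
	 * 1000 cycles, leaving offset = 300 after four iterations
	 * instead of seven single-interval accumulations.
	 */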
2194
2195	/* Adjust the multiplier to correct NTP error */
2196	timekeeping_adjust(tk, offset);
2197
2198	/*
2199	 * Finally, make sure that after the rounding
2200	 * xtime_nsec isn't larger than NSEC_PER_SEC
2201	 */
2202	clock_set |= accumulate_nsecs_to_secs(tk);
2203
2204	write_seqcount_begin(&tk_core.seq);
2205	/*
2206	 * Update the real timekeeper.
2207	 *
2208	 * We could avoid this memcpy by switching pointers, but that
2209	 * requires changes to all other timekeeper usage sites as
2210	 * well, i.e. move the timekeeper pointer getter into the
2211	 * spinlocked/seqcount protected sections. And we trade this
2212	 * memcpy under the tk_core.seq against one before we start
2213	 * updating.
2214	 */
2215	timekeeping_update(tk, clock_set);
2216	memcpy(real_tk, tk, sizeof(*tk));
2217	/* The memcpy must come last. Do not put anything here! */
2218	write_seqcount_end(&tk_core.seq);
2219out:
2220	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2221
2222	return !!clock_set;
2223}
2224
2225/**
2226 * update_wall_time - Uses the current clocksource to increment the wall time
2227 *
2228 */
2229void update_wall_time(void)
2230{
2231	if (timekeeping_advance(TK_ADV_TICK))
2232		clock_was_set_delayed();
2233}
2234
2235/**
2236 * getboottime64 - Return the real time of system boot.
2237 * @ts:		pointer to the timespec64 to be set
2238 *
2239 * Returns the wall-time of boot in a timespec64.
2240 *
2241 * This is based on the wall_to_monotonic offset and the total suspend
2242 * time. Calls to settimeofday will affect the value returned (which
2243 * basically means that however wrong your real time clock is at boot time,
2244 * you get the right time here).
2245 */
2246void getboottime64(struct timespec64 *ts)
2247{
2248	struct timekeeper *tk = &tk_core.timekeeper;
2249	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2250
2251	*ts = ktime_to_timespec64(t);
2252}
2253EXPORT_SYMBOL_GPL(getboottime64);
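/*
 * Illustrative in-kernel usage sketch (an editorial assumption, not
 * part of this file):
 *
 *	struct timespec64 boot;
 *
 *	getboottime64(&boot);
 *	pr_info("system booted at %lld.%09ld UTC\n",
 *		(long long)boot.tv_sec, boot.tv_nsec);
 */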
2254
2255void ktime_get_coarse_real_ts64(struct timespec64 *ts)
2256{
2257	struct timekeeper *tk = &tk_core.timekeeper;
2258	unsigned int seq;
2259
2260	do {
2261		seq = read_seqcount_begin(&tk_core.seq);
2262
2263		*ts = tk_xtime(tk);
2264	} while (read_seqcount_retry(&tk_core.seq, seq));
2265}
2266EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
2267
2268void ktime_get_coarse_ts64(struct timespec64 *ts)
2269{
2270	struct timekeeper *tk = &tk_core.timekeeper;
2271	struct timespec64 now, mono;
2272	unsigned int seq;
2273
2274	do {
2275		seq = read_seqcount_begin(&tk_core.seq);
2276
2277		now = tk_xtime(tk);
2278		mono = tk->wall_to_monotonic;
2279	} while (read_seqcount_retry(&tk_core.seq, seq));
2280
2281	set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
2282				now.tv_nsec + mono.tv_nsec);
2283}
2284EXPORT_SYMBOL(ktime_get_coarse_ts64);
2285
2286/*
2287 * Must hold jiffies_lock
2288 */
2289void do_timer(unsigned long ticks)
2290{
2291	jiffies_64 += ticks;
2292	calc_global_load();
2293}
2294
2295/**
2296 * ktime_get_update_offsets_now - hrtimer helper
2297 * @cwsseq:	pointer to check and store the clock was set sequence number
2298 * @offs_real:	pointer to storage for monotonic -> realtime offset
2299 * @offs_boot:	pointer to storage for monotonic -> boottime offset
2300 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
2301 *
2302 * Returns current monotonic time and updates the offsets if the
2303 * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
2304 * different.
2305 *
2306 * Called from hrtimer_interrupt() or retrigger_next_event()
2307 */
2308ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2309				     ktime_t *offs_boot, ktime_t *offs_tai)
2310{
2311	struct timekeeper *tk = &tk_core.timekeeper;
2312	unsigned int seq;
2313	ktime_t base;
2314	u64 nsecs;
2315
2316	do {
2317		seq = read_seqcount_begin(&tk_core.seq);
2318
2319		base = tk->tkr_mono.base;
2320		nsecs = timekeeping_get_ns(&tk->tkr_mono);
2321		base = ktime_add_ns(base, nsecs);
2322
2323		if (*cwsseq != tk->clock_was_set_seq) {
2324			*cwsseq = tk->clock_was_set_seq;
2325			*offs_real = tk->offs_real;
2326			*offs_boot = tk->offs_boot;
2327			*offs_tai = tk->offs_tai;
2328		}
2329
2330		/* Handle leapsecond insertion adjustments */
2331		if (unlikely(base >= tk->next_leap_ktime))
2332			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
2333
2334	} while (read_seqcount_retry(&tk_core.seq, seq));
2335
2336	return base;
2337}
2338
2339/*
2340 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
2341 */
2342static int timekeeping_validate_timex(const struct __kernel_timex *txc)
2343{
2344	if (txc->modes & ADJ_ADJTIME) {
2345		/* singleshot must not be used with any other mode bits */
2346		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
2347			return -EINVAL;
2348		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
2349		    !capable(CAP_SYS_TIME))
2350			return -EPERM;
2351	} else {
2352		/* In order to modify anything, you gotta be super-user! */
2353		if (txc->modes && !capable(CAP_SYS_TIME))
2354			return -EPERM;
2355		/*
2356		 * if the quartz is off by more than 10% then
2357		 * something is VERY wrong!
2358		 */
2359		if (txc->modes & ADJ_TICK &&
2360		    (txc->tick <  900000/USER_HZ ||
2361		     txc->tick > 1100000/USER_HZ))
2362			return -EINVAL;
2363	}
2364
2365	if (txc->modes & ADJ_SETOFFSET) {
2366		/* In order to inject time, you gotta be super-user! */
2367		if (!capable(CAP_SYS_TIME))
2368			return -EPERM;
2369
2370		/*
2371		 * Validate if a timespec/timeval used to inject a time
2372		 * offset is valid.  Offsets can be positive or negative, so
2373		 * we don't check tv_sec. The value of the timeval/timespec
2374		 * is the sum of its fields, but *NOTE*:
2375		 * The field tv_usec/tv_nsec must always be non-negative and
2376		 * we can't have more nanoseconds/microseconds than a second.
2377		 */
2378		if (txc->time.tv_usec < 0)
2379			return -EINVAL;
2380
2381		if (txc->modes & ADJ_NANO) {
2382			if (txc->time.tv_usec >= NSEC_PER_SEC)
2383				return -EINVAL;
2384		} else {
2385			if (txc->time.tv_usec >= USEC_PER_SEC)
2386				return -EINVAL;
2387		}
2388	}
2389
2390	/*
2391	 * Check for potential multiplication overflows that can
2392	 * only happen on 64-bit systems:
2393	 */
2394	if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
2395		if (LLONG_MIN / PPM_SCALE > txc->freq)
2396			return -EINVAL;
2397		if (LLONG_MAX / PPM_SCALE < txc->freq)
2398			return -EINVAL;
2399	}
2400
2401	return 0;
2402}
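/*
 * Worked example (editorial illustration) of the ADJ_TICK bounds
 * above: with USER_HZ = 100 the nominal tick is 10000us, so any
 * txc->tick outside [9000, 11000]us (a correction of more than 10%)
 * is rejected with -EINVAL.
 */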
2403
2404/**
2405 * random_get_entropy_fallback - Returns the raw clock source value,
2406 * used by random.c for platforms with no valid random_get_entropy().
2407 */
2408unsigned long random_get_entropy_fallback(void)
2409{
2410	struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
2411	struct clocksource *clock = READ_ONCE(tkr->clock);
2412
2413	if (unlikely(timekeeping_suspended || !clock))
2414		return 0;
2415	return clock->read(clock);
2416}
2417EXPORT_SYMBOL_GPL(random_get_entropy_fallback);
2418
2419/**
2420 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
2421 */
2422int do_adjtimex(struct __kernel_timex *txc)
2423{
2424	struct timekeeper *tk = &tk_core.timekeeper;
2425	struct audit_ntp_data ad;
2426	bool clock_set = false;
2427	struct timespec64 ts;
2428	unsigned long flags;
2429	s32 orig_tai, tai;
2430	int ret;
2431
2432	/* Validate the data before disabling interrupts */
2433	ret = timekeeping_validate_timex(txc);
2434	if (ret)
2435		return ret;
2436	add_device_randomness(txc, sizeof(*txc));
2437
2438	if (txc->modes & ADJ_SETOFFSET) {
2439		struct timespec64 delta;
2440		delta.tv_sec  = txc->time.tv_sec;
2441		delta.tv_nsec = txc->time.tv_usec;
2442		if (!(txc->modes & ADJ_NANO))
2443			delta.tv_nsec *= 1000;
2444		ret = timekeeping_inject_offset(&delta);
2445		if (ret)
2446			return ret;
2447
2448		audit_tk_injoffset(delta);
2449	}
2450
2451	audit_ntp_init(&ad);
2452
2453	ktime_get_real_ts64(&ts);
2454	add_device_randomness(&ts, sizeof(ts));
2455
2456	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2457	write_seqcount_begin(&tk_core.seq);
2458
2459	orig_tai = tai = tk->tai_offset;
2460	ret = __do_adjtimex(txc, &ts, &tai, &ad);
2461
2462	if (tai != orig_tai) {
2463		__timekeeping_set_tai_offset(tk, tai);
2464		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
2465		clock_set = true;
2466	}
2467	tk_update_leap_state(tk);
2468
2469	write_seqcount_end(&tk_core.seq);
2470	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2471
2472	audit_ntp_log(&ad);
2473
2474	/* Update the multiplier immediately if frequency was set directly */
2475	if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
2476		clock_set |= timekeeping_advance(TK_ADV_FREQ);
2477
2478	if (clock_set)
2479		clock_was_set(CLOCK_REALTIME);
2480
2481	ntp_notify_cmos_timer();
2482
2483	return ret;
2484}
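/*
 * Illustrative userspace sketch (an editorial assumption, not part of
 * this file): stepping CLOCK_REALTIME forward by 500ms through the
 * ADJ_SETOFFSET path handled above, which requires CAP_SYS_TIME:
 *
 *	#include <stdio.h>
 *	#include <sys/timex.h>
 *
 *	struct timex txc = {
 *		.modes = ADJ_SETOFFSET | ADJ_NANO,
 *		.time  = { .tv_sec = 0, .tv_usec = 500000000 },
 *	};
 *
 *	if (adjtimex(&txc) < 0)
 *		perror("adjtimex");
 *
 * With ADJ_NANO set, time.tv_usec carries nanoseconds, which is why
 * timekeeping_validate_timex() checks it against NSEC_PER_SEC in that
 * case.
 */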
2485
2486#ifdef CONFIG_NTP_PPS
2487/**
2488 * hardpps() - Accessor function to NTP __hardpps function
2489 */
2490void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
2491{
2492	unsigned long flags;
2493
2494	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2495	write_seqcount_begin(&tk_core.seq);
2496
2497	__hardpps(phase_ts, raw_ts);
2498
2499	write_seqcount_end(&tk_core.seq);
2500	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2501}
2502EXPORT_SYMBOL(hardpps);
2503#endif /* CONFIG_NTP_PPS */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
v4.6
 
   1/*
   2 *  linux/kernel/time/timekeeping.c
   3 *
   4 *  Kernel timekeeping code and accessor functions
   5 *
   6 *  This code was moved from linux/kernel/timer.c.
   7 *  Please see that file for copyright and history logs.
   8 *
   9 */
  10
  11#include <linux/timekeeper_internal.h>
  12#include <linux/module.h>
  13#include <linux/interrupt.h>
  14#include <linux/percpu.h>
  15#include <linux/init.h>
  16#include <linux/mm.h>
 
  17#include <linux/sched.h>
 
 
  18#include <linux/syscore_ops.h>
  19#include <linux/clocksource.h>
  20#include <linux/jiffies.h>
  21#include <linux/time.h>
 
  22#include <linux/tick.h>
  23#include <linux/stop_machine.h>
  24#include <linux/pvclock_gtod.h>
  25#include <linux/compiler.h>
 
 
  26
  27#include "tick-internal.h"
  28#include "ntp_internal.h"
  29#include "timekeeping_internal.h"
  30
  31#define TK_CLEAR_NTP		(1 << 0)
  32#define TK_MIRROR		(1 << 1)
  33#define TK_CLOCK_WAS_SET	(1 << 2)
  34
 
 
 
 
 
 
 
 
 
 
  35/*
  36 * The most important data for readout fits into a single 64 byte
  37 * cache line.
  38 */
  39static struct {
  40	seqcount_t		seq;
  41	struct timekeeper	timekeeper;
  42} tk_core ____cacheline_aligned;
 
 
  43
  44static DEFINE_RAW_SPINLOCK(timekeeper_lock);
  45static struct timekeeper shadow_timekeeper;
  46
 
 
 
  47/**
  48 * struct tk_fast - NMI safe timekeeper
  49 * @seq:	Sequence counter for protecting updates. The lowest bit
  50 *		is the index for the tk_read_base array
  51 * @base:	tk_read_base array. Access is indexed by the lowest bit of
  52 *		@seq.
  53 *
  54 * See @update_fast_timekeeper() below.
  55 */
  56struct tk_fast {
  57	seqcount_t		seq;
  58	struct tk_read_base	base[2];
  59};
  60
  61static struct tk_fast tk_fast_mono ____cacheline_aligned;
  62static struct tk_fast tk_fast_raw  ____cacheline_aligned;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  63
  64/* flag for if timekeeping is suspended */
  65int __read_mostly timekeeping_suspended;
 
 
 
  66
  67static inline void tk_normalize_xtime(struct timekeeper *tk)
  68{
  69	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
  70		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
  71		tk->xtime_sec++;
  72	}
 
 
 
 
  73}
  74
  75static inline struct timespec64 tk_xtime(struct timekeeper *tk)
  76{
  77	struct timespec64 ts;
  78
  79	ts.tv_sec = tk->xtime_sec;
  80	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
  81	return ts;
  82}
  83
  84static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
  85{
  86	tk->xtime_sec = ts->tv_sec;
  87	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
  88}
  89
  90static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
  91{
  92	tk->xtime_sec += ts->tv_sec;
  93	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
  94	tk_normalize_xtime(tk);
  95}
  96
  97static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
  98{
  99	struct timespec64 tmp;
 100
 101	/*
 102	 * Verify consistency of: offset_real = -wall_to_monotonic
 103	 * before modifying anything
 104	 */
 105	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
 106					-tk->wall_to_monotonic.tv_nsec);
 107	WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
 108	tk->wall_to_monotonic = wtm;
 109	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
 110	tk->offs_real = timespec64_to_ktime(tmp);
 111	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
 112}
 113
 114static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 115{
 116	tk->offs_boot = ktime_add(tk->offs_boot, delta);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 117}
 118
 119#ifdef CONFIG_DEBUG_TIMEKEEPING
 120#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
 121
 122static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
 123{
 124
 125	cycle_t max_cycles = tk->tkr_mono.clock->max_cycles;
 126	const char *name = tk->tkr_mono.clock->name;
 127
 128	if (offset > max_cycles) {
 129		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
 130				offset, name, max_cycles);
 131		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
 132	} else {
 133		if (offset > (max_cycles >> 1)) {
 134			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
 135					offset, name, max_cycles >> 1);
 136			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
 137		}
 138	}
 139
 140	if (tk->underflow_seen) {
 141		if (jiffies - tk->last_warning > WARNING_FREQ) {
 142			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
 143			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
 144			printk_deferred("         Your kernel is probably still fine.\n");
 145			tk->last_warning = jiffies;
 146		}
 147		tk->underflow_seen = 0;
 148	}
 149
 150	if (tk->overflow_seen) {
 151		if (jiffies - tk->last_warning > WARNING_FREQ) {
 152			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
 153			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
 154			printk_deferred("         Your kernel is probably still fine.\n");
 155			tk->last_warning = jiffies;
 156		}
 157		tk->overflow_seen = 0;
 158	}
 159}
 160
 161static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
 162{
 163	struct timekeeper *tk = &tk_core.timekeeper;
 164	cycle_t now, last, mask, max, delta;
 165	unsigned int seq;
 166
 167	/*
 168	 * Since we're called holding a seqlock, the data may shift
 169	 * under us while we're doing the calculation. This can cause
 170	 * false positives, since we'd note a problem but throw the
 171	 * results away. So nest another seqlock here to atomically
 172	 * grab the points we are checking with.
 173	 */
 174	do {
 175		seq = read_seqcount_begin(&tk_core.seq);
 176		now = tkr->read(tkr->clock);
 177		last = tkr->cycle_last;
 178		mask = tkr->mask;
 179		max = tkr->clock->max_cycles;
 180	} while (read_seqcount_retry(&tk_core.seq, seq));
 181
 182	delta = clocksource_delta(now, last, mask);
 183
 184	/*
 185	 * Try to catch underflows by checking if we are seeing small
 186	 * mask-relative negative values.
 187	 */
 188	if (unlikely((~delta & mask) < (mask >> 3))) {
 189		tk->underflow_seen = 1;
 190		delta = 0;
 191	}
 192
 193	/* Cap delta value to the max_cycles values to avoid mult overflows */
 194	if (unlikely(delta > max)) {
 195		tk->overflow_seen = 1;
 196		delta = tkr->clock->max_cycles;
 197	}
 198
 199	return delta;
 200}
 201#else
 202static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
 203{
 204}
 205static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
 206{
 207	cycle_t cycle_now, delta;
 208
 209	/* read clocksource */
 210	cycle_now = tkr->read(tkr->clock);
 211
 212	/* calculate the delta since the last update_wall_time */
 213	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
 214
 215	return delta;
 216}
 217#endif
 218
 219/**
 220 * tk_setup_internals - Set up internals to use clocksource clock.
 221 *
 222 * @tk:		The target timekeeper to setup.
 223 * @clock:		Pointer to clocksource.
 224 *
 225 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 226 * pair and interval request.
 227 *
 228 * Unless you're the timekeeping code, you should not be using this!
 229 */
 230static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 231{
 232	cycle_t interval;
 233	u64 tmp, ntpinterval;
 234	struct clocksource *old_clock;
 235
 236	++tk->cs_was_changed_seq;
 237	old_clock = tk->tkr_mono.clock;
 238	tk->tkr_mono.clock = clock;
 239	tk->tkr_mono.read = clock->read;
 240	tk->tkr_mono.mask = clock->mask;
 241	tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
 242
 243	tk->tkr_raw.clock = clock;
 244	tk->tkr_raw.read = clock->read;
 245	tk->tkr_raw.mask = clock->mask;
 246	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
 247
 248	/* Do the ns -> cycle conversion first, using original mult */
 249	tmp = NTP_INTERVAL_LENGTH;
 250	tmp <<= clock->shift;
 251	ntpinterval = tmp;
 252	tmp += clock->mult/2;
 253	do_div(tmp, clock->mult);
 254	if (tmp == 0)
 255		tmp = 1;
 256
 257	interval = (cycle_t) tmp;
 258	tk->cycle_interval = interval;
 259
 260	/* Go back from cycles -> shifted ns */
 261	tk->xtime_interval = (u64) interval * clock->mult;
 262	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
 263	tk->raw_interval =
 264		((u64) interval * clock->mult) >> clock->shift;
 265
 266	 /* if changing clocks, convert xtime_nsec shift units */
 267	if (old_clock) {
 268		int shift_change = clock->shift - old_clock->shift;
 269		if (shift_change < 0)
 270			tk->tkr_mono.xtime_nsec >>= -shift_change;
 271		else
 
 272			tk->tkr_mono.xtime_nsec <<= shift_change;
 
 
 273	}
 274	tk->tkr_raw.xtime_nsec = 0;
 275
 276	tk->tkr_mono.shift = clock->shift;
 277	tk->tkr_raw.shift = clock->shift;
 278
 279	tk->ntp_error = 0;
 280	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
 281	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
 282
 283	/*
 284	 * The timekeeper keeps its own mult values for the currently
 285	 * active clocksource. These value will be adjusted via NTP
 286	 * to counteract clock drifting.
 287	 */
 288	tk->tkr_mono.mult = clock->mult;
 289	tk->tkr_raw.mult = clock->mult;
 290	tk->ntp_err_mult = 0;
 
 291}
 292
 293/* Timekeeper helper functions. */
 294
 295#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 296static u32 default_arch_gettimeoffset(void) { return 0; }
 297u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
 298#else
 299static inline u32 arch_gettimeoffset(void) { return 0; }
 300#endif
 301
 302static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
 303					  cycle_t delta)
 304{
 305	s64 nsec;
 306
 307	nsec = delta * tkr->mult + tkr->xtime_nsec;
 308	nsec >>= tkr->shift;
 309
 310	/* If arch requires, add in get_arch_timeoffset() */
 311	return nsec + arch_gettimeoffset();
 312}
 313
 314static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
 315{
 316	cycle_t delta;
 317
 318	delta = timekeeping_get_delta(tkr);
 319	return timekeeping_delta_to_ns(tkr, delta);
 320}
 321
 322static inline s64 timekeeping_cycles_to_ns(struct tk_read_base *tkr,
 323					    cycle_t cycles)
 324{
 325	cycle_t delta;
 326
 327	/* calculate the delta since the last update_wall_time */
 328	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
 329	return timekeeping_delta_to_ns(tkr, delta);
 330}
 331
 332/**
 333 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 334 * @tkr: Timekeeping readout base from which we take the update
 
 335 *
 336 * We want to use this from any context including NMI and tracing /
 337 * instrumenting the timekeeping code itself.
 338 *
 339 * Employ the latch technique; see @raw_write_seqcount_latch.
 340 *
 341 * So if a NMI hits the update of base[0] then it will use base[1]
 342 * which is still consistent. In the worst case this can result is a
 343 * slightly wrong timestamp (a few nanoseconds). See
 344 * @ktime_get_mono_fast_ns.
 345 */
 346static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
 
 347{
 348	struct tk_read_base *base = tkf->base;
 349
 350	/* Force readers off to base[1] */
 351	raw_write_seqcount_latch(&tkf->seq);
 352
 353	/* Update base[0] */
 354	memcpy(base, tkr, sizeof(*base));
 355
 356	/* Force readers back to base[0] */
 357	raw_write_seqcount_latch(&tkf->seq);
 358
 359	/* Update base[1] */
 360	memcpy(base + 1, base, sizeof(*base));
 361}
 362
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 363/**
 364 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 365 *
 366 * This timestamp is not guaranteed to be monotonic across an update.
 367 * The timestamp is calculated by:
 368 *
 369 *	now = base_mono + clock_delta * slope
 370 *
 371 * So if the update lowers the slope, readers who are forced to the
 372 * not yet updated second array are still using the old steeper slope.
 373 *
 374 * tmono
 375 * ^
 376 * |    o  n
 377 * |   o n
 378 * |  u
 379 * | o
 380 * |o
 381 * |12345678---> reader order
 382 *
 383 * o = old slope
 384 * u = update
 385 * n = new slope
 386 *
 387 * So reader 6 will observe time going backwards versus reader 5.
 388 *
 389 * While other CPUs are likely to be able observe that, the only way
 390 * for a CPU local observation is when an NMI hits in the middle of
 391 * the update. Timestamps taken from that NMI context might be ahead
 392 * of the following timestamps. Callers need to be aware of that and
 393 * deal with it.
 394 */
 395static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 396{
 397	struct tk_read_base *tkr;
 
 398	unsigned int seq;
 399	u64 now;
 400
 401	do {
 402		seq = raw_read_seqcount_latch(&tkf->seq);
 403		tkr = tkf->base + (seq & 0x01);
 404		now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
 405	} while (read_seqcount_retry(&tkf->seq, seq));
 
 
 406
 407	return now;
 
 
 408}
 409
 410u64 ktime_get_mono_fast_ns(void)
 
 
 
 
 
 411{
 412	return __ktime_get_fast_ns(&tk_fast_mono);
 413}
 414EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
 415
 416u64 ktime_get_raw_fast_ns(void)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 417{
 418	return __ktime_get_fast_ns(&tk_fast_raw);
 419}
 420EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
 421
 422/* Suspend-time cycles value for halted fast timekeeper. */
 423static cycle_t cycles_at_suspend;
 424
 425static cycle_t dummy_clock_read(struct clocksource *cs)
 426{
 427	return cycles_at_suspend;
 428}
 429
 430/**
 431 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 432 * @tk: Timekeeper to snapshot.
 433 *
 434 * It generally is unsafe to access the clocksource after timekeeping has been
 435 * suspended, so take a snapshot of the readout base of @tk and use it as the
 436 * fast timekeeper's readout base while suspended.  It will return the same
 437 * number of cycles every time until timekeeping is resumed at which time the
 438 * proper readout base for the fast timekeeper will be restored automatically.
 439 */
 440static void halt_fast_timekeeper(struct timekeeper *tk)
 441{
 442	static struct tk_read_base tkr_dummy;
 443	struct tk_read_base *tkr = &tk->tkr_mono;
 444
 445	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
 446	cycles_at_suspend = tkr->read(tkr->clock);
 447	tkr_dummy.read = dummy_clock_read;
 
 448	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
 449
 450	tkr = &tk->tkr_raw;
 451	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
 452	tkr_dummy.read = dummy_clock_read;
 453	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 454}
 455
 456#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
 457
 458static inline void update_vsyscall(struct timekeeper *tk)
 459{
 460	struct timespec xt, wm;
 461
 462	xt = timespec64_to_timespec(tk_xtime(tk));
 463	wm = timespec64_to_timespec(tk->wall_to_monotonic);
 464	update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
 465			    tk->tkr_mono.cycle_last);
 466}
 467
 468static inline void old_vsyscall_fixup(struct timekeeper *tk)
 469{
 470	s64 remainder;
 471
 472	/*
 473	* Store only full nanoseconds into xtime_nsec after rounding
 474	* it up and add the remainder to the error difference.
 475	* XXX - This is necessary to avoid small 1ns inconsistnecies caused
 476	* by truncating the remainder in vsyscalls. However, it causes
 477	* additional work to be done in timekeeping_adjust(). Once
 478	* the vsyscall implementations are converted to use xtime_nsec
 479	* (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
 480	* users are removed, this can be killed.
 481	*/
 482	remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
 483	tk->tkr_mono.xtime_nsec -= remainder;
 484	tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
 485	tk->ntp_error += remainder << tk->ntp_error_shift;
 486	tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
 487}
 488#else
 489#define old_vsyscall_fixup(tk)
 490#endif
 491
 492static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
 493
 494static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
 495{
 496	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
 497}
 498
 499/**
 500 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 
 501 */
 502int pvclock_gtod_register_notifier(struct notifier_block *nb)
 503{
 504	struct timekeeper *tk = &tk_core.timekeeper;
 505	unsigned long flags;
 506	int ret;
 507
 508	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 509	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
 510	update_pvclock_gtod(tk, true);
 511	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 512
 513	return ret;
 514}
 515EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
 516
 517/**
 518 * pvclock_gtod_unregister_notifier - unregister a pvclock
 519 * timedata update listener
 
 520 */
 521int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
 522{
 523	unsigned long flags;
 524	int ret;
 525
 526	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 527	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
 528	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 529
 530	return ret;
 531}
 532EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
 533
 534/*
 535 * tk_update_leap_state - helper to update the next_leap_ktime
 536 */
 537static inline void tk_update_leap_state(struct timekeeper *tk)
 538{
 539	tk->next_leap_ktime = ntp_get_next_leap();
 540	if (tk->next_leap_ktime.tv64 != KTIME_MAX)
 541		/* Convert to monotonic time */
 542		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
 543}
 544
 545/*
 546 * Update the ktime_t based scalar nsec members of the timekeeper
 547 */
 548static inline void tk_update_ktime_data(struct timekeeper *tk)
 549{
 550	u64 seconds;
 551	u32 nsec;
 552
 553	/*
 554	 * The xtime based monotonic readout is:
 555	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
 556	 * The ktime based monotonic readout is:
 557	 *	nsec = base_mono + now();
 558	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
 559	 */
 560	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
 561	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
 562	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 563
 564	/* Update the monotonic raw base */
 565	tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);
 566
 567	/*
 568	 * The sum of the nanoseconds portions of xtime and
 569	 * wall_to_monotonic can be greater/equal one second. Take
 570	 * this into account before updating tk->ktime_sec.
 571	 */
 572	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
 573	if (nsec >= NSEC_PER_SEC)
 574		seconds++;
 575	tk->ktime_sec = seconds;
 
 
 
 576}
 577
 578/* must hold timekeeper_lock */
 579static void timekeeping_update(struct timekeeper *tk, unsigned int action)
 580{
 581	if (action & TK_CLEAR_NTP) {
 582		tk->ntp_error = 0;
 583		ntp_clear();
 584	}
 585
 586	tk_update_leap_state(tk);
 587	tk_update_ktime_data(tk);
 588
 589	update_vsyscall(tk);
 590	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
 591
 
 592	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
 593	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);
 594
 595	if (action & TK_CLOCK_WAS_SET)
 596		tk->clock_was_set_seq++;
 597	/*
 598	 * The mirroring of the data to the shadow-timekeeper needs
 599	 * to happen last here to ensure we don't over-write the
 600	 * timekeeper structure on the next update with stale data
 601	 */
 602	if (action & TK_MIRROR)
 603		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
 604		       sizeof(tk_core.timekeeper));
 605}
 606
 607/**
 608 * timekeeping_forward_now - update clock to the current time
 
 609 *
 610 * Forward the current clock to update its state since the last call to
 611 * update_wall_time(). This is useful before significant clock changes,
 612 * as it avoids having to deal with this time offset explicitly.
 613 */
 614static void timekeeping_forward_now(struct timekeeper *tk)
 615{
 616	struct clocksource *clock = tk->tkr_mono.clock;
 617	cycle_t cycle_now, delta;
 618	s64 nsec;
 619
 620	cycle_now = tk->tkr_mono.read(clock);
 621	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 622	tk->tkr_mono.cycle_last = cycle_now;
 623	tk->tkr_raw.cycle_last  = cycle_now;
 624
 625	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
 626
 627	/* If arch requires, add in get_arch_timeoffset() */
 628	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
 629
 630	tk_normalize_xtime(tk);
 631
 632	nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
 633	timespec64_add_ns(&tk->raw_time, nsec);
 634}
 635
 636/**
 637 * __getnstimeofday64 - Returns the time of day in a timespec64.
 638 * @ts:		pointer to the timespec to be set
 639 *
 640 * Updates the time of day in the timespec.
 641 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 642 */
 643int __getnstimeofday64(struct timespec64 *ts)
 644{
 645	struct timekeeper *tk = &tk_core.timekeeper;
 646	unsigned long seq;
 647	s64 nsecs = 0;
 
 
 648
 649	do {
 650		seq = read_seqcount_begin(&tk_core.seq);
 651
 652		ts->tv_sec = tk->xtime_sec;
 653		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 654
 655	} while (read_seqcount_retry(&tk_core.seq, seq));
 656
 657	ts->tv_nsec = 0;
 658	timespec64_add_ns(ts, nsecs);
 659
 660	/*
 661	 * Do not bail out early, in case there were callers still using
 662	 * the value, even in the face of the WARN_ON.
 663	 */
 664	if (unlikely(timekeeping_suspended))
 665		return -EAGAIN;
 666	return 0;
 667}
 668EXPORT_SYMBOL(__getnstimeofday64);
 669
 670/**
 671 * getnstimeofday64 - Returns the time of day in a timespec64.
 672 * @ts:		pointer to the timespec64 to be set
 673 *
 674 * Returns the time of day in a timespec64 (WARN if suspended).
 675 */
 676void getnstimeofday64(struct timespec64 *ts)
 677{
 678	WARN_ON(__getnstimeofday64(ts));
 679}
 680EXPORT_SYMBOL(getnstimeofday64);
 681
 682ktime_t ktime_get(void)
 683{
 684	struct timekeeper *tk = &tk_core.timekeeper;
 685	unsigned int seq;
 686	ktime_t base;
 687	s64 nsecs;
 688
 689	WARN_ON(timekeeping_suspended);
 690
 691	do {
 692		seq = read_seqcount_begin(&tk_core.seq);
 693		base = tk->tkr_mono.base;
 694		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 695
 696	} while (read_seqcount_retry(&tk_core.seq, seq));
 697
 698	return ktime_add_ns(base, nsecs);
 699}
 700EXPORT_SYMBOL_GPL(ktime_get);
 701
 702u32 ktime_get_resolution_ns(void)
 703{
 704	struct timekeeper *tk = &tk_core.timekeeper;
 705	unsigned int seq;
 706	u32 nsecs;
 707
 708	WARN_ON(timekeeping_suspended);
 709
 710	do {
 711		seq = read_seqcount_begin(&tk_core.seq);
 712		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
 713	} while (read_seqcount_retry(&tk_core.seq, seq));
 714
 715	return nsecs;
 716}
 717EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
 718
 719static ktime_t *offsets[TK_OFFS_MAX] = {
 720	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
 721	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
 722	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
 723};
 724
 725ktime_t ktime_get_with_offset(enum tk_offsets offs)
 726{
 727	struct timekeeper *tk = &tk_core.timekeeper;
 728	unsigned int seq;
 729	ktime_t base, *offset = offsets[offs];
 730	s64 nsecs;
 731
 732	WARN_ON(timekeeping_suspended);
 733
 734	do {
 735		seq = read_seqcount_begin(&tk_core.seq);
 736		base = ktime_add(tk->tkr_mono.base, *offset);
 737		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 738
 739	} while (read_seqcount_retry(&tk_core.seq, seq));
 740
 741	return ktime_add_ns(base, nsecs);
 742
 743}
 744EXPORT_SYMBOL_GPL(ktime_get_with_offset);
 745
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 746/**
 747 * ktime_mono_to_any() - convert mononotic time to any other time
 748 * @tmono:	time to convert.
 749 * @offs:	which offset to use
 750 */
 751ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
 752{
 753	ktime_t *offset = offsets[offs];
 754	unsigned long seq;
 755	ktime_t tconv;
 756
 757	do {
 758		seq = read_seqcount_begin(&tk_core.seq);
 759		tconv = ktime_add(tmono, *offset);
 760	} while (read_seqcount_retry(&tk_core.seq, seq));
 761
 762	return tconv;
 763}
 764EXPORT_SYMBOL_GPL(ktime_mono_to_any);
 765
 766/**
 767 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 768 */
 769ktime_t ktime_get_raw(void)
 770{
 771	struct timekeeper *tk = &tk_core.timekeeper;
 772	unsigned int seq;
 773	ktime_t base;
 774	s64 nsecs;
 775
 776	do {
 777		seq = read_seqcount_begin(&tk_core.seq);
 778		base = tk->tkr_raw.base;
 779		nsecs = timekeeping_get_ns(&tk->tkr_raw);
 780
 781	} while (read_seqcount_retry(&tk_core.seq, seq));
 782
 783	return ktime_add_ns(base, nsecs);
 784}
 785EXPORT_SYMBOL_GPL(ktime_get_raw);
 786
 787/**
 788 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 789 * @ts:		pointer to timespec variable
 790 *
 791 * The function calculates the monotonic clock from the realtime
 792 * clock and the wall_to_monotonic offset and stores the result
 793 * in normalized timespec64 format in the variable pointed to by @ts.
 794 */
 795void ktime_get_ts64(struct timespec64 *ts)
 796{
 797	struct timekeeper *tk = &tk_core.timekeeper;
 798	struct timespec64 tomono;
 799	s64 nsec;
 800	unsigned int seq;
 
 801
 802	WARN_ON(timekeeping_suspended);
 803
 804	do {
 805		seq = read_seqcount_begin(&tk_core.seq);
 806		ts->tv_sec = tk->xtime_sec;
 807		nsec = timekeeping_get_ns(&tk->tkr_mono);
 808		tomono = tk->wall_to_monotonic;
 809
 810	} while (read_seqcount_retry(&tk_core.seq, seq));
 811
 812	ts->tv_sec += tomono.tv_sec;
 813	ts->tv_nsec = 0;
 814	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
 815}
 816EXPORT_SYMBOL_GPL(ktime_get_ts64);
 817
 818/**
 819 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 820 *
 821 * Returns the seconds portion of CLOCK_MONOTONIC with a single non
 822 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
 823 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 824 * covers ~136 years of uptime which should be enough to prevent
 825 * premature wrap arounds.
 826 */
 827time64_t ktime_get_seconds(void)
 828{
 829	struct timekeeper *tk = &tk_core.timekeeper;
 830
 831	WARN_ON(timekeeping_suspended);
 832	return tk->ktime_sec;
 833}
 834EXPORT_SYMBOL_GPL(ktime_get_seconds);
 835
 836/**
 837 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 838 *
 839 * Returns the wall clock seconds since 1970. This replaces the
 840 * get_seconds() interface which is not y2038 safe on 32bit systems.
 841 *
 842 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
 843 * 32bit systems the access must be protected with the sequence
 844 * counter to provide "atomic" access to the 64bit tk->xtime_sec
 845 * value.
 846 */
 847time64_t ktime_get_real_seconds(void)
 848{
 849	struct timekeeper *tk = &tk_core.timekeeper;
 850	time64_t seconds;
 851	unsigned int seq;
 852
 853	if (IS_ENABLED(CONFIG_64BIT))
 854		return tk->xtime_sec;
 855
 856	do {
 857		seq = read_seqcount_begin(&tk_core.seq);
 858		seconds = tk->xtime_sec;
 859
 860	} while (read_seqcount_retry(&tk_core.seq, seq));
 861
 862	return seconds;
 863}
 864EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
 865
 866/**
 867 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
 868 * but without the sequence counter protect. This internal function
 869 * is called just when timekeeping lock is already held.
 870 */
 871time64_t __ktime_get_real_seconds(void)
 872{
 873	struct timekeeper *tk = &tk_core.timekeeper;
 874
 875	return tk->xtime_sec;
 876}
 877
 878/**
 879 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
 880 * @systime_snapshot:	pointer to struct receiving the system time snapshot
 881 */
 882void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
 883{
 884	struct timekeeper *tk = &tk_core.timekeeper;
 885	unsigned long seq;
 886	ktime_t base_raw;
 887	ktime_t base_real;
 888	s64 nsec_raw;
 889	s64 nsec_real;
 890	cycle_t now;
 891
 892	WARN_ON_ONCE(timekeeping_suspended);
 893
 894	do {
 895		seq = read_seqcount_begin(&tk_core.seq);
 896
 897		now = tk->tkr_mono.read(tk->tkr_mono.clock);
 898		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
 899		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
 900		base_real = ktime_add(tk->tkr_mono.base,
 901				      tk_core.timekeeper.offs_real);
 902		base_raw = tk->tkr_raw.base;
 903		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
 904		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
 905	} while (read_seqcount_retry(&tk_core.seq, seq));
 906
 907	systime_snapshot->cycles = now;
 908	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
 909	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
 910}
 911EXPORT_SYMBOL_GPL(ktime_get_snapshot);
 912
 913/* Scale base by mult/div checking for overflow */
 914static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
 915{
 916	u64 tmp, rem;
 917
 918	tmp = div64_u64_rem(*base, div, &rem);
 919
 920	if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
 921	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
 922		return -EOVERFLOW;
 923	tmp *= mult;
 924	rem *= mult;
 925
 926	do_div(rem, div);
 927	*base = tmp + rem;
 928	return 0;
 929}
 930
 931/**
 932 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
 933 * @history:			Snapshot representing start of history
 934 * @partial_history_cycles:	Cycle offset into history (fractional part)
 935 * @total_history_cycles:	Total history length in cycles
 936 * @discontinuity:		True indicates the clock was set during the history period
 937 * @ts:				Cross timestamp that should be adjusted using
 938 *	partial/total ratio
 939 *
 940 * Helper function used by get_device_system_crosststamp() to correct the
 941 * crosstimestamp corresponding to the start of the current interval to the
 942 * system counter value (timestamp point) provided by the driver. The
 943 * total_history_* quantities are the total history starting at the provided
 944 * reference point and ending at the start of the current interval. The cycle
 945 * count between the driver timestamp point and the start of the current
 946 * interval is partial_history_cycles.
 947 */
 948static int adjust_historical_crosststamp(struct system_time_snapshot *history,
 949					 cycle_t partial_history_cycles,
 950					 cycle_t total_history_cycles,
 951					 bool discontinuity,
 952					 struct system_device_crosststamp *ts)
 953{
 954	struct timekeeper *tk = &tk_core.timekeeper;
 955	u64 corr_raw, corr_real;
 956	bool interp_forward;
 957	int ret;
 958
 959	if (total_history_cycles == 0 || partial_history_cycles == 0)
 960		return 0;
 961
 962	/* Interpolate shortest distance from beginning or end of history */
 963	interp_forward = partial_history_cycles > total_history_cycles / 2;
 964	partial_history_cycles = interp_forward ?
 965		total_history_cycles - partial_history_cycles :
 966		partial_history_cycles;
 968
 969	/*
 970	 * Scale the monotonic raw time delta by:
 971	 *	partial_history_cycles / total_history_cycles
 972	 */
 973	corr_raw = (u64)ktime_to_ns(
 974		ktime_sub(ts->sys_monoraw, history->raw));
 975	ret = scale64_check_overflow(partial_history_cycles,
 976				     total_history_cycles, &corr_raw);
 977	if (ret)
 978		return ret;
 979
 980	/*
 981	 * If there is a discontinuity in the history, scale monotonic raw
 982	 *	correction by:
 983	 *	mult(real)/mult(raw) yielding the realtime correction
 984	 * Otherwise, calculate the realtime correction similar to monotonic
 985	 *	raw calculation
 986	 */
 987	if (discontinuity) {
 988		corr_real = mul_u64_u32_div
 989			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
 990	} else {
 991		corr_real = (u64)ktime_to_ns(
 992			ktime_sub(ts->sys_realtime, history->real));
 993		ret = scale64_check_overflow(partial_history_cycles,
 994					     total_history_cycles, &corr_real);
 995		if (ret)
 996			return ret;
 997	}
 998
 999	/* Fixup monotonic raw and real time time values */
1000	if (interp_forward) {
1001		ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
1002		ts->sys_realtime = ktime_add_ns(history->real, corr_real);
1003	} else {
1004		ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
1005		ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
1006	}
1007
1008	return 0;
1009}
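A small numeric sketch of that interpolation, with made-up values: if the
device timestamp sits a quarter of the way into the history window, only a
quarter of the observed raw-time delta is applied on top of the snapshot
taken at the start of the history:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total_history_cycles = 40;
	uint64_t partial_history_cycles = 10;	/* <= total/2: interpolate forward */
	uint64_t history_raw_ns = 1000000;	/* raw clock at start of history */
	uint64_t now_raw_ns = 1800000;		/* raw clock at interval start */

	uint64_t corr = (now_raw_ns - history_raw_ns) *
			partial_history_cycles / total_history_cycles;

	/* 1000000 + 800000 * 10/40 = 1200000 */
	printf("interpolated raw time: %llu ns\n",
	       (unsigned long long)(history_raw_ns + corr));
	return 0;
}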
1010
1011/*
1012 * cycle_between - true if test occurs chronologically between before and after
1013 */
1014static bool cycle_between(cycle_t before, cycle_t test, cycle_t after)
1015{
1016	if (test > before && test < after)
1017		return true;
1018	if (test < before && before > after)
1019		return true;
1020	return false;
1021}
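A userspace sketch of the wraparound case the second test handles, shrunk
to an 8-bit counter so the wrap is easy to see (demo_ names are
illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool demo_cycle_between(uint8_t before, uint8_t test, uint8_t after)
{
	if (test > before && test < after)
		return true;
	if (test < before && before > after)	/* counter wrapped */
		return true;
	return false;
}

int main(void)
{
	assert(demo_cycle_between(10, 20, 30));		/* plain ordering */
	assert(demo_cycle_between(250, 3, 5));		/* wrapped after "before" */
	assert(!demo_cycle_between(10, 40, 30));	/* outside the window */
	return 0;
}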
1022
1023/**
1024 * get_device_system_crosststamp - Synchronously capture system/device timestamp
1025 * @get_time_fn:	Callback to get simultaneous device time and
1026 *	system counter from the device driver
1027 * @ctx:		Context passed to get_time_fn()
1028 * @history_begin:	Historical reference point used to interpolate system
1029 *	time when counter provided by the driver is before the current interval
1030 * @xtstamp:		Receives simultaneously captured system and device time
1031 *
1032 * Reads a timestamp from a device and correlates it to system time
1033 */
1034int get_device_system_crosststamp(int (*get_time_fn)
1035				  (ktime_t *device_time,
1036				   struct system_counterval_t *sys_counterval,
1037				   void *ctx),
1038				  void *ctx,
1039				  struct system_time_snapshot *history_begin,
1040				  struct system_device_crosststamp *xtstamp)
1041{
1042	struct system_counterval_t system_counterval;
1043	struct timekeeper *tk = &tk_core.timekeeper;
1044	cycle_t cycles, now, interval_start;
1045	unsigned int clock_was_set_seq = 0;
1046	ktime_t base_real, base_raw;
1047	s64 nsec_real, nsec_raw;
1048	u8 cs_was_changed_seq;
1049	unsigned long seq;
1050	bool do_interp;
1051	int ret;
1052
1053	do {
1054		seq = read_seqcount_begin(&tk_core.seq);
1055		/*
 1056		 * Try to synchronously capture device time and a system
 1057		 * counter value by calling back into the device driver
1058		 */
1059		ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
1060		if (ret)
1061			return ret;
1062
1063		/*
1064		 * Verify that the clocksource associated with the captured
1065		 * system counter value is the same as the currently installed
1066		 * timekeeper clocksource
1067		 */
1068		if (tk->tkr_mono.clock != system_counterval.cs)
1069			return -ENODEV;
1070		cycles = system_counterval.cycles;
1071
1072		/*
1073		 * Check whether the system counter value provided by the
 1074		 * device driver falls within the current timekeeping interval.
1075		 */
1076		now = tk->tkr_mono.read(tk->tkr_mono.clock);
1077		interval_start = tk->tkr_mono.cycle_last;
1078		if (!cycle_between(interval_start, cycles, now)) {
1079			clock_was_set_seq = tk->clock_was_set_seq;
1080			cs_was_changed_seq = tk->cs_was_changed_seq;
1081			cycles = interval_start;
1082			do_interp = true;
1083		} else {
1084			do_interp = false;
1085		}
1086
1087		base_real = ktime_add(tk->tkr_mono.base,
1088				      tk_core.timekeeper.offs_real);
1089		base_raw = tk->tkr_raw.base;
1090
1091		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
1092						     system_counterval.cycles);
1093		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
1094						    system_counterval.cycles);
1095	} while (read_seqcount_retry(&tk_core.seq, seq));
1096
1097	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
1098	xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);
1099
1100	/*
1101	 * Interpolate if necessary, adjusting back from the start of the
1102	 * current interval
1103	 */
1104	if (do_interp) {
1105		cycle_t partial_history_cycles, total_history_cycles;
1106		bool discontinuity;
1107
1108		/*
1109		 * Check that the counter value occurs after the provided
1110		 * history reference and that the history doesn't cross a
1111		 * clocksource change
1112		 */
1113		if (!history_begin ||
1114		    !cycle_between(history_begin->cycles,
1115				   system_counterval.cycles, cycles) ||
1116		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
1117			return -EINVAL;
1118		partial_history_cycles = cycles - system_counterval.cycles;
1119		total_history_cycles = cycles - history_begin->cycles;
1120		discontinuity =
1121			history_begin->clock_was_set_seq != clock_was_set_seq;
1122
1123		ret = adjust_historical_crosststamp(history_begin,
1124						    partial_history_cycles,
1125						    total_history_cycles,
1126						    discontinuity, xtstamp);
1127		if (ret)
1128			return ret;
1129	}
1130
1131	return 0;
1132}
1133EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
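To sketch the driver side of this API: the callback fills in a device
timestamp plus the system counter value latched alongside it, and names
the clocksource that counter belongs to. Everything prefixed my_ below is
hypothetical; only the get_device_system_crosststamp() contract comes
from the code above.

static int my_get_time_fn(ktime_t *device_time,
			  struct system_counterval_t *system_counter,
			  void *ctx)
{
	struct my_dev *dev = ctx;	/* hypothetical device */

	/* Assume the hardware latches both timestamps in one operation. */
	*device_time = my_dev_read_latched_time(dev);
	system_counter->cycles = my_dev_read_latched_cycles(dev);
	system_counter->cs = my_dev_get_clocksource(dev);
	return 0;
}

static int my_crosststamp(struct my_dev *dev,
			  struct system_device_crosststamp *xt)
{
	/* No history snapshot: a counter value outside the current
	 * timekeeping interval fails with -EINVAL instead of being
	 * interpolated. */
	return get_device_system_crosststamp(my_get_time_fn, dev, NULL, xt);
}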
1134
1135/**
1136 * do_gettimeofday - Returns the time of day in a timeval
1137 * @tv:		pointer to the timeval to be set
1138 *
1139 * NOTE: Users should be converted to using getnstimeofday()
1140 */
1141void do_gettimeofday(struct timeval *tv)
1142{
1143	struct timespec64 now;
1144
1145	getnstimeofday64(&now);
1146	tv->tv_sec = now.tv_sec;
1147	tv->tv_usec = now.tv_nsec/1000;
1148}
1149EXPORT_SYMBOL(do_gettimeofday);
1150
1151/**
1152 * do_settimeofday64 - Sets the time of day.
1153 * @ts:     pointer to the timespec64 variable containing the new time
1154 *
 1155 * Sets the time of day to the new time, updates NTP and notifies hrtimers
1156 */
1157int do_settimeofday64(const struct timespec64 *ts)
1158{
1159	struct timekeeper *tk = &tk_core.timekeeper;
1160	struct timespec64 ts_delta, xt;
1161	unsigned long flags;
1162	int ret = 0;
1163
1164	if (!timespec64_valid_strict(ts))
1165		return -EINVAL;
1166
1167	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1168	write_seqcount_begin(&tk_core.seq);
1169
1170	timekeeping_forward_now(tk);
1171
1172	xt = tk_xtime(tk);
1173	ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
1174	ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
1175
1176	if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
1177		ret = -EINVAL;
1178		goto out;
1179	}
1180
1181	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
1182
1183	tk_set_xtime(tk, ts);
1184out:
1185	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1186
1187	write_seqcount_end(&tk_core.seq);
1188	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1189
1190	/* signal hrtimers about time change */
1191	clock_was_set();
1192
1193	return ret;
1194}
1195EXPORT_SYMBOL(do_settimeofday64);
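For reference, a minimal hypothetical in-kernel caller; note that besides
malformed timespecs, times that would fall before the boot epoch (making
the boot offset negative) are rejected with -EINVAL:

static int demo_set_time(void)
{
	struct timespec64 ts = { .tv_sec = 1500000000, .tv_nsec = 0 };
	int err = do_settimeofday64(&ts);

	if (err)
		pr_warn("do_settimeofday64 rejected the new time: %d\n", err);
	return err;
}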
1196
1197/**
1198 * timekeeping_inject_offset - Adds or subtracts from the current time.
1199 * @tv:		pointer to the timespec variable containing the offset
1200 *
1201 * Adds or subtracts an offset value from the current time.
1202 */
1203int timekeeping_inject_offset(struct timespec *ts)
1204{
1205	struct timekeeper *tk = &tk_core.timekeeper;
1206	unsigned long flags;
1207	struct timespec64 ts64, tmp;
1208	int ret = 0;
1209
1210	if (!timespec_inject_offset_valid(ts))
1211		return -EINVAL;
1212
1213	ts64 = timespec_to_timespec64(*ts);
1214
1215	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1216	write_seqcount_begin(&tk_core.seq);
1217
1218	timekeeping_forward_now(tk);
1219
1220	/* Make sure the proposed value is valid */
1221	tmp = timespec64_add(tk_xtime(tk),  ts64);
1222	if (timespec64_compare(&tk->wall_to_monotonic, &ts64) > 0 ||
1223	    !timespec64_valid_strict(&tmp)) {
1224		ret = -EINVAL;
1225		goto error;
1226	}
1227
1228	tk_xtime_add(tk, &ts64);
1229	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));
1230
1231error: /* even if we error out, we forwarded the time, so call update */
1232	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1233
1234	write_seqcount_end(&tk_core.seq);
1235	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1236
1237	/* signal hrtimers about time change */
1238	clock_was_set();
1239
1240	return ret;
1241}
1242EXPORT_SYMBOL(timekeeping_inject_offset);
1243
1244
1245/**
1246 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
1247 *
1248 */
1249s32 timekeeping_get_tai_offset(void)
1250{
1251	struct timekeeper *tk = &tk_core.timekeeper;
1252	unsigned int seq;
1253	s32 ret;
1254
1255	do {
1256		seq = read_seqcount_begin(&tk_core.seq);
1257		ret = tk->tai_offset;
1258	} while (read_seqcount_retry(&tk_core.seq, seq));
1259
1260	return ret;
1261}
1262
1263/**
1264 * __timekeeping_set_tai_offset - Lock free worker function
1265 *
1266 */
1267static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
1268{
1269	tk->tai_offset = tai_offset;
1270	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
1271}
1272
1273/**
1274 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
1275 *
1276 */
1277void timekeeping_set_tai_offset(s32 tai_offset)
1278{
1279	struct timekeeper *tk = &tk_core.timekeeper;
1280	unsigned long flags;
1281
1282	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1283	write_seqcount_begin(&tk_core.seq);
1284	__timekeeping_set_tai_offset(tk, tai_offset);
1285	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1286	write_seqcount_end(&tk_core.seq);
1287	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1288	clock_was_set();
1289}
1290
1291/**
1292 * change_clocksource - Swaps clocksources if a new one is available
1293 *
1294 * Accumulates current time interval and initializes new clocksource
1295 */
1296static int change_clocksource(void *data)
1297{
1298	struct timekeeper *tk = &tk_core.timekeeper;
1299	struct clocksource *new, *old;
1300	unsigned long flags;
1301
1302	new = (struct clocksource *) data;
1303
1304	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1305	write_seqcount_begin(&tk_core.seq);
1306
1307	timekeeping_forward_now(tk);
1308	/*
 1309	 * If the clocksource is in a module, take a module reference.
 1310	 * This succeeds for built-in code (owner == NULL) as well.
1311	 */
1312	if (try_module_get(new->owner)) {
1313		if (!new->enable || new->enable(new) == 0) {
1314			old = tk->tkr_mono.clock;
1315			tk_setup_internals(tk, new);
1316			if (old->disable)
1317				old->disable(old);
1318			module_put(old->owner);
1319		} else {
1320			module_put(new->owner);
1321		}
1322	}
1323	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1324
1325	write_seqcount_end(&tk_core.seq);
1326	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1327
1328	return 0;
1329}
1330
1331/**
1332 * timekeeping_notify - Install a new clock source
1333 * @clock:		pointer to the clock source
1334 *
1335 * This function is called from clocksource.c after a new, better clock
1336 * source has been registered. The caller holds the clocksource_mutex.
1337 */
1338int timekeeping_notify(struct clocksource *clock)
1339{
1340	struct timekeeper *tk = &tk_core.timekeeper;
1341
1342	if (tk->tkr_mono.clock == clock)
1343		return 0;
1344	stop_machine(change_clocksource, clock, NULL);
1345	tick_clock_notify();
1346	return tk->tkr_mono.clock == clock ? 0 : -1;
1347}
1348
1349/**
1350 * getrawmonotonic64 - Returns the raw monotonic time in a timespec
1351 * @ts:		pointer to the timespec64 to be set
1352 *
1353 * Returns the raw monotonic time (completely un-modified by ntp)
1354 */
1355void getrawmonotonic64(struct timespec64 *ts)
1356{
1357	struct timekeeper *tk = &tk_core.timekeeper;
1358	struct timespec64 ts64;
1359	unsigned long seq;
1360	s64 nsecs;
1361
1362	do {
1363		seq = read_seqcount_begin(&tk_core.seq);
1364		nsecs = timekeeping_get_ns(&tk->tkr_raw);
1365		ts64 = tk->raw_time;
1366
1367	} while (read_seqcount_retry(&tk_core.seq, seq));
1368
1369	timespec64_add_ns(&ts64, nsecs);
1370	*ts = ts64;
1371}
1372EXPORT_SYMBOL(getrawmonotonic64);
1373
1374
1375/**
1376 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
1377 */
1378int timekeeping_valid_for_hres(void)
1379{
1380	struct timekeeper *tk = &tk_core.timekeeper;
1381	unsigned long seq;
1382	int ret;
1383
1384	do {
1385		seq = read_seqcount_begin(&tk_core.seq);
1386
1387		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
1388
1389	} while (read_seqcount_retry(&tk_core.seq, seq));
1390
1391	return ret;
1392}
1393
1394/**
1395 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
1396 */
1397u64 timekeeping_max_deferment(void)
1398{
1399	struct timekeeper *tk = &tk_core.timekeeper;
1400	unsigned long seq;
1401	u64 ret;
1402
1403	do {
1404		seq = read_seqcount_begin(&tk_core.seq);
1405
1406		ret = tk->tkr_mono.clock->max_idle_ns;
1407
1408	} while (read_seqcount_retry(&tk_core.seq, seq));
1409
1410	return ret;
1411}
1412
1413/**
1414 * read_persistent_clock -  Return time from the persistent clock.
1415 *
1416 * Weak dummy function for arches that do not yet support it.
1417 * Reads the time from the battery backed persistent clock.
1418 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
1419 *
 1420 *  XXX - Be sure to remove this once all arches implement it.
1421 */
1422void __weak read_persistent_clock(struct timespec *ts)
1423{
1424	ts->tv_sec = 0;
1425	ts->tv_nsec = 0;
1426}
1427
1428void __weak read_persistent_clock64(struct timespec64 *ts64)
1429{
1430	struct timespec ts;
1431
1432	read_persistent_clock(&ts);
1433	*ts64 = timespec_to_timespec64(ts);
1434}
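An architecture with a battery-backed clock overrides the weak stub
above. A hedged sketch of such an override, where my_soc_rtc_read_ns() is
a hypothetical SoC helper that is safe to call with irqs disabled and
returns nanoseconds since the epoch:

void read_persistent_clock64(struct timespec64 *ts)
{
	u64 ns = my_soc_rtc_read_ns();	/* hypothetical, irq-safe RTC read */

	*ts = ns_to_timespec64(ns);
}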
1435
1436/**
1437 * read_boot_clock64 -  Return time of the system start.
1438 *
1439 * Weak dummy function for arches that do not yet support it.
1440 * Function to read the exact time the system has been started.
1441 * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
1442 *
 1443 *  XXX - Be sure to remove this once all arches implement it.
1444 */
1445void __weak read_boot_clock64(struct timespec64 *ts)
1446{
1447	ts->tv_sec = 0;
1448	ts->tv_nsec = 0;
1449}
1450
1451/* Flag for if timekeeping_resume() has injected sleeptime */
1452static bool sleeptime_injected;
1453
1454/* Flag for if there is a persistent clock on this platform */
1455static bool persistent_clock_exists;
1456
1457/*
1458 * timekeeping_init - Initializes the clocksource and common timekeeping values
1459 */
1460void __init timekeeping_init(void)
1461{
1462	struct timekeeper *tk = &tk_core.timekeeper;
1463	struct clocksource *clock;
1464	unsigned long flags;
1465	struct timespec64 now, boot, tmp;
1466
1467	read_persistent_clock64(&now);
1468	if (!timespec64_valid_strict(&now)) {
1469		pr_warn("WARNING: Persistent clock returned invalid value!\n"
1470			"         Check your CMOS/BIOS settings.\n");
1471		now.tv_sec = 0;
1472		now.tv_nsec = 0;
1473	} else if (now.tv_sec || now.tv_nsec)
1474		persistent_clock_exists = true;
1475
1476	read_boot_clock64(&boot);
1477	if (!timespec64_valid_strict(&boot)) {
1478		pr_warn("WARNING: Boot clock returned invalid value!\n"
1479			"         Check your CMOS/BIOS settings.\n");
1480		boot.tv_sec = 0;
1481		boot.tv_nsec = 0;
1482	}
1483
1484	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1485	write_seqcount_begin(&tk_core.seq);
1486	ntp_init();
1487
1488	clock = clocksource_default_clock();
1489	if (clock->enable)
1490		clock->enable(clock);
1491	tk_setup_internals(tk, clock);
1492
1493	tk_set_xtime(tk, &now);
1494	tk->raw_time.tv_sec = 0;
1495	tk->raw_time.tv_nsec = 0;
1496	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
1497		boot = tk_xtime(tk);
1498
1499	set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
1500	tk_set_wall_to_mono(tk, tmp);
1501
1502	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1503
1504	write_seqcount_end(&tk_core.seq);
1505	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1506}
1507
1508/* time in seconds when suspend began for persistent clock */
1509static struct timespec64 timekeeping_suspend_time;
1510
1511/**
1512 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1513 * @delta: pointer to a timespec delta value
1514 *
1515 * Takes a timespec offset measuring a suspend interval and properly
1516 * adds the sleep offset to the timekeeping variables.
1517 */
1518static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1519					   struct timespec64 *delta)
1520{
1521	if (!timespec64_valid_strict(delta)) {
1522		printk_deferred(KERN_WARNING
1523				"__timekeeping_inject_sleeptime: Invalid "
1524				"sleep delta value!\n");
1525		return;
1526	}
1527	tk_xtime_add(tk, delta);
1528	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1529	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1530	tk_debug_account_sleep_time(delta);
1531}
1532
1533#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
1534/**
1535 * We have three kinds of time sources to use for sleep time
1536 * injection, the preference order is:
1537 * 1) non-stop clocksource
1538 * 2) persistent clock (ie: RTC accessible when irqs are off)
1539 * 3) RTC
1540 *
 1541 * 1) and 2) are used by timekeeping, 3) by the RTC subsystem.
 1542 * If the system has neither 1) nor 2), 3) is used as the fallback.
 1543 *
 1544 * If timekeeping has injected sleeptime via either 1) or 2),
 1545 * 3) becomes needless, so in this case we don't need to call
 1546 * rtc_resume(); this is what timekeeping_rtc_skipresume()
 1547 * means.
1549 */
1550bool timekeeping_rtc_skipresume(void)
1551{
1552	return sleeptime_injected;
1553}
1554
1555/**
 1556 * Whether 1) can be used or not can only be determined during
 1557 * timekeeping_resume(), which is invoked after rtc_suspend(), so we
 1558 * can't reliably skip rtc_suspend() just because the system has 1).
 1559 *
 1560 * But if the system has 2), 2) will definitely be used, so in this
 1561 * case we don't need to call rtc_suspend(); this is what
 1562 * timekeeping_rtc_skipsuspend() means.
1563 */
1564bool timekeeping_rtc_skipsuspend(void)
1565{
1566	return persistent_clock_exists;
1567}
1568
1569/**
 1570 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1571 * @delta: pointer to a timespec64 delta value
1572 *
1573 * This hook is for architectures that cannot support read_persistent_clock64
 1574 * because their RTC/persistent clock is only accessible when irqs are enabled,
 1575 * and that also don't have an effective nonstop clocksource.
1576 *
1577 * This function should only be called by rtc_resume(), and allows
1578 * a suspend offset to be injected into the timekeeping values.
1579 */
1580void timekeeping_inject_sleeptime64(struct timespec64 *delta)
1581{
1582	struct timekeeper *tk = &tk_core.timekeeper;
1583	unsigned long flags;
1584
1585	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1586	write_seqcount_begin(&tk_core.seq);
1587
1588	timekeeping_forward_now(tk);
1589
1590	__timekeeping_inject_sleeptime(tk, delta);
1591
1592	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1593
1594	write_seqcount_end(&tk_core.seq);
1595	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1596
1597	/* signal hrtimers about time change */
1598	clock_was_set();
1599}
1600#endif
1601
1602/**
1603 * timekeeping_resume - Resumes the generic timekeeping subsystem.
1604 */
1605void timekeeping_resume(void)
1606{
1607	struct timekeeper *tk = &tk_core.timekeeper;
1608	struct clocksource *clock = tk->tkr_mono.clock;
1609	unsigned long flags;
1610	struct timespec64 ts_new, ts_delta;
1611	cycle_t cycle_now, cycle_delta;
1612
1613	sleeptime_injected = false;
1614	read_persistent_clock64(&ts_new);
1615
1616	clockevents_resume();
1617	clocksource_resume();
1618
1619	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1620	write_seqcount_begin(&tk_core.seq);
1621
1622	/*
1623	 * After system resumes, we need to calculate the suspended time and
1624	 * compensate it for the OS time. There are 3 sources that could be
1625	 * used: Nonstop clocksource during suspend, persistent clock and rtc
1626	 * device.
1627	 *
1628	 * One specific platform may have 1 or 2 or all of them, and the
1629	 * preference will be:
1630	 *	suspend-nonstop clocksource -> persistent clock -> rtc
1631	 * The less preferred source will only be tried if there is no better
1632	 * usable source. The rtc part is handled separately in rtc core code.
1633	 */
1634	cycle_now = tk->tkr_mono.read(clock);
1635	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
1636		cycle_now > tk->tkr_mono.cycle_last) {
1637		u64 num, max = ULLONG_MAX;
1638		u32 mult = clock->mult;
1639		u32 shift = clock->shift;
1640		s64 nsec = 0;
1641
1642		cycle_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
1643						tk->tkr_mono.mask);
1644
1645		/*
 1646		 * "cycle_delta * mult" may cause a 64-bit overflow if the
 1647		 * suspended time is too long. In that case we need to do the
 1648		 * 64-bit math carefully.
1649		 */
1650		do_div(max, mult);
1651		if (cycle_delta > max) {
1652			num = div64_u64(cycle_delta, max);
1653			nsec = (((u64) max * mult) >> shift) * num;
1654			cycle_delta -= num * max;
1655		}
1656		nsec += ((u64) cycle_delta * mult) >> shift;
1657
1658		ts_delta = ns_to_timespec64(nsec);
1659		sleeptime_injected = true;
1660	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1661		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1662		sleeptime_injected = true;
1663	}
1664
1665	if (sleeptime_injected)
1666		__timekeeping_inject_sleeptime(tk, &ts_delta);
1667
1668	/* Re-base the last cycle value */
1669	tk->tkr_mono.cycle_last = cycle_now;
1670	tk->tkr_raw.cycle_last  = cycle_now;
1671
1672	tk->ntp_error = 0;
1673	timekeeping_suspended = 0;
1674	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1675	write_seqcount_end(&tk_core.seq);
1676	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1677
1678	touch_softlockup_watchdog();
1679
1680	tick_resume();
1681	hrtimers_resume();
1682}
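The chunked multiplication in the nonstop-clocksource branch above is
worth seeing in isolation: split a huge cycle count so that every
(chunk * mult) product still fits in 64 bits. A standalone sketch with
the same shape (values are arbitrary; the kernel version uses a division
instead of a loop):

#include <stdint.h>
#include <stdio.h>

static uint64_t demo_cyc2ns_safe(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	uint64_t max = UINT64_MAX / mult;	/* biggest overflow-safe chunk */
	uint64_t ns = 0;

	while (cycles > max) {
		ns += (max * mult) >> shift;
		cycles -= max;
	}
	return ns + ((cycles * mult) >> shift);
}

int main(void)
{
	/* 10^10 cycles of a 1 GHz counter: mult/shift chosen so 1 cycle = 1 ns */
	printf("%llu ns\n", (unsigned long long)
	       demo_cyc2ns_safe(10000000000ull, 1u << 24, 24));
	return 0;
}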
1683
1684int timekeeping_suspend(void)
1685{
1686	struct timekeeper *tk = &tk_core.timekeeper;
1687	unsigned long flags;
1688	struct timespec64		delta, delta_delta;
1689	static struct timespec64	old_delta;
1690
1691	read_persistent_clock64(&timekeeping_suspend_time);
1692
1693	/*
 1694	 * On some systems the persistent_clock cannot be detected at
1695	 * timekeeping_init by its return value, so if we see a valid
1696	 * value returned, update the persistent_clock_exists flag.
1697	 */
1698	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1699		persistent_clock_exists = true;
1700
1701	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1702	write_seqcount_begin(&tk_core.seq);
1703	timekeeping_forward_now(tk);
1704	timekeeping_suspended = 1;
1705
1706	if (persistent_clock_exists) {
1707		/*
1708		 * To avoid drift caused by repeated suspend/resumes,
1709		 * which each can add ~1 second drift error,
1710		 * try to compensate so the difference in system time
1711		 * and persistent_clock time stays close to constant.
1712		 */
1713		delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1714		delta_delta = timespec64_sub(delta, old_delta);
1715		if (abs(delta_delta.tv_sec) >= 2) {
1716			/*
1717			 * if delta_delta is too large, assume time correction
1718			 * has occurred and set old_delta to the current delta.
1719			 */
1720			old_delta = delta;
1721		} else {
 1722			/* Otherwise, adjust the saved suspend time to compensate */
1723			timekeeping_suspend_time =
1724				timespec64_add(timekeeping_suspend_time, delta_delta);
1725		}
1726	}
1727
1728	timekeeping_update(tk, TK_MIRROR);
1729	halt_fast_timekeeper(tk);
1730	write_seqcount_end(&tk_core.seq);
1731	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1732
1733	tick_suspend();
1734	clocksource_suspend();
1735	clockevents_suspend();
1736
1737	return 0;
1738}
1739
1740/* sysfs resume/suspend bits for timekeeping */
1741static struct syscore_ops timekeeping_syscore_ops = {
1742	.resume		= timekeeping_resume,
1743	.suspend	= timekeeping_suspend,
1744};
1745
1746static int __init timekeeping_init_ops(void)
1747{
1748	register_syscore_ops(&timekeeping_syscore_ops);
1749	return 0;
1750}
1751device_initcall(timekeeping_init_ops);
1752
1753/*
1754 * Apply a multiplier adjustment to the timekeeper
1755 */
1756static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1757							 s64 offset,
1758							 bool negative,
1759							 int adj_scale)
1760{
1761	s64 interval = tk->cycle_interval;
1762	s32 mult_adj = 1;
1763
1764	if (negative) {
1765		mult_adj = -mult_adj;
1766		interval = -interval;
1767		offset  = -offset;
1768	}
1769	mult_adj <<= adj_scale;
1770	interval <<= adj_scale;
1771	offset <<= adj_scale;
1772
1773	/*
1774	 * So the following can be confusing.
1775	 *
 1776	 * To keep things simple, let's assume mult_adj == 1 for now.
1777	 *
1778	 * When mult_adj != 1, remember that the interval and offset values
1779	 * have been appropriately scaled so the math is the same.
1780	 *
1781	 * The basic idea here is that we're increasing the multiplier
1782	 * by one, this causes the xtime_interval to be incremented by
1783	 * one cycle_interval. This is because:
1784	 *	xtime_interval = cycle_interval * mult
1785	 * So if mult is being incremented by one:
1786	 *	xtime_interval = cycle_interval * (mult + 1)
 1787	 * It's the same as:
1788	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
1789	 * Which can be shortened to:
1790	 *	xtime_interval += cycle_interval
1791	 *
1792	 * So offset stores the non-accumulated cycles. Thus the current
1793	 * time (in shifted nanoseconds) is:
1794	 *	now = (offset * adj) + xtime_nsec
1795	 * Now, even though we're adjusting the clock frequency, we have
1796	 * to keep time consistent. In other words, we can't jump back
1797	 * in time, and we also want to avoid jumping forward in time.
1798	 *
1799	 * So given the same offset value, we need the time to be the same
1800	 * both before and after the freq adjustment.
1801	 *	now = (offset * adj_1) + xtime_nsec_1
1802	 *	now = (offset * adj_2) + xtime_nsec_2
1803	 * So:
1804	 *	(offset * adj_1) + xtime_nsec_1 =
1805	 *		(offset * adj_2) + xtime_nsec_2
1806	 * And we know:
1807	 *	adj_2 = adj_1 + 1
1808	 * So:
1809	 *	(offset * adj_1) + xtime_nsec_1 =
1810	 *		(offset * (adj_1+1)) + xtime_nsec_2
1811	 *	(offset * adj_1) + xtime_nsec_1 =
1812	 *		(offset * adj_1) + offset + xtime_nsec_2
1813	 * Canceling the sides:
1814	 *	xtime_nsec_1 = offset + xtime_nsec_2
1815	 * Which gives us:
1816	 *	xtime_nsec_2 = xtime_nsec_1 - offset
 1817	 * Which simplifies to:
1818	 *	xtime_nsec -= offset
1819	 *
1820	 * XXX - TODO: Doc ntp_error calculation.
1821	 */
1822	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
1823		/* NTP adjustment caused clocksource mult overflow */
1824		WARN_ON_ONCE(1);
1825		return;
1826	}
1827
1828	tk->tkr_mono.mult += mult_adj;
1829	tk->xtime_interval += interval;
1830	tk->tkr_mono.xtime_nsec -= offset;
1831	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
1832}
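The long comment above reduces to one algebraic identity: after bumping
mult by mult_adj, subtracting offset * mult_adj from xtime_nsec leaves
"now" unchanged. A tiny standalone check of the mult_adj == 1 case with
arbitrary values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t offset = 12345;	/* unaccumulated cycles */
	uint64_t mult = 1000;
	uint64_t xtime_nsec = 999999;	/* shifted nanoseconds */

	uint64_t before = offset * mult + xtime_nsec;

	mult += 1;			/* the frequency adjustment */
	xtime_nsec -= offset;		/* the compensating step */

	assert(offset * mult + xtime_nsec == before);
	return 0;
}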
1833
1834/*
1835 * Calculate the multiplier adjustment needed to match the frequency
1836 * specified by NTP
1837 */
1838static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
1839							s64 offset)
1840{
1841	s64 interval = tk->cycle_interval;
1842	s64 xinterval = tk->xtime_interval;
1843	u32 base = tk->tkr_mono.clock->mult;
1844	u32 max = tk->tkr_mono.clock->maxadj;
1845	u32 cur_adj = tk->tkr_mono.mult;
1846	s64 tick_error;
1847	bool negative;
1848	u32 adj_scale;
1849
1850	/* Remove any current error adj from freq calculation */
1851	if (tk->ntp_err_mult)
1852		xinterval -= tk->cycle_interval;
1853
1854	tk->ntp_tick = ntp_tick_length();
1855
1856	/* Calculate current error per tick */
1857	tick_error = ntp_tick_length() >> tk->ntp_error_shift;
1858	tick_error -= (xinterval + tk->xtime_remainder);
1859
 1860	/* Don't worry about correcting it if it's small */
1861	if (likely((tick_error >= 0) && (tick_error <= interval)))
1862		return;
1863
1864	/* preserve the direction of correction */
1865	negative = (tick_error < 0);
1866
1867	/* If any adjustment would pass the max, just return */
1868	if (negative && (cur_adj - 1) <= (base - max))
1869		return;
1870	if (!negative && (cur_adj + 1) >= (base + max))
1871		return;
1872	/*
1873	 * Sort out the magnitude of the correction, but
1874	 * avoid making so large a correction that we go
1875	 * over the max adjustment.
1876	 */
1877	adj_scale = 0;
1878	tick_error = abs(tick_error);
1879	while (tick_error > interval) {
1880		u32 adj = 1 << (adj_scale + 1);
1881
1882		/* Check if adjustment gets us within 1 unit from the max */
1883		if (negative && (cur_adj - adj) <= (base - max))
1884			break;
1885		if (!negative && (cur_adj + adj) >= (base + max))
1886			break;
1887
1888		adj_scale++;
1889		tick_error >>= 1;
1890	}
1891
1892	/* scale the corrections */
1893	timekeeping_apply_adjustment(tk, offset, negative, adj_scale);
1894}
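Because the error is halved once per doubling of the step, the search
above finds the largest usable power-of-two adjustment in O(log)
iterations. A stripped-down sketch, leaving out the clamp against the
clocksource's maxadj:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t tick_error = 1000000;	/* error per tick, arbitrary units */
	int64_t interval = 3000;	/* one cycle_interval */
	uint32_t adj_scale = 0;

	while (tick_error > interval) {
		adj_scale++;
		tick_error >>= 1;
	}
	/* 1000000 >> 9 == 1953 <= 3000, so the step applied is 1 << 9 */
	printf("mult adjustment: 1 << %u\n", adj_scale);
	return 0;
}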
1895
1896/*
1897 * Adjust the timekeeper's multiplier to the correct frequency
1898 * and also to reduce the accumulated error value.
1899 */
1900static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1901{
1902	/* Correct for the current frequency error */
1903	timekeeping_freqadjust(tk, offset);
1904
1905	/* Next make a small adjustment to fix any cumulative error */
1906	if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
1907		tk->ntp_err_mult = 1;
1908		timekeeping_apply_adjustment(tk, offset, 0, 0);
1909	} else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
1910		/* Undo any existing error adjustment */
1911		timekeeping_apply_adjustment(tk, offset, 1, 0);
1912		tk->ntp_err_mult = 0;
1913	}
1914
1915	if (unlikely(tk->tkr_mono.clock->maxadj &&
1916		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
1917			> tk->tkr_mono.clock->maxadj))) {
1918		printk_once(KERN_WARNING
1919			"Adjusting %s more than 11%% (%ld vs %ld)\n",
1920			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
1921			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
1922	}
1923
1924	/*
1925	 * It may be possible that when we entered this function, xtime_nsec
1926	 * was very small.  Further, if we're slightly speeding the clocksource
 1927	 * in the code above, it's possible the required corrective factor to
1928	 * xtime_nsec could cause it to underflow.
1929	 *
 1930	 * Now, since we already accumulated the second, we cannot simply
 1931	 * roll the accumulated second back, since the NTP subsystem has been
1932	 * notified via second_overflow. So instead we push xtime_nsec forward
1933	 * by the amount we underflowed, and add that amount into the error.
1934	 *
1935	 * We'll correct this error next time through this function, when
1936	 * xtime_nsec is not as small.
1937	 */
1938	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
1939		s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
1940		tk->tkr_mono.xtime_nsec = 0;
1941		tk->ntp_error += neg << tk->ntp_error_shift;
1942	}
1943}
1944
1945/**
1946 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
1947 *
1948 * Helper function that accumulates the nsecs greater than a second
 1949 * from the xtime_nsec field to the xtime_sec field.
1950 * It also calls into the NTP code to handle leapsecond processing.
1951 *
1952 */
1953static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
1954{
1955	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
1956	unsigned int clock_set = 0;
1957
1958	while (tk->tkr_mono.xtime_nsec >= nsecps) {
1959		int leap;
1960
1961		tk->tkr_mono.xtime_nsec -= nsecps;
1962		tk->xtime_sec++;
1963
 1964		/* Figure out if it's a leap second and apply it if needed */
1965		leap = second_overflow(tk->xtime_sec);
1966		if (unlikely(leap)) {
1967			struct timespec64 ts;
1968
1969			tk->xtime_sec += leap;
1970
1971			ts.tv_sec = leap;
1972			ts.tv_nsec = 0;
1973			tk_set_wall_to_mono(tk,
1974				timespec64_sub(tk->wall_to_monotonic, ts));
1975
1976			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
1977
1978			clock_set = TK_CLOCK_WAS_SET;
1979		}
1980	}
1981	return clock_set;
1982}
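Since xtime_nsec stores nanoseconds left-shifted by the clocksource
shift, one "second" in that unit is NSEC_PER_SEC << shift. A standalone
sketch of the carry loop (arbitrary values, leap-second handling
omitted):

#include <stdint.h>
#include <stdio.h>

#define DEMO_NSEC_PER_SEC 1000000000ull

int main(void)
{
	uint32_t shift = 8;
	uint64_t xtime_nsec = (2 * DEMO_NSEC_PER_SEC + 250000000ull) << shift;
	uint64_t nsecps = DEMO_NSEC_PER_SEC << shift;
	int64_t xtime_sec = 0;

	while (xtime_nsec >= nsecps) {
		xtime_nsec -= nsecps;
		xtime_sec++;
	}
	/* prints: 2 s + 250000000 ns */
	printf("%lld s + %llu ns\n", (long long)xtime_sec,
	       (unsigned long long)(xtime_nsec >> shift));
	return 0;
}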
1983
1984/**
1985 * logarithmic_accumulation - shifted accumulation of cycles
1986 *
 1987 * This function accumulates a shifted interval of cycles into
 1988 * a shifted interval of nanoseconds, allowing for an O(log)
 1989 * accumulation loop.
1990 *
1991 * Returns the unconsumed cycles.
1992 */
1993static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1994						u32 shift,
1995						unsigned int *clock_set)
1996{
1997	cycle_t interval = tk->cycle_interval << shift;
1998	u64 raw_nsecs;
1999
2000	/* If the offset is smaller than a shifted interval, do nothing */
2001	if (offset < interval)
2002		return offset;
2003
2004	/* Accumulate one shifted interval */
2005	offset -= interval;
2006	tk->tkr_mono.cycle_last += interval;
2007	tk->tkr_raw.cycle_last  += interval;
2008
2009	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
2010	*clock_set |= accumulate_nsecs_to_secs(tk);
2011
2012	/* Accumulate raw time */
2013	raw_nsecs = (u64)tk->raw_interval << shift;
2014	raw_nsecs += tk->raw_time.tv_nsec;
2015	if (raw_nsecs >= NSEC_PER_SEC) {
2016		u64 raw_secs = raw_nsecs;
2017		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
2018		tk->raw_time.tv_sec += raw_secs;
2019	}
2020	tk->raw_time.tv_nsec = raw_nsecs;
2021
2022	/* Accumulate error between NTP and clock interval */
2023	tk->ntp_error += tk->ntp_tick << shift;
2024	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
2025						(tk->ntp_error_shift + shift);
2026
2027	return offset;
2028}
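A userspace sketch of how these shifted chunks drain an offset in O(log)
steps, mirroring the loop in update_wall_time() below: always consume the
largest power-of-two multiple of the interval that still fits:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t interval = 1000;	/* cycles per tick, arbitrary */
	uint64_t offset = 123456;	/* cycles to accumulate */

	while (offset >= interval) {
		int shift = 63 - __builtin_clzll(offset / interval);
		uint64_t chunk = interval << shift;

		offset -= chunk;
		printf("accumulated %llu cycles (shift %d)\n",
		       (unsigned long long)chunk, shift);
	}
	printf("left over: %llu cycles\n", (unsigned long long)offset);
	return 0;
}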
2029
2030/**
2031 * update_wall_time - Uses the current clocksource to increment the wall time
2032 *
2033 */
2034void update_wall_time(void)
2035{
2036	struct timekeeper *real_tk = &tk_core.timekeeper;
2037	struct timekeeper *tk = &shadow_timekeeper;
2038	cycle_t offset;
2039	int shift = 0, maxshift;
2040	unsigned int clock_set = 0;
2041	unsigned long flags;
2042
2043	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2044
2045	/* Make sure we're fully resumed: */
2046	if (unlikely(timekeeping_suspended))
2047		goto out;
2048
2049#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
2050	offset = real_tk->cycle_interval;
2051#else
2052	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
2053				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
2054#endif
2055
2056	/* Check if there's really nothing to do */
2057	if (offset < real_tk->cycle_interval)
2058		goto out;
2059
2060	/* Do some additional sanity checking */
2061	timekeeping_check_update(real_tk, offset);
2062
2063	/*
2064	 * With NO_HZ we may have to accumulate many cycle_intervals
2065	 * (think "ticks") worth of time at once. To do this efficiently,
2066	 * we calculate the largest doubling multiple of cycle_intervals
2067	 * that is smaller than the offset.  We then accumulate that
2068	 * chunk in one go, and then try to consume the next smaller
2069	 * doubled multiple.
2070	 */
2071	shift = ilog2(offset) - ilog2(tk->cycle_interval);
2072	shift = max(0, shift);
2073	/* Bound shift to one less than what overflows tick_length */
2074	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
2075	shift = min(shift, maxshift);
2076	while (offset >= tk->cycle_interval) {
2077		offset = logarithmic_accumulation(tk, offset, shift,
2078							&clock_set);
2079		if (offset < tk->cycle_interval<<shift)
2080			shift--;
2081	}
2082
2083	/* correct the clock when NTP error is too big */
2084	timekeeping_adjust(tk, offset);
2085
2086	/*
2087	 * XXX This can be killed once everyone converts
2088	 * to the new update_vsyscall.
2089	 */
2090	old_vsyscall_fixup(tk);
2091
2092	/*
2093	 * Finally, make sure that after the rounding
2094	 * xtime_nsec isn't larger than NSEC_PER_SEC
2095	 */
2096	clock_set |= accumulate_nsecs_to_secs(tk);
2097
2098	write_seqcount_begin(&tk_core.seq);
2099	/*
2100	 * Update the real timekeeper.
2101	 *
2102	 * We could avoid this memcpy by switching pointers, but that
2103	 * requires changes to all other timekeeper usage sites as
2104	 * well, i.e. move the timekeeper pointer getter into the
2105	 * spinlocked/seqcount protected sections. And we trade this
2106	 * memcpy under the tk_core.seq against one before we start
2107	 * updating.
2108	 */
2109	timekeeping_update(tk, clock_set);
2110	memcpy(real_tk, tk, sizeof(*tk));
2111	/* The memcpy must come last. Do not put anything here! */
2112	write_seqcount_end(&tk_core.seq);
2113out:
2114	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2115	if (clock_set)
 2116		/* Have to call the _delayed version, since we are in irq context */
2117		clock_was_set_delayed();
2118}
2119
2120/**
2121 * getboottime64 - Return the real time of system boot.
2122 * @ts:		pointer to the timespec64 to be set
2123 *
2124 * Returns the wall-time of boot in a timespec64.
2125 *
2126 * This is based on the wall_to_monotonic offset and the total suspend
2127 * time. Calls to settimeofday will affect the value returned (which
2128 * basically means that however wrong your real time clock is at boot time,
2129 * you get the right time here).
2130 */
2131void getboottime64(struct timespec64 *ts)
2132{
2133	struct timekeeper *tk = &tk_core.timekeeper;
2134	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2135
2136	*ts = ktime_to_timespec64(t);
2137}
2138EXPORT_SYMBOL_GPL(getboottime64);
2139
2140unsigned long get_seconds(void)
2141{
2142	struct timekeeper *tk = &tk_core.timekeeper;
2143
2144	return tk->xtime_sec;
2145}
2146EXPORT_SYMBOL(get_seconds);
2147
2148struct timespec __current_kernel_time(void)
2149{
2150	struct timekeeper *tk = &tk_core.timekeeper;
2151
2152	return timespec64_to_timespec(tk_xtime(tk));
2153}
2154
2155struct timespec64 current_kernel_time64(void)
2156{
2157	struct timekeeper *tk = &tk_core.timekeeper;
2158	struct timespec64 now;
2159	unsigned long seq;
2160
2161	do {
2162		seq = read_seqcount_begin(&tk_core.seq);
2163
2164		now = tk_xtime(tk);
2165	} while (read_seqcount_retry(&tk_core.seq, seq));
2166
2167	return now;
2168}
2169EXPORT_SYMBOL(current_kernel_time64);
2170
2171struct timespec64 get_monotonic_coarse64(void)
2172{
2173	struct timekeeper *tk = &tk_core.timekeeper;
2174	struct timespec64 now, mono;
2175	unsigned long seq;
2176
2177	do {
2178		seq = read_seqcount_begin(&tk_core.seq);
2179
2180		now = tk_xtime(tk);
2181		mono = tk->wall_to_monotonic;
2182	} while (read_seqcount_retry(&tk_core.seq, seq));
2183
2184	set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
2185				now.tv_nsec + mono.tv_nsec);
2186
2187	return now;
2188}
2189
2190/*
2191 * Must hold jiffies_lock
2192 */
2193void do_timer(unsigned long ticks)
2194{
2195	jiffies_64 += ticks;
2196	calc_global_load(ticks);
2197}
2198
2199/**
2200 * ktime_get_update_offsets_now - hrtimer helper
2201 * @cwsseq:	pointer to check and store the clock was set sequence number
2202 * @offs_real:	pointer to storage for monotonic -> realtime offset
2203 * @offs_boot:	pointer to storage for monotonic -> boottime offset
2204 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
2205 *
2206 * Returns current monotonic time and updates the offsets if the
2207 * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
2208 * different.
2209 *
2210 * Called from hrtimer_interrupt() or retrigger_next_event()
2211 */
2212ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2213				     ktime_t *offs_boot, ktime_t *offs_tai)
2214{
2215	struct timekeeper *tk = &tk_core.timekeeper;
2216	unsigned int seq;
2217	ktime_t base;
2218	u64 nsecs;
2219
2220	do {
2221		seq = read_seqcount_begin(&tk_core.seq);
2222
2223		base = tk->tkr_mono.base;
2224		nsecs = timekeeping_get_ns(&tk->tkr_mono);
2225		base = ktime_add_ns(base, nsecs);
2226
2227		if (*cwsseq != tk->clock_was_set_seq) {
2228			*cwsseq = tk->clock_was_set_seq;
2229			*offs_real = tk->offs_real;
2230			*offs_boot = tk->offs_boot;
2231			*offs_tai = tk->offs_tai;
2232		}
2233
2234		/* Handle leapsecond insertion adjustments */
2235		if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64))
2236			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
2237
2238	} while (read_seqcount_retry(&tk_core.seq, seq));
2239
2240	return base;
2241}
2242
2243/**
2244 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
2245 */
2246int do_adjtimex(struct timex *txc)
2247{
2248	struct timekeeper *tk = &tk_core.timekeeper;
2249	unsigned long flags;
2250	struct timespec64 ts;
2251	s32 orig_tai, tai;
2252	int ret;
2253
2254	/* Validate the data before disabling interrupts */
2255	ret = ntp_validate_timex(txc);
2256	if (ret)
2257		return ret;
2258
2259	if (txc->modes & ADJ_SETOFFSET) {
2260		struct timespec delta;
2261		delta.tv_sec  = txc->time.tv_sec;
2262		delta.tv_nsec = txc->time.tv_usec;
2263		if (!(txc->modes & ADJ_NANO))
2264			delta.tv_nsec *= 1000;
2265		ret = timekeeping_inject_offset(&delta);
2266		if (ret)
2267			return ret;
2268	}
2269
2270	getnstimeofday64(&ts);
2271
2272	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2273	write_seqcount_begin(&tk_core.seq);
2274
2275	orig_tai = tai = tk->tai_offset;
2276	ret = __do_adjtimex(txc, &ts, &tai);
2277
2278	if (tai != orig_tai) {
2279		__timekeeping_set_tai_offset(tk, tai);
2280		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
2281	}
2282	tk_update_leap_state(tk);
2283
2284	write_seqcount_end(&tk_core.seq);
2285	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2286
2287	if (tai != orig_tai)
2288		clock_was_set();
2289
2290	ntp_notify_cmos_timer();
2291
2292	return ret;
2293}
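From userspace this path is reached through the adjtimex() syscall. A
small sketch that steps the clock forward by 500 ms via ADJ_SETOFFSET
(requires CAP_SYS_TIME; with ADJ_NANO the tv_usec field carries
nanoseconds):

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = {
		.modes = ADJ_SETOFFSET | ADJ_NANO,
		.time = { .tv_sec = 0, .tv_usec = 500000000 }, /* 500 ms, in ns */
	};

	if (adjtimex(&tx) == -1)
		perror("adjtimex");
	return 0;
}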
2294
2295#ifdef CONFIG_NTP_PPS
2296/**
2297 * hardpps() - Accessor function to NTP __hardpps function
2298 */
2299void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
2300{
2301	unsigned long flags;
2302
2303	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2304	write_seqcount_begin(&tk_core.seq);
2305
2306	__hardpps(phase_ts, raw_ts);
2307
2308	write_seqcount_end(&tk_core.seq);
2309	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2310}
2311EXPORT_SYMBOL(hardpps);
2312#endif
2313
2314/**
2315 * xtime_update() - advances the timekeeping infrastructure
 2316 * @ticks:	number of ticks that have elapsed since the last call.
2317 *
2318 * Must be called with interrupts disabled.
2319 */
2320void xtime_update(unsigned long ticks)
2321{
2322	write_seqlock(&jiffies_lock);
2323	do_timer(ticks);
2324	write_sequnlock(&jiffies_lock);
2325	update_wall_time();
2326}