   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  Kernel timekeeping code and accessor functions. Based on code from
   4 *  timer.c, moved in commit 8524070b7982.
   5 */
   6#include <linux/timekeeper_internal.h>
   7#include <linux/module.h>
   8#include <linux/interrupt.h>
   9#include <linux/percpu.h>
  10#include <linux/init.h>
  11#include <linux/mm.h>
  12#include <linux/nmi.h>
  13#include <linux/sched.h>
  14#include <linux/sched/loadavg.h>
  15#include <linux/sched/clock.h>
  16#include <linux/syscore_ops.h>
  17#include <linux/clocksource.h>
  18#include <linux/jiffies.h>
  19#include <linux/time.h>
  20#include <linux/timex.h>
  21#include <linux/tick.h>
  22#include <linux/stop_machine.h>
  23#include <linux/pvclock_gtod.h>
  24#include <linux/compiler.h>
  25#include <linux/audit.h>
  26#include <linux/random.h>
  27
  28#include "tick-internal.h"
  29#include "ntp_internal.h"
  30#include "timekeeping_internal.h"
  31
  32#define TK_CLEAR_NTP		(1 << 0)
  33#define TK_MIRROR		(1 << 1)
  34#define TK_CLOCK_WAS_SET	(1 << 2)
  35
  36enum timekeeping_adv_mode {
  37	/* Update timekeeper when a tick has passed */
  38	TK_ADV_TICK,
  39
  40	/* Update timekeeper on a direct frequency change */
  41	TK_ADV_FREQ
  42};
  43
  44DEFINE_RAW_SPINLOCK(timekeeper_lock);
  45
  46/*
  47 * The most important data for readout fits into a single 64 byte
  48 * cache line.
  49 */
  50static struct {
  51	seqcount_raw_spinlock_t	seq;
  52	struct timekeeper	timekeeper;
  53} tk_core ____cacheline_aligned = {
  54	.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
  55};
  56
  57static struct timekeeper shadow_timekeeper;
  58
  59/* flag for if timekeeping is suspended */
  60int __read_mostly timekeeping_suspended;
  61
  62/**
  63 * struct tk_fast - NMI safe timekeeper
  64 * @seq:	Sequence counter for protecting updates. The lowest bit
  65 *		is the index for the tk_read_base array
  66 * @base:	tk_read_base array. Access is indexed by the lowest bit of
  67 *		@seq.
  68 *
  69 * See @update_fast_timekeeper() below.
  70 */
  71struct tk_fast {
  72	seqcount_latch_t	seq;
  73	struct tk_read_base	base[2];
  74};
  75
  76/* Suspend-time cycles value for halted fast timekeeper. */
  77static u64 cycles_at_suspend;
  78
  79static u64 dummy_clock_read(struct clocksource *cs)
  80{
  81	if (timekeeping_suspended)
  82		return cycles_at_suspend;
  83	return local_clock();
  84}
  85
  86static struct clocksource dummy_clock = {
  87	.read = dummy_clock_read,
  88};
  89
  90/*
  91 * Boot time initialization which allows local_clock() to be utilized
  92 * during early boot when clocksources are not available. local_clock()
  93 * returns nanoseconds already so no conversion is required, hence mult=1
   94 * and shift=0. When the first proper clocksource is installed,
   95 * the fast timekeepers are updated with the correct values.
  96 */
  97#define FAST_TK_INIT						\
  98	{							\
  99		.clock		= &dummy_clock,			\
 100		.mask		= CLOCKSOURCE_MASK(64),		\
 101		.mult		= 1,				\
 102		.shift		= 0,				\
 103	}
 104
 105static struct tk_fast tk_fast_mono ____cacheline_aligned = {
 106	.seq     = SEQCNT_LATCH_ZERO(tk_fast_mono.seq),
 107	.base[0] = FAST_TK_INIT,
 108	.base[1] = FAST_TK_INIT,
 109};
 110
 111static struct tk_fast tk_fast_raw  ____cacheline_aligned = {
 112	.seq     = SEQCNT_LATCH_ZERO(tk_fast_raw.seq),
 113	.base[0] = FAST_TK_INIT,
 114	.base[1] = FAST_TK_INIT,
 115};
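/*
 * [Editor's example, not part of the kernel source] With mult = 1 and
 * shift = 0 the generic conversion used on the fast-timekeeper read path,
 * ns = (delta * mult + xtime_nsec) >> shift, degenerates to ns = delta, so
 * the nanoseconds returned by local_clock() via dummy_clock_read() pass
 * through unchanged until a real clocksource is installed. A minimal sketch
 * of that identity:
 */
static inline u64 example_fast_tk_init_conversion(u64 local_clock_ns)
{
	/* FAST_TK_INIT: mult = 1, shift = 0, xtime_nsec starts at 0 */
	return (local_clock_ns * 1 + 0) >> 0;	/* == local_clock_ns */
}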
 116
 117static inline void tk_normalize_xtime(struct timekeeper *tk)
 118{
 119	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
 120		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
 121		tk->xtime_sec++;
 122	}
 123	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
 124		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
 125		tk->raw_sec++;
 126	}
 127}
 128
 129static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
 130{
 131	struct timespec64 ts;
 132
 133	ts.tv_sec = tk->xtime_sec;
 134	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
 135	return ts;
 136}
 137
 138static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
 139{
 140	tk->xtime_sec = ts->tv_sec;
 141	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
 142}
 143
 144static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
 145{
 146	tk->xtime_sec += ts->tv_sec;
 147	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
 148	tk_normalize_xtime(tk);
 149}
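/*
 * [Editor's example, not part of the kernel source] The helpers above keep
 * nanoseconds in a shifted fixed-point format (ns << tkr->shift) so that
 * clock deltas can be accumulated with a single multiply before the final
 * right shift on readout. A minimal sketch of the round trip, assuming a
 * hypothetical shift of 8:
 */
static inline void example_shifted_nsec_roundtrip(void)
{
	const u32 shift = 8;			/* hypothetical clocksource shift */
	u64 nsec = 123456789;			/* plain nanoseconds */
	u64 xtime_nsec = nsec << shift;		/* stored, shifted representation */

	/* Readout reverses the shift, exactly as tk_xtime() does above. */
	WARN_ON((xtime_nsec >> shift) != nsec);
}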
 150
 151static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
 152{
 153	struct timespec64 tmp;
 154
 155	/*
 156	 * Verify consistency of: offset_real = -wall_to_monotonic
 157	 * before modifying anything
 158	 */
 159	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
 160					-tk->wall_to_monotonic.tv_nsec);
 161	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
 162	tk->wall_to_monotonic = wtm;
 163	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
 164	tk->offs_real = timespec64_to_ktime(tmp);
 165	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
 166}
 167
 168static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 169{
 170	tk->offs_boot = ktime_add(tk->offs_boot, delta);
 171	/*
 172	 * Timespec representation for VDSO update to avoid 64bit division
 173	 * on every update.
 174	 */
 175	tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
 176}
 177
 178/*
 179 * tk_clock_read - atomic clocksource read() helper
 180 *
 181 * This helper is necessary to use in the read paths because, while the
 182 * seqcount ensures we don't return a bad value while structures are updated,
 183 * it doesn't protect from potential crashes. There is the possibility that
 184 * the tkr's clocksource may change between the read reference, and the
 185 * clock reference passed to the read function.  This can cause crashes if
 186 * the wrong clocksource is passed to the wrong read function.
 187 * This isn't necessary to use when holding the timekeeper_lock or doing
 188 * a read of the fast-timekeeper tkrs (which is protected by its own locking
 189 * and update logic).
 190 */
 191static inline u64 tk_clock_read(const struct tk_read_base *tkr)
 192{
 193	struct clocksource *clock = READ_ONCE(tkr->clock);
 194
 195	return clock->read(clock);
 196}
 197
 198#ifdef CONFIG_DEBUG_TIMEKEEPING
 199#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
 200
 201static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
 202{
 203
 204	u64 max_cycles = tk->tkr_mono.clock->max_cycles;
 205	const char *name = tk->tkr_mono.clock->name;
 206
 207	if (offset > max_cycles) {
 208		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
 209				offset, name, max_cycles);
 210		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
 211	} else {
 212		if (offset > (max_cycles >> 1)) {
 213			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
 214					offset, name, max_cycles >> 1);
 215			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
 216		}
 217	}
 218
 219	if (tk->underflow_seen) {
 220		if (jiffies - tk->last_warning > WARNING_FREQ) {
 221			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
 222			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
 223			printk_deferred("         Your kernel is probably still fine.\n");
 224			tk->last_warning = jiffies;
 225		}
 226		tk->underflow_seen = 0;
 227	}
 228
 229	if (tk->overflow_seen) {
 230		if (jiffies - tk->last_warning > WARNING_FREQ) {
 231			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
 232			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
 233			printk_deferred("         Your kernel is probably still fine.\n");
 234			tk->last_warning = jiffies;
 235		}
 236		tk->overflow_seen = 0;
 237	}
 238}
 239
 240static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 241{
 242	struct timekeeper *tk = &tk_core.timekeeper;
 243	u64 now, last, mask, max, delta;
 244	unsigned int seq;
 245
 246	/*
 247	 * Since we're called holding a seqcount, the data may shift
 248	 * under us while we're doing the calculation. This can cause
 249	 * false positives, since we'd note a problem but throw the
 250	 * results away. So nest another seqcount here to atomically
 251	 * grab the points we are checking with.
 252	 */
 253	do {
 254		seq = read_seqcount_begin(&tk_core.seq);
 255		now = tk_clock_read(tkr);
 256		last = tkr->cycle_last;
 257		mask = tkr->mask;
 258		max = tkr->clock->max_cycles;
 259	} while (read_seqcount_retry(&tk_core.seq, seq));
 260
 261	delta = clocksource_delta(now, last, mask);
 262
 263	/*
 264	 * Try to catch underflows by checking if we are seeing small
 265	 * mask-relative negative values.
 266	 */
 267	if (unlikely((~delta & mask) < (mask >> 3))) {
 268		tk->underflow_seen = 1;
 269		delta = 0;
 270	}
 271
 272	/* Cap delta value to the max_cycles values to avoid mult overflows */
 273	if (unlikely(delta > max)) {
 274		tk->overflow_seen = 1;
 275		delta = tkr->clock->max_cycles;
 276	}
 277
 278	return delta;
 279}
 280#else
 281static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
 282{
 283}
 284static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 285{
 286	u64 cycle_now, delta;
 287
 288	/* read clocksource */
 289	cycle_now = tk_clock_read(tkr);
 290
 291	/* calculate the delta since the last update_wall_time */
 292	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
 293
 294	return delta;
 295}
 296#endif
 297
 298/**
 299 * tk_setup_internals - Set up internals to use clocksource clock.
 300 *
 301 * @tk:		The target timekeeper to setup.
 302 * @clock:		Pointer to clocksource.
 303 *
 304 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 305 * pair and interval request.
 306 *
 307 * Unless you're the timekeeping code, you should not be using this!
 308 */
 309static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 310{
 311	u64 interval;
 312	u64 tmp, ntpinterval;
 313	struct clocksource *old_clock;
 314
 315	++tk->cs_was_changed_seq;
 316	old_clock = tk->tkr_mono.clock;
 317	tk->tkr_mono.clock = clock;
 318	tk->tkr_mono.mask = clock->mask;
 319	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
 320
 321	tk->tkr_raw.clock = clock;
 322	tk->tkr_raw.mask = clock->mask;
 323	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
 324
 325	/* Do the ns -> cycle conversion first, using original mult */
 326	tmp = NTP_INTERVAL_LENGTH;
 327	tmp <<= clock->shift;
 328	ntpinterval = tmp;
 329	tmp += clock->mult/2;
 330	do_div(tmp, clock->mult);
 331	if (tmp == 0)
 332		tmp = 1;
 333
 334	interval = (u64) tmp;
 335	tk->cycle_interval = interval;
 336
 337	/* Go back from cycles -> shifted ns */
 338	tk->xtime_interval = interval * clock->mult;
 339	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
 340	tk->raw_interval = interval * clock->mult;
 341
 342	 /* if changing clocks, convert xtime_nsec shift units */
 343	if (old_clock) {
 344		int shift_change = clock->shift - old_clock->shift;
 345		if (shift_change < 0) {
 346			tk->tkr_mono.xtime_nsec >>= -shift_change;
 347			tk->tkr_raw.xtime_nsec >>= -shift_change;
 348		} else {
 349			tk->tkr_mono.xtime_nsec <<= shift_change;
 350			tk->tkr_raw.xtime_nsec <<= shift_change;
 351		}
 352	}
 353
 354	tk->tkr_mono.shift = clock->shift;
 355	tk->tkr_raw.shift = clock->shift;
 356
 357	tk->ntp_error = 0;
 358	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
 359	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
 360
 361	/*
 362	 * The timekeeper keeps its own mult values for the currently
 363	 * active clocksource. These value will be adjusted via NTP
 364	 * to counteract clock drifting.
 365	 */
 366	tk->tkr_mono.mult = clock->mult;
 367	tk->tkr_raw.mult = clock->mult;
 368	tk->ntp_err_mult = 0;
 369	tk->skip_second_overflow = 0;
 370}
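/*
 * [Editor's example, not part of the kernel source] The rounding above in
 * condensed form: cycle_interval is the number of clocksource cycles per
 * NTP interval, computed from the clocksource's mult/shift pair with
 * round-to-nearest and clamped to at least one cycle.
 */
static inline u64 example_cycles_per_ntp_interval(u32 mult, u32 shift)
{
	u64 tmp = (u64)NTP_INTERVAL_LENGTH << shift;	/* ns -> shifted ns */

	tmp += mult / 2;				/* round to nearest */
	do_div(tmp, mult);				/* shifted ns -> cycles */
	return tmp ? tmp : 1;
}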
 371
 372/* Timekeeper helper functions. */
 373
 374static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
 375{
 376	u64 nsec;
 377
 378	nsec = delta * tkr->mult + tkr->xtime_nsec;
 379	nsec >>= tkr->shift;
 380
 381	return nsec;
 382}
 383
 384static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
 385{
 386	u64 delta;
 387
 388	delta = timekeeping_get_delta(tkr);
 389	return timekeeping_delta_to_ns(tkr, delta);
 390}
 391
 392static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
 393{
 394	u64 delta;
 395
 396	/* calculate the delta since the last update_wall_time */
 397	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
 398	return timekeeping_delta_to_ns(tkr, delta);
 399}
 400
 401/**
 402 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 403 * @tkr: Timekeeping readout base from which we take the update
 404 * @tkf: Pointer to NMI safe timekeeper
 405 *
 406 * We want to use this from any context including NMI and tracing /
 407 * instrumenting the timekeeping code itself.
 408 *
 409 * Employ the latch technique; see @raw_write_seqcount_latch.
 410 *
  411 * So if an NMI hits the update of base[0] then it will use base[1]
  412 * which is still consistent. In the worst case this can result in a
 413 * slightly wrong timestamp (a few nanoseconds). See
 414 * @ktime_get_mono_fast_ns.
 415 */
 416static void update_fast_timekeeper(const struct tk_read_base *tkr,
 417				   struct tk_fast *tkf)
 418{
 419	struct tk_read_base *base = tkf->base;
 420
 421	/* Force readers off to base[1] */
 422	raw_write_seqcount_latch(&tkf->seq);
 423
 424	/* Update base[0] */
 425	memcpy(base, tkr, sizeof(*base));
 426
 427	/* Force readers back to base[0] */
 428	raw_write_seqcount_latch(&tkf->seq);
 429
 430	/* Update base[1] */
 431	memcpy(base + 1, base, sizeof(*base));
 432}
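/*
 * [Editor's example, not part of the kernel source] The same latch pattern
 * reduced to a plain 64bit value for illustration: the writer publishes into
 * the copy readers were just steered away from, flips the sequence, then
 * brings the other copy up to date, so an NMI hitting anywhere in the middle
 * always finds at least one consistent copy.
 */
struct example_latch {
	seqcount_latch_t	seq;
	u64			data[2];
};

static inline void example_latch_write(struct example_latch *l, u64 val)
{
	raw_write_seqcount_latch(&l->seq);	/* steer readers to data[1] */
	l->data[0] = val;
	raw_write_seqcount_latch(&l->seq);	/* steer readers back to data[0] */
	l->data[1] = val;
}

static inline u64 example_latch_read(struct example_latch *l)
{
	unsigned int seq;
	u64 val;

	do {
		seq = raw_read_seqcount_latch(&l->seq);
		val = l->data[seq & 0x01];
	} while (read_seqcount_latch_retry(&l->seq, seq));

	return val;
}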
 433
 434static __always_inline u64 fast_tk_get_delta_ns(struct tk_read_base *tkr)
 435{
 436	u64 delta, cycles = tk_clock_read(tkr);
 437
 438	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
 439	return timekeeping_delta_to_ns(tkr, delta);
 440}
 441
 442static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 443{
 444	struct tk_read_base *tkr;
 445	unsigned int seq;
 446	u64 now;
 447
 448	do {
 449		seq = raw_read_seqcount_latch(&tkf->seq);
 450		tkr = tkf->base + (seq & 0x01);
 451		now = ktime_to_ns(tkr->base);
 452		now += fast_tk_get_delta_ns(tkr);
 453	} while (read_seqcount_latch_retry(&tkf->seq, seq));
 454
 455	return now;
 456}
 457
 458/**
 459 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 460 *
 461 * This timestamp is not guaranteed to be monotonic across an update.
 462 * The timestamp is calculated by:
 463 *
 464 *	now = base_mono + clock_delta * slope
 465 *
 466 * So if the update lowers the slope, readers who are forced to the
 467 * not yet updated second array are still using the old steeper slope.
 468 *
 469 * tmono
 470 * ^
 471 * |    o  n
 472 * |   o n
 473 * |  u
 474 * | o
 475 * |o
 476 * |12345678---> reader order
 477 *
 478 * o = old slope
 479 * u = update
 480 * n = new slope
 481 *
 482 * So reader 6 will observe time going backwards versus reader 5.
 483 *
 484 * While other CPUs are likely to be able to observe that, the only way
 485 * for a CPU local observation is when an NMI hits in the middle of
 486 * the update. Timestamps taken from that NMI context might be ahead
 487 * of the following timestamps. Callers need to be aware of that and
 488 * deal with it.
 489 */
 490u64 notrace ktime_get_mono_fast_ns(void)
 491{
 492	return __ktime_get_fast_ns(&tk_fast_mono);
 493}
 494EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
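/*
 * [Editor's example, not part of the kernel source] Typical use from a
 * context where the seqcount protected accessors must not be used, e.g. an
 * NMI handler or a tracer instrumenting the timekeeping code itself:
 */
static inline void example_nmi_safe_timestamp(void)
{
	u64 t0 = ktime_get_mono_fast_ns();

	/* ... NMI-safe work ... */

	trace_printk("section took %llu ns\n", ktime_get_mono_fast_ns() - t0);
}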
 495
 496/**
 497 * ktime_get_raw_fast_ns - Fast NMI safe access to clock monotonic raw
 498 *
 499 * Contrary to ktime_get_mono_fast_ns() this is always correct because the
 500 * conversion factor is not affected by NTP/PTP correction.
 501 */
 502u64 notrace ktime_get_raw_fast_ns(void)
 503{
 504	return __ktime_get_fast_ns(&tk_fast_raw);
 505}
 506EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
 507
 508/**
 509 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
 510 *
 511 * To keep it NMI safe since we're accessing from tracing, we're not using a
 512 * separate timekeeper with updates to monotonic clock and boot offset
 513 * protected with seqcounts. This has the following minor side effects:
 514 *
  515 * (1) It's possible that a timestamp is taken after the boot offset is updated
 516 * but before the timekeeper is updated. If this happens, the new boot offset
 517 * is added to the old timekeeping making the clock appear to update slightly
 518 * earlier:
 519 *    CPU 0                                        CPU 1
 520 *    timekeeping_inject_sleeptime64()
 521 *    __timekeeping_inject_sleeptime(tk, delta);
 522 *                                                 timestamp();
 523 *    timekeeping_update(tk, TK_CLEAR_NTP...);
 524 *
 525 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
 526 * partially updated.  Since the tk->offs_boot update is a rare event, this
 527 * should be a rare occurrence which postprocessing should be able to handle.
 528 *
 529 * The caveats vs. timestamp ordering as documented for ktime_get_fast_ns()
 530 * apply as well.
 531 */
 532u64 notrace ktime_get_boot_fast_ns(void)
 533{
 534	struct timekeeper *tk = &tk_core.timekeeper;
 535
 536	return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_boot)));
 537}
 538EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
 539
 540/**
 541 * ktime_get_tai_fast_ns - NMI safe and fast access to tai clock.
 542 *
 543 * The same limitations as described for ktime_get_boot_fast_ns() apply. The
 544 * mono time and the TAI offset are not read atomically which may yield wrong
  545 * readouts. However, an update of the TAI offset is a rare event, e.g., caused
 546 * by settime or adjtimex with an offset. The user of this function has to deal
 547 * with the possibility of wrong timestamps in post processing.
 548 */
 549u64 notrace ktime_get_tai_fast_ns(void)
 550{
 551	struct timekeeper *tk = &tk_core.timekeeper;
 552
 553	return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_tai)));
 554}
 555EXPORT_SYMBOL_GPL(ktime_get_tai_fast_ns);
 556
 557static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
 558{
 559	struct tk_read_base *tkr;
 560	u64 basem, baser, delta;
 561	unsigned int seq;
 562
 563	do {
 564		seq = raw_read_seqcount_latch(&tkf->seq);
 565		tkr = tkf->base + (seq & 0x01);
 566		basem = ktime_to_ns(tkr->base);
 567		baser = ktime_to_ns(tkr->base_real);
 568		delta = fast_tk_get_delta_ns(tkr);
 569	} while (read_seqcount_latch_retry(&tkf->seq, seq));
 570
 571	if (mono)
 572		*mono = basem + delta;
 573	return baser + delta;
 574}
 575
 576/**
 577 * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
 578 *
 579 * See ktime_get_fast_ns() for documentation of the time stamp ordering.
 580 */
 581u64 ktime_get_real_fast_ns(void)
 582{
 583	return __ktime_get_real_fast(&tk_fast_mono, NULL);
 584}
 585EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
 586
 587/**
 588 * ktime_get_fast_timestamps: - NMI safe timestamps
 589 * @snapshot:	Pointer to timestamp storage
 590 *
 591 * Stores clock monotonic, boottime and realtime timestamps.
 592 *
 593 * Boot time is a racy access on 32bit systems if the sleep time injection
 594 * happens late during resume and not in timekeeping_resume(). That could
 595 * be avoided by expanding struct tk_read_base with boot offset for 32bit
  596 * and adding more overhead to the update. As this is a hard-to-observe,
  597 * once-per-resume event which can be filtered with reasonable effort using
 598 * the accurate mono/real timestamps, it's probably not worth the trouble.
 599 *
  600 * Aside from that it might be possible on 32 and 64 bit to observe the
 601 * following when the sleep time injection happens late:
 602 *
 603 * CPU 0				CPU 1
 604 * timekeeping_resume()
 605 * ktime_get_fast_timestamps()
 606 *	mono, real = __ktime_get_real_fast()
 607 *					inject_sleep_time()
 608 *					   update boot offset
 609 *	boot = mono + bootoffset;
 610 *
 611 * That means that boot time already has the sleep time adjustment, but
 612 * real time does not. On the next readout both are in sync again.
 613 *
 614 * Preventing this for 64bit is not really feasible without destroying the
 615 * careful cache layout of the timekeeper because the sequence count and
 616 * struct tk_read_base would then need two cache lines instead of one.
 617 *
 618 * Access to the time keeper clock source is disabled across the innermost
 619 * steps of suspend/resume. The accessors still work, but the timestamps
 620 * are frozen until time keeping is resumed which happens very early.
 621 *
 622 * For regular suspend/resume there is no observable difference vs. sched
 623 * clock, but it might affect some of the nasty low level debug printks.
 624 *
 625 * OTOH, access to sched clock is not guaranteed across suspend/resume on
 626 * all systems either so it depends on the hardware in use.
 627 *
 628 * If that turns out to be a real problem then this could be mitigated by
 629 * using sched clock in a similar way as during early boot. But it's not as
 630 * trivial as on early boot because it needs some careful protection
 631 * against the clock monotonic timestamp jumping backwards on resume.
 632 */
 633void ktime_get_fast_timestamps(struct ktime_timestamps *snapshot)
 634{
 635	struct timekeeper *tk = &tk_core.timekeeper;
 636
 637	snapshot->real = __ktime_get_real_fast(&tk_fast_mono, &snapshot->mono);
 638	snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot));
 639}
 640
 641/**
 642 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 643 * @tk: Timekeeper to snapshot.
 644 *
 645 * It generally is unsafe to access the clocksource after timekeeping has been
 646 * suspended, so take a snapshot of the readout base of @tk and use it as the
 647 * fast timekeeper's readout base while suspended.  It will return the same
 648 * number of cycles every time until timekeeping is resumed at which time the
 649 * proper readout base for the fast timekeeper will be restored automatically.
 650 */
 651static void halt_fast_timekeeper(const struct timekeeper *tk)
 652{
 653	static struct tk_read_base tkr_dummy;
 654	const struct tk_read_base *tkr = &tk->tkr_mono;
 655
 656	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
 657	cycles_at_suspend = tk_clock_read(tkr);
 658	tkr_dummy.clock = &dummy_clock;
 659	tkr_dummy.base_real = tkr->base + tk->offs_real;
 660	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
 661
 662	tkr = &tk->tkr_raw;
 663	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
 664	tkr_dummy.clock = &dummy_clock;
 665	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 666}
 667
 668static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
 669
 670static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
 671{
 672	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
 673}
 674
 675/**
 676 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 677 * @nb: Pointer to the notifier block to register
 678 */
 679int pvclock_gtod_register_notifier(struct notifier_block *nb)
 680{
 681	struct timekeeper *tk = &tk_core.timekeeper;
 682	unsigned long flags;
 683	int ret;
 684
 685	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 686	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
 687	update_pvclock_gtod(tk, true);
 688	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 689
 690	return ret;
 691}
 692EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
 693
 694/**
 695 * pvclock_gtod_unregister_notifier - unregister a pvclock
 696 * timedata update listener
 697 * @nb: Pointer to the notifier block to unregister
 698 */
 699int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
 700{
 701	unsigned long flags;
 702	int ret;
 703
 704	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 705	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
 706	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 707
 708	return ret;
 709}
 710EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
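/*
 * [Editor's example, not part of the kernel source] A hypothetical pvclock
 * listener. The callback runs with timekeeper_lock held and receives a
 * pointer to the updated timekeeper, as update_pvclock_gtod() above shows;
 * KVM's x86 code registers a notifier of this shape to refresh the
 * paravirtualized guest clock.
 */
static int example_gtod_update(struct notifier_block *nb,
			       unsigned long was_set, void *priv)
{
	struct timekeeper *tk = priv;

	/* e.g. recompute guest-visible CLOCK_REALTIME parameters from *tk */
	(void)tk;
	return NOTIFY_OK;
}

static struct notifier_block example_gtod_nb = {
	.notifier_call	= example_gtod_update,
};

/* From init code:  pvclock_gtod_register_notifier(&example_gtod_nb); */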
 711
 712/*
 713 * tk_update_leap_state - helper to update the next_leap_ktime
 714 */
 715static inline void tk_update_leap_state(struct timekeeper *tk)
 716{
 717	tk->next_leap_ktime = ntp_get_next_leap();
 718	if (tk->next_leap_ktime != KTIME_MAX)
 719		/* Convert to monotonic time */
 720		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
 721}
 722
 723/*
 724 * Update the ktime_t based scalar nsec members of the timekeeper
 725 */
 726static inline void tk_update_ktime_data(struct timekeeper *tk)
 727{
 728	u64 seconds;
 729	u32 nsec;
 730
 731	/*
 732	 * The xtime based monotonic readout is:
 733	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
 734	 * The ktime based monotonic readout is:
 735	 *	nsec = base_mono + now();
 736	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
 737	 */
 738	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
 739	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
 740	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 741
 742	/*
 743	 * The sum of the nanoseconds portions of xtime and
 744	 * wall_to_monotonic can be greater/equal one second. Take
 745	 * this into account before updating tk->ktime_sec.
 746	 */
 747	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
 748	if (nsec >= NSEC_PER_SEC)
 749		seconds++;
 750	tk->ktime_sec = seconds;
 751
 752	/* Update the monotonic raw base */
 753	tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
 754}
 755
 756/* must hold timekeeper_lock */
 757static void timekeeping_update(struct timekeeper *tk, unsigned int action)
 758{
 759	if (action & TK_CLEAR_NTP) {
 760		tk->ntp_error = 0;
 761		ntp_clear();
 762	}
 763
 764	tk_update_leap_state(tk);
 765	tk_update_ktime_data(tk);
 766
 767	update_vsyscall(tk);
 768	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
 769
 770	tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
 771	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
 772	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);
 773
 774	if (action & TK_CLOCK_WAS_SET)
 775		tk->clock_was_set_seq++;
 776	/*
 777	 * The mirroring of the data to the shadow-timekeeper needs
 778	 * to happen last here to ensure we don't over-write the
 779	 * timekeeper structure on the next update with stale data
 780	 */
 781	if (action & TK_MIRROR)
 782		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
 783		       sizeof(tk_core.timekeeper));
 784}
 785
 786/**
 787 * timekeeping_forward_now - update clock to the current time
 788 * @tk:		Pointer to the timekeeper to update
 789 *
 790 * Forward the current clock to update its state since the last call to
 791 * update_wall_time(). This is useful before significant clock changes,
 792 * as it avoids having to deal with this time offset explicitly.
 793 */
 794static void timekeeping_forward_now(struct timekeeper *tk)
 795{
 796	u64 cycle_now, delta;
 797
 798	cycle_now = tk_clock_read(&tk->tkr_mono);
 799	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 800	tk->tkr_mono.cycle_last = cycle_now;
 801	tk->tkr_raw.cycle_last  = cycle_now;
 802
 803	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
 804	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
 805
 806	tk_normalize_xtime(tk);
 807}
 808
 809/**
 810 * ktime_get_real_ts64 - Returns the time of day in a timespec64.
 811 * @ts:		pointer to the timespec to be set
 812 *
 813 * Returns the time of day in a timespec64 (WARN if suspended).
 814 */
 815void ktime_get_real_ts64(struct timespec64 *ts)
 816{
 817	struct timekeeper *tk = &tk_core.timekeeper;
 818	unsigned int seq;
 819	u64 nsecs;
 820
 821	WARN_ON(timekeeping_suspended);
 822
 823	do {
 824		seq = read_seqcount_begin(&tk_core.seq);
 825
 826		ts->tv_sec = tk->xtime_sec;
 827		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 828
 829	} while (read_seqcount_retry(&tk_core.seq, seq));
 830
 831	ts->tv_nsec = 0;
 832	timespec64_add_ns(ts, nsecs);
 833}
 834EXPORT_SYMBOL(ktime_get_real_ts64);
 835
 836ktime_t ktime_get(void)
 837{
 838	struct timekeeper *tk = &tk_core.timekeeper;
 839	unsigned int seq;
 840	ktime_t base;
 841	u64 nsecs;
 842
 843	WARN_ON(timekeeping_suspended);
 844
 845	do {
 846		seq = read_seqcount_begin(&tk_core.seq);
 847		base = tk->tkr_mono.base;
 848		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 849
 850	} while (read_seqcount_retry(&tk_core.seq, seq));
 851
 852	return ktime_add_ns(base, nsecs);
 853}
 854EXPORT_SYMBOL_GPL(ktime_get);
 855
 856u32 ktime_get_resolution_ns(void)
 857{
 858	struct timekeeper *tk = &tk_core.timekeeper;
 859	unsigned int seq;
 860	u32 nsecs;
 861
 862	WARN_ON(timekeeping_suspended);
 863
 864	do {
 865		seq = read_seqcount_begin(&tk_core.seq);
 866		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
 867	} while (read_seqcount_retry(&tk_core.seq, seq));
 868
 869	return nsecs;
 870}
 871EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
 872
 873static ktime_t *offsets[TK_OFFS_MAX] = {
 874	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
 875	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
 876	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
 877};
 878
 879ktime_t ktime_get_with_offset(enum tk_offsets offs)
 880{
 881	struct timekeeper *tk = &tk_core.timekeeper;
 882	unsigned int seq;
 883	ktime_t base, *offset = offsets[offs];
 884	u64 nsecs;
 885
 886	WARN_ON(timekeeping_suspended);
 887
 888	do {
 889		seq = read_seqcount_begin(&tk_core.seq);
 890		base = ktime_add(tk->tkr_mono.base, *offset);
 891		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 892
 893	} while (read_seqcount_retry(&tk_core.seq, seq));
 894
 895	return ktime_add_ns(base, nsecs);
 896
 897}
 898EXPORT_SYMBOL_GPL(ktime_get_with_offset);
 899
 900ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
 901{
 902	struct timekeeper *tk = &tk_core.timekeeper;
 903	unsigned int seq;
 904	ktime_t base, *offset = offsets[offs];
 905	u64 nsecs;
 906
 907	WARN_ON(timekeeping_suspended);
 908
 909	do {
 910		seq = read_seqcount_begin(&tk_core.seq);
 911		base = ktime_add(tk->tkr_mono.base, *offset);
 912		nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
 913
 914	} while (read_seqcount_retry(&tk_core.seq, seq));
 915
 916	return ktime_add_ns(base, nsecs);
 917}
 918EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
 919
 920/**
 921 * ktime_mono_to_any() - convert monotonic time to any other time
 922 * @tmono:	time to convert.
 923 * @offs:	which offset to use
 924 */
 925ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
 926{
 927	ktime_t *offset = offsets[offs];
 928	unsigned int seq;
 929	ktime_t tconv;
 930
 931	do {
 932		seq = read_seqcount_begin(&tk_core.seq);
 933		tconv = ktime_add(tmono, *offset);
 934	} while (read_seqcount_retry(&tk_core.seq, seq));
 935
 936	return tconv;
 937}
 938EXPORT_SYMBOL_GPL(ktime_mono_to_any);
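/*
 * [Editor's example, not part of the kernel source] Converting a previously
 * captured CLOCK_MONOTONIC timestamp into the boottime domain:
 */
static inline ktime_t example_mono_to_boottime(ktime_t mono_stamp)
{
	return ktime_mono_to_any(mono_stamp, TK_OFFS_BOOT);
}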
 939
 940/**
 941 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 942 */
 943ktime_t ktime_get_raw(void)
 944{
 945	struct timekeeper *tk = &tk_core.timekeeper;
 946	unsigned int seq;
 947	ktime_t base;
 948	u64 nsecs;
 949
 950	do {
 951		seq = read_seqcount_begin(&tk_core.seq);
 952		base = tk->tkr_raw.base;
 953		nsecs = timekeeping_get_ns(&tk->tkr_raw);
 954
 955	} while (read_seqcount_retry(&tk_core.seq, seq));
 956
 957	return ktime_add_ns(base, nsecs);
 958}
 959EXPORT_SYMBOL_GPL(ktime_get_raw);
 960
 961/**
 962 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 963 * @ts:		pointer to timespec variable
 964 *
 965 * The function calculates the monotonic clock from the realtime
 966 * clock and the wall_to_monotonic offset and stores the result
 967 * in normalized timespec64 format in the variable pointed to by @ts.
 968 */
 969void ktime_get_ts64(struct timespec64 *ts)
 970{
 971	struct timekeeper *tk = &tk_core.timekeeper;
 972	struct timespec64 tomono;
 973	unsigned int seq;
 974	u64 nsec;
 975
 976	WARN_ON(timekeeping_suspended);
 977
 978	do {
 979		seq = read_seqcount_begin(&tk_core.seq);
 980		ts->tv_sec = tk->xtime_sec;
 981		nsec = timekeeping_get_ns(&tk->tkr_mono);
 982		tomono = tk->wall_to_monotonic;
 983
 984	} while (read_seqcount_retry(&tk_core.seq, seq));
 985
 986	ts->tv_sec += tomono.tv_sec;
 987	ts->tv_nsec = 0;
 988	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
 989}
 990EXPORT_SYMBOL_GPL(ktime_get_ts64);
 991
 992/**
 993 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 994 *
 995 * Returns the seconds portion of CLOCK_MONOTONIC with a single non
 996 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
 997 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 998 * covers ~136 years of uptime which should be enough to prevent
 999 * premature wrap arounds.
1000 */
1001time64_t ktime_get_seconds(void)
1002{
1003	struct timekeeper *tk = &tk_core.timekeeper;
1004
1005	WARN_ON(timekeeping_suspended);
1006	return tk->ktime_sec;
1007}
1008EXPORT_SYMBOL_GPL(ktime_get_seconds);
1009
1010/**
1011 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
1012 *
1013 * Returns the wall clock seconds since 1970.
1014 *
1015 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
1016 * 32bit systems the access must be protected with the sequence
1017 * counter to provide "atomic" access to the 64bit tk->xtime_sec
1018 * value.
1019 */
1020time64_t ktime_get_real_seconds(void)
1021{
1022	struct timekeeper *tk = &tk_core.timekeeper;
1023	time64_t seconds;
1024	unsigned int seq;
1025
1026	if (IS_ENABLED(CONFIG_64BIT))
1027		return tk->xtime_sec;
1028
1029	do {
1030		seq = read_seqcount_begin(&tk_core.seq);
1031		seconds = tk->xtime_sec;
1032
1033	} while (read_seqcount_retry(&tk_core.seq, seq));
1034
1035	return seconds;
1036}
1037EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
1038
1039/**
1040 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
 1041 * but without the sequence counter protection. This internal function
 1042 * is only called when the timekeeping lock is already held.
1043 */
1044noinstr time64_t __ktime_get_real_seconds(void)
1045{
1046	struct timekeeper *tk = &tk_core.timekeeper;
1047
1048	return tk->xtime_sec;
1049}
1050
1051/**
1052 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
1053 * @systime_snapshot:	pointer to struct receiving the system time snapshot
1054 */
1055void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
1056{
1057	struct timekeeper *tk = &tk_core.timekeeper;
1058	unsigned int seq;
1059	ktime_t base_raw;
1060	ktime_t base_real;
1061	u64 nsec_raw;
1062	u64 nsec_real;
1063	u64 now;
1064
1065	WARN_ON_ONCE(timekeeping_suspended);
1066
1067	do {
1068		seq = read_seqcount_begin(&tk_core.seq);
1069		now = tk_clock_read(&tk->tkr_mono);
1070		systime_snapshot->cs_id = tk->tkr_mono.clock->id;
1071		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
1072		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
1073		base_real = ktime_add(tk->tkr_mono.base,
1074				      tk_core.timekeeper.offs_real);
1075		base_raw = tk->tkr_raw.base;
1076		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
1077		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
1078	} while (read_seqcount_retry(&tk_core.seq, seq));
1079
1080	systime_snapshot->cycles = now;
1081	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
1082	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
1083}
1084EXPORT_SYMBOL_GPL(ktime_get_snapshot);
1085
1086/* Scale base by mult/div checking for overflow */
1087static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
1088{
1089	u64 tmp, rem;
1090
1091	tmp = div64_u64_rem(*base, div, &rem);
1092
1093	if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
1094	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
1095		return -EOVERFLOW;
1096	tmp *= mult;
1097
1098	rem = div64_u64(rem * mult, div);
1099	*base = tmp + rem;
1100	return 0;
1101}
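/*
 * [Editor's example, not part of the kernel source] scale64_check_overflow()
 * computes *base = *base * mult / div while keeping the remainder precise,
 * and fails with -EOVERFLOW instead of silently truncating. For instance,
 * scaling a 1,000,000 ns correction by 3/7:
 */
static inline int example_scale_correction(void)
{
	u64 base = 1000000;

	/* On success base becomes 428571 (1000000 * 3 / 7, rounded down). */
	return scale64_check_overflow(3, 7, &base);
}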
1102
1103/**
1104 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
1105 * @history:			Snapshot representing start of history
1106 * @partial_history_cycles:	Cycle offset into history (fractional part)
1107 * @total_history_cycles:	Total history length in cycles
1108 * @discontinuity:		True indicates clock was set on history period
1109 * @ts:				Cross timestamp that should be adjusted using
1110 *	partial/total ratio
1111 *
1112 * Helper function used by get_device_system_crosststamp() to correct the
1113 * crosstimestamp corresponding to the start of the current interval to the
1114 * system counter value (timestamp point) provided by the driver. The
1115 * total_history_* quantities are the total history starting at the provided
1116 * reference point and ending at the start of the current interval. The cycle
1117 * count between the driver timestamp point and the start of the current
1118 * interval is partial_history_cycles.
1119 */
1120static int adjust_historical_crosststamp(struct system_time_snapshot *history,
1121					 u64 partial_history_cycles,
1122					 u64 total_history_cycles,
1123					 bool discontinuity,
1124					 struct system_device_crosststamp *ts)
1125{
1126	struct timekeeper *tk = &tk_core.timekeeper;
1127	u64 corr_raw, corr_real;
1128	bool interp_forward;
1129	int ret;
1130
1131	if (total_history_cycles == 0 || partial_history_cycles == 0)
1132		return 0;
1133
1134	/* Interpolate shortest distance from beginning or end of history */
1135	interp_forward = partial_history_cycles > total_history_cycles / 2;
1136	partial_history_cycles = interp_forward ?
1137		total_history_cycles - partial_history_cycles :
1138		partial_history_cycles;
1139
1140	/*
1141	 * Scale the monotonic raw time delta by:
1142	 *	partial_history_cycles / total_history_cycles
1143	 */
1144	corr_raw = (u64)ktime_to_ns(
1145		ktime_sub(ts->sys_monoraw, history->raw));
1146	ret = scale64_check_overflow(partial_history_cycles,
1147				     total_history_cycles, &corr_raw);
1148	if (ret)
1149		return ret;
1150
1151	/*
1152	 * If there is a discontinuity in the history, scale monotonic raw
1153	 *	correction by:
1154	 *	mult(real)/mult(raw) yielding the realtime correction
1155	 * Otherwise, calculate the realtime correction similar to monotonic
1156	 *	raw calculation
1157	 */
1158	if (discontinuity) {
1159		corr_real = mul_u64_u32_div
1160			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
1161	} else {
1162		corr_real = (u64)ktime_to_ns(
1163			ktime_sub(ts->sys_realtime, history->real));
1164		ret = scale64_check_overflow(partial_history_cycles,
1165					     total_history_cycles, &corr_real);
1166		if (ret)
1167			return ret;
1168	}
1169
 1170	/* Fix up monotonic raw and real time values */
1171	if (interp_forward) {
1172		ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
1173		ts->sys_realtime = ktime_add_ns(history->real, corr_real);
1174	} else {
1175		ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
1176		ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
1177	}
1178
1179	return 0;
1180}
1181
1182/*
1183 * cycle_between - true if test occurs chronologically between before and after
1184 */
1185static bool cycle_between(u64 before, u64 test, u64 after)
1186{
1187	if (test > before && test < after)
1188		return true;
1189	if (test < before && before > after)
1190		return true;
1191	return false;
1192}
1193
1194/**
1195 * get_device_system_crosststamp - Synchronously capture system/device timestamp
1196 * @get_time_fn:	Callback to get simultaneous device time and
1197 *	system counter from the device driver
1198 * @ctx:		Context passed to get_time_fn()
1199 * @history_begin:	Historical reference point used to interpolate system
1200 *	time when counter provided by the driver is before the current interval
1201 * @xtstamp:		Receives simultaneously captured system and device time
1202 *
1203 * Reads a timestamp from a device and correlates it to system time
1204 */
1205int get_device_system_crosststamp(int (*get_time_fn)
1206				  (ktime_t *device_time,
1207				   struct system_counterval_t *sys_counterval,
1208				   void *ctx),
1209				  void *ctx,
1210				  struct system_time_snapshot *history_begin,
1211				  struct system_device_crosststamp *xtstamp)
1212{
1213	struct system_counterval_t system_counterval;
1214	struct timekeeper *tk = &tk_core.timekeeper;
1215	u64 cycles, now, interval_start;
1216	unsigned int clock_was_set_seq = 0;
1217	ktime_t base_real, base_raw;
1218	u64 nsec_real, nsec_raw;
1219	u8 cs_was_changed_seq;
1220	unsigned int seq;
1221	bool do_interp;
1222	int ret;
1223
1224	do {
1225		seq = read_seqcount_begin(&tk_core.seq);
1226		/*
1227		 * Try to synchronously capture device time and a system
1228		 * counter value calling back into the device driver
1229		 */
1230		ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
1231		if (ret)
1232			return ret;
1233
1234		/*
1235		 * Verify that the clocksource associated with the captured
1236		 * system counter value is the same as the currently installed
1237		 * timekeeper clocksource
1238		 */
1239		if (tk->tkr_mono.clock != system_counterval.cs)
1240			return -ENODEV;
1241		cycles = system_counterval.cycles;
1242
1243		/*
1244		 * Check whether the system counter value provided by the
1245		 * device driver is on the current timekeeping interval.
1246		 */
1247		now = tk_clock_read(&tk->tkr_mono);
1248		interval_start = tk->tkr_mono.cycle_last;
1249		if (!cycle_between(interval_start, cycles, now)) {
1250			clock_was_set_seq = tk->clock_was_set_seq;
1251			cs_was_changed_seq = tk->cs_was_changed_seq;
1252			cycles = interval_start;
1253			do_interp = true;
1254		} else {
1255			do_interp = false;
1256		}
1257
1258		base_real = ktime_add(tk->tkr_mono.base,
1259				      tk_core.timekeeper.offs_real);
1260		base_raw = tk->tkr_raw.base;
1261
1262		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
1263						     system_counterval.cycles);
1264		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
1265						    system_counterval.cycles);
1266	} while (read_seqcount_retry(&tk_core.seq, seq));
1267
1268	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
1269	xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);
1270
1271	/*
1272	 * Interpolate if necessary, adjusting back from the start of the
1273	 * current interval
1274	 */
1275	if (do_interp) {
1276		u64 partial_history_cycles, total_history_cycles;
1277		bool discontinuity;
1278
1279		/*
1280		 * Check that the counter value occurs after the provided
1281		 * history reference and that the history doesn't cross a
1282		 * clocksource change
1283		 */
1284		if (!history_begin ||
1285		    !cycle_between(history_begin->cycles,
1286				   system_counterval.cycles, cycles) ||
1287		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
1288			return -EINVAL;
1289		partial_history_cycles = cycles - system_counterval.cycles;
1290		total_history_cycles = cycles - history_begin->cycles;
1291		discontinuity =
1292			history_begin->clock_was_set_seq != clock_was_set_seq;
1293
1294		ret = adjust_historical_crosststamp(history_begin,
1295						    partial_history_cycles,
1296						    total_history_cycles,
1297						    discontinuity, xtstamp);
1298		if (ret)
1299			return ret;
1300	}
1301
1302	return 0;
1303}
1304EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
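/*
 * [Editor's example, not part of the kernel source] Sketch of the driver
 * side of get_device_system_crosststamp(). A real user (e.g. a PTP capable
 * NIC) latches the device clock and the matching system counter value in
 * hardware; the helper below is purely hypothetical and would make the call
 * fail with -ENODEV because it reports no clocksource.
 */
static int example_get_sync_time(ktime_t *device_time,
				 struct system_counterval_t *sys_counterval,
				 void *ctx)
{
	*device_time = ns_to_ktime(0);		/* hypothetical device clock read */
	sys_counterval->cycles = 0;		/* hypothetical counter snapshot */
	sys_counterval->cs = NULL;		/* must be tk->tkr_mono.clock to succeed */
	return 0;
}

/*
 * Usage, with an optional history snapshot taken earlier via
 * ktime_get_snapshot() to allow interpolation outside the current interval:
 *
 *	struct system_device_crosststamp xt;
 *	int err = get_device_system_crosststamp(example_get_sync_time,
 *						NULL, NULL, &xt);
 */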
1305
1306/**
1307 * do_settimeofday64 - Sets the time of day.
1308 * @ts:     pointer to the timespec64 variable containing the new time
1309 *
 1310 * Sets the time of day to the new time, updates NTP and notifies hrtimers
1311 */
1312int do_settimeofday64(const struct timespec64 *ts)
1313{
1314	struct timekeeper *tk = &tk_core.timekeeper;
1315	struct timespec64 ts_delta, xt;
1316	unsigned long flags;
1317	int ret = 0;
1318
1319	if (!timespec64_valid_settod(ts))
1320		return -EINVAL;
1321
1322	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1323	write_seqcount_begin(&tk_core.seq);
1324
1325	timekeeping_forward_now(tk);
1326
1327	xt = tk_xtime(tk);
1328	ts_delta = timespec64_sub(*ts, xt);
1329
1330	if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
1331		ret = -EINVAL;
1332		goto out;
1333	}
1334
1335	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
1336
1337	tk_set_xtime(tk, ts);
1338out:
1339	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1340
1341	write_seqcount_end(&tk_core.seq);
1342	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1343
1344	/* Signal hrtimers about time change */
1345	clock_was_set(CLOCK_SET_WALL);
1346
1347	if (!ret) {
1348		audit_tk_injoffset(ts_delta);
1349		add_device_randomness(ts, sizeof(*ts));
1350	}
1351
1352	return ret;
1353}
1354EXPORT_SYMBOL(do_settimeofday64);
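/*
 * [Editor's example, not part of the kernel source] A minimal caller, e.g.
 * setting the wall clock from a freshly read RTC value (roughly what the
 * RTC "hctosys" mechanism does):
 */
static inline int example_set_wall_clock(time64_t secs)
{
	struct timespec64 ts = { .tv_sec = secs, .tv_nsec = 0 };

	return do_settimeofday64(&ts);
}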
1355
1356/**
1357 * timekeeping_inject_offset - Adds or subtracts from the current time.
1358 * @ts:		Pointer to the timespec variable containing the offset
1359 *
1360 * Adds or subtracts an offset value from the current time.
1361 */
1362static int timekeeping_inject_offset(const struct timespec64 *ts)
1363{
1364	struct timekeeper *tk = &tk_core.timekeeper;
1365	unsigned long flags;
1366	struct timespec64 tmp;
1367	int ret = 0;
1368
1369	if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
1370		return -EINVAL;
1371
1372	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1373	write_seqcount_begin(&tk_core.seq);
1374
1375	timekeeping_forward_now(tk);
1376
1377	/* Make sure the proposed value is valid */
1378	tmp = timespec64_add(tk_xtime(tk), *ts);
1379	if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
1380	    !timespec64_valid_settod(&tmp)) {
1381		ret = -EINVAL;
1382		goto error;
1383	}
1384
1385	tk_xtime_add(tk, ts);
1386	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));
1387
1388error: /* even if we error out, we forwarded the time, so call update */
1389	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1390
1391	write_seqcount_end(&tk_core.seq);
1392	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1393
1394	/* Signal hrtimers about time change */
1395	clock_was_set(CLOCK_SET_WALL);
1396
1397	return ret;
1398}
1399
1400/*
1401 * Indicates if there is an offset between the system clock and the hardware
1402 * clock/persistent clock/rtc.
1403 */
1404int persistent_clock_is_local;
1405
1406/*
1407 * Adjust the time obtained from the CMOS to be UTC time instead of
1408 * local time.
1409 *
1410 * This is ugly, but preferable to the alternatives.  Otherwise we
1411 * would either need to write a program to do it in /etc/rc (and risk
1412 * confusion if the program gets run more than once; it would also be
1413 * hard to make the program warp the clock precisely n hours)  or
1414 * compile in the timezone information into the kernel.  Bad, bad....
1415 *
1416 *						- TYT, 1992-01-01
1417 *
1418 * The best thing to do is to keep the CMOS clock in universal time (UTC)
1419 * as real UNIX machines always do it. This avoids all headaches about
1420 * daylight saving times and warping kernel clocks.
1421 */
1422void timekeeping_warp_clock(void)
1423{
1424	if (sys_tz.tz_minuteswest != 0) {
1425		struct timespec64 adjust;
1426
1427		persistent_clock_is_local = 1;
1428		adjust.tv_sec = sys_tz.tz_minuteswest * 60;
1429		adjust.tv_nsec = 0;
1430		timekeeping_inject_offset(&adjust);
1431	}
1432}
1433
1434/*
1435 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
1436 */
1437static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
1438{
1439	tk->tai_offset = tai_offset;
1440	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
1441}
1442
1443/*
1444 * change_clocksource - Swaps clocksources if a new one is available
1445 *
1446 * Accumulates current time interval and initializes new clocksource
1447 */
1448static int change_clocksource(void *data)
1449{
1450	struct timekeeper *tk = &tk_core.timekeeper;
1451	struct clocksource *new, *old = NULL;
1452	unsigned long flags;
1453	bool change = false;
1454
1455	new = (struct clocksource *) data;
1456
1457	/*
 1458	 * If the clocksource is in a module, get a module reference. Succeeds
1459	 * for built-in code (owner == NULL) as well.
1460	 */
1461	if (try_module_get(new->owner)) {
1462		if (!new->enable || new->enable(new) == 0)
1463			change = true;
1464		else
1465			module_put(new->owner);
1466	}
1467
1468	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1469	write_seqcount_begin(&tk_core.seq);
1470
1471	timekeeping_forward_now(tk);
1472
1473	if (change) {
1474		old = tk->tkr_mono.clock;
1475		tk_setup_internals(tk, new);
1476	}
1477
1478	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1479
1480	write_seqcount_end(&tk_core.seq);
1481	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1482
1483	if (old) {
1484		if (old->disable)
1485			old->disable(old);
1486
1487		module_put(old->owner);
1488	}
1489
1490	return 0;
1491}
1492
1493/**
1494 * timekeeping_notify - Install a new clock source
1495 * @clock:		pointer to the clock source
1496 *
1497 * This function is called from clocksource.c after a new, better clock
1498 * source has been registered. The caller holds the clocksource_mutex.
1499 */
1500int timekeeping_notify(struct clocksource *clock)
1501{
1502	struct timekeeper *tk = &tk_core.timekeeper;
1503
1504	if (tk->tkr_mono.clock == clock)
1505		return 0;
1506	stop_machine(change_clocksource, clock, NULL);
1507	tick_clock_notify();
1508	return tk->tkr_mono.clock == clock ? 0 : -1;
1509}
1510
1511/**
1512 * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec
1513 * @ts:		pointer to the timespec64 to be set
1514 *
1515 * Returns the raw monotonic time (completely un-modified by ntp)
1516 */
1517void ktime_get_raw_ts64(struct timespec64 *ts)
1518{
1519	struct timekeeper *tk = &tk_core.timekeeper;
1520	unsigned int seq;
1521	u64 nsecs;
1522
1523	do {
1524		seq = read_seqcount_begin(&tk_core.seq);
1525		ts->tv_sec = tk->raw_sec;
1526		nsecs = timekeeping_get_ns(&tk->tkr_raw);
1527
1528	} while (read_seqcount_retry(&tk_core.seq, seq));
1529
1530	ts->tv_nsec = 0;
1531	timespec64_add_ns(ts, nsecs);
1532}
1533EXPORT_SYMBOL(ktime_get_raw_ts64);
1534
1535
1536/**
1537 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
1538 */
1539int timekeeping_valid_for_hres(void)
1540{
1541	struct timekeeper *tk = &tk_core.timekeeper;
1542	unsigned int seq;
1543	int ret;
1544
1545	do {
1546		seq = read_seqcount_begin(&tk_core.seq);
1547
1548		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
1549
1550	} while (read_seqcount_retry(&tk_core.seq, seq));
1551
1552	return ret;
1553}
1554
1555/**
1556 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
1557 */
1558u64 timekeeping_max_deferment(void)
1559{
1560	struct timekeeper *tk = &tk_core.timekeeper;
1561	unsigned int seq;
1562	u64 ret;
1563
1564	do {
1565		seq = read_seqcount_begin(&tk_core.seq);
1566
1567		ret = tk->tkr_mono.clock->max_idle_ns;
1568
1569	} while (read_seqcount_retry(&tk_core.seq, seq));
1570
1571	return ret;
1572}
1573
1574/**
1575 * read_persistent_clock64 -  Return time from the persistent clock.
1576 * @ts: Pointer to the storage for the readout value
1577 *
1578 * Weak dummy function for arches that do not yet support it.
1579 * Reads the time from the battery backed persistent clock.
1580 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
1581 *
1582 *  XXX - Do be sure to remove it once all arches implement it.
1583 */
1584void __weak read_persistent_clock64(struct timespec64 *ts)
1585{
1586	ts->tv_sec = 0;
1587	ts->tv_nsec = 0;
1588}
1589
1590/**
1591 * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
1592 *                                        from the boot.
1593 * @wall_time:	  current time as returned by persistent clock
1594 * @boot_offset:  offset that is defined as wall_time - boot_time
1595 *
1596 * Weak dummy function for arches that do not yet support it.
1597 *
1598 * The default function calculates offset based on the current value of
1599 * local_clock(). This way architectures that support sched_clock() but don't
1600 * support dedicated boot time clock will provide the best estimate of the
1601 * boot time.
1602 */
1603void __weak __init
1604read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
1605				     struct timespec64 *boot_offset)
1606{
1607	read_persistent_clock64(wall_time);
1608	*boot_offset = ns_to_timespec64(local_clock());
1609}
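/*
 * [Editor's example, not part of the kernel source] An architecture whose
 * battery backed clock is readable with interrupts disabled overrides the
 * weak stub above. A hypothetical implementation (mach_rtc_read_seconds()
 * is made up for illustration):
 */
#if 0	/* illustration only */
void read_persistent_clock64(struct timespec64 *ts)
{
	ts->tv_sec = mach_rtc_read_seconds();
	ts->tv_nsec = 0;
}
#endif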
1610
1611/*
1612 * Flag reflecting whether timekeeping_resume() has injected sleeptime.
1613 *
 1614 * The flag starts off false and is only set when a suspend reaches
1615 * timekeeping_suspend(), timekeeping_resume() sets it to false when the
1616 * timekeeper clocksource is not stopping across suspend and has been
1617 * used to update sleep time. If the timekeeper clocksource has stopped
1618 * then the flag stays true and is used by the RTC resume code to decide
 1619 * whether sleeptime must be injected, and if so the flag is cleared afterwards.
1620 *
1621 * If a suspend fails before reaching timekeeping_resume() then the flag
1622 * stays false and prevents erroneous sleeptime injection.
1623 */
1624static bool suspend_timing_needed;
1625
1626/* Flag for if there is a persistent clock on this platform */
1627static bool persistent_clock_exists;
1628
1629/*
1630 * timekeeping_init - Initializes the clocksource and common timekeeping values
1631 */
1632void __init timekeeping_init(void)
1633{
1634	struct timespec64 wall_time, boot_offset, wall_to_mono;
1635	struct timekeeper *tk = &tk_core.timekeeper;
1636	struct clocksource *clock;
1637	unsigned long flags;
1638
1639	read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
1640	if (timespec64_valid_settod(&wall_time) &&
1641	    timespec64_to_ns(&wall_time) > 0) {
1642		persistent_clock_exists = true;
1643	} else if (timespec64_to_ns(&wall_time) != 0) {
1644		pr_warn("Persistent clock returned invalid value");
1645		wall_time = (struct timespec64){0};
1646	}
1647
1648	if (timespec64_compare(&wall_time, &boot_offset) < 0)
1649		boot_offset = (struct timespec64){0};
1650
1651	/*
 1652	 * We want to set wall_to_mono, so the following is true:
1653	 * wall time + wall_to_mono = boot time
1654	 */
1655	wall_to_mono = timespec64_sub(boot_offset, wall_time);
1656
1657	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1658	write_seqcount_begin(&tk_core.seq);
1659	ntp_init();
1660
1661	clock = clocksource_default_clock();
1662	if (clock->enable)
1663		clock->enable(clock);
1664	tk_setup_internals(tk, clock);
1665
1666	tk_set_xtime(tk, &wall_time);
1667	tk->raw_sec = 0;
1668
1669	tk_set_wall_to_mono(tk, wall_to_mono);
1670
1671	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1672
1673	write_seqcount_end(&tk_core.seq);
1674	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1675}
1676
1677/* time in seconds when suspend began for persistent clock */
1678static struct timespec64 timekeeping_suspend_time;
1679
1680/**
1681 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1682 * @tk:		Pointer to the timekeeper to be updated
1683 * @delta:	Pointer to the delta value in timespec64 format
1684 *
1685 * Takes a timespec offset measuring a suspend interval and properly
1686 * adds the sleep offset to the timekeeping variables.
1687 */
1688static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1689					   const struct timespec64 *delta)
1690{
1691	if (!timespec64_valid_strict(delta)) {
1692		printk_deferred(KERN_WARNING
1693				"__timekeeping_inject_sleeptime: Invalid "
1694				"sleep delta value!\n");
1695		return;
1696	}
1697	tk_xtime_add(tk, delta);
1698	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1699	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1700	tk_debug_account_sleep_time(delta);
1701}
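
/*
 * Standalone demo of what the sleep time injection above does to the clocks:
 * CLOCK_REALTIME and CLOCK_BOOTTIME jump forward by the suspend interval,
 * while CLOCK_MONOTONIC (xtime + wall_to_monotonic) does not. Values are
 * invented seconds, not taken from a real system.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t xtime = 1700000000;		/* CLOCK_REALTIME seconds */
	int64_t wall_to_mono = -1699999000;	/* monotonic = xtime + wall_to_mono */
	int64_t offs_boot = 5;			/* boottime = monotonic + offs_boot */
	int64_t delta = 600;			/* we slept for ten minutes */

	int64_t mono_before = xtime + wall_to_mono;
	int64_t boot_before = mono_before + offs_boot;

	/* Mirrors __timekeeping_inject_sleeptime() and tk_update_sleep_time() */
	xtime += delta;
	wall_to_mono -= delta;
	offs_boot += delta;

	int64_t mono_after = xtime + wall_to_mono;
	int64_t boot_after = mono_after + offs_boot;

	printf("monotonic unchanged: %s\n", mono_after == mono_before ? "yes" : "no");
	printf("realtime advanced by %lld s\n", (long long)delta);
	printf("boottime advanced by %lld s\n", (long long)(boot_after - boot_before));
	return 0;
}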
1702
1703#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
1704/*
1705 * We have three kinds of time sources to use for sleep time
1706 * injection, the preference order is:
1707 * 1) non-stop clocksource
1708 * 2) persistent clock (ie: RTC accessible when irqs are off)
1709 * 3) RTC
1710 *
1711 * 1) and 2) are used by timekeeping, 3) by the RTC subsystem.
1712 * If the system has neither 1) nor 2), 3) will be used in the end.
1713 *
1714 *
1715 * If timekeeping has injected sleeptime via either 1) or 2),
1716 * 3) becomes unnecessary, so in that case we don't need to call
1717 * rtc_resume(); this is what timekeeping_rtc_skipresume()
1718 * indicates.
1719 */
1720bool timekeeping_rtc_skipresume(void)
1721{
1722	return !suspend_timing_needed;
1723}
1724
1725/*
1726 * Whether 1) can be used is only known when doing
1727 * timekeeping_resume(), which is invoked after rtc_suspend(),
1728 * so we can't safely skip rtc_suspend() if the system has 1).
1729 *
1730 * But if the system has 2), 2) will definitely be used, so in
1731 * that case we don't need to call rtc_suspend(); this is what
1732 * timekeeping_rtc_skipsuspend() indicates.
1733 */
1734bool timekeeping_rtc_skipsuspend(void)
1735{
1736	return persistent_clock_exists;
1737}
1738
1739/**
1740 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1741 * @delta: pointer to a timespec64 delta value
1742 *
1743 * This hook is for architectures that cannot support read_persistent_clock64
1744 * because their RTC/persistent clock is only accessible when irqs are enabled,
1745 * and that also don't have an effective nonstop clocksource.
1746 *
1747 * This function should only be called by rtc_resume(), and allows
1748 * a suspend offset to be injected into the timekeeping values.
1749 */
1750void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
1751{
1752	struct timekeeper *tk = &tk_core.timekeeper;
1753	unsigned long flags;
1754
1755	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1756	write_seqcount_begin(&tk_core.seq);
1757
1758	suspend_timing_needed = false;
1759
1760	timekeeping_forward_now(tk);
1761
1762	__timekeeping_inject_sleeptime(tk, delta);
1763
1764	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1765
1766	write_seqcount_end(&tk_core.seq);
1767	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1768
1769	/* Signal hrtimers about time change */
1770	clock_was_set(CLOCK_SET_WALL | CLOCK_SET_BOOT);
1771}
1772#endif
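
/*
 * Rough sketch of how an RTC resume path might use the hooks above (only
 * meaningful with CONFIG_PM_SLEEP and CONFIG_RTC_HCTOSYS_DEVICE). The
 * rtc_saved_suspend_time variable and read_rtc_time64() helper are
 * hypothetical stand-ins, not the actual RTC core code.
 */
static struct timespec64 rtc_saved_suspend_time;

static void rtc_resume_sketch(void)
{
	struct timespec64 now, sleep;

	/* Timekeeping already injected sleep time from a better source */
	if (timekeeping_rtc_skipresume())
		return;

	read_rtc_time64(&now);
	sleep = timespec64_sub(now, rtc_saved_suspend_time);
	if (timespec64_to_ns(&sleep) > 0)
		timekeeping_inject_sleeptime64(&sleep);
}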
1773
1774/**
1775 * timekeeping_resume - Resumes the generic timekeeping subsystem.
1776 */
1777void timekeeping_resume(void)
1778{
1779	struct timekeeper *tk = &tk_core.timekeeper;
1780	struct clocksource *clock = tk->tkr_mono.clock;
1781	unsigned long flags;
1782	struct timespec64 ts_new, ts_delta;
1783	u64 cycle_now, nsec;
1784	bool inject_sleeptime = false;
 
 
1785
1786	read_persistent_clock64(&ts_new);
1787
1788	clockevents_resume();
1789	clocksource_resume();
1790
1791	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1792	write_seqcount_begin(&tk_core.seq);
1793
1794	/*
1795	 * After the system resumes, we need to calculate the suspended time and
1796	 * compensate the OS time for it. There are 3 sources that could be
1797	 * used: Nonstop clocksource during suspend, persistent clock and rtc
1798	 * device.
1799	 *
1800	 * One specific platform may have 1 or 2 or all of them, and the
1801	 * preference will be:
1802	 *	suspend-nonstop clocksource -> persistent clock -> rtc
1803	 * The less preferred source will only be tried if there is no better
1804	 * usable source. The rtc part is handled separately in rtc core code.
1805	 */
1806	cycle_now = tk_clock_read(&tk->tkr_mono);
1807	nsec = clocksource_stop_suspend_timing(clock, cycle_now);
1808	if (nsec > 0) {
1809		ts_delta = ns_to_timespec64(nsec);
1810		inject_sleeptime = true;
1811	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1812		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1813		inject_sleeptime = true;
1814	}
1815
1816	if (inject_sleeptime) {
1817		suspend_timing_needed = false;
1818		__timekeeping_inject_sleeptime(tk, &ts_delta);
1819	}
1820
1821	/* Re-base the last cycle value */
1822	tk->tkr_mono.cycle_last = cycle_now;
1823	tk->tkr_raw.cycle_last  = cycle_now;
1824
1825	tk->ntp_error = 0;
1826	timekeeping_suspended = 0;
1827	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1828	write_seqcount_end(&tk_core.seq);
1829	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1830
1831	touch_softlockup_watchdog();
1832
1833	/* Resume the clockevent device(s) and hrtimers */
1834	tick_resume();
1835	/* Notify timerfd as resume is equivalent to clock_was_set() */
1836	timerfd_resume();
1837}
1838
1839int timekeeping_suspend(void)
1840{
1841	struct timekeeper *tk = &tk_core.timekeeper;
1842	unsigned long flags;
1843	struct timespec64		delta, delta_delta;
1844	static struct timespec64	old_delta;
1845	struct clocksource *curr_clock;
 
1846	u64 cycle_now;
1847
1848	read_persistent_clock64(&timekeeping_suspend_time);
1849
1850	/*
1851	 * On some systems the persistent_clock cannot be detected at
1852	 * timekeeping_init() by its return value, so if we see a valid
1853	 * value returned, update the persistent_clock_exists flag.
1854	 */
1855	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1856		persistent_clock_exists = true;
1857
1858	suspend_timing_needed = true;
1859
1860	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1861	write_seqcount_begin(&tk_core.seq);
1862	timekeeping_forward_now(tk);
1863	timekeeping_suspended = 1;
1864
1865	/*
1866	 * Since we've called forward_now, cycle_last stores the value
1867	 * just read from the current clocksource. Save this to potentially
1868	 * use in suspend timing.
1869	 */
1870	curr_clock = tk->tkr_mono.clock;
1871	cycle_now = tk->tkr_mono.cycle_last;
1872	clocksource_start_suspend_timing(curr_clock, cycle_now);
1873
1874	if (persistent_clock_exists) {
1875		/*
1876		 * To avoid drift caused by repeated suspend/resumes,
1877		 * each of which can add ~1 second of drift error,
1878		 * try to compensate so the difference in system time
1879		 * and persistent_clock time stays close to constant.
1880		 */
1881		delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1882		delta_delta = timespec64_sub(delta, old_delta);
1883		if (abs(delta_delta.tv_sec) >= 2) {
1884			/*
1885			 * if delta_delta is too large, assume time correction
1886			 * has occurred and set old_delta to the current delta.
1887			 */
1888			old_delta = delta;
1889		} else {
1890			/* Otherwise try to adjust timekeeping_suspend_time to compensate */
1891			timekeeping_suspend_time =
1892				timespec64_add(timekeeping_suspend_time, delta_delta);
1893		}
1894	}
1895
1896	timekeeping_update(tk, TK_MIRROR);
1897	halt_fast_timekeeper(tk);
1898	write_seqcount_end(&tk_core.seq);
1899	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1900
1901	tick_suspend();
1902	clocksource_suspend();
1903	clockevents_suspend();
1904
1905	return 0;
1906}
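
/*
 * Standalone demo of the suspend-time compensation arithmetic above. As the
 * comment notes, a coarse persistent clock can add up to ~1 second of error
 * per suspend/resume cycle; nudging timekeeping_suspend_time by delta_delta
 * keeps the difference between system time and persistent-clock time seen by
 * the resume path constant instead of letting the error accumulate. Numbers
 * are invented.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int64_t old_delta = 0;
	/* Sub-second phase error seen at three consecutive suspends, in ns */
	int64_t phase_error[3] = { 400000000, 700000000, 300000000 };

	for (int i = 0; i < 3; i++) {
		int64_t xtime = 1700000000LL * 1000000000 + phase_error[i];
		int64_t suspend_time = 1700000000LL * 1000000000;
		int64_t delta = xtime - suspend_time;
		int64_t delta_delta = delta - old_delta;

		if (llabs((long long)delta_delta) >= 2000000000LL)
			old_delta = delta;		/* time was stepped, resync */
		else
			suspend_time += delta_delta;	/* compensate the phase error */

		printf("cycle %d: effective delta %lld ns\n",
		       i, (long long)(xtime - suspend_time));
	}
	return 0;
}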
1907
1908/* sysfs resume/suspend bits for timekeeping */
1909static struct syscore_ops timekeeping_syscore_ops = {
1910	.resume		= timekeeping_resume,
1911	.suspend	= timekeeping_suspend,
1912};
1913
1914static int __init timekeeping_init_ops(void)
1915{
1916	register_syscore_ops(&timekeeping_syscore_ops);
1917	return 0;
1918}
1919device_initcall(timekeeping_init_ops);
1920
1921/*
1922 * Apply a multiplier adjustment to the timekeeper
1923 */
1924static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1925							 s64 offset,
1926							 s32 mult_adj)
1927{
1928	s64 interval = tk->cycle_interval;
1929
1930	if (mult_adj == 0) {
1931		return;
1932	} else if (mult_adj == -1) {
1933		interval = -interval;
1934		offset = -offset;
1935	} else if (mult_adj != 1) {
1936		interval *= mult_adj;
1937		offset *= mult_adj;
1938	}
1939
1940	/*
1941	 * So the following can be confusing.
1942	 *
1943	 * To keep things simple, let's assume mult_adj == 1 for now.
1944	 *
1945	 * When mult_adj != 1, remember that the interval and offset values
1946	 * have been appropriately scaled so the math is the same.
1947	 *
1948	 * The basic idea here is that we're increasing the multiplier
1949	 * by one; this causes the xtime_interval to be incremented by
1950	 * one cycle_interval. This is because:
1951	 *	xtime_interval = cycle_interval * mult
1952	 * So if mult is being incremented by one:
1953	 *	xtime_interval = cycle_interval * (mult + 1)
1954	 * It's the same as:
1955	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
1956	 * Which can be shortened to:
1957	 *	xtime_interval += cycle_interval
1958	 *
1959	 * So offset stores the non-accumulated cycles. Thus the current
1960	 * time (in shifted nanoseconds) is:
1961	 *	now = (offset * adj) + xtime_nsec
1962	 * Now, even though we're adjusting the clock frequency, we have
1963	 * to keep time consistent. In other words, we can't jump back
1964	 * in time, and we also want to avoid jumping forward in time.
1965	 *
1966	 * So given the same offset value, we need the time to be the same
1967	 * both before and after the freq adjustment.
1968	 *	now = (offset * adj_1) + xtime_nsec_1
1969	 *	now = (offset * adj_2) + xtime_nsec_2
1970	 * So:
1971	 *	(offset * adj_1) + xtime_nsec_1 =
1972	 *		(offset * adj_2) + xtime_nsec_2
1973	 * And we know:
1974	 *	adj_2 = adj_1 + 1
1975	 * So:
1976	 *	(offset * adj_1) + xtime_nsec_1 =
1977	 *		(offset * (adj_1+1)) + xtime_nsec_2
1978	 *	(offset * adj_1) + xtime_nsec_1 =
1979	 *		(offset * adj_1) + offset + xtime_nsec_2
1980	 * Canceling the sides:
1981	 *	xtime_nsec_1 = offset + xtime_nsec_2
1982	 * Which gives us:
1983	 *	xtime_nsec_2 = xtime_nsec_1 - offset
1984	 * Which simplifies to:
1985	 *	xtime_nsec -= offset
1986	 */
1987	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
1988		/* NTP adjustment caused clocksource mult overflow */
1989		WARN_ON_ONCE(1);
1990		return;
1991	}
1992
1993	tk->tkr_mono.mult += mult_adj;
1994	tk->xtime_interval += interval;
1995	tk->tkr_mono.xtime_nsec -= offset;
1996}
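
/*
 * Standalone check of the invariant derived in the comment above: with
 *     now = (offset * mult + xtime_nsec) >> shift
 * bumping mult by one and subtracting offset from xtime_nsec leaves the
 * current readout unchanged. Values are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offset = 123456;		/* unaccumulated cycles */
	uint64_t xtime_nsec = 987654321;	/* shifted nanoseconds */
	uint32_t mult = 4194304;
	uint32_t shift = 22;

	uint64_t before = ((offset * mult) + xtime_nsec) >> shift;

	mult += 1;			/* like timekeeping_apply_adjustment(tk, offset, +1) */
	xtime_nsec -= offset;

	uint64_t after = ((offset * mult) + xtime_nsec) >> shift;

	printf("before=%llu after=%llu equal=%d\n",
	       (unsigned long long)before, (unsigned long long)after,
	       before == after);
	return 0;
}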
1997
1998/*
1999 * Adjust the timekeeper's multiplier to the correct frequency
2000 * and also to reduce the accumulated error value.
2001 */
2002static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
2003{
 
2004	u32 mult;
2005
2006	/*
2007	 * Determine the multiplier from the current NTP tick length.
2008	 * Avoid expensive division when the tick length doesn't change.
2009	 */
2010	if (likely(tk->ntp_tick == ntp_tick_length())) {
2011		mult = tk->tkr_mono.mult - tk->ntp_err_mult;
2012	} else {
2013		tk->ntp_tick = ntp_tick_length();
2014		mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
2015				 tk->xtime_remainder, tk->cycle_interval);
2016	}
2017
2018	/*
2019	 * If the clock is behind the NTP time, increase the multiplier by 1
2020	 * to catch up with it. If it's ahead and there was a remainder in the
2021	 * tick division, the clock will slow down. Otherwise it will stay
2022	 * ahead until the tick length changes to a non-divisible value.
2023	 */
2024	tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
2025	mult += tk->ntp_err_mult;
2026
2027	timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);
2028
2029	if (unlikely(tk->tkr_mono.clock->maxadj &&
2030		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
2031			> tk->tkr_mono.clock->maxadj))) {
2032		printk_once(KERN_WARNING
2033			"Adjusting %s more than 11%% (%ld vs %ld)\n",
2034			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
2035			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
2036	}
2037
2038	/*
2039	 * It may be possible that when we entered this function, xtime_nsec
2040	 * was very small.  Further, if we're slightly speeding the clocksource
2041	 * in the code above, it's possible the required corrective factor to
2042	 * xtime_nsec could cause it to underflow.
2043	 *
2044	 * Now, since we have already accumulated the second and the NTP
2045	 * subsystem has been notified via second_overflow(), we need to skip
2046	 * the next update.
2047	 */
2048	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
2049		tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
2050							tk->tkr_mono.shift;
2051		tk->xtime_sec--;
2052		tk->skip_second_overflow = 1;
2053	}
2054}
2055
2056/*
2057 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
2058 *
2059 * Helper function that accumulates the nsecs greater than a second
2060 * from the xtime_nsec field to the xtime_secs field.
2061 * It also calls into the NTP code to handle leapsecond processing.
2062 */
2063static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
2064{
2065	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
2066	unsigned int clock_set = 0;
2067
2068	while (tk->tkr_mono.xtime_nsec >= nsecps) {
2069		int leap;
2070
2071		tk->tkr_mono.xtime_nsec -= nsecps;
2072		tk->xtime_sec++;
2073
2074		/*
2075		 * Skip NTP update if this second was accumulated before,
2076		 * i.e. xtime_nsec underflowed in timekeeping_adjust()
2077		 */
2078		if (unlikely(tk->skip_second_overflow)) {
2079			tk->skip_second_overflow = 0;
2080			continue;
2081		}
2082
2083		/* Figure out if it's a leap second and apply it if needed */
2084		leap = second_overflow(tk->xtime_sec);
2085		if (unlikely(leap)) {
2086			struct timespec64 ts;
2087
2088			tk->xtime_sec += leap;
2089
2090			ts.tv_sec = leap;
2091			ts.tv_nsec = 0;
2092			tk_set_wall_to_mono(tk,
2093				timespec64_sub(tk->wall_to_monotonic, ts));
2094
2095			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
2096
2097			clock_set = TK_CLOCK_WAS_SET;
2098		}
2099	}
2100	return clock_set;
2101}
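
/*
 * Standalone demo of the shifted-nanosecond bookkeeping used above:
 * xtime_nsec holds nanoseconds << shift, and whenever it reaches
 * NSEC_PER_SEC << shift a whole second is carried into xtime_sec.
 * The shift value and starting numbers are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint32_t shift = 8;
	uint64_t nsecps = NSEC_PER_SEC << shift;		/* one second, shifted */
	uint64_t xtime_sec = 100;
	uint64_t xtime_nsec = 2500000000ULL << shift;		/* 2.5 seconds pending */

	while (xtime_nsec >= nsecps) {
		xtime_nsec -= nsecps;
		xtime_sec++;
	}

	printf("sec=%llu leftover=%llu ns\n",
	       (unsigned long long)xtime_sec,
	       (unsigned long long)(xtime_nsec >> shift));
	return 0;
}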
2102
2103/*
2104 * logarithmic_accumulation - shifted accumulation of cycles
2105 *
2106 * This function accumulates a shifted interval of cycles into
2107 * a shifted interval of nanoseconds, which allows for an O(log)
2108 * accumulation loop.
2109 *
2110 * Returns the unconsumed cycles.
2111 */
2112static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
2113				    u32 shift, unsigned int *clock_set)
2114{
2115	u64 interval = tk->cycle_interval << shift;
2116	u64 snsec_per_sec;
2117
2118	/* If the offset is smaller than a shifted interval, do nothing */
2119	if (offset < interval)
2120		return offset;
2121
2122	/* Accumulate one shifted interval */
2123	offset -= interval;
2124	tk->tkr_mono.cycle_last += interval;
2125	tk->tkr_raw.cycle_last  += interval;
2126
2127	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
2128	*clock_set |= accumulate_nsecs_to_secs(tk);
2129
2130	/* Accumulate raw time */
2131	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
2132	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
2133	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
2134		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
2135		tk->raw_sec++;
2136	}
2137
2138	/* Accumulate error between NTP and clock interval */
2139	tk->ntp_error += tk->ntp_tick << shift;
2140	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
2141						(tk->ntp_error_shift + shift);
2142
2143	return offset;
2144}
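
/*
 * Standalone demo of the O(log) accumulation strategy: instead of consuming
 * one cycle_interval per loop iteration, start with the largest power-of-two
 * multiple that fits into the pending offset and halve the chunk size as the
 * offset shrinks. Numbers are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

static int ilog2_u64(uint64_t v)
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	uint64_t cycle_interval = 1000;
	uint64_t offset = 1234567;	/* many ticks worth of pending cycles */
	int shift = ilog2_u64(offset) - ilog2_u64(cycle_interval);
	int steps = 0;

	if (shift < 0)
		shift = 0;

	while (offset >= cycle_interval) {
		uint64_t chunk = cycle_interval << shift;

		if (offset >= chunk) {		/* mirrors logarithmic_accumulation() */
			offset -= chunk;
			steps++;
		}
		if (offset < chunk)
			shift--;
	}

	printf("accumulated in %d chunks, leftover %llu cycles\n",
	       steps, (unsigned long long)offset);
	return 0;
}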
2145
2146/*
2147 * timekeeping_advance - Updates the timekeeper to the current time and
2148 * current NTP tick length
2149 */
2150static bool timekeeping_advance(enum timekeeping_adv_mode mode)
2151{
 
2152	struct timekeeper *real_tk = &tk_core.timekeeper;
2153	struct timekeeper *tk = &shadow_timekeeper;
2154	u64 offset;
2155	int shift = 0, maxshift;
2156	unsigned int clock_set = 0;
2157	unsigned long flags;
 
2158
2159	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2160
2161	/* Make sure we're fully resumed: */
2162	if (unlikely(timekeeping_suspended))
2163		goto out;
2164
2165	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
2166				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 
2167
2168	/* Check if there's really nothing to do */
2169	if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
2170		goto out;
2171
2172	/* Do some additional sanity checking */
2173	timekeeping_check_update(tk, offset);
2174
2175	/*
2176	 * With NO_HZ we may have to accumulate many cycle_intervals
2177	 * (think "ticks") worth of time at once. To do this efficiently,
2178	 * we calculate the largest doubling multiple of cycle_intervals
2179	 * that is smaller than the offset.  We then accumulate that
2180	 * chunk in one go, and then try to consume the next smaller
2181	 * doubled multiple.
2182	 */
2183	shift = ilog2(offset) - ilog2(tk->cycle_interval);
2184	shift = max(0, shift);
2185	/* Bound shift to one less than what overflows tick_length */
2186	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
2187	shift = min(shift, maxshift);
2188	while (offset >= tk->cycle_interval) {
2189		offset = logarithmic_accumulation(tk, offset, shift,
2190							&clock_set);
2191		if (offset < tk->cycle_interval<<shift)
2192			shift--;
2193	}
2194
2195	/* Adjust the multiplier to correct NTP error */
2196	timekeeping_adjust(tk, offset);
2197
2198	/*
2199	 * Finally, make sure that after the rounding
2200	 * xtime_nsec isn't larger than NSEC_PER_SEC
2201	 */
2202	clock_set |= accumulate_nsecs_to_secs(tk);
2203
2204	write_seqcount_begin(&tk_core.seq);
2205	/*
2206	 * Update the real timekeeper.
2207	 *
2208	 * We could avoid this memcpy by switching pointers, but that
2209	 * requires changes to all other timekeeper usage sites as
2210	 * well, i.e. move the timekeeper pointer getter into the
2211	 * spinlocked/seqcount protected sections. And we trade this
2212	 * memcpy under the tk_core.seq against one before we start
2213	 * updating.
2214	 */
2215	timekeeping_update(tk, clock_set);
2216	memcpy(real_tk, tk, sizeof(*tk));
2217	/* The memcpy must come last. Do not put anything here! */
2218	write_seqcount_end(&tk_core.seq);
2219out:
2220	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2221
2222	return !!clock_set;
2223}
2224
2225/**
2226 * update_wall_time - Uses the current clocksource to increment the wall time
2227 *
2228 */
2229void update_wall_time(void)
2230{
2231	if (timekeeping_advance(TK_ADV_TICK))
2232		clock_was_set_delayed();
2233}
2234
2235/**
2236 * getboottime64 - Return the real time of system boot.
2237 * @ts:		pointer to the timespec64 to be set
2238 *
2239 * Returns the wall-time of boot in a timespec64.
2240 *
2241 * This is based on the wall_to_monotonic offset and the total suspend
2242 * time. Calls to settimeofday will affect the value returned (which
2243 * basically means that however wrong your real time clock is at boot time,
2244 * you get the right time here).
2245 */
2246void getboottime64(struct timespec64 *ts)
2247{
2248	struct timekeeper *tk = &tk_core.timekeeper;
2249	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2250
2251	*ts = ktime_to_timespec64(t);
2252}
2253EXPORT_SYMBOL_GPL(getboottime64);
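
/*
 * Standalone userspace illustration of the relation getboottime64() is built
 * on: the wall time of boot equals CLOCK_REALTIME minus CLOCK_BOOTTIME
 * (ignoring the small skew between the two non-atomic readouts).
 * CLOCK_BOOTTIME is Linux-specific.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec real, boot;

	clock_gettime(CLOCK_REALTIME, &real);
	clock_gettime(CLOCK_BOOTTIME, &boot);

	printf("boot happened around %lld (seconds since the epoch)\n",
	       (long long)(real.tv_sec - boot.tv_sec));
	return 0;
}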
2254
2255void ktime_get_coarse_real_ts64(struct timespec64 *ts)
2256{
2257	struct timekeeper *tk = &tk_core.timekeeper;
2258	unsigned int seq;
2259
2260	do {
2261		seq = read_seqcount_begin(&tk_core.seq);
2262
2263		*ts = tk_xtime(tk);
2264	} while (read_seqcount_retry(&tk_core.seq, seq));
2265}
2266EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
2267
2268void ktime_get_coarse_ts64(struct timespec64 *ts)
2269{
2270	struct timekeeper *tk = &tk_core.timekeeper;
2271	struct timespec64 now, mono;
2272	unsigned int seq;
2273
2274	do {
2275		seq = read_seqcount_begin(&tk_core.seq);
2276
2277		now = tk_xtime(tk);
2278		mono = tk->wall_to_monotonic;
2279	} while (read_seqcount_retry(&tk_core.seq, seq));
2280
2281	set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
2282				now.tv_nsec + mono.tv_nsec);
2283}
2284EXPORT_SYMBOL(ktime_get_coarse_ts64);
2285
2286/*
2287 * Must hold jiffies_lock
2288 */
2289void do_timer(unsigned long ticks)
2290{
2291	jiffies_64 += ticks;
2292	calc_global_load();
2293}
2294
2295/**
2296 * ktime_get_update_offsets_now - hrtimer helper
2297 * @cwsseq:	pointer to check and store the clock was set sequence number
2298 * @offs_real:	pointer to storage for monotonic -> realtime offset
2299 * @offs_boot:	pointer to storage for monotonic -> boottime offset
2300 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
2301 *
2302 * Returns current monotonic time and updates the offsets if the
2303 * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
2304 * different.
2305 *
2306 * Called from hrtimer_interrupt() or retrigger_next_event()
2307 */
2308ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2309				     ktime_t *offs_boot, ktime_t *offs_tai)
2310{
2311	struct timekeeper *tk = &tk_core.timekeeper;
2312	unsigned int seq;
2313	ktime_t base;
2314	u64 nsecs;
2315
2316	do {
2317		seq = read_seqcount_begin(&tk_core.seq);
2318
2319		base = tk->tkr_mono.base;
2320		nsecs = timekeeping_get_ns(&tk->tkr_mono);
2321		base = ktime_add_ns(base, nsecs);
2322
2323		if (*cwsseq != tk->clock_was_set_seq) {
2324			*cwsseq = tk->clock_was_set_seq;
2325			*offs_real = tk->offs_real;
2326			*offs_boot = tk->offs_boot;
2327			*offs_tai = tk->offs_tai;
2328		}
2329
2330		/* Handle leapsecond insertion adjustments */
2331		if (unlikely(base >= tk->next_leap_ktime))
2332			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
2333
2334	} while (read_seqcount_retry(&tk_core.seq, seq));
2335
2336	return base;
2337}
2338
2339/*
2340 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
2341 */
2342static int timekeeping_validate_timex(const struct __kernel_timex *txc)
2343{
2344	if (txc->modes & ADJ_ADJTIME) {
2345		/* singleshot must not be used with any other mode bits */
2346		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
2347			return -EINVAL;
2348		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
2349		    !capable(CAP_SYS_TIME))
2350			return -EPERM;
2351	} else {
2352		/* In order to modify anything, you gotta be super-user! */
2353		if (txc->modes && !capable(CAP_SYS_TIME))
2354			return -EPERM;
2355		/*
2356		 * if the quartz is off by more than 10% then
2357		 * something is VERY wrong!
2358		 */
2359		if (txc->modes & ADJ_TICK &&
2360		    (txc->tick <  900000/USER_HZ ||
2361		     txc->tick > 1100000/USER_HZ))
2362			return -EINVAL;
2363	}
2364
2365	if (txc->modes & ADJ_SETOFFSET) {
2366		/* In order to inject time, you gotta be super-user! */
2367		if (!capable(CAP_SYS_TIME))
2368			return -EPERM;
2369
2370		/*
2371		 * Validate if a timespec/timeval used to inject a time
2372		 * offset is valid.  Offsets can be positive or negative, so
2373		 * we don't check tv_sec. The value of the timeval/timespec
2374		 * is the sum of its fields, but *NOTE*:
2375		 * The field tv_usec/tv_nsec must always be non-negative and
2376		 * we can't have more nanoseconds/microseconds than a second.
2377		 */
2378		if (txc->time.tv_usec < 0)
2379			return -EINVAL;
2380
2381		if (txc->modes & ADJ_NANO) {
2382			if (txc->time.tv_usec >= NSEC_PER_SEC)
2383				return -EINVAL;
2384		} else {
2385			if (txc->time.tv_usec >= USEC_PER_SEC)
2386				return -EINVAL;
2387		}
2388	}
2389
2390	/*
2391	 * Check for potential multiplication overflows that can
2392	 * only happen on 64-bit systems:
2393	 */
2394	if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
2395		if (LLONG_MIN / PPM_SCALE > txc->freq)
2396			return -EINVAL;
2397		if (LLONG_MAX / PPM_SCALE < txc->freq)
2398			return -EINVAL;
2399	}
2400
2401	return 0;
2402}
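
/*
 * Userspace example of a request that has to pass the validation above:
 * inject a +250ms offset via adjtimex(ADJ_SETOFFSET | ADJ_NANO). This really
 * steps the system clock and requires CAP_SYS_TIME; without that capability
 * the kernel returns -EPERM as enforced above.
 */
#include <stdio.h>
#include <string.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx;

	memset(&tx, 0, sizeof(tx));
	tx.modes = ADJ_SETOFFSET | ADJ_NANO;
	tx.time.tv_sec = 0;
	tx.time.tv_usec = 250000000;	/* nanoseconds because ADJ_NANO is set */

	if (adjtimex(&tx) == -1)
		perror("adjtimex");
	else
		printf("offset injected\n");
	return 0;
}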
2403
2404/**
2405 * random_get_entropy_fallback - Returns the raw clock source value,
2406 * used by random.c for platforms with no valid random_get_entropy().
2407 */
2408unsigned long random_get_entropy_fallback(void)
2409{
2410	struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
2411	struct clocksource *clock = READ_ONCE(tkr->clock);
2412
2413	if (unlikely(timekeeping_suspended || !clock))
2414		return 0;
2415	return clock->read(clock);
2416}
2417EXPORT_SYMBOL_GPL(random_get_entropy_fallback);
2418
2419/**
2420 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 
2421 */
2422int do_adjtimex(struct __kernel_timex *txc)
2423{
2424	struct timekeeper *tk = &tk_core.timekeeper;
2425	struct audit_ntp_data ad;
 
2426	bool clock_set = false;
2427	struct timespec64 ts;
2428	unsigned long flags;
2429	s32 orig_tai, tai;
2430	int ret;
2431
2432	/* Validate the data before disabling interrupts */
2433	ret = timekeeping_validate_timex(txc);
2434	if (ret)
2435		return ret;
2436	add_device_randomness(txc, sizeof(*txc));
2437
2438	if (txc->modes & ADJ_SETOFFSET) {
2439		struct timespec64 delta;
 
2440		delta.tv_sec  = txc->time.tv_sec;
2441		delta.tv_nsec = txc->time.tv_usec;
2442		if (!(txc->modes & ADJ_NANO))
2443			delta.tv_nsec *= 1000;
2444		ret = timekeeping_inject_offset(&delta);
2445		if (ret)
2446			return ret;
2447
 
2448		audit_tk_injoffset(delta);
2449	}
2450
2451	audit_ntp_init(&ad);
2452
2453	ktime_get_real_ts64(&ts);
2454	add_device_randomness(&ts, sizeof(ts));
2455
2456	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2457	write_seqcount_begin(&tk_core.seq);
2458
2459	orig_tai = tai = tk->tai_offset;
2460	ret = __do_adjtimex(txc, &ts, &tai, &ad);
2461
2462	if (tai != orig_tai) {
2463		__timekeeping_set_tai_offset(tk, tai);
2464		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
2465		clock_set = true;
2466	}
2467	tk_update_leap_state(tk);
2468
2469	write_seqcount_end(&tk_core.seq);
2470	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2471
2472	audit_ntp_log(&ad);
2473
2474	/* Update the multiplier immediately if frequency was set directly */
2475	if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
2476		clock_set |= timekeeping_advance(TK_ADV_FREQ);
2477
2478	if (clock_set)
2479		clock_was_set(CLOCK_REALTIME);
2480
2481	ntp_notify_cmos_timer();
2482
2483	return ret;
2484}
2485
2486#ifdef CONFIG_NTP_PPS
2487/**
2488 * hardpps() - Accessor function to NTP __hardpps function
 
 
2489 */
2490void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
2491{
2492	unsigned long flags;
2493
2494	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2495	write_seqcount_begin(&tk_core.seq);
2496
2497	__hardpps(phase_ts, raw_ts);
2498
2499	write_seqcount_end(&tk_core.seq);
2500	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2501}
2502EXPORT_SYMBOL(hardpps);
2503#endif /* CONFIG_NTP_PPS */
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  Kernel timekeeping code and accessor functions. Based on code from
   4 *  timer.c, moved in commit 8524070b7982.
   5 */
   6#include <linux/timekeeper_internal.h>
   7#include <linux/module.h>
   8#include <linux/interrupt.h>
   9#include <linux/percpu.h>
  10#include <linux/init.h>
  11#include <linux/mm.h>
  12#include <linux/nmi.h>
  13#include <linux/sched.h>
  14#include <linux/sched/loadavg.h>
  15#include <linux/sched/clock.h>
  16#include <linux/syscore_ops.h>
  17#include <linux/clocksource.h>
  18#include <linux/jiffies.h>
  19#include <linux/time.h>
  20#include <linux/timex.h>
  21#include <linux/tick.h>
  22#include <linux/stop_machine.h>
  23#include <linux/pvclock_gtod.h>
  24#include <linux/compiler.h>
  25#include <linux/audit.h>
  26#include <linux/random.h>
  27
  28#include "tick-internal.h"
  29#include "ntp_internal.h"
  30#include "timekeeping_internal.h"
  31
  32#define TK_CLEAR_NTP		(1 << 0)
  33#define TK_CLOCK_WAS_SET	(1 << 1)
  34
  35#define TK_UPDATE_ALL		(TK_CLEAR_NTP | TK_CLOCK_WAS_SET)
  36
  37enum timekeeping_adv_mode {
  38	/* Update timekeeper when a tick has passed */
  39	TK_ADV_TICK,
  40
  41	/* Update timekeeper on a direct frequency change */
  42	TK_ADV_FREQ
  43};
  44
 
 
  45/*
  46 * The most important data for readout fits into a single 64 byte
  47 * cache line.
  48 */
  49struct tk_data {
  50	seqcount_raw_spinlock_t	seq;
  51	struct timekeeper	timekeeper;
  52	struct timekeeper	shadow_timekeeper;
  53	raw_spinlock_t		lock;
  54} ____cacheline_aligned;
  55
  56static struct tk_data tk_core;
  57
  58/* flag for if timekeeping is suspended */
  59int __read_mostly timekeeping_suspended;
  60
  61/**
  62 * struct tk_fast - NMI safe timekeeper
  63 * @seq:	Sequence counter for protecting updates. The lowest bit
  64 *		is the index for the tk_read_base array
  65 * @base:	tk_read_base array. Access is indexed by the lowest bit of
  66 *		@seq.
  67 *
  68 * See @update_fast_timekeeper() below.
  69 */
  70struct tk_fast {
  71	seqcount_latch_t	seq;
  72	struct tk_read_base	base[2];
  73};
  74
  75/* Suspend-time cycles value for halted fast timekeeper. */
  76static u64 cycles_at_suspend;
  77
  78static u64 dummy_clock_read(struct clocksource *cs)
  79{
  80	if (timekeeping_suspended)
  81		return cycles_at_suspend;
  82	return local_clock();
  83}
  84
  85static struct clocksource dummy_clock = {
  86	.read = dummy_clock_read,
  87};
  88
  89/*
  90 * Boot time initialization which allows local_clock() to be utilized
  91 * during early boot when clocksources are not available. local_clock()
  92 * returns nanoseconds already so no conversion is required, hence mult=1
  93 * and shift=0. When the first proper clocksource is installed then
  94 * the fast time keepers are updated with the correct values.
  95 */
  96#define FAST_TK_INIT						\
  97	{							\
  98		.clock		= &dummy_clock,			\
  99		.mask		= CLOCKSOURCE_MASK(64),		\
 100		.mult		= 1,				\
 101		.shift		= 0,				\
 102	}
 103
 104static struct tk_fast tk_fast_mono ____cacheline_aligned = {
 105	.seq     = SEQCNT_LATCH_ZERO(tk_fast_mono.seq),
 106	.base[0] = FAST_TK_INIT,
 107	.base[1] = FAST_TK_INIT,
 108};
 109
 110static struct tk_fast tk_fast_raw  ____cacheline_aligned = {
 111	.seq     = SEQCNT_LATCH_ZERO(tk_fast_raw.seq),
 112	.base[0] = FAST_TK_INIT,
 113	.base[1] = FAST_TK_INIT,
 114};
 115
 116unsigned long timekeeper_lock_irqsave(void)
 117{
 118	unsigned long flags;
 119
 120	raw_spin_lock_irqsave(&tk_core.lock, flags);
 121	return flags;
 122}
 123
 124void timekeeper_unlock_irqrestore(unsigned long flags)
 125{
 126	raw_spin_unlock_irqrestore(&tk_core.lock, flags);
 127}
 128
 129/*
 130 * Multigrain timestamps require tracking the latest fine-grained timestamp
 131 * that has been issued, and never returning a coarse-grained timestamp that is
 132 * earlier than that value.
 133 *
 134 * mg_floor represents the latest fine-grained time that has been handed out as
 135 * a file timestamp on the system. This is tracked as a monotonic ktime_t, and
 136 * converted to a realtime clock value on an as-needed basis.
 137 *
 138 * Maintaining mg_floor ensures the multigrain interfaces never issue a
 139 * timestamp earlier than one that has been previously issued.
 140 *
 141 * The exception to this rule is when there is a backward realtime clock jump. If
 142 * such an event occurs, a timestamp can appear to be earlier than a previous one.
 143 */
 144static __cacheline_aligned_in_smp atomic64_t mg_floor;
 145
 146static inline void tk_normalize_xtime(struct timekeeper *tk)
 147{
 148	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
 149		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
 150		tk->xtime_sec++;
 151	}
 152	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
 153		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
 154		tk->raw_sec++;
 155	}
 156}
 157
 158static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
 159{
 160	struct timespec64 ts;
 161
 162	ts.tv_sec = tk->xtime_sec;
 163	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
 164	return ts;
 165}
 166
 167static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
 168{
 169	tk->xtime_sec = ts->tv_sec;
 170	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
 171}
 172
 173static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
 174{
 175	tk->xtime_sec += ts->tv_sec;
 176	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
 177	tk_normalize_xtime(tk);
 178}
 179
 180static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
 181{
 182	struct timespec64 tmp;
 183
 184	/*
 185	 * Verify consistency of: offset_real = -wall_to_monotonic
 186	 * before modifying anything
 187	 */
 188	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
 189					-tk->wall_to_monotonic.tv_nsec);
 190	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
 191	tk->wall_to_monotonic = wtm;
 192	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
 193	/* Paired with READ_ONCE() in ktime_mono_to_any() */
 194	WRITE_ONCE(tk->offs_real, timespec64_to_ktime(tmp));
 195	WRITE_ONCE(tk->offs_tai, ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0)));
 196}
 197
 198static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 199{
 200	/* Paired with READ_ONCE() in ktime_mono_to_any() */
 201	WRITE_ONCE(tk->offs_boot, ktime_add(tk->offs_boot, delta));
 202	/*
 203	 * Timespec representation for VDSO update to avoid 64bit division
 204	 * on every update.
 205	 */
 206	tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
 207}
 208
 209/*
 210 * tk_clock_read - atomic clocksource read() helper
 211 *
 212 * This helper is necessary to use in the read paths because, while the
 213 * seqcount ensures we don't return a bad value while structures are updated,
 214 * it doesn't protect from potential crashes. There is the possibility that
 215 * the tkr's clocksource may change between the read reference and the
 216 * clock reference passed to the read function.  This can cause crashes if
 217 * the wrong clocksource is passed to the wrong read function.
 218 * This isn't necessary to use when holding the tk_core.lock or doing
 219 * a read of the fast-timekeeper tkrs (which is protected by its own locking
 220 * and update logic).
 221 */
 222static inline u64 tk_clock_read(const struct tk_read_base *tkr)
 223{
 224	struct clocksource *clock = READ_ONCE(tkr->clock);
 225
 226	return clock->read(clock);
 227}
 228
 229/**
 230 * tk_setup_internals - Set up internals to use clocksource clock.
 231 *
 232 * @tk:		The target timekeeper to setup.
 233 * @clock:		Pointer to clocksource.
 234 *
 235 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 236 * pair and interval request.
 237 *
 238 * Unless you're the timekeeping code, you should not be using this!
 239 */
 240static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 241{
 242	u64 interval;
 243	u64 tmp, ntpinterval;
 244	struct clocksource *old_clock;
 245
 246	++tk->cs_was_changed_seq;
 247	old_clock = tk->tkr_mono.clock;
 248	tk->tkr_mono.clock = clock;
 249	tk->tkr_mono.mask = clock->mask;
 250	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
 251
 252	tk->tkr_raw.clock = clock;
 253	tk->tkr_raw.mask = clock->mask;
 254	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
 255
 256	/* Do the ns -> cycle conversion first, using original mult */
 257	tmp = NTP_INTERVAL_LENGTH;
 258	tmp <<= clock->shift;
 259	ntpinterval = tmp;
 260	tmp += clock->mult/2;
 261	do_div(tmp, clock->mult);
 262	if (tmp == 0)
 263		tmp = 1;
 264
 265	interval = (u64) tmp;
 266	tk->cycle_interval = interval;
 267
 268	/* Go back from cycles -> shifted ns */
 269	tk->xtime_interval = interval * clock->mult;
 270	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
 271	tk->raw_interval = interval * clock->mult;
 272
 273	 /* if changing clocks, convert xtime_nsec shift units */
 274	if (old_clock) {
 275		int shift_change = clock->shift - old_clock->shift;
 276		if (shift_change < 0) {
 277			tk->tkr_mono.xtime_nsec >>= -shift_change;
 278			tk->tkr_raw.xtime_nsec >>= -shift_change;
 279		} else {
 280			tk->tkr_mono.xtime_nsec <<= shift_change;
 281			tk->tkr_raw.xtime_nsec <<= shift_change;
 282		}
 283	}
 284
 285	tk->tkr_mono.shift = clock->shift;
 286	tk->tkr_raw.shift = clock->shift;
 287
 288	tk->ntp_error = 0;
 289	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
 290	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
 291
 292	/*
 293	 * The timekeeper keeps its own mult values for the currently
 294 * active clocksource. These values will be adjusted via NTP
 295	 * to counteract clock drifting.
 296	 */
 297	tk->tkr_mono.mult = clock->mult;
 298	tk->tkr_raw.mult = clock->mult;
 299	tk->ntp_err_mult = 0;
 300	tk->skip_second_overflow = 0;
 301}
 302
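
/*
 * Standalone walk-through of the interval setup above for an invented
 * clocksource (a 24 MHz counter with shift 24); the real mult/shift values
 * come from the clocksource driver. NTP_INTERVAL_LENGTH is taken as the tick
 * length in nanoseconds for HZ=250.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ntp_interval_length = 4000000;		/* ns per tick at HZ=250 */
	uint32_t shift = 24;
	/* mult chosen so that (cycles * mult) >> shift == nanoseconds for 24 MHz */
	uint32_t mult = (uint32_t)(((uint64_t)1000000000 << shift) / 24000000);

	uint64_t tmp = ntp_interval_length << shift;	/* tick length in shifted ns */
	uint64_t ntpinterval = tmp;

	tmp += mult / 2;				/* round to the nearest cycle */
	tmp /= mult;
	uint64_t cycle_interval = tmp;			/* counter cycles per tick */
	uint64_t xtime_interval = cycle_interval * mult;/* shifted ns actually advanced */

	printf("mult=%u cycle_interval=%llu\n", mult,
	       (unsigned long long)cycle_interval);
	printf("remainder=%lld shifted ns per tick\n",
	       (long long)(ntpinterval - xtime_interval));
	return 0;
}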
 303/* Timekeeper helper functions. */
 304static noinline u64 delta_to_ns_safe(const struct tk_read_base *tkr, u64 delta)
 
 305{
 306	return mul_u64_u32_add_u64_shr(delta, tkr->mult, tkr->xtime_nsec, tkr->shift);
 307}
 308
 309static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
 310{
 311	/* Calculate the delta since the last update_wall_time() */
 312	u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask;
 313
 314	/*
 315	 * This detects both negative motion and the case where the delta
 316	 * overflows the multiplication with tkr->mult.
 317	 */
 318	if (unlikely(delta > tkr->clock->max_cycles)) {
 319		/*
 320		 * Handle clocksource inconsistency between CPUs to prevent
 321		 * time from going backwards by checking for the MSB of the
 322		 * mask being set in the delta.
 323		 */
 324		if (delta & ~(mask >> 1))
 325			return tkr->xtime_nsec >> tkr->shift;
 326
 327		return delta_to_ns_safe(tkr, delta);
 328	}
 329
 330	return ((delta * tkr->mult) + tkr->xtime_nsec) >> tkr->shift;
 
 331}
 332
 333static __always_inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
 334{
 335	return timekeeping_cycles_to_ns(tkr, tk_clock_read(tkr));
 336}
 337
 338/**
 339 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 340 * @tkr: Timekeeping readout base from which we take the update
 341 * @tkf: Pointer to NMI safe timekeeper
 342 *
 343 * We want to use this from any context including NMI and tracing /
 344 * instrumenting the timekeeping code itself.
 345 *
 346 * Employ the latch technique; see @write_seqcount_latch.
 347 *
 348 * So if an NMI hits the update of base[0] then it will use base[1]
 349 * which is still consistent. In the worst case this can result in a
 350 * slightly wrong timestamp (a few nanoseconds). See
 351 * @ktime_get_mono_fast_ns.
 352 */
 353static void update_fast_timekeeper(const struct tk_read_base *tkr,
 354				   struct tk_fast *tkf)
 355{
 356	struct tk_read_base *base = tkf->base;
 357
 358	/* Force readers off to base[1] */
 359	write_seqcount_latch_begin(&tkf->seq);
 360
 361	/* Update base[0] */
 362	memcpy(base, tkr, sizeof(*base));
 363
 364	/* Force readers back to base[0] */
 365	write_seqcount_latch(&tkf->seq);
 366
 367	/* Update base[1] */
 368	memcpy(base + 1, base, sizeof(*base));
 369
 370	write_seqcount_latch_end(&tkf->seq);
 
 371}
 372
 373static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 374{
 375	struct tk_read_base *tkr;
 376	unsigned int seq;
 377	u64 now;
 378
 379	do {
 380		seq = read_seqcount_latch(&tkf->seq);
 381		tkr = tkf->base + (seq & 0x01);
 382		now = ktime_to_ns(tkr->base);
 383		now += timekeeping_get_ns(tkr);
 384	} while (read_seqcount_latch_retry(&tkf->seq, seq));
 385
 386	return now;
 387}
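
/*
 * Minimal userspace sketch of the latch technique used by the writer and
 * reader above, with C11 atomics standing in for the kernel's seqcount_latch
 * primitives; the memory-ordering details are simplified relative to the
 * kernel implementation. A reader racing with the writer always finds a
 * fully written copy in data[seq & 1] and only retries if the sequence
 * moved underneath it.
 */
#include <stdatomic.h>
#include <stdio.h>

struct sample { long a, b; };

static _Atomic unsigned int seq;
static struct sample data[2];

static void latch_write(struct sample s)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* readers -> data[1] */
	data[0] = s;
	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* readers -> data[0] */
	data[1] = s;
}

static struct sample latch_read(void)
{
	struct sample s;
	unsigned int start;

	do {
		start = atomic_load_explicit(&seq, memory_order_acquire);
		s = data[start & 1];
	} while (atomic_load_explicit(&seq, memory_order_acquire) != start);

	return s;
}

int main(void)
{
	latch_write((struct sample){ .a = 1, .b = 2 });
	struct sample s = latch_read();

	printf("a=%ld b=%ld\n", s.a, s.b);
	return 0;
}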
 388
 389/**
 390 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 391 *
 392 * This timestamp is not guaranteed to be monotonic across an update.
 393 * The timestamp is calculated by:
 394 *
 395 *	now = base_mono + clock_delta * slope
 396 *
 397 * So if the update lowers the slope, readers who are forced to the
 398 * not yet updated second array are still using the old steeper slope.
 399 *
 400 * tmono
 401 * ^
 402 * |    o  n
 403 * |   o n
 404 * |  u
 405 * | o
 406 * |o
 407 * |12345678---> reader order
 408 *
 409 * o = old slope
 410 * u = update
 411 * n = new slope
 412 *
 413 * So reader 6 will observe time going backwards versus reader 5.
 414 *
 415 * While other CPUs are likely to be able to observe that, the only way
 416 * for a CPU local observation is when an NMI hits in the middle of
 417 * the update. Timestamps taken from that NMI context might be ahead
 418 * of the following timestamps. Callers need to be aware of that and
 419 * deal with it.
 420 */
 421u64 notrace ktime_get_mono_fast_ns(void)
 422{
 423	return __ktime_get_fast_ns(&tk_fast_mono);
 424}
 425EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
 426
 427/**
 428 * ktime_get_raw_fast_ns - Fast NMI safe access to clock monotonic raw
 429 *
 430 * Contrary to ktime_get_mono_fast_ns() this is always correct because the
 431 * conversion factor is not affected by NTP/PTP correction.
 432 */
 433u64 notrace ktime_get_raw_fast_ns(void)
 434{
 435	return __ktime_get_fast_ns(&tk_fast_raw);
 436}
 437EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
 438
 439/**
 440 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
 441 *
 442 * To keep it NMI safe since we're accessing from tracing, we're not using a
 443 * separate timekeeper with updates to monotonic clock and boot offset
 444 * protected with seqcounts. This has the following minor side effects:
 445 *
 446 * (1) It's possible that a timestamp is taken after the boot offset is updated
 447 * but before the timekeeper is updated. If this happens, the new boot offset
 448 * is added to the old timekeeping making the clock appear to update slightly
 449 * earlier:
 450 *    CPU 0                                        CPU 1
 451 *    timekeeping_inject_sleeptime64()
 452 *    __timekeeping_inject_sleeptime(tk, delta);
 453 *                                                 timestamp();
 454 *    timekeeping_update_staged(tkd, TK_CLEAR_NTP...);
 455 *
 456 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
 457 * partially updated.  Since the tk->offs_boot update is a rare event, this
 458 * should be a rare occurrence which postprocessing should be able to handle.
 459 *
 460 * The caveats vs. timestamp ordering as documented for ktime_get_mono_fast_ns()
 461 * apply as well.
 462 */
 463u64 notrace ktime_get_boot_fast_ns(void)
 464{
 465	struct timekeeper *tk = &tk_core.timekeeper;
 466
 467	return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_boot)));
 468}
 469EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
 470
 471/**
 472 * ktime_get_tai_fast_ns - NMI safe and fast access to tai clock.
 473 *
 474 * The same limitations as described for ktime_get_boot_fast_ns() apply. The
 475 * mono time and the TAI offset are not read atomically which may yield wrong
 476 * readouts. However, an update of the TAI offset is a rare event, e.g. caused
 477 * by settime or adjtimex with an offset. The user of this function has to deal
 478 * with the possibility of wrong timestamps in post processing.
 479 */
 480u64 notrace ktime_get_tai_fast_ns(void)
 481{
 482	struct timekeeper *tk = &tk_core.timekeeper;
 483
 484	return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_tai)));
 485}
 486EXPORT_SYMBOL_GPL(ktime_get_tai_fast_ns);
 487
 488static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
 489{
 490	struct tk_read_base *tkr;
 491	u64 basem, baser, delta;
 492	unsigned int seq;
 493
 494	do {
 495		seq = raw_read_seqcount_latch(&tkf->seq);
 496		tkr = tkf->base + (seq & 0x01);
 497		basem = ktime_to_ns(tkr->base);
 498		baser = ktime_to_ns(tkr->base_real);
 499		delta = timekeeping_get_ns(tkr);
 500	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
 501
 502	if (mono)
 503		*mono = basem + delta;
 504	return baser + delta;
 505}
 506
 507/**
 508 * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
 509 *
 510 * See ktime_get_mono_fast_ns() for documentation of the time stamp ordering.
 511 */
 512u64 ktime_get_real_fast_ns(void)
 513{
 514	return __ktime_get_real_fast(&tk_fast_mono, NULL);
 515}
 516EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
 517
 518/**
 519 * ktime_get_fast_timestamps: - NMI safe timestamps
 520 * @snapshot:	Pointer to timestamp storage
 521 *
 522 * Stores clock monotonic, boottime and realtime timestamps.
 523 *
 524 * Boot time is a racy access on 32bit systems if the sleep time injection
 525 * happens late during resume and not in timekeeping_resume(). That could
 526 * be avoided by expanding struct tk_read_base with boot offset for 32bit
 527 * and adding more overhead to the update. As this is a hard-to-observe,
 528 * once-per-resume event which can be filtered with reasonable effort using
 529 * the accurate mono/real timestamps, it's probably not worth the trouble.
 530 *
 531 * Aside of that it might be possible on 32 and 64 bit to observe the
 532 * following when the sleep time injection happens late:
 533 *
 534 * CPU 0				CPU 1
 535 * timekeeping_resume()
 536 * ktime_get_fast_timestamps()
 537 *	mono, real = __ktime_get_real_fast()
 538 *					inject_sleep_time()
 539 *					   update boot offset
 540 *	boot = mono + bootoffset;
 541 *
 542 * That means that boot time already has the sleep time adjustment, but
 543 * real time does not. On the next readout both are in sync again.
 544 *
 545 * Preventing this for 64bit is not really feasible without destroying the
 546 * careful cache layout of the timekeeper because the sequence count and
 547 * struct tk_read_base would then need two cache lines instead of one.
 548 *
 549 * Access to the time keeper clock source is disabled across the innermost
 550 * steps of suspend/resume. The accessors still work, but the timestamps
 551 * are frozen until time keeping is resumed which happens very early.
 552 *
 553 * For regular suspend/resume there is no observable difference vs. sched
 554 * clock, but it might affect some of the nasty low level debug printks.
 555 *
 556 * OTOH, access to sched clock is not guaranteed across suspend/resume on
 557 * all systems either so it depends on the hardware in use.
 558 *
 559 * If that turns out to be a real problem then this could be mitigated by
 560 * using sched clock in a similar way as during early boot. But it's not as
 561 * trivial as on early boot because it needs some careful protection
 562 * against the clock monotonic timestamp jumping backwards on resume.
 563 */
 564void ktime_get_fast_timestamps(struct ktime_timestamps *snapshot)
 565{
 566	struct timekeeper *tk = &tk_core.timekeeper;
 567
 568	snapshot->real = __ktime_get_real_fast(&tk_fast_mono, &snapshot->mono);
 569	snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot));
 570}
 571
 572/**
 573 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 574 * @tk: Timekeeper to snapshot.
 575 *
 576 * It generally is unsafe to access the clocksource after timekeeping has been
 577 * suspended, so take a snapshot of the readout base of @tk and use it as the
 578 * fast timekeeper's readout base while suspended.  It will return the same
 579 * number of cycles every time until timekeeping is resumed at which time the
 580 * proper readout base for the fast timekeeper will be restored automatically.
 581 */
 582static void halt_fast_timekeeper(const struct timekeeper *tk)
 583{
 584	static struct tk_read_base tkr_dummy;
 585	const struct tk_read_base *tkr = &tk->tkr_mono;
 586
 587	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
 588	cycles_at_suspend = tk_clock_read(tkr);
 589	tkr_dummy.clock = &dummy_clock;
 590	tkr_dummy.base_real = tkr->base + tk->offs_real;
 591	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
 592
 593	tkr = &tk->tkr_raw;
 594	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
 595	tkr_dummy.clock = &dummy_clock;
 596	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 597}
 598
 599static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
 600
 601static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
 602{
 603	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
 604}
 605
 606/**
 607 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 608 * @nb: Pointer to the notifier block to register
 609 */
 610int pvclock_gtod_register_notifier(struct notifier_block *nb)
 611{
 612	struct timekeeper *tk = &tk_core.timekeeper;
 
 613	int ret;
 614
 615	guard(raw_spinlock_irqsave)(&tk_core.lock);
 616	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
 617	update_pvclock_gtod(tk, true);
 
 618
 619	return ret;
 620}
 621EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
 622
 623/**
 624 * pvclock_gtod_unregister_notifier - unregister a pvclock
 625 * timedata update listener
 626 * @nb: Pointer to the notifier block to unregister
 627 */
 628int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
 629{
 630	guard(raw_spinlock_irqsave)(&tk_core.lock);
 631	return raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
 632}
 633EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
 634
 635/*
 636 * tk_update_leap_state - helper to update the next_leap_ktime
 637 */
 638static inline void tk_update_leap_state(struct timekeeper *tk)
 639{
 640	tk->next_leap_ktime = ntp_get_next_leap();
 641	if (tk->next_leap_ktime != KTIME_MAX)
 642		/* Convert to monotonic time */
 643		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
 644}
 645
 646/*
 647 * Leap state update for both shadow and the real timekeeper
 648 * Separate to spare a full memcpy() of the timekeeper.
 649 */
 650static void tk_update_leap_state_all(struct tk_data *tkd)
 651{
 652	write_seqcount_begin(&tkd->seq);
 653	tk_update_leap_state(&tkd->shadow_timekeeper);
 654	tkd->timekeeper.next_leap_ktime = tkd->shadow_timekeeper.next_leap_ktime;
 655	write_seqcount_end(&tkd->seq);
 656}
 657
 658/*
 659 * Update the ktime_t based scalar nsec members of the timekeeper
 660 */
 661static inline void tk_update_ktime_data(struct timekeeper *tk)
 662{
 663	u64 seconds;
 664	u32 nsec;
 665
 666	/*
 667	 * The xtime based monotonic readout is:
 668	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
 669	 * The ktime based monotonic readout is:
 670	 *	nsec = base_mono + now();
 671	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
 672	 */
 673	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
 674	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
 675	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 676
 677	/*
 678	 * The sum of the nanoseconds portions of xtime and
 679 * wall_to_monotonic can be greater than or equal to one second. Take
 680	 * this into account before updating tk->ktime_sec.
 681	 */
 682	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
 683	if (nsec >= NSEC_PER_SEC)
 684		seconds++;
 685	tk->ktime_sec = seconds;
 686
 687	/* Update the monotonic raw base */
 688	tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
 689}
 690
 691/*
 692 * Restore the shadow timekeeper from the real timekeeper.
 693 */
 694static void timekeeping_restore_shadow(struct tk_data *tkd)
 695{
 696	lockdep_assert_held(&tkd->lock);
 697	memcpy(&tkd->shadow_timekeeper, &tkd->timekeeper, sizeof(tkd->timekeeper));
 698}
 699
 700static void timekeeping_update_from_shadow(struct tk_data *tkd, unsigned int action)
 701{
 702	struct timekeeper *tk = &tk_core.shadow_timekeeper;
 703
 704	lockdep_assert_held(&tkd->lock);
 705
 706	/*
 707	 * Block out readers before running the updates below because that
 708	 * updates VDSO and other time related infrastructure. Not blocking
 709	 * the readers might let a reader see time going backwards when
 710	 * reading from the VDSO after the VDSO update and then reading in
 711	 * the kernel from the timekeeper before that got updated.
 712	 */
 713	write_seqcount_begin(&tkd->seq);
 714
 715	if (action & TK_CLEAR_NTP) {
 716		tk->ntp_error = 0;
 717		ntp_clear();
 718	}
 719
 720	tk_update_leap_state(tk);
 721	tk_update_ktime_data(tk);
 722
 723	update_vsyscall(tk);
 724	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
 725
 726	tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
 727	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
 728	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);
 729
 730	if (action & TK_CLOCK_WAS_SET)
 731		tk->clock_was_set_seq++;
 732
 733	/*
 734	 * Update the real timekeeper.
 735	 *
 736	 * We could avoid this memcpy() by switching pointers, but that has
 737 * the downside that the reader side no longer benefits from
 738	 * the cacheline optimized data layout of the timekeeper and requires
 739	 * another indirection.
 740	 */
 741	memcpy(&tkd->timekeeper, tk, sizeof(*tk));
 742	write_seqcount_end(&tkd->seq);
 743}
 744
 745/**
 746 * timekeeping_forward_now - update clock to the current time
 747 * @tk:		Pointer to the timekeeper to update
 748 *
 749 * Forward the current clock to update its state since the last call to
 750 * update_wall_time(). This is useful before significant clock changes,
 751 * as it avoids having to deal with this time offset explicitly.
 752 */
 753static void timekeeping_forward_now(struct timekeeper *tk)
 754{
 755	u64 cycle_now, delta;
 756
 757	cycle_now = tk_clock_read(&tk->tkr_mono);
 758	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
 759				  tk->tkr_mono.clock->max_raw_delta);
 760	tk->tkr_mono.cycle_last = cycle_now;
 761	tk->tkr_raw.cycle_last  = cycle_now;
 762
 763	while (delta > 0) {
 764		u64 max = tk->tkr_mono.clock->max_cycles;
 765		u64 incr = delta < max ? delta : max;
 766
 767		tk->tkr_mono.xtime_nsec += incr * tk->tkr_mono.mult;
 768		tk->tkr_raw.xtime_nsec += incr * tk->tkr_raw.mult;
 769		tk_normalize_xtime(tk);
 770		delta -= incr;
 771	}
 772}
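
/*
 * Standalone demo of the chunked forwarding loop above: a large cycle delta
 * is converted to (shifted) nanoseconds in slices no larger than max_cycles
 * so that incr * mult never overflows 64 bits. The values are invented and
 * chosen small enough that the unchunked product can be used as a reference.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t delta = 10000000;	/* pending cycles */
	uint64_t max_cycles = 3000000;	/* per-slice limit from the clocksource */
	uint32_t mult = 699050666;
	uint64_t acc = 0, d = delta;

	while (d > 0) {
		uint64_t incr = d < max_cycles ? d : max_cycles;

		acc += incr * mult;	/* mirrors timekeeping_forward_now() */
		d -= incr;
	}

	printf("chunked == direct: %d\n", acc == delta * mult);
	return 0;
}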
 773
 774/**
 775 * ktime_get_real_ts64 - Returns the time of day in a timespec64.
 776 * @ts:		pointer to the timespec to be set
 777 *
 778 * Returns the time of day in a timespec64 (WARN if suspended).
 779 */
 780void ktime_get_real_ts64(struct timespec64 *ts)
 781{
 782	struct timekeeper *tk = &tk_core.timekeeper;
 783	unsigned int seq;
 784	u64 nsecs;
 785
 786	WARN_ON(timekeeping_suspended);
 787
 788	do {
 789		seq = read_seqcount_begin(&tk_core.seq);
 790
 791		ts->tv_sec = tk->xtime_sec;
 792		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 793
 794	} while (read_seqcount_retry(&tk_core.seq, seq));
 795
 796	ts->tv_nsec = 0;
 797	timespec64_add_ns(ts, nsecs);
 798}
 799EXPORT_SYMBOL(ktime_get_real_ts64);
 800
 801ktime_t ktime_get(void)
 802{
 803	struct timekeeper *tk = &tk_core.timekeeper;
 804	unsigned int seq;
 805	ktime_t base;
 806	u64 nsecs;
 807
 808	WARN_ON(timekeeping_suspended);
 809
 810	do {
 811		seq = read_seqcount_begin(&tk_core.seq);
 812		base = tk->tkr_mono.base;
 813		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 814
 815	} while (read_seqcount_retry(&tk_core.seq, seq));
 816
 817	return ktime_add_ns(base, nsecs);
 818}
 819EXPORT_SYMBOL_GPL(ktime_get);
 820
 821u32 ktime_get_resolution_ns(void)
 822{
 823	struct timekeeper *tk = &tk_core.timekeeper;
 824	unsigned int seq;
 825	u32 nsecs;
 826
 827	WARN_ON(timekeeping_suspended);
 828
 829	do {
 830		seq = read_seqcount_begin(&tk_core.seq);
 831		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
 832	} while (read_seqcount_retry(&tk_core.seq, seq));
 833
 834	return nsecs;
 835}
 836EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
 837
 838static ktime_t *offsets[TK_OFFS_MAX] = {
 839	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
 840	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
 841	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
 842};
 843
 844ktime_t ktime_get_with_offset(enum tk_offsets offs)
 845{
 846	struct timekeeper *tk = &tk_core.timekeeper;
 847	unsigned int seq;
 848	ktime_t base, *offset = offsets[offs];
 849	u64 nsecs;
 850
 851	WARN_ON(timekeeping_suspended);
 852
 853	do {
 854		seq = read_seqcount_begin(&tk_core.seq);
 855		base = ktime_add(tk->tkr_mono.base, *offset);
 856		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 857
 858	} while (read_seqcount_retry(&tk_core.seq, seq));
 859
 860	return ktime_add_ns(base, nsecs);
 861
 862}
 863EXPORT_SYMBOL_GPL(ktime_get_with_offset);
 864
 865ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
 866{
 867	struct timekeeper *tk = &tk_core.timekeeper;
 868	unsigned int seq;
 869	ktime_t base, *offset = offsets[offs];
 870	u64 nsecs;
 871
 872	WARN_ON(timekeeping_suspended);
 873
 874	do {
 875		seq = read_seqcount_begin(&tk_core.seq);
 876		base = ktime_add(tk->tkr_mono.base, *offset);
 877		nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
 878
 879	} while (read_seqcount_retry(&tk_core.seq, seq));
 880
 881	return ktime_add_ns(base, nsecs);
 882}
 883EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
 884
 885/**
 886 * ktime_mono_to_any() - convert monotonic time to any other time
 887 * @tmono:	time to convert.
 888 * @offs:	which offset to use
 889 */
 890ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
 891{
 892	ktime_t *offset = offsets[offs];
 893	unsigned int seq;
 894	ktime_t tconv;
 895
 896	if (IS_ENABLED(CONFIG_64BIT)) {
 897		/*
 898		 * Paired with WRITE_ONCE()s in tk_set_wall_to_mono() and
 899		 * tk_update_sleep_time().
 900		 */
 901		return ktime_add(tmono, READ_ONCE(*offset));
 902	}
 903
 904	do {
 905		seq = read_seqcount_begin(&tk_core.seq);
 906		tconv = ktime_add(tmono, *offset);
 907	} while (read_seqcount_retry(&tk_core.seq, seq));
 908
 909	return tconv;
 910}
 911EXPORT_SYMBOL_GPL(ktime_mono_to_any);
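/*
 * Illustrative sketch, not part of this file: converting a CLOCK_MONOTONIC
 * timestamp to CLOCK_BOOTTIME by applying the boot offset.
 */
static __maybe_unused ktime_t example_mono_to_boottime(ktime_t mono)
{
	return ktime_mono_to_any(mono, TK_OFFS_BOOT);
}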
 912
 913/**
 914 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 915 */
 916ktime_t ktime_get_raw(void)
 917{
 918	struct timekeeper *tk = &tk_core.timekeeper;
 919	unsigned int seq;
 920	ktime_t base;
 921	u64 nsecs;
 922
 923	do {
 924		seq = read_seqcount_begin(&tk_core.seq);
 925		base = tk->tkr_raw.base;
 926		nsecs = timekeeping_get_ns(&tk->tkr_raw);
 927
 928	} while (read_seqcount_retry(&tk_core.seq, seq));
 929
 930	return ktime_add_ns(base, nsecs);
 931}
 932EXPORT_SYMBOL_GPL(ktime_get_raw);
 933
 934/**
 935 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 936 * @ts:		pointer to timespec variable
 937 *
 938 * The function calculates the monotonic clock from the realtime
 939 * clock and the wall_to_monotonic offset and stores the result
 940 * in normalized timespec64 format in the variable pointed to by @ts.
 941 */
 942void ktime_get_ts64(struct timespec64 *ts)
 943{
 944	struct timekeeper *tk = &tk_core.timekeeper;
 945	struct timespec64 tomono;
 946	unsigned int seq;
 947	u64 nsec;
 948
 949	WARN_ON(timekeeping_suspended);
 950
 951	do {
 952		seq = read_seqcount_begin(&tk_core.seq);
 953		ts->tv_sec = tk->xtime_sec;
 954		nsec = timekeeping_get_ns(&tk->tkr_mono);
 955		tomono = tk->wall_to_monotonic;
 956
 957	} while (read_seqcount_retry(&tk_core.seq, seq));
 958
 959	ts->tv_sec += tomono.tv_sec;
 960	ts->tv_nsec = 0;
 961	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
 962}
 963EXPORT_SYMBOL_GPL(ktime_get_ts64);
 964
 965/**
 966 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 967 *
 968 * Returns the seconds portion of CLOCK_MONOTONIC with a single
 969 * non-serialized read. tk->ktime_sec is of type 'unsigned long' so this
 970 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 971 * covers ~136 years of uptime which should be enough to prevent
 972 * premature wraparounds.
 973 */
 974time64_t ktime_get_seconds(void)
 975{
 976	struct timekeeper *tk = &tk_core.timekeeper;
 977
 978	WARN_ON(timekeeping_suspended);
 979	return tk->ktime_sec;
 980}
 981EXPORT_SYMBOL_GPL(ktime_get_seconds);
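/*
 * Worked arithmetic behind the ~136 years claim above: a 32 bit
 * unsigned long holds up to 2^32 - 1 ~= 4.29e9 seconds, and
 * 4.29e9 / ~31.6e6 seconds per year is roughly 136 years of uptime
 * before the value wraps.
 */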
 982
 983/**
 984 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 985 *
 986 * Returns the wall clock seconds since 1970.
 987 *
 988 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
 989 * 32bit systems the access must be protected with the sequence
 990 * counter to provide "atomic" access to the 64bit tk->xtime_sec
 991 * value.
 992 */
 993time64_t ktime_get_real_seconds(void)
 994{
 995	struct timekeeper *tk = &tk_core.timekeeper;
 996	time64_t seconds;
 997	unsigned int seq;
 998
 999	if (IS_ENABLED(CONFIG_64BIT))
1000		return tk->xtime_sec;
1001
1002	do {
1003		seq = read_seqcount_begin(&tk_core.seq);
1004		seconds = tk->xtime_sec;
1005
1006	} while (read_seqcount_retry(&tk_core.seq, seq));
1007
1008	return seconds;
1009}
1010EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
1011
1012/**
1013 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
1014 * but without the sequence counter protection. This internal function
1015 * is only called when the timekeeping lock is already held.
1016 */
1017noinstr time64_t __ktime_get_real_seconds(void)
1018{
1019	struct timekeeper *tk = &tk_core.timekeeper;
1020
1021	return tk->xtime_sec;
1022}
1023
1024/**
1025 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
1026 * @systime_snapshot:	pointer to struct receiving the system time snapshot
1027 */
1028void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
1029{
1030	struct timekeeper *tk = &tk_core.timekeeper;
1031	unsigned int seq;
1032	ktime_t base_raw;
1033	ktime_t base_real;
1034	ktime_t base_boot;
1035	u64 nsec_raw;
1036	u64 nsec_real;
1037	u64 now;
1038
1039	WARN_ON_ONCE(timekeeping_suspended);
1040
1041	do {
1042		seq = read_seqcount_begin(&tk_core.seq);
1043		now = tk_clock_read(&tk->tkr_mono);
1044		systime_snapshot->cs_id = tk->tkr_mono.clock->id;
1045		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
1046		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
1047		base_real = ktime_add(tk->tkr_mono.base,
1048				      tk_core.timekeeper.offs_real);
1049		base_boot = ktime_add(tk->tkr_mono.base,
1050				      tk_core.timekeeper.offs_boot);
1051		base_raw = tk->tkr_raw.base;
1052		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
1053		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
1054	} while (read_seqcount_retry(&tk_core.seq, seq));
1055
1056	systime_snapshot->cycles = now;
1057	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
1058	systime_snapshot->boot = ktime_add_ns(base_boot, nsec_real);
1059	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
1060}
1061EXPORT_SYMBOL_GPL(ktime_get_snapshot);
1062
1063/* Scale base by mult/div checking for overflow */
1064static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
1065{
1066	u64 tmp, rem;
1067
1068	tmp = div64_u64_rem(*base, div, &rem);
1069
1070	if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
1071	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
1072		return -EOVERFLOW;
1073	tmp *= mult;
1074
1075	rem = div64_u64(rem * mult, div);
1076	*base = tmp + rem;
1077	return 0;
1078}
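/*
 * Worked example for the overflow check above (values assumed): with
 * mult = 2^20, fls64(mult) is 21, so the quotient and the remainder must
 * both fit in 64 - 21 = 43 bits. Anything larger is rejected with
 * -EOVERFLOW because the subsequent multiplication could overflow the
 * 64 bit intermediate; the check errs on the conservative side.
 */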
1079
1080/**
1081 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
1082 * @history:			Snapshot representing start of history
1083 * @partial_history_cycles:	Cycle offset into history (fractional part)
1084 * @total_history_cycles:	Total history length in cycles
1085 * @discontinuity:		True indicates the clock was set during the history period
1086 * @ts:				Cross timestamp that should be adjusted using
1087 *	partial/total ratio
1088 *
1089 * Helper function used by get_device_system_crosststamp() to correct the
1090 * crosstimestamp corresponding to the start of the current interval to the
1091 * system counter value (timestamp point) provided by the driver. The
1092 * total_history_* quantities are the total history starting at the provided
1093 * reference point and ending at the start of the current interval. The cycle
1094 * count between the driver timestamp point and the start of the current
1095 * interval is partial_history_cycles.
1096 */
1097static int adjust_historical_crosststamp(struct system_time_snapshot *history,
1098					 u64 partial_history_cycles,
1099					 u64 total_history_cycles,
1100					 bool discontinuity,
1101					 struct system_device_crosststamp *ts)
1102{
1103	struct timekeeper *tk = &tk_core.timekeeper;
1104	u64 corr_raw, corr_real;
1105	bool interp_forward;
1106	int ret;
1107
1108	if (total_history_cycles == 0 || partial_history_cycles == 0)
1109		return 0;
1110
1111	/* Interpolate shortest distance from beginning or end of history */
1112	interp_forward = partial_history_cycles > total_history_cycles / 2;
1113	partial_history_cycles = interp_forward ?
1114		total_history_cycles - partial_history_cycles :
1115		partial_history_cycles;
1116
1117	/*
1118	 * Scale the monotonic raw time delta by:
1119	 *	partial_history_cycles / total_history_cycles
1120	 */
1121	corr_raw = (u64)ktime_to_ns(
1122		ktime_sub(ts->sys_monoraw, history->raw));
1123	ret = scale64_check_overflow(partial_history_cycles,
1124				     total_history_cycles, &corr_raw);
1125	if (ret)
1126		return ret;
1127
1128	/*
1129	 * If there is a discontinuity in the history, scale monotonic raw
1130	 *	correction by:
1131	 *	mult(real)/mult(raw) yielding the realtime correction
1132	 * Otherwise, calculate the realtime correction similarly to the
1133	 *	monotonic raw calculation
1134	 */
1135	if (discontinuity) {
1136		corr_real = mul_u64_u32_div
1137			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
1138	} else {
1139		corr_real = (u64)ktime_to_ns(
1140			ktime_sub(ts->sys_realtime, history->real));
1141		ret = scale64_check_overflow(partial_history_cycles,
1142					     total_history_cycles, &corr_real);
1143		if (ret)
1144			return ret;
1145	}
1146
1147	/* Fix up the monotonic raw and real time values */
1148	if (interp_forward) {
1149		ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
1150		ts->sys_realtime = ktime_add_ns(history->real, corr_real);
1151	} else {
1152		ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
1153		ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
1154	}
1155
1156	return 0;
1157}
1158
1159/*
1160 * timestamp_in_interval - true if ts is chronologically in [start, end]
1161 *
1162 * True if ts occurs chronologically at or after start, and before or at end.
1163 */
1164static bool timestamp_in_interval(u64 start, u64 end, u64 ts)
1165{
1166	if (ts >= start && ts <= end)
1167		return true;
1168	if (start > end && (ts >= start || ts <= end))
1169		return true;
1170	return false;
1171}
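/*
 * Worked example (values assumed): with a clocksource counter that has
 * wrapped, start = 0xfffffff0 and end = 0x10. A timestamp ts = 0x5 lies
 * chronologically inside the interval and is accepted by the second test
 * (start > end && ts <= end), while ts = 0x20 matches neither test and is
 * rejected.
 */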
1172
1173static bool convert_clock(u64 *val, u32 numerator, u32 denominator)
1174{
1175	u64 rem, res;
1176
1177	if (!numerator || !denominator)
1178		return false;
1179
1180	res = div64_u64_rem(*val, denominator, &rem) * numerator;
1181	*val = res + div_u64(rem * numerator, denominator);
1182	return true;
1183}
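/*
 * Worked example (values assumed): converting cycles of a 24 MHz counter
 * to nanoseconds means scaling by numerator/denominator = 1000/24. Doing
 * the division first and scaling the remainder separately keeps the
 * intermediate products small; a plain (*val * numerator) / denominator
 * would already overflow 64 bit for counter values above roughly
 * 2^64 / 1000 ~= 1.8e16.
 */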
1184
1185static bool convert_base_to_cs(struct system_counterval_t *scv)
1186{
1187	struct clocksource *cs = tk_core.timekeeper.tkr_mono.clock;
1188	struct clocksource_base *base;
1189	u32 num, den;
1190
1191	/* The timestamp was taken from the time keeper clock source */
1192	if (cs->id == scv->cs_id)
1193		return true;
1194
1195	/*
1196	 * Check whether cs_id matches the base clock. Prevent the compiler from
1197	 * re-evaluating @base as the clocksource might change concurrently.
1198	 */
1199	base = READ_ONCE(cs->base);
1200	if (!base || base->id != scv->cs_id)
1201		return false;
1202
1203	num = scv->use_nsecs ? cs->freq_khz : base->numerator;
1204	den = scv->use_nsecs ? USEC_PER_SEC : base->denominator;
1205
1206	if (!convert_clock(&scv->cycles, num, den))
1207		return false;
1208
1209	scv->cycles += base->offset;
1210	return true;
1211}
1212
1213static bool convert_cs_to_base(u64 *cycles, enum clocksource_ids base_id)
1214{
1215	struct clocksource *cs = tk_core.timekeeper.tkr_mono.clock;
1216	struct clocksource_base *base;
1217
1218	/*
1219	 * Check whether base_id matches the base clock. Prevent the compiler from
1220	 * re-evaluating @base as the clocksource might change concurrently.
1221	 */
1222	base = READ_ONCE(cs->base);
1223	if (!base || base->id != base_id)
1224		return false;
1225
1226	*cycles -= base->offset;
1227	if (!convert_clock(cycles, base->denominator, base->numerator))
1228		return false;
1229	return true;
1230}
1231
1232static bool convert_ns_to_cs(u64 *delta)
1233{
1234	struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
1235
1236	if (BITS_TO_BYTES(fls64(*delta) + tkr->shift) >= sizeof(*delta))
1237		return false;
1238
1239	*delta = div_u64((*delta << tkr->shift) - tkr->xtime_nsec, tkr->mult);
1240	return true;
1241}
1242
1243/**
1244 * ktime_real_to_base_clock() - Convert CLOCK_REALTIME timestamp to a base clock timestamp
1245 * @treal:	CLOCK_REALTIME timestamp to convert
1246 * @base_id:	base clocksource id
1247 * @cycles:	pointer to store the converted base clock timestamp
1248 *
1249 * Converts a supplied, future realtime clock value to the corresponding base clock value.
1250 *
1251 * Return:  true if the conversion is successful, false otherwise.
1252 */
1253bool ktime_real_to_base_clock(ktime_t treal, enum clocksource_ids base_id, u64 *cycles)
1254{
1255	struct timekeeper *tk = &tk_core.timekeeper;
1256	unsigned int seq;
1257	u64 delta;
1258
1259	do {
1260		seq = read_seqcount_begin(&tk_core.seq);
1261		if ((u64)treal < tk->tkr_mono.base_real)
1262			return false;
1263		delta = (u64)treal - tk->tkr_mono.base_real;
1264		if (!convert_ns_to_cs(&delta))
1265			return false;
1266		*cycles = tk->tkr_mono.cycle_last + delta;
1267		if (!convert_cs_to_base(cycles, base_id))
1268			return false;
1269	} while (read_seqcount_retry(&tk_core.seq, seq));
1270
1271	return true;
1272}
1273EXPORT_SYMBOL_GPL(ktime_real_to_base_clock);
1274
1275/**
1276 * get_device_system_crosststamp - Synchronously capture system/device timestamp
1277 * @get_time_fn:	Callback to get simultaneous device time and
1278 *	system counter from the device driver
1279 * @ctx:		Context passed to get_time_fn()
1280 * @history_begin:	Historical reference point used to interpolate system
1281 *	time when counter provided by the driver is before the current interval
1282 * @xtstamp:		Receives simultaneously captured system and device time
1283 *
1284 * Reads a timestamp from a device and correlates it to system time
1285 */
1286int get_device_system_crosststamp(int (*get_time_fn)
1287				  (ktime_t *device_time,
1288				   struct system_counterval_t *sys_counterval,
1289				   void *ctx),
1290				  void *ctx,
1291				  struct system_time_snapshot *history_begin,
1292				  struct system_device_crosststamp *xtstamp)
1293{
1294	struct system_counterval_t system_counterval;
1295	struct timekeeper *tk = &tk_core.timekeeper;
1296	u64 cycles, now, interval_start;
1297	unsigned int clock_was_set_seq = 0;
1298	ktime_t base_real, base_raw;
1299	u64 nsec_real, nsec_raw;
1300	u8 cs_was_changed_seq;
1301	unsigned int seq;
1302	bool do_interp;
1303	int ret;
1304
1305	do {
1306		seq = read_seqcount_begin(&tk_core.seq);
1307		/*
1308		 * Try to synchronously capture device time and a system
1309		 * counter value calling back into the device driver
1310		 */
1311		ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
1312		if (ret)
1313			return ret;
1314
1315		/*
1316		 * Verify that the clocksource ID associated with the captured
1317		 * system counter value is the same as for the currently
1318		 * installed timekeeper clocksource
1319		 */
1320		if (system_counterval.cs_id == CSID_GENERIC ||
1321		    !convert_base_to_cs(&system_counterval))
1322			return -ENODEV;
1323		cycles = system_counterval.cycles;
1324
1325		/*
1326		 * Check whether the system counter value provided by the
1327		 * device driver is on the current timekeeping interval.
1328		 */
1329		now = tk_clock_read(&tk->tkr_mono);
1330		interval_start = tk->tkr_mono.cycle_last;
1331		if (!timestamp_in_interval(interval_start, now, cycles)) {
1332			clock_was_set_seq = tk->clock_was_set_seq;
1333			cs_was_changed_seq = tk->cs_was_changed_seq;
1334			cycles = interval_start;
1335			do_interp = true;
1336		} else {
1337			do_interp = false;
1338		}
1339
1340		base_real = ktime_add(tk->tkr_mono.base,
1341				      tk_core.timekeeper.offs_real);
1342		base_raw = tk->tkr_raw.base;
1343
1344		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles);
1345		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles);
1346	} while (read_seqcount_retry(&tk_core.seq, seq));
1347
1348	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
1349	xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);
1350
1351	/*
1352	 * Interpolate if necessary, adjusting back from the start of the
1353	 * current interval
1354	 */
1355	if (do_interp) {
1356		u64 partial_history_cycles, total_history_cycles;
1357		bool discontinuity;
1358
1359		/*
1360		 * Check that the counter value is not before the provided
1361		 * history reference and that the history doesn't cross a
1362		 * clocksource change
1363		 */
1364		if (!history_begin ||
1365		    !timestamp_in_interval(history_begin->cycles,
1366					   cycles, system_counterval.cycles) ||
1367		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
1368			return -EINVAL;
1369		partial_history_cycles = cycles - system_counterval.cycles;
1370		total_history_cycles = cycles - history_begin->cycles;
1371		discontinuity =
1372			history_begin->clock_was_set_seq != clock_was_set_seq;
1373
1374		ret = adjust_historical_crosststamp(history_begin,
1375						    partial_history_cycles,
1376						    total_history_cycles,
1377						    discontinuity, xtstamp);
1378		if (ret)
1379			return ret;
1380	}
1381
1382	return 0;
1383}
1384EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
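/*
 * Illustrative sketch, not part of this file: the rough shape of a driver
 * callback passed to get_device_system_crosststamp(). The helpers
 * example_read_device_ns() and example_read_system_counter() and the chosen
 * clocksource id are assumptions, not code from a real driver.
 */
static __maybe_unused int example_get_time_fn(ktime_t *device_time,
					      struct system_counterval_t *sys_counterval,
					      void *ctx)
{
	/* Read device time and the system counter as close together as possible */
	*device_time = ns_to_ktime(example_read_device_ns(ctx));
	sys_counterval->cycles = example_read_system_counter(ctx);
	sys_counterval->cs_id = CSID_ARM_ARCH_COUNTER;
	sys_counterval->use_nsecs = false;
	return 0;
}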
1385
1386/**
1387 * timekeeping_clocksource_has_base - Check whether the current clocksource
1388 *				      is based on a given base clock
1389 * @id:		base clocksource ID
1390 *
1391 * Note:	The return value is a snapshot which can become invalid right
1392 *		after the function returns.
1393 *
1394 * Return:	true if the timekeeper clocksource has a base clock with @id,
1395 *		false otherwise
1396 */
1397bool timekeeping_clocksource_has_base(enum clocksource_ids id)
1398{
1399	/*
1400	 * This is a snapshot, so no point in using the sequence
1401	 * count. Just prevent the compiler from re-evaluating @base as the
1402	 * clocksource might change concurrently.
1403	 */
1404	struct clocksource_base *base = READ_ONCE(tk_core.timekeeper.tkr_mono.clock->base);
1405
1406	return base ? base->id == id : false;
1407}
1408EXPORT_SYMBOL_GPL(timekeeping_clocksource_has_base);
1409
1410/**
1411 * do_settimeofday64 - Sets the time of day.
1412 * @ts:     pointer to the timespec64 variable containing the new time
1413 *
1414 * Sets the time of day to the new time, updates NTP and notifies hrtimers
1415 */
1416int do_settimeofday64(const struct timespec64 *ts)
1417{
1418	struct timespec64 ts_delta, xt;
1419
1420	if (!timespec64_valid_settod(ts))
1421		return -EINVAL;
1422
1423	scoped_guard (raw_spinlock_irqsave, &tk_core.lock) {
1424		struct timekeeper *tks = &tk_core.shadow_timekeeper;
1425
1426		timekeeping_forward_now(tks);
1427
1428		xt = tk_xtime(tks);
1429		ts_delta = timespec64_sub(*ts, xt);
1430
1431		if (timespec64_compare(&tks->wall_to_monotonic, &ts_delta) > 0) {
1432			timekeeping_restore_shadow(&tk_core);
1433			return -EINVAL;
1434		}
1435
1436		tk_set_wall_to_mono(tks, timespec64_sub(tks->wall_to_monotonic, ts_delta));
1437		tk_set_xtime(tks, ts);
1438		timekeeping_update_from_shadow(&tk_core, TK_UPDATE_ALL);
1439	}
1440
1441	/* Signal hrtimers about time change */
1442	clock_was_set(CLOCK_SET_WALL);
1443
1444	audit_tk_injoffset(ts_delta);
1445	add_device_randomness(ts, sizeof(*ts));
1446	return 0;
1447}
1448EXPORT_SYMBOL(do_settimeofday64);
1449
1450/**
1451 * timekeeping_inject_offset - Adds or subtracts from the current time.
1452 * @ts:		Pointer to the timespec variable containing the offset
1453 *
1454 * Adds or subtracts an offset value from the current time.
1455 */
1456static int timekeeping_inject_offset(const struct timespec64 *ts)
1457{
1458	if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
1459		return -EINVAL;
1460
1461	scoped_guard (raw_spinlock_irqsave, &tk_core.lock) {
1462		struct timekeeper *tks = &tk_core.shadow_timekeeper;
1463		struct timespec64 tmp;
1464
1465		timekeeping_forward_now(tks);
1466
1467		/* Make sure the proposed value is valid */
1468		tmp = timespec64_add(tk_xtime(tks), *ts);
1469		if (timespec64_compare(&tks->wall_to_monotonic, ts) > 0 ||
1470		    !timespec64_valid_settod(&tmp)) {
1471			timekeeping_restore_shadow(&tk_core);
1472			return -EINVAL;
1473		}
1474
1475		tk_xtime_add(tks, ts);
1476		tk_set_wall_to_mono(tks, timespec64_sub(tks->wall_to_monotonic, *ts));
1477		timekeeping_update_from_shadow(&tk_core, TK_UPDATE_ALL);
1478	}
1479
1480	/* Signal hrtimers about time change */
1481	clock_was_set(CLOCK_SET_WALL);
1482	return 0;
1483}
1484
1485/*
1486 * Indicates if there is an offset between the system clock and the hardware
1487 * clock/persistent clock/rtc.
1488 */
1489int persistent_clock_is_local;
1490
1491/*
1492 * Adjust the time obtained from the CMOS to be UTC time instead of
1493 * local time.
1494 *
1495 * This is ugly, but preferable to the alternatives.  Otherwise we
1496 * would either need to write a program to do it in /etc/rc (and risk
1497 * confusion if the program gets run more than once; it would also be
1498 * hard to make the program warp the clock precisely n hours)  or
1499 * compile in the timezone information into the kernel.  Bad, bad....
1500 *
1501 *						- TYT, 1992-01-01
1502 *
1503 * The best thing to do is to keep the CMOS clock in universal time (UTC)
1504 * as real UNIX machines always do it. This avoids all headaches about
1505 * daylight saving times and warping kernel clocks.
1506 */
1507void timekeeping_warp_clock(void)
1508{
1509	if (sys_tz.tz_minuteswest != 0) {
1510		struct timespec64 adjust;
1511
1512		persistent_clock_is_local = 1;
1513		adjust.tv_sec = sys_tz.tz_minuteswest * 60;
1514		adjust.tv_nsec = 0;
1515		timekeeping_inject_offset(&adjust);
1516	}
1517}
1518
1519/*
1520 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
1521 */
1522static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
1523{
1524	tk->tai_offset = tai_offset;
1525	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
1526}
1527
1528/*
1529 * change_clocksource - Swaps clocksources if a new one is available
1530 *
1531 * Accumulates current time interval and initializes new clocksource
1532 */
1533static int change_clocksource(void *data)
1534{
1535	struct clocksource *new = data, *old = NULL;
1536
1537	/*
1538	 * If the clocksource is in a module, get a module reference.
1539	 * Succeeds for built-in code (owner == NULL) as well. Abort if the
1540	 * reference can't be acquired.
1541	 */
1542	if (!try_module_get(new->owner))
1543		return 0;
1544
1545	/* Abort if the device can't be enabled */
1546	if (new->enable && new->enable(new) != 0) {
1547		module_put(new->owner);
1548		return 0;
1549	}
1550
1551	scoped_guard (raw_spinlock_irqsave, &tk_core.lock) {
1552		struct timekeeper *tks = &tk_core.shadow_timekeeper;
1553
1554		timekeeping_forward_now(tks);
1555		old = tks->tkr_mono.clock;
1556		tk_setup_internals(tks, new);
1557		timekeeping_update_from_shadow(&tk_core, TK_UPDATE_ALL);
1558	}
1559
1560	if (old) {
1561		if (old->disable)
1562			old->disable(old);
1563		module_put(old->owner);
1564	}
1565
1566	return 0;
1567}
1568
1569/**
1570 * timekeeping_notify - Install a new clock source
1571 * @clock:		pointer to the clock source
1572 *
1573 * This function is called from clocksource.c after a new, better clock
1574 * source has been registered. The caller holds the clocksource_mutex.
1575 */
1576int timekeeping_notify(struct clocksource *clock)
1577{
1578	struct timekeeper *tk = &tk_core.timekeeper;
1579
1580	if (tk->tkr_mono.clock == clock)
1581		return 0;
1582	stop_machine(change_clocksource, clock, NULL);
1583	tick_clock_notify();
1584	return tk->tkr_mono.clock == clock ? 0 : -1;
1585}
1586
1587/**
1588 * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec
1589 * @ts:		pointer to the timespec64 to be set
1590 *
1591 * Returns the raw monotonic time (completely un-modified by ntp)
1592 */
1593void ktime_get_raw_ts64(struct timespec64 *ts)
1594{
1595	struct timekeeper *tk = &tk_core.timekeeper;
1596	unsigned int seq;
1597	u64 nsecs;
1598
1599	do {
1600		seq = read_seqcount_begin(&tk_core.seq);
1601		ts->tv_sec = tk->raw_sec;
1602		nsecs = timekeeping_get_ns(&tk->tkr_raw);
1603
1604	} while (read_seqcount_retry(&tk_core.seq, seq));
1605
1606	ts->tv_nsec = 0;
1607	timespec64_add_ns(ts, nsecs);
1608}
1609EXPORT_SYMBOL(ktime_get_raw_ts64);
1610
1611
1612/**
1613 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
1614 */
1615int timekeeping_valid_for_hres(void)
1616{
1617	struct timekeeper *tk = &tk_core.timekeeper;
1618	unsigned int seq;
1619	int ret;
1620
1621	do {
1622		seq = read_seqcount_begin(&tk_core.seq);
1623
1624		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
1625
1626	} while (read_seqcount_retry(&tk_core.seq, seq));
1627
1628	return ret;
1629}
1630
1631/**
1632 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
1633 */
1634u64 timekeeping_max_deferment(void)
1635{
1636	struct timekeeper *tk = &tk_core.timekeeper;
1637	unsigned int seq;
1638	u64 ret;
1639
1640	do {
1641		seq = read_seqcount_begin(&tk_core.seq);
1642
1643		ret = tk->tkr_mono.clock->max_idle_ns;
1644
1645	} while (read_seqcount_retry(&tk_core.seq, seq));
1646
1647	return ret;
1648}
1649
1650/**
1651 * read_persistent_clock64 -  Return time from the persistent clock.
1652 * @ts: Pointer to the storage for the readout value
1653 *
1654 * Weak dummy function for arches that do not yet support it.
1655 * Reads the time from the battery backed persistent clock.
1656 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
1657 *
1658 *  XXX - Do be sure to remove it once all arches implement it.
1659 */
1660void __weak read_persistent_clock64(struct timespec64 *ts)
1661{
1662	ts->tv_sec = 0;
1663	ts->tv_nsec = 0;
1664}
1665
1666/**
1667 * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
1668 *                                        from the boot.
1669 * @wall_time:	  current time as returned by persistent clock
1670 * @boot_offset:  offset that is defined as wall_time - boot_time
1671 *
1672 * Weak dummy function for arches that do not yet support it.
1673 *
1674 * The default function calculates offset based on the current value of
1675 * local_clock(). This way architectures that support sched_clock() but don't
1676 * support dedicated boot time clock will provide the best estimate of the
1677 * boot time.
1678 */
1679void __weak __init
1680read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
1681				     struct timespec64 *boot_offset)
1682{
1683	read_persistent_clock64(wall_time);
1684	*boot_offset = ns_to_timespec64(local_clock());
1685}
1686
1687static __init void tkd_basic_setup(struct tk_data *tkd)
1688{
1689	raw_spin_lock_init(&tkd->lock);
1690	seqcount_raw_spinlock_init(&tkd->seq, &tkd->lock);
1691}
1692
1693/*
1694 * Flag reflecting whether timekeeping_resume() has injected sleeptime.
1695 *
1696 * The flag starts out false and is only set when a suspend reaches
1697 * timekeeping_suspend(). timekeeping_resume() clears it again when the
1698 * timekeeper clocksource did not stop across suspend and was therefore
1699 * used to update the sleep time. If the timekeeper clocksource stopped,
1700 * the flag stays true and is used by the RTC resume code to decide
1701 * whether sleep time must be injected, in which case the flag is cleared afterwards.
1702 *
1703 * If a suspend fails before reaching timekeeping_resume() then the flag
1704 * stays false and prevents erroneous sleeptime injection.
1705 */
1706static bool suspend_timing_needed;
1707
1708/* Flag for if there is a persistent clock on this platform */
1709static bool persistent_clock_exists;
1710
1711/*
1712 * timekeeping_init - Initializes the clocksource and common timekeeping values
1713 */
1714void __init timekeeping_init(void)
1715{
1716	struct timespec64 wall_time, boot_offset, wall_to_mono;
1717	struct timekeeper *tks = &tk_core.shadow_timekeeper;
1718	struct clocksource *clock;
1719
1720	tkd_basic_setup(&tk_core);
1721
1722	read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
1723	if (timespec64_valid_settod(&wall_time) &&
1724	    timespec64_to_ns(&wall_time) > 0) {
1725		persistent_clock_exists = true;
1726	} else if (timespec64_to_ns(&wall_time) != 0) {
1727		pr_warn("Persistent clock returned invalid value");
1728		wall_time = (struct timespec64){0};
1729	}
1730
1731	if (timespec64_compare(&wall_time, &boot_offset) < 0)
1732		boot_offset = (struct timespec64){0};
1733
1734	/*
1735	 * We want to set wall_to_mono so that the following holds:
1736	 * wall time + wall_to_mono = boot time
1737	 */
1738	wall_to_mono = timespec64_sub(boot_offset, wall_time);
1739
1740	guard(raw_spinlock_irqsave)(&tk_core.lock);
1741
1742	ntp_init();
1743
1744	clock = clocksource_default_clock();
1745	if (clock->enable)
1746		clock->enable(clock);
1747	tk_setup_internals(tks, clock);
1748
1749	tk_set_xtime(tks, &wall_time);
1750	tks->raw_sec = 0;
1751
1752	tk_set_wall_to_mono(tks, wall_to_mono);
1753
1754	timekeeping_update_from_shadow(&tk_core, TK_CLOCK_WAS_SET);
1755}
1756
1757/* time in seconds when suspend began for persistent clock */
1758static struct timespec64 timekeeping_suspend_time;
1759
1760/**
1761 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1762 * @tk:		Pointer to the timekeeper to be updated
1763 * @delta:	Pointer to the delta value in timespec64 format
1764 *
1765 * Takes a timespec offset measuring a suspend interval and properly
1766 * adds the sleep offset to the timekeeping variables.
1767 */
1768static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1769					   const struct timespec64 *delta)
1770{
1771	if (!timespec64_valid_strict(delta)) {
1772		printk_deferred(KERN_WARNING
1773				"__timekeeping_inject_sleeptime: Invalid "
1774				"sleep delta value!\n");
1775		return;
1776	}
1777	tk_xtime_add(tk, delta);
1778	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1779	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1780	tk_debug_account_sleep_time(delta);
1781}
1782
1783#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
1784/*
1785 * We have three kinds of time sources to use for sleep time
1786 * injection, the preference order is:
1787 * 1) non-stop clocksource
1788 * 2) persistent clock (ie: RTC accessible when irqs are off)
1789 * 3) RTC
1790 *
1791 * 1) and 2) are used by timekeeping, 3) by RTC subsystem.
1792 * If the system has neither 1) nor 2), 3) will be used as a last resort.
1793 *
1794 *
1795 * If timekeeping has injected sleeptime via either 1) or 2),
1796 * 3) becomes needless, so in this case we don't need to call
1797 * rtc_resume(), and this is what timekeeping_rtc_skipresume()
1798 * means.
1799 */
1800bool timekeeping_rtc_skipresume(void)
1801{
1802	return !suspend_timing_needed;
1803}
1804
1805/*
1806 * Whether 1) can be used is only determined in timekeeping_resume(),
1807 * which is invoked after rtc_suspend(), so rtc_suspend() cannot be
1808 * skipped reliably even if the system has 1).
1809 *
1810 * But if system has 2), 2) will definitely be used, so in this
1811 * case we don't need to call rtc_suspend(), and this is what
1812 * timekeeping_rtc_skipsuspend() means.
1813 */
1814bool timekeeping_rtc_skipsuspend(void)
1815{
1816	return persistent_clock_exists;
1817}
1818
1819/**
1820 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1821 * @delta: pointer to a timespec64 delta value
1822 *
1823 * This hook is for architectures that cannot support read_persistent_clock64
1824 * because their RTC/persistent clock is only accessible when irqs are enabled,
1825 * and that also don't have an effective nonstop clocksource.
1826 *
1827 * This function should only be called by rtc_resume(), and allows
1828 * a suspend offset to be injected into the timekeeping values.
1829 */
1830void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
1831{
1832	scoped_guard(raw_spinlock_irqsave, &tk_core.lock) {
1833		struct timekeeper *tks = &tk_core.shadow_timekeeper;
1834
1835		suspend_timing_needed = false;
1836		timekeeping_forward_now(tks);
1837		__timekeeping_inject_sleeptime(tks, delta);
1838		timekeeping_update_from_shadow(&tk_core, TK_UPDATE_ALL);
1839	}
1840
1841	/* Signal hrtimers about time change */
1842	clock_was_set(CLOCK_SET_WALL | CLOCK_SET_BOOT);
1843}
1844#endif
1845
1846/**
1847 * timekeeping_resume - Resumes the generic timekeeping subsystem.
1848 */
1849void timekeeping_resume(void)
1850{
1851	struct timekeeper *tks = &tk_core.shadow_timekeeper;
1852	struct clocksource *clock = tks->tkr_mono.clock;
1853	struct timespec64 ts_new, ts_delta;
1854	bool inject_sleeptime = false;
1855	u64 cycle_now, nsec;
1856	unsigned long flags;
1857
1858	read_persistent_clock64(&ts_new);
1859
1860	clockevents_resume();
1861	clocksource_resume();
1862
1863	raw_spin_lock_irqsave(&tk_core.lock, flags);
1864
1865	/*
1866	 * After system resumes, we need to calculate the suspended time and
1867	 * compensate it for the OS time. There are 3 sources that could be
1868	 * used: Nonstop clocksource during suspend, persistent clock and rtc
1869	 * device.
1870	 *
1871	 * One specific platform may have 1 or 2 or all of them, and the
1872	 * preference will be:
1873	 *	suspend-nonstop clocksource -> persistent clock -> rtc
1874	 * The less preferred source will only be tried if there is no better
1875	 * usable source. The rtc part is handled separately in rtc core code.
1876	 */
1877	cycle_now = tk_clock_read(&tks->tkr_mono);
1878	nsec = clocksource_stop_suspend_timing(clock, cycle_now);
1879	if (nsec > 0) {
1880		ts_delta = ns_to_timespec64(nsec);
1881		inject_sleeptime = true;
1882	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1883		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1884		inject_sleeptime = true;
1885	}
1886
1887	if (inject_sleeptime) {
1888		suspend_timing_needed = false;
1889		__timekeeping_inject_sleeptime(tks, &ts_delta);
1890	}
1891
1892	/* Re-base the last cycle value */
1893	tks->tkr_mono.cycle_last = cycle_now;
1894	tks->tkr_raw.cycle_last  = cycle_now;
1895
1896	tks->ntp_error = 0;
1897	timekeeping_suspended = 0;
1898	timekeeping_update_from_shadow(&tk_core, TK_CLOCK_WAS_SET);
1899	raw_spin_unlock_irqrestore(&tk_core.lock, flags);
1900
1901	touch_softlockup_watchdog();
1902
1903	/* Resume the clockevent device(s) and hrtimers */
1904	tick_resume();
1905	/* Notify timerfd as resume is equivalent to clock_was_set() */
1906	timerfd_resume();
1907}
1908
1909int timekeeping_suspend(void)
1910{
1911	struct timekeeper *tks = &tk_core.shadow_timekeeper;
1912	struct timespec64 delta, delta_delta;
1913	static struct timespec64 old_delta;
1914	struct clocksource *curr_clock;
1915	unsigned long flags;
1916	u64 cycle_now;
1917
1918	read_persistent_clock64(&timekeeping_suspend_time);
1919
1920	/*
1921	 * On some systems the persistent_clock cannot be detected at
1922	 * timekeeping_init() by its return value, so if we see a valid
1923	 * value returned, update the persistent_clock_exists flag.
1924	 */
1925	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1926		persistent_clock_exists = true;
1927
1928	suspend_timing_needed = true;
1929
1930	raw_spin_lock_irqsave(&tk_core.lock, flags);
1931	timekeeping_forward_now(tks);
1932	timekeeping_suspended = 1;
1933
1934	/*
1935	 * Since we've called forward_now, cycle_last stores the value
1936	 * just read from the current clocksource. Save this to potentially
1937	 * use in suspend timing.
1938	 */
1939	curr_clock = tks->tkr_mono.clock;
1940	cycle_now = tks->tkr_mono.cycle_last;
1941	clocksource_start_suspend_timing(curr_clock, cycle_now);
1942
1943	if (persistent_clock_exists) {
1944		/*
1945		 * To avoid drift caused by repeated suspend/resumes,
1946		 * which each can add ~1 second drift error,
1947		 * try to compensate so the difference in system time
1948		 * and persistent_clock time stays close to constant.
1949		 */
1950		delta = timespec64_sub(tk_xtime(tks), timekeeping_suspend_time);
1951		delta_delta = timespec64_sub(delta, old_delta);
1952		if (abs(delta_delta.tv_sec) >= 2) {
1953			/*
1954			 * if delta_delta is too large, assume time correction
1955			 * has occurred and set old_delta to the current delta.
1956			 */
1957			old_delta = delta;
1958		} else {
1959			/* Otherwise try to adjust timekeeping_suspend_time to compensate */
1960			timekeeping_suspend_time =
1961				timespec64_add(timekeeping_suspend_time, delta_delta);
1962		}
1963	}
1964
1965	timekeeping_update_from_shadow(&tk_core, 0);
1966	halt_fast_timekeeper(tks);
1967	raw_spin_unlock_irqrestore(&tk_core.lock, flags);
1968
1969	tick_suspend();
1970	clocksource_suspend();
1971	clockevents_suspend();
1972
1973	return 0;
1974}
1975
1976/* sysfs resume/suspend bits for timekeeping */
1977static struct syscore_ops timekeeping_syscore_ops = {
1978	.resume		= timekeeping_resume,
1979	.suspend	= timekeeping_suspend,
1980};
1981
1982static int __init timekeeping_init_ops(void)
1983{
1984	register_syscore_ops(&timekeeping_syscore_ops);
1985	return 0;
1986}
1987device_initcall(timekeeping_init_ops);
1988
1989/*
1990 * Apply a multiplier adjustment to the timekeeper
1991 */
1992static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1993							 s64 offset,
1994							 s32 mult_adj)
1995{
1996	s64 interval = tk->cycle_interval;
1997
1998	if (mult_adj == 0) {
1999		return;
2000	} else if (mult_adj == -1) {
2001		interval = -interval;
2002		offset = -offset;
2003	} else if (mult_adj != 1) {
2004		interval *= mult_adj;
2005		offset *= mult_adj;
2006	}
2007
2008	/*
2009	 * So the following can be confusing.
2010	 *
2011	 * To keep things simple, let's assume mult_adj == 1 for now.
2012	 *
2013	 * When mult_adj != 1, remember that the interval and offset values
2014	 * have been appropriately scaled so the math is the same.
2015	 *
2016	 * The basic idea here is that we're increasing the multiplier
2017	 * by one, this causes the xtime_interval to be incremented by
2018	 * one cycle_interval. This is because:
2019	 *	xtime_interval = cycle_interval * mult
2020	 * So if mult is being incremented by one:
2021	 *	xtime_interval = cycle_interval * (mult + 1)
2022	 * It's the same as:
2023	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
2024	 * Which can be shortened to:
2025	 *	xtime_interval += cycle_interval
2026	 *
2027	 * So offset stores the non-accumulated cycles. Thus the current
2028	 * time (in shifted nanoseconds) is:
2029	 *	now = (offset * adj) + xtime_nsec
2030	 * Now, even though we're adjusting the clock frequency, we have
2031	 * to keep time consistent. In other words, we can't jump back
2032	 * in time, and we also want to avoid jumping forward in time.
2033	 *
2034	 * So given the same offset value, we need the time to be the same
2035	 * both before and after the freq adjustment.
2036	 *	now = (offset * adj_1) + xtime_nsec_1
2037	 *	now = (offset * adj_2) + xtime_nsec_2
2038	 * So:
2039	 *	(offset * adj_1) + xtime_nsec_1 =
2040	 *		(offset * adj_2) + xtime_nsec_2
2041	 * And we know:
2042	 *	adj_2 = adj_1 + 1
2043	 * So:
2044	 *	(offset * adj_1) + xtime_nsec_1 =
2045	 *		(offset * (adj_1+1)) + xtime_nsec_2
2046	 *	(offset * adj_1) + xtime_nsec_1 =
2047	 *		(offset * adj_1) + offset + xtime_nsec_2
2048	 * Canceling the sides:
2049	 *	xtime_nsec_1 = offset + xtime_nsec_2
2050	 * Which gives us:
2051	 *	xtime_nsec_2 = xtime_nsec_1 - offset
2052	 * Which simplifies to:
2053	 *	xtime_nsec -= offset
2054	 */
2055	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
2056		/* NTP adjustment caused clocksource mult overflow */
2057		WARN_ON_ONCE(1);
2058		return;
2059	}
2060
2061	tk->tkr_mono.mult += mult_adj;
2062	tk->xtime_interval += interval;
2063	tk->tkr_mono.xtime_nsec -= offset;
2064}
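/*
 * Worked example for the reasoning above (values assumed): with
 * mult_adj = +1 and offset = 1000 unaccumulated cycles, xtime_interval
 * grows by one cycle_interval and xtime_nsec shrinks by the same offset,
 * so offset * (mult + 1) + (xtime_nsec - offset) equals
 * offset * mult + xtime_nsec and the readout does not jump; only cycles
 * accumulated from now on are scaled at the new rate.
 */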
2065
2066/*
2067 * Adjust the timekeeper's multiplier to the correct frequency
2068 * and also to reduce the accumulated error value.
2069 */
2070static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
2071{
2072	u64 ntp_tl = ntp_tick_length();
2073	u32 mult;
2074
2075	/*
2076	 * Determine the multiplier from the current NTP tick length.
2077	 * Avoid expensive division when the tick length doesn't change.
2078	 */
2079	if (likely(tk->ntp_tick == ntp_tl)) {
2080		mult = tk->tkr_mono.mult - tk->ntp_err_mult;
2081	} else {
2082		tk->ntp_tick = ntp_tl;
2083		mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
2084				 tk->xtime_remainder, tk->cycle_interval);
2085	}
2086
2087	/*
2088	 * If the clock is behind the NTP time, increase the multiplier by 1
2089	 * to catch up with it. If it's ahead and there was a remainder in the
2090	 * tick division, the clock will slow down. Otherwise it will stay
2091	 * ahead until the tick length changes to a non-divisible value.
2092	 */
2093	tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
2094	mult += tk->ntp_err_mult;
2095
2096	timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);
2097
2098	if (unlikely(tk->tkr_mono.clock->maxadj &&
2099		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
2100			> tk->tkr_mono.clock->maxadj))) {
2101		printk_once(KERN_WARNING
2102			"Adjusting %s more than 11%% (%ld vs %ld)\n",
2103			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
2104			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
2105	}
2106
2107	/*
2108	 * It may be possible that when we entered this function, xtime_nsec
2109	 * was very small.  Further, if we're slightly speeding up the clocksource
2110	 * in the code above, it's possible the required corrective factor to
2111	 * xtime_nsec could cause it to underflow.
2112	 *
2113	 * Now, since we have already accumulated the second and the NTP
2114	 * subsystem has been notified via second_overflow(), we need to skip
2115	 * the next update.
2116	 */
2117	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
2118		tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
2119							tk->tkr_mono.shift;
2120		tk->xtime_sec--;
2121		tk->skip_second_overflow = 1;
2122	}
2123}
2124
2125/*
2126 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
2127 *
2128 * Helper function that accumulates the nsecs greater than a second
2129 * from the xtime_nsec field to the xtime_secs field.
2130 * It also calls into the NTP code to handle leapsecond processing.
2131 */
2132static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
2133{
2134	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
2135	unsigned int clock_set = 0;
2136
2137	while (tk->tkr_mono.xtime_nsec >= nsecps) {
2138		int leap;
2139
2140		tk->tkr_mono.xtime_nsec -= nsecps;
2141		tk->xtime_sec++;
2142
2143		/*
2144		 * Skip NTP update if this second was accumulated before,
2145		 * i.e. xtime_nsec underflowed in timekeeping_adjust()
2146		 */
2147		if (unlikely(tk->skip_second_overflow)) {
2148			tk->skip_second_overflow = 0;
2149			continue;
2150		}
2151
2152		/* Figure out if it's a leap second and apply it if needed */
2153		leap = second_overflow(tk->xtime_sec);
2154		if (unlikely(leap)) {
2155			struct timespec64 ts;
2156
2157			tk->xtime_sec += leap;
2158
2159			ts.tv_sec = leap;
2160			ts.tv_nsec = 0;
2161			tk_set_wall_to_mono(tk,
2162				timespec64_sub(tk->wall_to_monotonic, ts));
2163
2164			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
2165
2166			clock_set = TK_CLOCK_WAS_SET;
2167		}
2168	}
2169	return clock_set;
2170}
2171
2172/*
2173 * logarithmic_accumulation - shifted accumulation of cycles
2174 *
2175 * This function accumulates a shifted interval of cycles into
2176 * a shifted interval of nanoseconds. Allows for an O(log) accumulation
2177 * loop.
2178 *
2179 * Returns the unconsumed cycles.
2180 */
2181static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
2182				    u32 shift, unsigned int *clock_set)
2183{
2184	u64 interval = tk->cycle_interval << shift;
2185	u64 snsec_per_sec;
2186
2187	/* If the offset is smaller than a shifted interval, do nothing */
2188	if (offset < interval)
2189		return offset;
2190
2191	/* Accumulate one shifted interval */
2192	offset -= interval;
2193	tk->tkr_mono.cycle_last += interval;
2194	tk->tkr_raw.cycle_last  += interval;
2195
2196	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
2197	*clock_set |= accumulate_nsecs_to_secs(tk);
2198
2199	/* Accumulate raw time */
2200	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
2201	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
2202	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
2203		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
2204		tk->raw_sec++;
2205	}
2206
2207	/* Accumulate error between NTP and clock interval */
2208	tk->ntp_error += tk->ntp_tick << shift;
2209	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
2210						(tk->ntp_error_shift + shift);
2211
2212	return offset;
2213}
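/*
 * Worked example (values assumed): if the pending offset amounts to about
 * 100 cycle_intervals, timekeeping_advance() starts with
 * shift = ilog2(100) ~= 6, so the first call here consumes 64 intervals in
 * one step, the next 32, and the shift keeps dropping until the remaining
 * 4 intervals are consumed. That is what makes the accumulation loop
 * O(log(offset)) rather than O(offset).
 */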
2214
2215/*
2216 * timekeeping_advance - Updates the timekeeper to the current time and
2217 * current NTP tick length
2218 */
2219static bool timekeeping_advance(enum timekeeping_adv_mode mode)
2220{
2221	struct timekeeper *tk = &tk_core.shadow_timekeeper;
2222	struct timekeeper *real_tk = &tk_core.timekeeper;
2223	unsigned int clock_set = 0;
2224	int shift = 0, maxshift;
2225	u64 offset;
2226
2227	guard(raw_spinlock_irqsave)(&tk_core.lock);
2228
2229	/* Make sure we're fully resumed: */
2230	if (unlikely(timekeeping_suspended))
2231		return false;
2232
2233	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
2234				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
2235				   tk->tkr_mono.clock->max_raw_delta);
2236
2237	/* Check if there's really nothing to do */
2238	if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
2239		return false;
2240
2241	/*
2242	 * With NO_HZ we may have to accumulate many cycle_intervals
2243	 * (think "ticks") worth of time at once. To do this efficiently,
2244	 * we calculate the largest doubling multiple of cycle_intervals
2245	 * that is smaller than the offset.  We then accumulate that
2246	 * chunk in one go, and then try to consume the next smaller
2247	 * doubled multiple.
2248	 */
2249	shift = ilog2(offset) - ilog2(tk->cycle_interval);
2250	shift = max(0, shift);
2251	/* Bound shift to one less than what overflows tick_length */
2252	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
2253	shift = min(shift, maxshift);
2254	while (offset >= tk->cycle_interval) {
2255		offset = logarithmic_accumulation(tk, offset, shift, &clock_set);
2256		if (offset < tk->cycle_interval<<shift)
2257			shift--;
2258	}
2259
2260	/* Adjust the multiplier to correct NTP error */
2261	timekeeping_adjust(tk, offset);
2262
2263	/*
2264	 * Finally, make sure that after the rounding
2265	 * xtime_nsec isn't larger than NSEC_PER_SEC
2266	 */
2267	clock_set |= accumulate_nsecs_to_secs(tk);
2268
2269	timekeeping_update_from_shadow(&tk_core, clock_set);
2270
2271	return !!clock_set;
2272}
2273
2274/**
2275 * update_wall_time - Uses the current clocksource to increment the wall time
2276 *
2277 */
2278void update_wall_time(void)
2279{
2280	if (timekeeping_advance(TK_ADV_TICK))
2281		clock_was_set_delayed();
2282}
2283
2284/**
2285 * getboottime64 - Return the real time of system boot.
2286 * @ts:		pointer to the timespec64 to be set
2287 *
2288 * Returns the wall-time of boot in a timespec64.
2289 *
2290 * This is based on the wall_to_monotonic offset and the total suspend
2291 * time. Calls to settimeofday will affect the value returned (which
2292 * basically means that however wrong your real time clock is at boot time,
2293 * you get the right time here).
2294 */
2295void getboottime64(struct timespec64 *ts)
2296{
2297	struct timekeeper *tk = &tk_core.timekeeper;
2298	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2299
2300	*ts = ktime_to_timespec64(t);
2301}
2302EXPORT_SYMBOL_GPL(getboottime64);
2303
2304void ktime_get_coarse_real_ts64(struct timespec64 *ts)
2305{
2306	struct timekeeper *tk = &tk_core.timekeeper;
2307	unsigned int seq;
2308
2309	do {
2310		seq = read_seqcount_begin(&tk_core.seq);
2311
2312		*ts = tk_xtime(tk);
2313	} while (read_seqcount_retry(&tk_core.seq, seq));
2314}
2315EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
2316
2317/**
2318 * ktime_get_coarse_real_ts64_mg - return the later of coarse grained time or floor
2319 * @ts:		timespec64 to be filled
2320 *
2321 * Fetch the global mg_floor value, convert it to realtime and compare it
2322 * to the current coarse-grained time. Fill @ts with whichever is
2323 * latest. Note that this is a filesystem-specific interface and should be
2324 * avoided outside of that context.
2325 */
2326void ktime_get_coarse_real_ts64_mg(struct timespec64 *ts)
2327{
2328	struct timekeeper *tk = &tk_core.timekeeper;
2329	u64 floor = atomic64_read(&mg_floor);
2330	ktime_t f_real, offset, coarse;
2331	unsigned int seq;
2332
2333	do {
2334		seq = read_seqcount_begin(&tk_core.seq);
2335		*ts = tk_xtime(tk);
2336		offset = tk_core.timekeeper.offs_real;
2337	} while (read_seqcount_retry(&tk_core.seq, seq));
2338
2339	coarse = timespec64_to_ktime(*ts);
2340	f_real = ktime_add(floor, offset);
2341	if (ktime_after(f_real, coarse))
2342		*ts = ktime_to_timespec64(f_real);
2343}
2344
2345/**
2346 * ktime_get_real_ts64_mg - attempt to update floor value and return result
2347 * @ts:		pointer to the timespec to be set
2348 *
2349 * Get a monotonic fine-grained time value and attempt to swap it into
2350 * mg_floor. If that succeeds then accept the new floor value. If it fails
2351 * then another task raced in during the interim time and updated the
2352 * floor.  Since any update to the floor must be later than the previous
2353 * floor, either outcome is acceptable.
2354 *
2355 * Typically this will be called after calling ktime_get_coarse_real_ts64_mg(),
2356 * and determining that the resulting coarse-grained timestamp did not effect
2357 * a change in ctime. Any more recent floor value would effect a change to
2358 * ctime, so there is no need to retry the atomic64_try_cmpxchg() on failure.
2359 *
2360 * @ts will be filled with the latest floor value, regardless of the outcome of
2361 * the cmpxchg. Note that this is a filesystem specific interface and should be
2362 * avoided outside of that context.
2363 */
2364void ktime_get_real_ts64_mg(struct timespec64 *ts)
2365{
2366	struct timekeeper *tk = &tk_core.timekeeper;
2367	ktime_t old = atomic64_read(&mg_floor);
2368	ktime_t offset, mono;
2369	unsigned int seq;
2370	u64 nsecs;
2371
2372	do {
2373		seq = read_seqcount_begin(&tk_core.seq);
2374
2375		ts->tv_sec = tk->xtime_sec;
2376		mono = tk->tkr_mono.base;
2377		nsecs = timekeeping_get_ns(&tk->tkr_mono);
2378		offset = tk_core.timekeeper.offs_real;
2379	} while (read_seqcount_retry(&tk_core.seq, seq));
2380
2381	mono = ktime_add_ns(mono, nsecs);
2382
2383	/*
2384	 * Attempt to update the floor with the new time value. As any
2385	 * update must be later than the existing floor, and would effect
2386	 * a change to ctime from the perspective of the current task,
2387	 * accept the resulting floor value regardless of the outcome of
2388	 * the swap.
2389	 */
2390	if (atomic64_try_cmpxchg(&mg_floor, &old, mono)) {
2391		ts->tv_nsec = 0;
2392		timespec64_add_ns(ts, nsecs);
2393		timekeeping_inc_mg_floor_swaps();
2394	} else {
2395		/*
2396		 * Another task changed mg_floor since "old" was fetched.
2397		 * "old" has been updated with the latest value of "mg_floor".
2398		 * That value is newer than the previous floor value, which
2399		 * is enough to effect a change to ctime. Accept it.
2400		 */
2401		*ts = ktime_to_timespec64(ktime_add(old, offset));
2402	}
2403}
2404
2405void ktime_get_coarse_ts64(struct timespec64 *ts)
2406{
2407	struct timekeeper *tk = &tk_core.timekeeper;
2408	struct timespec64 now, mono;
2409	unsigned int seq;
2410
2411	do {
2412		seq = read_seqcount_begin(&tk_core.seq);
2413
2414		now = tk_xtime(tk);
2415		mono = tk->wall_to_monotonic;
2416	} while (read_seqcount_retry(&tk_core.seq, seq));
2417
2418	set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
2419				now.tv_nsec + mono.tv_nsec);
2420}
2421EXPORT_SYMBOL(ktime_get_coarse_ts64);
2422
2423/*
2424 * Must hold jiffies_lock
2425 */
2426void do_timer(unsigned long ticks)
2427{
2428	jiffies_64 += ticks;
2429	calc_global_load();
2430}
2431
2432/**
2433 * ktime_get_update_offsets_now - hrtimer helper
2434 * @cwsseq:	pointer to check and store the clock was set sequence number
2435 * @offs_real:	pointer to storage for monotonic -> realtime offset
2436 * @offs_boot:	pointer to storage for monotonic -> boottime offset
2437 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
2438 *
2439 * Returns current monotonic time and updates the offsets if the
2440 * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
2441 * different.
2442 *
2443 * Called from hrtimer_interrupt() or retrigger_next_event()
2444 */
2445ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2446				     ktime_t *offs_boot, ktime_t *offs_tai)
2447{
2448	struct timekeeper *tk = &tk_core.timekeeper;
2449	unsigned int seq;
2450	ktime_t base;
2451	u64 nsecs;
2452
2453	do {
2454		seq = read_seqcount_begin(&tk_core.seq);
2455
2456		base = tk->tkr_mono.base;
2457		nsecs = timekeeping_get_ns(&tk->tkr_mono);
2458		base = ktime_add_ns(base, nsecs);
2459
2460		if (*cwsseq != tk->clock_was_set_seq) {
2461			*cwsseq = tk->clock_was_set_seq;
2462			*offs_real = tk->offs_real;
2463			*offs_boot = tk->offs_boot;
2464			*offs_tai = tk->offs_tai;
2465		}
2466
2467		/* Handle leapsecond insertion adjustments */
2468		if (unlikely(base >= tk->next_leap_ktime))
2469			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
2470
2471	} while (read_seqcount_retry(&tk_core.seq, seq));
2472
2473	return base;
2474}
2475
2476/*
2477 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
2478 */
2479static int timekeeping_validate_timex(const struct __kernel_timex *txc)
2480{
2481	if (txc->modes & ADJ_ADJTIME) {
2482		/* singleshot must not be used with any other mode bits */
2483		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
2484			return -EINVAL;
2485		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
2486		    !capable(CAP_SYS_TIME))
2487			return -EPERM;
2488	} else {
2489		/* In order to modify anything, you gotta be super-user! */
2490		if (txc->modes && !capable(CAP_SYS_TIME))
2491			return -EPERM;
2492		/*
2493		 * if the quartz is off by more than 10% then
2494		 * something is VERY wrong!
2495		 */
2496		if (txc->modes & ADJ_TICK &&
2497		    (txc->tick <  900000/USER_HZ ||
2498		     txc->tick > 1100000/USER_HZ))
2499			return -EINVAL;
2500	}
2501
2502	if (txc->modes & ADJ_SETOFFSET) {
2503		/* In order to inject time, you gotta be super-user! */
2504		if (!capable(CAP_SYS_TIME))
2505			return -EPERM;
2506
2507		/*
2508		 * Check that a timespec/timeval used to inject a time
2509		 * offset is valid.  Offsets can be positive or negative, so
2510		 * we don't check tv_sec. The value of the timeval/timespec
2511		 * is the sum of its fields, but *NOTE*:
2512		 * The field tv_usec/tv_nsec must always be non-negative and
2513		 * we can't have more nanoseconds/microseconds than a second.
2514		 */
2515		if (txc->time.tv_usec < 0)
2516			return -EINVAL;
2517
2518		if (txc->modes & ADJ_NANO) {
2519			if (txc->time.tv_usec >= NSEC_PER_SEC)
2520				return -EINVAL;
2521		} else {
2522			if (txc->time.tv_usec >= USEC_PER_SEC)
2523				return -EINVAL;
2524		}
2525	}
2526
2527	/*
2528	 * Check for potential multiplication overflows that can
2529	 * only happen on 64-bit systems:
2530	 */
2531	if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
2532		if (LLONG_MIN / PPM_SCALE > txc->freq)
2533			return -EINVAL;
2534		if (LLONG_MAX / PPM_SCALE < txc->freq)
2535			return -EINVAL;
2536	}
2537
2538	return 0;
2539}
2540
2541/**
2542 * random_get_entropy_fallback - Returns the raw clock source value,
2543 * used by random.c for platforms with no valid random_get_entropy().
2544 */
2545unsigned long random_get_entropy_fallback(void)
2546{
2547	struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
2548	struct clocksource *clock = READ_ONCE(tkr->clock);
2549
2550	if (unlikely(timekeeping_suspended || !clock))
2551		return 0;
2552	return clock->read(clock);
2553}
2554EXPORT_SYMBOL_GPL(random_get_entropy_fallback);
2555
2556/**
2557 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
2558 * @txc:	Pointer to kernel_timex structure containing NTP parameters
2559 */
2560int do_adjtimex(struct __kernel_timex *txc)
2561{
2562	struct audit_ntp_data ad;
2563	bool offset_set = false;
2564	bool clock_set = false;
2565	struct timespec64 ts;
2566	int ret;
2567
2568	/* Validate the data before disabling interrupts */
2569	ret = timekeeping_validate_timex(txc);
2570	if (ret)
2571		return ret;
2572	add_device_randomness(txc, sizeof(*txc));
2573
2574	if (txc->modes & ADJ_SETOFFSET) {
2575		struct timespec64 delta;
2576
2577		delta.tv_sec  = txc->time.tv_sec;
2578		delta.tv_nsec = txc->time.tv_usec;
2579		if (!(txc->modes & ADJ_NANO))
2580			delta.tv_nsec *= 1000;
2581		ret = timekeeping_inject_offset(&delta);
2582		if (ret)
2583			return ret;
2584
2585		offset_set = delta.tv_sec != 0;
2586		audit_tk_injoffset(delta);
2587	}
2588
2589	audit_ntp_init(&ad);
2590
2591	ktime_get_real_ts64(&ts);
2592	add_device_randomness(&ts, sizeof(ts));
2593
2594	scoped_guard (raw_spinlock_irqsave, &tk_core.lock) {
2595		struct timekeeper *tks = &tk_core.shadow_timekeeper;
2596		s32 orig_tai, tai;
2597
2598		orig_tai = tai = tks->tai_offset;
2599		ret = __do_adjtimex(txc, &ts, &tai, &ad);
2600
2601		if (tai != orig_tai) {
2602			__timekeeping_set_tai_offset(tks, tai);
2603			timekeeping_update_from_shadow(&tk_core, TK_CLOCK_WAS_SET);
2604			clock_set = true;
2605		} else {
2606			tk_update_leap_state_all(&tk_core);
2607		}
2608	}
2609
2610	audit_ntp_log(&ad);
2611
2612	/* Update the multiplier immediately if frequency was set directly */
2613	if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
2614		clock_set |= timekeeping_advance(TK_ADV_FREQ);
2615
2616	if (clock_set)
2617		clock_was_set(CLOCK_SET_WALL);
2618
2619	ntp_notify_cmos_timer(offset_set);
2620
2621	return ret;
2622}
2623
2624#ifdef CONFIG_NTP_PPS
2625/**
2626 * hardpps() - Accessor function to NTP __hardpps function
2627 * @phase_ts:	Pointer to timespec64 structure representing phase timestamp
2628 * @raw_ts:	Pointer to timespec64 structure representing raw timestamp
2629 */
2630void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
2631{
2632	guard(raw_spinlock_irqsave)(&tk_core.lock);
2633	__hardpps(phase_ts, raw_ts);
2634}
2635EXPORT_SYMBOL(hardpps);
2636#endif /* CONFIG_NTP_PPS */