1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Kernel timekeeping code and accessor functions. Based on code from
4 * timer.c, moved in commit 8524070b7982.
5 */
6#include <linux/timekeeper_internal.h>
7#include <linux/module.h>
8#include <linux/interrupt.h>
9#include <linux/percpu.h>
10#include <linux/init.h>
11#include <linux/mm.h>
12#include <linux/nmi.h>
13#include <linux/sched.h>
14#include <linux/sched/loadavg.h>
15#include <linux/sched/clock.h>
16#include <linux/syscore_ops.h>
17#include <linux/clocksource.h>
18#include <linux/jiffies.h>
19#include <linux/time.h>
20#include <linux/tick.h>
21#include <linux/stop_machine.h>
22#include <linux/pvclock_gtod.h>
23#include <linux/compiler.h>
24#include <linux/audit.h>
25
26#include "tick-internal.h"
27#include "ntp_internal.h"
28#include "timekeeping_internal.h"
29
30#define TK_CLEAR_NTP (1 << 0)
31#define TK_MIRROR (1 << 1)
32#define TK_CLOCK_WAS_SET (1 << 2)
33
34enum timekeeping_adv_mode {
35 /* Update timekeeper when a tick has passed */
36 TK_ADV_TICK,
37
38 /* Update timekeeper on a direct frequency change */
39 TK_ADV_FREQ
40};
41
42DEFINE_RAW_SPINLOCK(timekeeper_lock);
43
44/*
45 * The most important data for readout fits into a single 64 byte
46 * cache line.
47 */
48static struct {
49 seqcount_raw_spinlock_t seq;
50 struct timekeeper timekeeper;
51} tk_core ____cacheline_aligned = {
52 .seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
53};
54
55static struct timekeeper shadow_timekeeper;
56
57/* Flag indicating whether timekeeping is suspended */
58int __read_mostly timekeeping_suspended;
59
60/**
61 * struct tk_fast - NMI safe timekeeper
62 * @seq: Sequence counter for protecting updates. The lowest bit
63 * is the index for the tk_read_base array
64 * @base: tk_read_base array. Access is indexed by the lowest bit of
65 * @seq.
66 *
67 * See @update_fast_timekeeper() below.
68 */
69struct tk_fast {
70 seqcount_latch_t seq;
71 struct tk_read_base base[2];
72};
73
74/* Suspend-time cycles value for halted fast timekeeper. */
75static u64 cycles_at_suspend;
76
77static u64 dummy_clock_read(struct clocksource *cs)
78{
79 if (timekeeping_suspended)
80 return cycles_at_suspend;
81 return local_clock();
82}
83
84static struct clocksource dummy_clock = {
85 .read = dummy_clock_read,
86};
87
88/*
89 * Boot time initialization which allows local_clock() to be utilized
90 * during early boot when clocksources are not available. local_clock()
91 * returns nanoseconds already so no conversion is required, hence mult=1
92 * and shift=0. When the first proper clocksource is installed, the
93 * fast time keepers are updated with the correct values.
94 */
95#define FAST_TK_INIT \
96 { \
97 .clock = &dummy_clock, \
98 .mask = CLOCKSOURCE_MASK(64), \
99 .mult = 1, \
100 .shift = 0, \
101 }
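/*
 * Illustrative sketch (not part of the kernel source): with the dummy
 * clock's mult=1 and shift=0, and the statically zeroed xtime_nsec of
 * the boot-time read base, the generic delta -> nanoseconds conversion
 * used by the fast timekeepers degenerates to the identity, so the
 * local_clock() nanoseconds pass through unchanged:
 *
 *	nsec = (delta * mult + xtime_nsec) >> shift
 *	     = (delta * 1    + 0         ) >> 0 = delta
 */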
102
103static struct tk_fast tk_fast_mono ____cacheline_aligned = {
104 .seq = SEQCNT_LATCH_ZERO(tk_fast_mono.seq),
105 .base[0] = FAST_TK_INIT,
106 .base[1] = FAST_TK_INIT,
107};
108
109static struct tk_fast tk_fast_raw ____cacheline_aligned = {
110 .seq = SEQCNT_LATCH_ZERO(tk_fast_raw.seq),
111 .base[0] = FAST_TK_INIT,
112 .base[1] = FAST_TK_INIT,
113};
114
115static inline void tk_normalize_xtime(struct timekeeper *tk)
116{
117 while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
118 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
119 tk->xtime_sec++;
120 }
121 while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
122 tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
123 tk->raw_sec++;
124 }
125}
126
127static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
128{
129 struct timespec64 ts;
130
131 ts.tv_sec = tk->xtime_sec;
132 ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
133 return ts;
134}
135
136static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
137{
138 tk->xtime_sec = ts->tv_sec;
139 tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
140}
141
142static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
143{
144 tk->xtime_sec += ts->tv_sec;
145 tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
146 tk_normalize_xtime(tk);
147}
148
149static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
150{
151 struct timespec64 tmp;
152
153 /*
154 * Verify consistency of: offset_real = -wall_to_monotonic
155 * before modifying anything
156 */
157 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
158 -tk->wall_to_monotonic.tv_nsec);
159 WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
160 tk->wall_to_monotonic = wtm;
161 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
162 tk->offs_real = timespec64_to_ktime(tmp);
163 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
164}
165
166static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
167{
168 tk->offs_boot = ktime_add(tk->offs_boot, delta);
169 /*
170 * Timespec representation for VDSO update to avoid 64bit division
171 * on every update.
172 */
173 tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
174}
175
176/*
177 * tk_clock_read - atomic clocksource read() helper
178 *
179 * This helper is necessary to use in the read paths because, while the
180 * seqcount ensures we don't return a bad value while structures are updated,
181 * it doesn't protect from potential crashes. There is the possibility that
182 * the tkr's clocksource may change between the read reference, and the
183 * clock reference passed to the read function. This can cause crashes if
184 * the wrong clocksource is passed to the wrong read function.
185 * This isn't necessary to use when holding the timekeeper_lock or doing
186 * a read of the fast-timekeeper tkrs (which is protected by its own locking
187 * and update logic).
188 */
189static inline u64 tk_clock_read(const struct tk_read_base *tkr)
190{
191 struct clocksource *clock = READ_ONCE(tkr->clock);
192
193 return clock->read(clock);
194}
195
196#ifdef CONFIG_DEBUG_TIMEKEEPING
197#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
198
199static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
200{
201
202 u64 max_cycles = tk->tkr_mono.clock->max_cycles;
203 const char *name = tk->tkr_mono.clock->name;
204
205 if (offset > max_cycles) {
206 printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
207 offset, name, max_cycles);
208 printk_deferred(" timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
209 } else {
210 if (offset > (max_cycles >> 1)) {
211 printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
212 offset, name, max_cycles >> 1);
213 printk_deferred(" timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
214 }
215 }
216
217 if (tk->underflow_seen) {
218 if (jiffies - tk->last_warning > WARNING_FREQ) {
219 printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
220 printk_deferred(" Please report this, consider using a different clocksource, if possible.\n");
221 printk_deferred(" Your kernel is probably still fine.\n");
222 tk->last_warning = jiffies;
223 }
224 tk->underflow_seen = 0;
225 }
226
227 if (tk->overflow_seen) {
228 if (jiffies - tk->last_warning > WARNING_FREQ) {
229 printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
230 printk_deferred(" Please report this, consider using a different clocksource, if possible.\n");
231 printk_deferred(" Your kernel is probably still fine.\n");
232 tk->last_warning = jiffies;
233 }
234 tk->overflow_seen = 0;
235 }
236}
237
238static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
239{
240 struct timekeeper *tk = &tk_core.timekeeper;
241 u64 now, last, mask, max, delta;
242 unsigned int seq;
243
244 /*
245 * Since we're called holding a seqcount, the data may shift
246 * under us while we're doing the calculation. This can cause
247 * false positives, since we'd note a problem but throw the
248 * results away. So nest another seqcount here to atomically
249 * grab the points we are checking with.
250 */
251 do {
252 seq = read_seqcount_begin(&tk_core.seq);
253 now = tk_clock_read(tkr);
254 last = tkr->cycle_last;
255 mask = tkr->mask;
256 max = tkr->clock->max_cycles;
257 } while (read_seqcount_retry(&tk_core.seq, seq));
258
259 delta = clocksource_delta(now, last, mask);
260
261 /*
262 * Try to catch underflows by checking if we are seeing small
263 * mask-relative negative values.
264 */
265 if (unlikely((~delta & mask) < (mask >> 3))) {
266 tk->underflow_seen = 1;
267 delta = 0;
268 }
269
270 /* Cap delta value to the max_cycles values to avoid mult overflows */
271 if (unlikely(delta > max)) {
272 tk->overflow_seen = 1;
273 delta = tkr->clock->max_cycles;
274 }
275
276 return delta;
277}
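/*
 * Illustrative worked example (not part of the kernel source): for an
 * 8-bit clocksource (mask = 0xff), a read that lands 3 cycles behind
 * cycle_last yields delta = (-3) & 0xff = 0xfd, and
 *
 *	(~delta & mask) = 0x02  <  (mask >> 3) = 0x1f
 *
 * so the value is treated as an underflow and clamped to 0, while a
 * genuine forward delta such as 0xe0 fails the test and is kept.
 */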
278#else
279static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
280{
281}
282static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
283{
284 u64 cycle_now, delta;
285
286 /* read clocksource */
287 cycle_now = tk_clock_read(tkr);
288
289 /* calculate the delta since the last update_wall_time */
290 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
291
292 return delta;
293}
294#endif
295
296/**
297 * tk_setup_internals - Set up internals to use clocksource clock.
298 *
299 * @tk: The target timekeeper to setup.
300 * @clock: Pointer to clocksource.
301 *
302 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
303 * pair and interval request.
304 *
305 * Unless you're the timekeeping code, you should not be using this!
306 */
307static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
308{
309 u64 interval;
310 u64 tmp, ntpinterval;
311 struct clocksource *old_clock;
312
313 ++tk->cs_was_changed_seq;
314 old_clock = tk->tkr_mono.clock;
315 tk->tkr_mono.clock = clock;
316 tk->tkr_mono.mask = clock->mask;
317 tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
318
319 tk->tkr_raw.clock = clock;
320 tk->tkr_raw.mask = clock->mask;
321 tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
322
323 /* Do the ns -> cycle conversion first, using original mult */
324 tmp = NTP_INTERVAL_LENGTH;
325 tmp <<= clock->shift;
326 ntpinterval = tmp;
327 tmp += clock->mult/2;
328 do_div(tmp, clock->mult);
329 if (tmp == 0)
330 tmp = 1;
331
332 interval = (u64) tmp;
333 tk->cycle_interval = interval;
334
335 /* Go back from cycles -> shifted ns */
336 tk->xtime_interval = interval * clock->mult;
337 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
338 tk->raw_interval = interval * clock->mult;
339
340 /* if changing clocks, convert xtime_nsec shift units */
341 if (old_clock) {
342 int shift_change = clock->shift - old_clock->shift;
343 if (shift_change < 0) {
344 tk->tkr_mono.xtime_nsec >>= -shift_change;
345 tk->tkr_raw.xtime_nsec >>= -shift_change;
346 } else {
347 tk->tkr_mono.xtime_nsec <<= shift_change;
348 tk->tkr_raw.xtime_nsec <<= shift_change;
349 }
350 }
351
352 tk->tkr_mono.shift = clock->shift;
353 tk->tkr_raw.shift = clock->shift;
354
355 tk->ntp_error = 0;
356 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
357 tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
358
359 /*
360 * The timekeeper keeps its own mult values for the currently
361 * active clocksource. These value will be adjusted via NTP
362 * to counteract clock drifting.
363 */
364 tk->tkr_mono.mult = clock->mult;
365 tk->tkr_raw.mult = clock->mult;
366 tk->ntp_err_mult = 0;
367 tk->skip_second_overflow = 0;
368}
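/*
 * Illustrative sketch (not part of the kernel source): the tmp
 * calculation above inverts the usual clocksource conversion
 * ns = (cycles * mult) >> shift, rounding to the nearest cycle:
 *
 *	cycles = ((ns << shift) + mult / 2) / mult
 *
 * so tk->cycle_interval holds the number of clocksource cycles that
 * best approximates one NTP interval, and xtime_remainder keeps the
 * shifted-ns remainder of that rounding.
 */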
369
370/* Timekeeper helper functions. */
371
372static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
373{
374 u64 nsec;
375
376 nsec = delta * tkr->mult + tkr->xtime_nsec;
377 nsec >>= tkr->shift;
378
379 return nsec;
380}
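/*
 * Illustrative worked example (not part of the kernel source):
 * xtime_nsec is kept in "shifted nanoseconds" so sub-nanosecond
 * remainders are not lost between updates. With mult = 3, shift = 1
 * and xtime_nsec = 5 (i.e. 2.5ns carried over), a delta of 7 cycles
 * yields
 *
 *	nsec = (7 * 3 + 5) >> 1 = 26 >> 1 = 13ns
 */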
381
382static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
383{
384 u64 delta;
385
386 delta = timekeeping_get_delta(tkr);
387 return timekeeping_delta_to_ns(tkr, delta);
388}
389
390static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
391{
392 u64 delta;
393
394 /* calculate the delta since the last update_wall_time */
395 delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
396 return timekeeping_delta_to_ns(tkr, delta);
397}
398
399/**
400 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
401 * @tkr: Timekeeping readout base from which we take the update
402 * @tkf: Pointer to NMI safe timekeeper
403 *
404 * We want to use this from any context including NMI and tracing /
405 * instrumenting the timekeeping code itself.
406 *
407 * Employ the latch technique; see @raw_write_seqcount_latch.
408 *
409 * So if an NMI hits the update of base[0] then it will use base[1]
410 * which is still consistent. In the worst case this can result in a
411 * slightly wrong timestamp (a few nanoseconds). See
412 * @ktime_get_mono_fast_ns.
413 */
414static void update_fast_timekeeper(const struct tk_read_base *tkr,
415 struct tk_fast *tkf)
416{
417 struct tk_read_base *base = tkf->base;
418
419 /* Force readers off to base[1] */
420 raw_write_seqcount_latch(&tkf->seq);
421
422 /* Update base[0] */
423 memcpy(base, tkr, sizeof(*base));
424
425 /* Force readers back to base[0] */
426 raw_write_seqcount_latch(&tkf->seq);
427
428 /* Update base[1] */
429 memcpy(base + 1, base, sizeof(*base));
430}
431
432static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
433{
434 struct tk_read_base *tkr;
435 unsigned int seq;
436 u64 now;
437
438 do {
439 seq = raw_read_seqcount_latch(&tkf->seq);
440 tkr = tkf->base + (seq & 0x01);
441 now = ktime_to_ns(tkr->base);
442
443 now += timekeeping_delta_to_ns(tkr,
444 clocksource_delta(
445 tk_clock_read(tkr),
446 tkr->cycle_last,
447 tkr->mask));
448 } while (read_seqcount_latch_retry(&tkf->seq, seq));
449
450 return now;
451}
452
453/**
454 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
455 *
456 * This timestamp is not guaranteed to be monotonic across an update.
457 * The timestamp is calculated by:
458 *
459 * now = base_mono + clock_delta * slope
460 *
461 * So if the update lowers the slope, readers who are forced to the
462 * not yet updated second array are still using the old steeper slope.
463 *
464 * tmono
465 * ^
466 * | o n
467 * | o n
468 * | u
469 * | o
470 * |o
471 * |12345678---> reader order
472 *
473 * o = old slope
474 * u = update
475 * n = new slope
476 *
477 * So reader 6 will observe time going backwards versus reader 5.
478 *
479 * While other CPUs are likely to be able to observe that, the only way
480 * for a CPU local observation is when an NMI hits in the middle of
481 * the update. Timestamps taken from that NMI context might be ahead
482 * of the following timestamps. Callers need to be aware of that and
483 * deal with it.
484 */
485u64 ktime_get_mono_fast_ns(void)
486{
487 return __ktime_get_fast_ns(&tk_fast_mono);
488}
489EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
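/*
 * Illustrative sketch (not part of the kernel source): a caller that
 * needs locally ordered timestamps despite the caveat above could keep
 * its own high-water mark and clamp small backward steps:
 *
 *	static u64 last_ns;
 *
 *	u64 now = ktime_get_mono_fast_ns();
 *
 *	if (now < last_ns)
 *		now = last_ns;
 *	last_ns = now;
 *
 * This only mitigates reordering for that caller; it does not make the
 * fast clock globally monotonic.
 */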
490
491/**
492 * ktime_get_raw_fast_ns - Fast NMI safe access to clock monotonic raw
493 *
494 * Contrary to ktime_get_mono_fast_ns() this is always correct because the
495 * conversion factor is not affected by NTP/PTP correction.
496 */
497u64 ktime_get_raw_fast_ns(void)
498{
499 return __ktime_get_fast_ns(&tk_fast_raw);
500}
501EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
502
503/**
504 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
505 *
506 * To keep it NMI safe since we're accessing from tracing, we're not using a
507 * separate timekeeper with updates to monotonic clock and boot offset
508 * protected with seqcounts. This has the following minor side effects:
509 *
510 * (1) It's possible that a timestamp is taken after the boot offset is updated
511 * but before the timekeeper is updated. If this happens, the new boot offset
512 * is added to the old timekeeping, making the clock appear to update slightly
513 * earlier:
514 * CPU 0 CPU 1
515 * timekeeping_inject_sleeptime64()
516 * __timekeeping_inject_sleeptime(tk, delta);
517 * timestamp();
518 * timekeeping_update(tk, TK_CLEAR_NTP...);
519 *
520 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
521 * partially updated. Since the tk->offs_boot update is a rare event, this
522 * should be a rare occurrence which postprocessing should be able to handle.
523 *
524 * The caveats vs. timestamp ordering as documented for ktime_get_mono_fast_ns()
525 * apply as well.
526 */
527u64 notrace ktime_get_boot_fast_ns(void)
528{
529 struct timekeeper *tk = &tk_core.timekeeper;
530
531 return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
532}
533EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
534
535static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
536{
537 struct tk_read_base *tkr;
538 u64 basem, baser, delta;
539 unsigned int seq;
540
541 do {
542 seq = raw_read_seqcount_latch(&tkf->seq);
543 tkr = tkf->base + (seq & 0x01);
544 basem = ktime_to_ns(tkr->base);
545 baser = ktime_to_ns(tkr->base_real);
546
547 delta = timekeeping_delta_to_ns(tkr,
548 clocksource_delta(tk_clock_read(tkr),
549 tkr->cycle_last, tkr->mask));
550 } while (read_seqcount_latch_retry(&tkf->seq, seq));
551
552 if (mono)
553 *mono = basem + delta;
554 return baser + delta;
555}
556
557/**
558 * ktime_get_real_fast_ns - NMI safe and fast access to clock realtime.
559 *
560 * See ktime_get_mono_fast_ns() for documentation of the timestamp ordering.
561 */
562u64 ktime_get_real_fast_ns(void)
563{
564 return __ktime_get_real_fast(&tk_fast_mono, NULL);
565}
566EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
567
568/**
569 * ktime_get_fast_timestamps - NMI safe timestamps
570 * @snapshot: Pointer to timestamp storage
571 *
572 * Stores clock monotonic, boottime and realtime timestamps.
573 *
574 * Boot time is a racy access on 32bit systems if the sleep time injection
575 * happens late during resume and not in timekeeping_resume(). That could
576 * be avoided by expanding struct tk_read_base with boot offset for 32bit
577 * and adding more overhead to the update. As this is a hard-to-observe,
578 * once-per-resume event which can be filtered with reasonable effort using
579 * the accurate mono/real timestamps, it's probably not worth the trouble.
580 *
581 * Aside from that, it might be possible on 32 and 64 bit to observe the
582 * following when the sleep time injection happens late:
583 *
584 * CPU 0 CPU 1
585 * timekeeping_resume()
586 * ktime_get_fast_timestamps()
587 * mono, real = __ktime_get_real_fast()
588 * inject_sleep_time()
589 * update boot offset
590 * boot = mono + bootoffset;
591 *
592 * That means that boot time already has the sleep time adjustment, but
593 * real time does not. On the next readout both are in sync again.
594 *
595 * Preventing this for 64bit is not really feasible without destroying the
596 * careful cache layout of the timekeeper because the sequence count and
597 * struct tk_read_base would then need two cache lines instead of one.
598 *
599 * Access to the time keeper clock source is disabled across the innermost
600 * steps of suspend/resume. The accessors still work, but the timestamps
601 * are frozen until time keeping is resumed which happens very early.
602 *
603 * For regular suspend/resume there is no observable difference vs. sched
604 * clock, but it might affect some of the nasty low level debug printks.
605 *
606 * OTOH, access to sched clock is not guaranteed across suspend/resume on
607 * all systems either so it depends on the hardware in use.
608 *
609 * If that turns out to be a real problem then this could be mitigated by
610 * using sched clock in a similar way as during early boot. But it's not as
611 * trivial as on early boot because it needs some careful protection
612 * against the clock monotonic timestamp jumping backwards on resume.
613 */
614void ktime_get_fast_timestamps(struct ktime_timestamps *snapshot)
615{
616 struct timekeeper *tk = &tk_core.timekeeper;
617
618 snapshot->real = __ktime_get_real_fast(&tk_fast_mono, &snapshot->mono);
619 snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot));
620}
621
622/**
623 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
624 * @tk: Timekeeper to snapshot.
625 *
626 * It generally is unsafe to access the clocksource after timekeeping has been
627 * suspended, so take a snapshot of the readout base of @tk and use it as the
628 * fast timekeeper's readout base while suspended. It will return the same
629 * number of cycles every time until timekeeping is resumed at which time the
630 * proper readout base for the fast timekeeper will be restored automatically.
631 */
632static void halt_fast_timekeeper(const struct timekeeper *tk)
633{
634 static struct tk_read_base tkr_dummy;
635 const struct tk_read_base *tkr = &tk->tkr_mono;
636
637 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
638 cycles_at_suspend = tk_clock_read(tkr);
639 tkr_dummy.clock = &dummy_clock;
640 tkr_dummy.base_real = tkr->base + tk->offs_real;
641 update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
642
643 tkr = &tk->tkr_raw;
644 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
645 tkr_dummy.clock = &dummy_clock;
646 update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
647}
648
649static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
650
651static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
652{
653 raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
654}
655
656/**
657 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
658 * @nb: Pointer to the notifier block to register
659 */
660int pvclock_gtod_register_notifier(struct notifier_block *nb)
661{
662 struct timekeeper *tk = &tk_core.timekeeper;
663 unsigned long flags;
664 int ret;
665
666 raw_spin_lock_irqsave(&timekeeper_lock, flags);
667 ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
668 update_pvclock_gtod(tk, true);
669 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
670
671 return ret;
672}
673EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
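/*
 * Illustrative sketch (hypothetical caller, not part of the kernel
 * source): a paravirt clock implementation would typically register a
 * callback which receives the timekeeper on every update:
 *
 *	static int example_gtod_notify(struct notifier_block *nb,
 *				       unsigned long was_set, void *priv)
 *	{
 *		struct timekeeper *tk = priv;
 *
 *		... push tk->tkr_mono data to the hypervisor ...
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_gtod_nb = {
 *		.notifier_call	= example_gtod_notify,
 *	};
 *
 *	pvclock_gtod_register_notifier(&example_gtod_nb);
 */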
674
675/**
676 * pvclock_gtod_unregister_notifier - unregister a pvclock
677 * timedata update listener
678 * @nb: Pointer to the notifier block to unregister
679 */
680int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
681{
682 unsigned long flags;
683 int ret;
684
685 raw_spin_lock_irqsave(&timekeeper_lock, flags);
686 ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
687 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
688
689 return ret;
690}
691EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
692
693/*
694 * tk_update_leap_state - helper to update the next_leap_ktime
695 */
696static inline void tk_update_leap_state(struct timekeeper *tk)
697{
698 tk->next_leap_ktime = ntp_get_next_leap();
699 if (tk->next_leap_ktime != KTIME_MAX)
700 /* Convert to monotonic time */
701 tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
702}
703
704/*
705 * Update the ktime_t based scalar nsec members of the timekeeper
706 */
707static inline void tk_update_ktime_data(struct timekeeper *tk)
708{
709 u64 seconds;
710 u32 nsec;
711
712 /*
713 * The xtime based monotonic readout is:
714 * nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
715 * The ktime based monotonic readout is:
716 * nsec = base_mono + now();
717 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
718 */
719 seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
720 nsec = (u32) tk->wall_to_monotonic.tv_nsec;
721 tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
722
723 /*
724 * The sum of the nanoseconds portions of xtime and
725 * wall_to_monotonic can be greater/equal one second. Take
726 * this into account before updating tk->ktime_sec.
727 */
728 nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
729 if (nsec >= NSEC_PER_SEC)
730 seconds++;
731 tk->ktime_sec = seconds;
732
733 /* Update the monotonic raw base */
734 tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
735}
736
737/* must hold timekeeper_lock */
738static void timekeeping_update(struct timekeeper *tk, unsigned int action)
739{
740 if (action & TK_CLEAR_NTP) {
741 tk->ntp_error = 0;
742 ntp_clear();
743 }
744
745 tk_update_leap_state(tk);
746 tk_update_ktime_data(tk);
747
748 update_vsyscall(tk);
749 update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
750
751 tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
752 update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
753 update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw);
754
755 if (action & TK_CLOCK_WAS_SET)
756 tk->clock_was_set_seq++;
757 /*
758 * The mirroring of the data to the shadow-timekeeper needs
759 * to happen last here to ensure we don't overwrite the
760 * timekeeper structure on the next update with stale data.
761 */
762 if (action & TK_MIRROR)
763 memcpy(&shadow_timekeeper, &tk_core.timekeeper,
764 sizeof(tk_core.timekeeper));
765}
766
767/**
768 * timekeeping_forward_now - update clock to the current time
769 * @tk: Pointer to the timekeeper to update
770 *
771 * Forward the current clock to update its state since the last call to
772 * update_wall_time(). This is useful before significant clock changes,
773 * as it avoids having to deal with this time offset explicitly.
774 */
775static void timekeeping_forward_now(struct timekeeper *tk)
776{
777 u64 cycle_now, delta;
778
779 cycle_now = tk_clock_read(&tk->tkr_mono);
780 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
781 tk->tkr_mono.cycle_last = cycle_now;
782 tk->tkr_raw.cycle_last = cycle_now;
783
784 tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
785 tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
786
787 tk_normalize_xtime(tk);
788}
789
790/**
791 * ktime_get_real_ts64 - Returns the time of day in a timespec64.
792 * @ts: pointer to the timespec to be set
793 *
794 * Returns the time of day in a timespec64 (WARN if suspended).
795 */
796void ktime_get_real_ts64(struct timespec64 *ts)
797{
798 struct timekeeper *tk = &tk_core.timekeeper;
799 unsigned int seq;
800 u64 nsecs;
801
802 WARN_ON(timekeeping_suspended);
803
804 do {
805 seq = read_seqcount_begin(&tk_core.seq);
806
807 ts->tv_sec = tk->xtime_sec;
808 nsecs = timekeeping_get_ns(&tk->tkr_mono);
809
810 } while (read_seqcount_retry(&tk_core.seq, seq));
811
812 ts->tv_nsec = 0;
813 timespec64_add_ns(ts, nsecs);
814}
815EXPORT_SYMBOL(ktime_get_real_ts64);
816
817ktime_t ktime_get(void)
818{
819 struct timekeeper *tk = &tk_core.timekeeper;
820 unsigned int seq;
821 ktime_t base;
822 u64 nsecs;
823
824 WARN_ON(timekeeping_suspended);
825
826 do {
827 seq = read_seqcount_begin(&tk_core.seq);
828 base = tk->tkr_mono.base;
829 nsecs = timekeeping_get_ns(&tk->tkr_mono);
830
831 } while (read_seqcount_retry(&tk_core.seq, seq));
832
833 return ktime_add_ns(base, nsecs);
834}
835EXPORT_SYMBOL_GPL(ktime_get);
836
837u32 ktime_get_resolution_ns(void)
838{
839 struct timekeeper *tk = &tk_core.timekeeper;
840 unsigned int seq;
841 u32 nsecs;
842
843 WARN_ON(timekeeping_suspended);
844
845 do {
846 seq = read_seqcount_begin(&tk_core.seq);
847 nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
848 } while (read_seqcount_retry(&tk_core.seq, seq));
849
850 return nsecs;
851}
852EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
853
854static ktime_t *offsets[TK_OFFS_MAX] = {
855 [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
856 [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
857 [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
858};
859
860ktime_t ktime_get_with_offset(enum tk_offsets offs)
861{
862 struct timekeeper *tk = &tk_core.timekeeper;
863 unsigned int seq;
864 ktime_t base, *offset = offsets[offs];
865 u64 nsecs;
866
867 WARN_ON(timekeeping_suspended);
868
869 do {
870 seq = read_seqcount_begin(&tk_core.seq);
871 base = ktime_add(tk->tkr_mono.base, *offset);
872 nsecs = timekeeping_get_ns(&tk->tkr_mono);
873
874 } while (read_seqcount_retry(&tk_core.seq, seq));
875
876 return ktime_add_ns(base, nsecs);
877
878}
879EXPORT_SYMBOL_GPL(ktime_get_with_offset);
880
881ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
882{
883 struct timekeeper *tk = &tk_core.timekeeper;
884 unsigned int seq;
885 ktime_t base, *offset = offsets[offs];
886 u64 nsecs;
887
888 WARN_ON(timekeeping_suspended);
889
890 do {
891 seq = read_seqcount_begin(&tk_core.seq);
892 base = ktime_add(tk->tkr_mono.base, *offset);
893 nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
894
895 } while (read_seqcount_retry(&tk_core.seq, seq));
896
897 return ktime_add_ns(base, nsecs);
898}
899EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
900
901/**
902 * ktime_mono_to_any() - convert monotonic time to any other time
903 * @tmono: time to convert.
904 * @offs: which offset to use
905 */
906ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
907{
908 ktime_t *offset = offsets[offs];
909 unsigned int seq;
910 ktime_t tconv;
911
912 do {
913 seq = read_seqcount_begin(&tk_core.seq);
914 tconv = ktime_add(tmono, *offset);
915 } while (read_seqcount_retry(&tk_core.seq, seq));
916
917 return tconv;
918}
919EXPORT_SYMBOL_GPL(ktime_mono_to_any);
920
921/**
922 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
923 */
924ktime_t ktime_get_raw(void)
925{
926 struct timekeeper *tk = &tk_core.timekeeper;
927 unsigned int seq;
928 ktime_t base;
929 u64 nsecs;
930
931 do {
932 seq = read_seqcount_begin(&tk_core.seq);
933 base = tk->tkr_raw.base;
934 nsecs = timekeeping_get_ns(&tk->tkr_raw);
935
936 } while (read_seqcount_retry(&tk_core.seq, seq));
937
938 return ktime_add_ns(base, nsecs);
939}
940EXPORT_SYMBOL_GPL(ktime_get_raw);
941
942/**
943 * ktime_get_ts64 - get the monotonic clock in timespec64 format
944 * @ts: pointer to timespec variable
945 *
946 * The function calculates the monotonic clock from the realtime
947 * clock and the wall_to_monotonic offset and stores the result
948 * in normalized timespec64 format in the variable pointed to by @ts.
949 */
950void ktime_get_ts64(struct timespec64 *ts)
951{
952 struct timekeeper *tk = &tk_core.timekeeper;
953 struct timespec64 tomono;
954 unsigned int seq;
955 u64 nsec;
956
957 WARN_ON(timekeeping_suspended);
958
959 do {
960 seq = read_seqcount_begin(&tk_core.seq);
961 ts->tv_sec = tk->xtime_sec;
962 nsec = timekeeping_get_ns(&tk->tkr_mono);
963 tomono = tk->wall_to_monotonic;
964
965 } while (read_seqcount_retry(&tk_core.seq, seq));
966
967 ts->tv_sec += tomono.tv_sec;
968 ts->tv_nsec = 0;
969 timespec64_add_ns(ts, nsec + tomono.tv_nsec);
970}
971EXPORT_SYMBOL_GPL(ktime_get_ts64);
972
973/**
974 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
975 *
976 * Returns the seconds portion of CLOCK_MONOTONIC with a single
977 * non-serialized read. tk->ktime_sec is of type 'unsigned long' so this
978 * works on both 32 and 64 bit systems. On 32 bit systems the readout
979 * covers ~136 years of uptime, which should be enough to prevent
980 * premature wraparounds.
981 */
982time64_t ktime_get_seconds(void)
983{
984 struct timekeeper *tk = &tk_core.timekeeper;
985
986 WARN_ON(timekeeping_suspended);
987 return tk->ktime_sec;
988}
989EXPORT_SYMBOL_GPL(ktime_get_seconds);
990
991/**
992 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
993 *
994 * Returns the wall clock seconds since 1970.
995 *
996 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
997 * 32bit systems the access must be protected with the sequence
998 * counter to provide "atomic" access to the 64bit tk->xtime_sec
999 * value.
1000 */
1001time64_t ktime_get_real_seconds(void)
1002{
1003 struct timekeeper *tk = &tk_core.timekeeper;
1004 time64_t seconds;
1005 unsigned int seq;
1006
1007 if (IS_ENABLED(CONFIG_64BIT))
1008 return tk->xtime_sec;
1009
1010 do {
1011 seq = read_seqcount_begin(&tk_core.seq);
1012 seconds = tk->xtime_sec;
1013
1014 } while (read_seqcount_retry(&tk_core.seq, seq));
1015
1016 return seconds;
1017}
1018EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
1019
1020/**
1021 * __ktime_get_real_seconds - The same as ktime_get_real_seconds()
1022 * but without the sequence counter protection. This internal function
1023 * is only called when the timekeeping lock is already held.
1024 */
1025noinstr time64_t __ktime_get_real_seconds(void)
1026{
1027 struct timekeeper *tk = &tk_core.timekeeper;
1028
1029 return tk->xtime_sec;
1030}
1031
1032/**
1033 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
1034 * @systime_snapshot: pointer to struct receiving the system time snapshot
1035 */
1036void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
1037{
1038 struct timekeeper *tk = &tk_core.timekeeper;
1039 unsigned int seq;
1040 ktime_t base_raw;
1041 ktime_t base_real;
1042 u64 nsec_raw;
1043 u64 nsec_real;
1044 u64 now;
1045
1046 WARN_ON_ONCE(timekeeping_suspended);
1047
1048 do {
1049 seq = read_seqcount_begin(&tk_core.seq);
1050 now = tk_clock_read(&tk->tkr_mono);
1051 systime_snapshot->cs_id = tk->tkr_mono.clock->id;
1052 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
1053 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
1054 base_real = ktime_add(tk->tkr_mono.base,
1055 tk_core.timekeeper.offs_real);
1056 base_raw = tk->tkr_raw.base;
1057 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
1058 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
1059 } while (read_seqcount_retry(&tk_core.seq, seq));
1060
1061 systime_snapshot->cycles = now;
1062 systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
1063 systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
1064}
1065EXPORT_SYMBOL_GPL(ktime_get_snapshot);
1066
1067/* Scale base by mult/div checking for overflow */
1068static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
1069{
1070 u64 tmp, rem;
1071
1072 tmp = div64_u64_rem(*base, div, &rem);
1073
1074 if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
1075 ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
1076 return -EOVERFLOW;
1077 tmp *= mult;
1078
1079 rem = div64_u64(rem * mult, div);
1080 *base = tmp + rem;
1081 return 0;
1082}
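/*
 * Illustrative worked example (not part of the kernel source): the
 * helper computes *base = *base * mult / div while splitting quotient
 * and remainder to postpone overflow. With *base = 1000, mult = 3 and
 * div = 7:
 *
 *	tmp = 1000 / 7 = 142, rem = 6
 *	tmp * mult        = 426
 *	rem * mult / div  = 18 / 7 = 2
 *	*base             = 426 + 2 = 428
 *
 * matching the directly computed 3000 / 7 = 428, while the fls64()
 * based checks reject inputs whose intermediate products would not fit
 * into 64 bits.
 */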
1083
1084/**
1085 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
1086 * @history: Snapshot representing start of history
1087 * @partial_history_cycles: Cycle offset into history (fractional part)
1088 * @total_history_cycles: Total history length in cycles
1089 * @discontinuity: True indicates clock was set on history period
1090 * @ts: Cross timestamp that should be adjusted using
1091 * partial/total ratio
1092 *
1093 * Helper function used by get_device_system_crosststamp() to correct the
1094 * crosstimestamp corresponding to the start of the current interval to the
1095 * system counter value (timestamp point) provided by the driver. The
1096 * total_history_* quantities are the total history starting at the provided
1097 * reference point and ending at the start of the current interval. The cycle
1098 * count between the driver timestamp point and the start of the current
1099 * interval is partial_history_cycles.
1100 */
1101static int adjust_historical_crosststamp(struct system_time_snapshot *history,
1102 u64 partial_history_cycles,
1103 u64 total_history_cycles,
1104 bool discontinuity,
1105 struct system_device_crosststamp *ts)
1106{
1107 struct timekeeper *tk = &tk_core.timekeeper;
1108 u64 corr_raw, corr_real;
1109 bool interp_forward;
1110 int ret;
1111
1112 if (total_history_cycles == 0 || partial_history_cycles == 0)
1113 return 0;
1114
1115 /* Interpolate shortest distance from beginning or end of history */
1116 interp_forward = partial_history_cycles > total_history_cycles / 2;
1117 partial_history_cycles = interp_forward ?
1118 total_history_cycles - partial_history_cycles :
1119 partial_history_cycles;
1120
1121 /*
1122 * Scale the monotonic raw time delta by:
1123 * partial_history_cycles / total_history_cycles
1124 */
1125 corr_raw = (u64)ktime_to_ns(
1126 ktime_sub(ts->sys_monoraw, history->raw));
1127 ret = scale64_check_overflow(partial_history_cycles,
1128 total_history_cycles, &corr_raw);
1129 if (ret)
1130 return ret;
1131
1132 /*
1133 * If there is a discontinuity in the history, scale monotonic raw
1134 * correction by:
1135 * mult(real)/mult(raw) yielding the realtime correction.
1136 * Otherwise, calculate the realtime correction similarly to the monotonic
1137 * raw calculation.
1138 */
1139 if (discontinuity) {
1140 corr_real = mul_u64_u32_div
1141 (corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
1142 } else {
1143 corr_real = (u64)ktime_to_ns(
1144 ktime_sub(ts->sys_realtime, history->real));
1145 ret = scale64_check_overflow(partial_history_cycles,
1146 total_history_cycles, &corr_real);
1147 if (ret)
1148 return ret;
1149 }
1150
1151 /* Fix up the monotonic raw and real time values */
1152 if (interp_forward) {
1153 ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
1154 ts->sys_realtime = ktime_add_ns(history->real, corr_real);
1155 } else {
1156 ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
1157 ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
1158 }
1159
1160 return 0;
1161}
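/*
 * Illustrative worked example (not part of the kernel source): with a
 * history window of 1000 cycles, a device timestamp 200 cycles before
 * the start of the current interval (partial = 200) is corrected
 * backwards from the interval-start crosstimestamp:
 *
 *	corr   = (interval_start_value - history_value) * 200 / 1000
 *	result = interval_start_value - corr
 *
 * whereas a timestamp only 200 cycles after the history snapshot
 * (partial = 800) is interpolated forward from the snapshot using the
 * same 200/1000 ratio, added to the history values instead.
 */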
1162
1163/*
1164 * cycle_between - true if test occurs chronologically between before and after
1165 */
1166static bool cycle_between(u64 before, u64 test, u64 after)
1167{
1168 if (test > before && test < after)
1169 return true;
1170 if (test < before && before > after)
1171 return true;
1172 return false;
1173}
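/*
 * Illustrative sketch (not part of the kernel source): the second test
 * handles a counter that wrapped between 'before' and 'after'. For an
 * 8-bit counter with before = 0xf0 and after = 0x10, a sample of
 * test = 0x05 satisfies (test < before && before > after) and is
 * reported as lying inside the window even though it is numerically
 * smaller than 'before'.
 */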
1174
1175/**
1176 * get_device_system_crosststamp - Synchronously capture system/device timestamp
1177 * @get_time_fn: Callback to get simultaneous device time and
1178 * system counter from the device driver
1179 * @ctx: Context passed to get_time_fn()
1180 * @history_begin: Historical reference point used to interpolate system
1181 * time when counter provided by the driver is before the current interval
1182 * @xtstamp: Receives simultaneously captured system and device time
1183 *
1184 * Reads a timestamp from a device and correlates it to system time
1185 */
1186int get_device_system_crosststamp(int (*get_time_fn)
1187 (ktime_t *device_time,
1188 struct system_counterval_t *sys_counterval,
1189 void *ctx),
1190 void *ctx,
1191 struct system_time_snapshot *history_begin,
1192 struct system_device_crosststamp *xtstamp)
1193{
1194 struct system_counterval_t system_counterval;
1195 struct timekeeper *tk = &tk_core.timekeeper;
1196 u64 cycles, now, interval_start;
1197 unsigned int clock_was_set_seq = 0;
1198 ktime_t base_real, base_raw;
1199 u64 nsec_real, nsec_raw;
1200 u8 cs_was_changed_seq;
1201 unsigned int seq;
1202 bool do_interp;
1203 int ret;
1204
1205 do {
1206 seq = read_seqcount_begin(&tk_core.seq);
1207 /*
1208 * Try to synchronously capture device time and a system
1209 * counter value calling back into the device driver
1210 */
1211 ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
1212 if (ret)
1213 return ret;
1214
1215 /*
1216 * Verify that the clocksource associated with the captured
1217 * system counter value is the same as the currently installed
1218 * timekeeper clocksource
1219 */
1220 if (tk->tkr_mono.clock != system_counterval.cs)
1221 return -ENODEV;
1222 cycles = system_counterval.cycles;
1223
1224 /*
1225 * Check whether the system counter value provided by the
1226 * device driver is on the current timekeeping interval.
1227 */
1228 now = tk_clock_read(&tk->tkr_mono);
1229 interval_start = tk->tkr_mono.cycle_last;
1230 if (!cycle_between(interval_start, cycles, now)) {
1231 clock_was_set_seq = tk->clock_was_set_seq;
1232 cs_was_changed_seq = tk->cs_was_changed_seq;
1233 cycles = interval_start;
1234 do_interp = true;
1235 } else {
1236 do_interp = false;
1237 }
1238
1239 base_real = ktime_add(tk->tkr_mono.base,
1240 tk_core.timekeeper.offs_real);
1241 base_raw = tk->tkr_raw.base;
1242
1243 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
1244 system_counterval.cycles);
1245 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
1246 system_counterval.cycles);
1247 } while (read_seqcount_retry(&tk_core.seq, seq));
1248
1249 xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
1250 xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);
1251
1252 /*
1253 * Interpolate if necessary, adjusting back from the start of the
1254 * current interval
1255 */
1256 if (do_interp) {
1257 u64 partial_history_cycles, total_history_cycles;
1258 bool discontinuity;
1259
1260 /*
1261 * Check that the counter value occurs after the provided
1262 * history reference and that the history doesn't cross a
1263 * clocksource change
1264 */
1265 if (!history_begin ||
1266 !cycle_between(history_begin->cycles,
1267 system_counterval.cycles, cycles) ||
1268 history_begin->cs_was_changed_seq != cs_was_changed_seq)
1269 return -EINVAL;
1270 partial_history_cycles = cycles - system_counterval.cycles;
1271 total_history_cycles = cycles - history_begin->cycles;
1272 discontinuity =
1273 history_begin->clock_was_set_seq != clock_was_set_seq;
1274
1275 ret = adjust_historical_crosststamp(history_begin,
1276 partial_history_cycles,
1277 total_history_cycles,
1278 discontinuity, xtstamp);
1279 if (ret)
1280 return ret;
1281 }
1282
1283 return 0;
1284}
1285EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
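/*
 * Illustrative sketch (hypothetical driver, not part of the kernel
 * source): a device driver using this API supplies a callback which
 * latches its device clock together with the system counter:
 *
 *	static int example_get_time(ktime_t *device_time,
 *				    struct system_counterval_t *sys_counter,
 *				    void *ctx)
 *	{
 *		struct example_dev *dev = ctx;
 *
 *		... latch the hardware, fill *device_time as well as
 *		    sys_counter->cycles and sys_counter->cs ...
 *		return 0;
 *	}
 *
 *	struct system_device_crosststamp xtstamp;
 *	int err = get_device_system_crosststamp(example_get_time, dev,
 *						NULL, &xtstamp);
 */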
1286
1287/**
1288 * do_settimeofday64 - Sets the time of day.
1289 * @ts: pointer to the timespec64 variable containing the new time
1290 *
1291 * Sets the time of day to the new time, updates NTP and notifies hrtimers.
1292 */
1293int do_settimeofday64(const struct timespec64 *ts)
1294{
1295 struct timekeeper *tk = &tk_core.timekeeper;
1296 struct timespec64 ts_delta, xt;
1297 unsigned long flags;
1298 int ret = 0;
1299
1300 if (!timespec64_valid_settod(ts))
1301 return -EINVAL;
1302
1303 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1304 write_seqcount_begin(&tk_core.seq);
1305
1306 timekeeping_forward_now(tk);
1307
1308 xt = tk_xtime(tk);
1309 ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
1310 ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
1311
1312 if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
1313 ret = -EINVAL;
1314 goto out;
1315 }
1316
1317 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
1318
1319 tk_set_xtime(tk, ts);
1320out:
1321 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1322
1323 write_seqcount_end(&tk_core.seq);
1324 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1325
1326 /* signal hrtimers about time change */
1327 clock_was_set();
1328
1329 if (!ret)
1330 audit_tk_injoffset(ts_delta);
1331
1332 return ret;
1333}
1334EXPORT_SYMBOL(do_settimeofday64);
1335
1336/**
1337 * timekeeping_inject_offset - Adds or subtracts from the current time.
1338 * @ts: Pointer to the timespec variable containing the offset
1339 *
1340 * Adds or subtracts an offset value from the current time.
1341 */
1342static int timekeeping_inject_offset(const struct timespec64 *ts)
1343{
1344 struct timekeeper *tk = &tk_core.timekeeper;
1345 unsigned long flags;
1346 struct timespec64 tmp;
1347 int ret = 0;
1348
1349 if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
1350 return -EINVAL;
1351
1352 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1353 write_seqcount_begin(&tk_core.seq);
1354
1355 timekeeping_forward_now(tk);
1356
1357 /* Make sure the proposed value is valid */
1358 tmp = timespec64_add(tk_xtime(tk), *ts);
1359 if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
1360 !timespec64_valid_settod(&tmp)) {
1361 ret = -EINVAL;
1362 goto error;
1363 }
1364
1365 tk_xtime_add(tk, ts);
1366 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));
1367
1368error: /* even if we error out, we forwarded the time, so call update */
1369 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1370
1371 write_seqcount_end(&tk_core.seq);
1372 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1373
1374 /* signal hrtimers about time change */
1375 clock_was_set();
1376
1377 return ret;
1378}
1379
1380/*
1381 * Indicates if there is an offset between the system clock and the hardware
1382 * clock/persistent clock/rtc.
1383 */
1384int persistent_clock_is_local;
1385
1386/*
1387 * Adjust the time obtained from the CMOS to be UTC time instead of
1388 * local time.
1389 *
1390 * This is ugly, but preferable to the alternatives. Otherwise we
1391 * would either need to write a program to do it in /etc/rc (and risk
1392 * confusion if the program gets run more than once; it would also be
1393 * hard to make the program warp the clock precisely n hours) or
1394 * compile in the timezone information into the kernel. Bad, bad....
1395 *
1396 * - TYT, 1992-01-01
1397 *
1398 * The best thing to do is to keep the CMOS clock in universal time (UTC)
1399 * as real UNIX machines always do it. This avoids all headaches about
1400 * daylight saving times and warping kernel clocks.
1401 */
1402void timekeeping_warp_clock(void)
1403{
1404 if (sys_tz.tz_minuteswest != 0) {
1405 struct timespec64 adjust;
1406
1407 persistent_clock_is_local = 1;
1408 adjust.tv_sec = sys_tz.tz_minuteswest * 60;
1409 adjust.tv_nsec = 0;
1410 timekeeping_inject_offset(&adjust);
1411 }
1412}
1413
1414/*
1415 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
1416 */
1417static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
1418{
1419 tk->tai_offset = tai_offset;
1420 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
1421}
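/*
 * Illustrative sketch (not part of the kernel source): with a TAI-UTC
 * offset of, say, 37 seconds this keeps
 *
 *	offs_tai  = offs_real + 37s
 *	CLOCK_TAI = CLOCK_MONOTONIC + offs_tai
 *
 * so any change of offs_real (settimeofday, leap second) combined with
 * a call to this helper moves CLOCK_TAI along with CLOCK_REALTIME.
 */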
1422
1423/*
1424 * change_clocksource - Swaps clocksources if a new one is available
1425 *
1426 * Accumulates current time interval and initializes new clocksource
1427 */
1428static int change_clocksource(void *data)
1429{
1430 struct timekeeper *tk = &tk_core.timekeeper;
1431 struct clocksource *new, *old = NULL;
1432 unsigned long flags;
1433 bool change = false;
1434
1435 new = (struct clocksource *) data;
1436
1437 /*
1438 * If the clocksource is in a module, get a module reference. This succeeds
1439 * for built-in code (owner == NULL) as well.
1440 */
1441 if (try_module_get(new->owner)) {
1442 if (!new->enable || new->enable(new) == 0)
1443 change = true;
1444 else
1445 module_put(new->owner);
1446 }
1447
1448 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1449 write_seqcount_begin(&tk_core.seq);
1450
1451 timekeeping_forward_now(tk);
1452
1453 if (change) {
1454 old = tk->tkr_mono.clock;
1455 tk_setup_internals(tk, new);
1456 }
1457
1458 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1459
1460 write_seqcount_end(&tk_core.seq);
1461 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1462
1463 if (old) {
1464 if (old->disable)
1465 old->disable(old);
1466
1467 module_put(old->owner);
1468 }
1469
1470 return 0;
1471}
1472
1473/**
1474 * timekeeping_notify - Install a new clock source
1475 * @clock: pointer to the clock source
1476 *
1477 * This function is called from clocksource.c after a new, better clock
1478 * source has been registered. The caller holds the clocksource_mutex.
1479 */
1480int timekeeping_notify(struct clocksource *clock)
1481{
1482 struct timekeeper *tk = &tk_core.timekeeper;
1483
1484 if (tk->tkr_mono.clock == clock)
1485 return 0;
1486 stop_machine(change_clocksource, clock, NULL);
1487 tick_clock_notify();
1488 return tk->tkr_mono.clock == clock ? 0 : -1;
1489}
1490
1491/**
1492 * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec
1493 * @ts: pointer to the timespec64 to be set
1494 *
1495 * Returns the raw monotonic time (completely un-modified by ntp)
1496 */
1497void ktime_get_raw_ts64(struct timespec64 *ts)
1498{
1499 struct timekeeper *tk = &tk_core.timekeeper;
1500 unsigned int seq;
1501 u64 nsecs;
1502
1503 do {
1504 seq = read_seqcount_begin(&tk_core.seq);
1505 ts->tv_sec = tk->raw_sec;
1506 nsecs = timekeeping_get_ns(&tk->tkr_raw);
1507
1508 } while (read_seqcount_retry(&tk_core.seq, seq));
1509
1510 ts->tv_nsec = 0;
1511 timespec64_add_ns(ts, nsecs);
1512}
1513EXPORT_SYMBOL(ktime_get_raw_ts64);
1514
1515
1516/**
1517 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
1518 */
1519int timekeeping_valid_for_hres(void)
1520{
1521 struct timekeeper *tk = &tk_core.timekeeper;
1522 unsigned int seq;
1523 int ret;
1524
1525 do {
1526 seq = read_seqcount_begin(&tk_core.seq);
1527
1528 ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
1529
1530 } while (read_seqcount_retry(&tk_core.seq, seq));
1531
1532 return ret;
1533}
1534
1535/**
1536 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
1537 */
1538u64 timekeeping_max_deferment(void)
1539{
1540 struct timekeeper *tk = &tk_core.timekeeper;
1541 unsigned int seq;
1542 u64 ret;
1543
1544 do {
1545 seq = read_seqcount_begin(&tk_core.seq);
1546
1547 ret = tk->tkr_mono.clock->max_idle_ns;
1548
1549 } while (read_seqcount_retry(&tk_core.seq, seq));
1550
1551 return ret;
1552}
1553
1554/**
1555 * read_persistent_clock64 - Return time from the persistent clock.
1556 * @ts: Pointer to the storage for the readout value
1557 *
1558 * Weak dummy function for arches that do not yet support it.
1559 * Reads the time from the battery backed persistent clock.
1560 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
1561 *
1562 * XXX - Do be sure to remove it once all arches implement it.
1563 */
1564void __weak read_persistent_clock64(struct timespec64 *ts)
1565{
1566 ts->tv_sec = 0;
1567 ts->tv_nsec = 0;
1568}
1569
1570/**
1571 * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
1572 * from the boot.
1573 *
1574 * Weak dummy function for arches that do not yet support it.
1575 * @wall_time: current time as returned by the persistent clock
1576 * @boot_offset: offset that is defined as wall_time - boot_time
1577 *
1578 * The default function calculates offset based on the current value of
1579 * local_clock(). This way architectures that support sched_clock() but don't
1580 * support dedicated boot time clock will provide the best estimate of the
1581 * boot time.
1582 */
1583void __weak __init
1584read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
1585 struct timespec64 *boot_offset)
1586{
1587 read_persistent_clock64(wall_time);
1588 *boot_offset = ns_to_timespec64(local_clock());
1589}
1590
1591/*
1592 * Flag reflecting whether timekeeping_resume() has injected sleeptime.
1593 *
1594 * The flag starts off false and is only set when a suspend reaches
1595 * timekeeping_suspend(). timekeeping_resume() sets it back to false when
1596 * the timekeeper clocksource did not stop across suspend and has been
1597 * used to update the sleep time. If the timekeeper clocksource stopped,
1598 * the flag stays true and is used by the RTC resume code to decide
1599 * whether sleeptime must be injected; if so, the flag is cleared there.
1600 *
1601 * If a suspend fails before reaching timekeeping_resume() then the flag
1602 * stays false and prevents erroneous sleeptime injection.
1603 */
1604static bool suspend_timing_needed;
1605
1606/* Flag for if there is a persistent clock on this platform */
1607static bool persistent_clock_exists;
1608
1609/*
1610 * timekeeping_init - Initializes the clocksource and common timekeeping values
1611 */
1612void __init timekeeping_init(void)
1613{
1614 struct timespec64 wall_time, boot_offset, wall_to_mono;
1615 struct timekeeper *tk = &tk_core.timekeeper;
1616 struct clocksource *clock;
1617 unsigned long flags;
1618
1619 read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
1620 if (timespec64_valid_settod(&wall_time) &&
1621 timespec64_to_ns(&wall_time) > 0) {
1622 persistent_clock_exists = true;
1623 } else if (timespec64_to_ns(&wall_time) != 0) {
1624 pr_warn("Persistent clock returned invalid value");
1625 wall_time = (struct timespec64){0};
1626 }
1627
1628 if (timespec64_compare(&wall_time, &boot_offset) < 0)
1629 boot_offset = (struct timespec64){0};
1630
1631 /*
1632 * We want to set wall_to_mono, so that the following is true:
1633 * wall time + wall_to_mono = boot time
1634 */
1635 wall_to_mono = timespec64_sub(boot_offset, wall_time);
1636
1637 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1638 write_seqcount_begin(&tk_core.seq);
1639 ntp_init();
1640
1641 clock = clocksource_default_clock();
1642 if (clock->enable)
1643 clock->enable(clock);
1644 tk_setup_internals(tk, clock);
1645
1646 tk_set_xtime(tk, &wall_time);
1647 tk->raw_sec = 0;
1648
1649 tk_set_wall_to_mono(tk, wall_to_mono);
1650
1651 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1652
1653 write_seqcount_end(&tk_core.seq);
1654 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1655}
1656
1657/* time in seconds when suspend began for persistent clock */
1658static struct timespec64 timekeeping_suspend_time;
1659
1660/**
1661 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1662 * @tk: Pointer to the timekeeper to be updated
1663 * @delta: Pointer to the delta value in timespec64 format
1664 *
1665 * Takes a timespec offset measuring a suspend interval and properly
1666 * adds the sleep offset to the timekeeping variables.
1667 */
1668static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1669 const struct timespec64 *delta)
1670{
1671 if (!timespec64_valid_strict(delta)) {
1672 printk_deferred(KERN_WARNING
1673 "__timekeeping_inject_sleeptime: Invalid "
1674 "sleep delta value!\n");
1675 return;
1676 }
1677 tk_xtime_add(tk, delta);
1678 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1679 tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1680 tk_debug_account_sleep_time(delta);
1681}
1682
1683#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
1684/**
1685 * We have three kinds of time sources to use for sleep time
1686 * injection, the preference order is:
1687 * 1) non-stop clocksource
1688 * 2) persistent clock (ie: RTC accessible when irqs are off)
1689 * 3) RTC
1690 *
1691 * 1) and 2) are used by timekeeping, 3) by the RTC subsystem.
1692 * If the system has neither 1) nor 2), 3) will be used as the fallback.
1693 *
1694 *
1695 * If timekeeping has injected sleeptime via either 1) or 2),
1696 * 3) becomes unnecessary, so in this case we don't need to call
1697 * rtc_resume(); this is what timekeeping_rtc_skipresume()
1698 * means.
1699 */
1700bool timekeeping_rtc_skipresume(void)
1701{
1702 return !suspend_timing_needed;
1703}
1704
1705/**
1706 * Whether 1) will be used can only be determined in
1707 * timekeeping_resume(), which is invoked after rtc_suspend(),
1708 * so we can't reliably skip rtc_suspend() if the system has 1).
1709 *
1710 * But if the system has 2), 2) will definitely be used, so in this
1711 * case we don't need to call rtc_suspend(); this is what
1712 * timekeeping_rtc_skipsuspend() means.
1713 */
1714bool timekeeping_rtc_skipsuspend(void)
1715{
1716 return persistent_clock_exists;
1717}
1718
1719/**
1720 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1721 * @delta: pointer to a timespec64 delta value
1722 *
1723 * This hook is for architectures that cannot support read_persistent_clock64
1724 * because their RTC/persistent clock is only accessible when irqs are enabled,
1725 * and also don't have an effective nonstop clocksource.
1726 *
1727 * This function should only be called by rtc_resume(), and allows
1728 * a suspend offset to be injected into the timekeeping values.
1729 */
1730void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
1731{
1732 struct timekeeper *tk = &tk_core.timekeeper;
1733 unsigned long flags;
1734
1735 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1736 write_seqcount_begin(&tk_core.seq);
1737
1738 suspend_timing_needed = false;
1739
1740 timekeeping_forward_now(tk);
1741
1742 __timekeeping_inject_sleeptime(tk, delta);
1743
1744 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1745
1746 write_seqcount_end(&tk_core.seq);
1747 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1748
1749 /* signal hrtimers about time change */
1750 clock_was_set();
1751}
1752#endif
1753
1754/**
1755 * timekeeping_resume - Resumes the generic timekeeping subsystem.
1756 */
1757void timekeeping_resume(void)
1758{
1759 struct timekeeper *tk = &tk_core.timekeeper;
1760 struct clocksource *clock = tk->tkr_mono.clock;
1761 unsigned long flags;
1762 struct timespec64 ts_new, ts_delta;
1763 u64 cycle_now, nsec;
1764 bool inject_sleeptime = false;
1765
1766 read_persistent_clock64(&ts_new);
1767
1768 clockevents_resume();
1769 clocksource_resume();
1770
1771 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1772 write_seqcount_begin(&tk_core.seq);
1773
1774 /*
1775 * After system resumes, we need to calculate the suspended time and
1776 * compensate it for the OS time. There are 3 sources that could be
1777 * used: Nonstop clocksource during suspend, persistent clock and rtc
1778 * device.
1779 *
1780 * One specific platform may have 1 or 2 or all of them, and the
1781 * preference will be:
1782 * suspend-nonstop clocksource -> persistent clock -> rtc
1783 * The less preferred source will only be tried if there is no better
1784 * usable source. The rtc part is handled separately in rtc core code.
1785 */
1786 cycle_now = tk_clock_read(&tk->tkr_mono);
1787 nsec = clocksource_stop_suspend_timing(clock, cycle_now);
1788 if (nsec > 0) {
1789 ts_delta = ns_to_timespec64(nsec);
1790 inject_sleeptime = true;
1791 } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1792 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1793 inject_sleeptime = true;
1794 }
1795
1796 if (inject_sleeptime) {
1797 suspend_timing_needed = false;
1798 __timekeeping_inject_sleeptime(tk, &ts_delta);
1799 }
1800
1801 /* Re-base the last cycle value */
1802 tk->tkr_mono.cycle_last = cycle_now;
1803 tk->tkr_raw.cycle_last = cycle_now;
1804
1805 tk->ntp_error = 0;
1806 timekeeping_suspended = 0;
1807 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1808 write_seqcount_end(&tk_core.seq);
1809 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1810
1811 touch_softlockup_watchdog();
1812
1813 tick_resume();
1814 hrtimers_resume();
1815}
1816
1817int timekeeping_suspend(void)
1818{
1819 struct timekeeper *tk = &tk_core.timekeeper;
1820 unsigned long flags;
1821 struct timespec64 delta, delta_delta;
1822 static struct timespec64 old_delta;
1823 struct clocksource *curr_clock;
1824 u64 cycle_now;
1825
1826 read_persistent_clock64(&timekeeping_suspend_time);
1827
1828 /*
	 * On some systems the persistent clock cannot be detected at
	 * timekeeping_init() by its return value, so if we see a valid
	 * value returned here, update the persistent_clock_exists flag.
1832 */
1833 if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1834 persistent_clock_exists = true;
1835
1836 suspend_timing_needed = true;
1837
1838 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1839 write_seqcount_begin(&tk_core.seq);
1840 timekeeping_forward_now(tk);
1841 timekeeping_suspended = 1;
1842
1843 /*
1844 * Since we've called forward_now, cycle_last stores the value
1845 * just read from the current clocksource. Save this to potentially
1846 * use in suspend timing.
1847 */
1848 curr_clock = tk->tkr_mono.clock;
1849 cycle_now = tk->tkr_mono.cycle_last;
1850 clocksource_start_suspend_timing(curr_clock, cycle_now);
1851
1852 if (persistent_clock_exists) {
1853 /*
1854 * To avoid drift caused by repeated suspend/resumes,
		 * each of which can add ~1 second of drift error,
1856 * try to compensate so the difference in system time
1857 * and persistent_clock time stays close to constant.
1858 */
1859 delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1860 delta_delta = timespec64_sub(delta, old_delta);
1861 if (abs(delta_delta.tv_sec) >= 2) {
1862 /*
1863 * if delta_delta is too large, assume time correction
1864 * has occurred and set old_delta to the current delta.
1865 */
1866 old_delta = delta;
1867 } else {
			/* Otherwise adjust timekeeping_suspend_time to compensate */
1869 timekeeping_suspend_time =
1870 timespec64_add(timekeeping_suspend_time, delta_delta);
1871 }
1872 }
1873
1874 timekeeping_update(tk, TK_MIRROR);
1875 halt_fast_timekeeper(tk);
1876 write_seqcount_end(&tk_core.seq);
1877 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1878
1879 tick_suspend();
1880 clocksource_suspend();
1881 clockevents_suspend();
1882
1883 return 0;
1884}
1885
/* Syscore resume/suspend hooks for timekeeping */
1887static struct syscore_ops timekeeping_syscore_ops = {
1888 .resume = timekeeping_resume,
1889 .suspend = timekeeping_suspend,
1890};
1891
1892static int __init timekeeping_init_ops(void)
1893{
1894 register_syscore_ops(&timekeeping_syscore_ops);
1895 return 0;
1896}
1897device_initcall(timekeeping_init_ops);
1898
1899/*
1900 * Apply a multiplier adjustment to the timekeeper
1901 */
1902static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1903 s64 offset,
1904 s32 mult_adj)
1905{
1906 s64 interval = tk->cycle_interval;
1907
1908 if (mult_adj == 0) {
1909 return;
1910 } else if (mult_adj == -1) {
1911 interval = -interval;
1912 offset = -offset;
1913 } else if (mult_adj != 1) {
1914 interval *= mult_adj;
1915 offset *= mult_adj;
1916 }
1917
1918 /*
1919 * So the following can be confusing.
1920 *
	 * To keep things simple, let's assume mult_adj == 1 for now.
1922 *
1923 * When mult_adj != 1, remember that the interval and offset values
1924 * have been appropriately scaled so the math is the same.
1925 *
1926 * The basic idea here is that we're increasing the multiplier
1927 * by one, this causes the xtime_interval to be incremented by
1928 * one cycle_interval. This is because:
1929 * xtime_interval = cycle_interval * mult
1930 * So if mult is being incremented by one:
1931 * xtime_interval = cycle_interval * (mult + 1)
	 * It's the same as:
1933 * xtime_interval = (cycle_interval * mult) + cycle_interval
1934 * Which can be shortened to:
1935 * xtime_interval += cycle_interval
1936 *
1937 * So offset stores the non-accumulated cycles. Thus the current
1938 * time (in shifted nanoseconds) is:
1939 * now = (offset * adj) + xtime_nsec
1940 * Now, even though we're adjusting the clock frequency, we have
1941 * to keep time consistent. In other words, we can't jump back
1942 * in time, and we also want to avoid jumping forward in time.
1943 *
1944 * So given the same offset value, we need the time to be the same
1945 * both before and after the freq adjustment.
1946 * now = (offset * adj_1) + xtime_nsec_1
1947 * now = (offset * adj_2) + xtime_nsec_2
1948 * So:
1949 * (offset * adj_1) + xtime_nsec_1 =
1950 * (offset * adj_2) + xtime_nsec_2
1951 * And we know:
1952 * adj_2 = adj_1 + 1
1953 * So:
1954 * (offset * adj_1) + xtime_nsec_1 =
1955 * (offset * (adj_1+1)) + xtime_nsec_2
1956 * (offset * adj_1) + xtime_nsec_1 =
1957 * (offset * adj_1) + offset + xtime_nsec_2
1958 * Canceling the sides:
1959 * xtime_nsec_1 = offset + xtime_nsec_2
1960 * Which gives us:
1961 * xtime_nsec_2 = xtime_nsec_1 - offset
1962 * Which simplifies to:
1963 * xtime_nsec -= offset
1964 */
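	/*
	 * Worked example with made-up numbers (mult_adj == 1): if offset is
	 * 1000 cycles and xtime_nsec is 5000 shifted ns, then before the
	 * adjustment now = 1000 * mult + 5000. Bumping mult by one makes the
	 * same offset contribute 1000 more shifted ns, so xtime_nsec is
	 * reduced by 1000 (to 4000) and the readout stays unchanged.
	 */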
1965 if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
1966 /* NTP adjustment caused clocksource mult overflow */
1967 WARN_ON_ONCE(1);
1968 return;
1969 }
1970
1971 tk->tkr_mono.mult += mult_adj;
1972 tk->xtime_interval += interval;
1973 tk->tkr_mono.xtime_nsec -= offset;
1974}
1975
1976/*
1977 * Adjust the timekeeper's multiplier to the correct frequency
1978 * and also to reduce the accumulated error value.
1979 */
1980static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1981{
1982 u32 mult;
1983
1984 /*
1985 * Determine the multiplier from the current NTP tick length.
1986 * Avoid expensive division when the tick length doesn't change.
1987 */
1988 if (likely(tk->ntp_tick == ntp_tick_length())) {
1989 mult = tk->tkr_mono.mult - tk->ntp_err_mult;
1990 } else {
1991 tk->ntp_tick = ntp_tick_length();
1992 mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
1993 tk->xtime_remainder, tk->cycle_interval);
1994 }
1995
1996 /*
1997 * If the clock is behind the NTP time, increase the multiplier by 1
1998 * to catch up with it. If it's ahead and there was a remainder in the
1999 * tick division, the clock will slow down. Otherwise it will stay
2000 * ahead until the tick length changes to a non-divisible value.
2001 */
2002 tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
2003 mult += tk->ntp_err_mult;
2004
2005 timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);
2006
2007 if (unlikely(tk->tkr_mono.clock->maxadj &&
2008 (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
2009 > tk->tkr_mono.clock->maxadj))) {
2010 printk_once(KERN_WARNING
2011 "Adjusting %s more than 11%% (%ld vs %ld)\n",
2012 tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
2013 (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
2014 }
2015
2016 /*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small. Further, if we're slightly speeding up the clocksource
	 * in the code above, it's possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
2021 *
2022 * Now, since we have already accumulated the second and the NTP
2023 * subsystem has been notified via second_overflow(), we need to skip
2024 * the next update.
2025 */
2026 if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
2027 tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
2028 tk->tkr_mono.shift;
2029 tk->xtime_sec--;
2030 tk->skip_second_overflow = 1;
2031 }
2032}
2033
2034/*
2035 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
2036 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field into the xtime_sec field.
2039 * It also calls into the NTP code to handle leapsecond processing.
2040 */
2041static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
2042{
2043 u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
2044 unsigned int clock_set = 0;
2045
2046 while (tk->tkr_mono.xtime_nsec >= nsecps) {
2047 int leap;
2048
2049 tk->tkr_mono.xtime_nsec -= nsecps;
2050 tk->xtime_sec++;
2051
2052 /*
2053 * Skip NTP update if this second was accumulated before,
2054 * i.e. xtime_nsec underflowed in timekeeping_adjust()
2055 */
2056 if (unlikely(tk->skip_second_overflow)) {
2057 tk->skip_second_overflow = 0;
2058 continue;
2059 }
2060
		/* Figure out if it's a leap second and apply it if needed */
2062 leap = second_overflow(tk->xtime_sec);
2063 if (unlikely(leap)) {
2064 struct timespec64 ts;
2065
2066 tk->xtime_sec += leap;
2067
2068 ts.tv_sec = leap;
2069 ts.tv_nsec = 0;
2070 tk_set_wall_to_mono(tk,
2071 timespec64_sub(tk->wall_to_monotonic, ts));
2072
2073 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
2074
2075 clock_set = TK_CLOCK_WAS_SET;
2076 }
2077 }
2078 return clock_set;
2079}
2080
2081/*
2082 * logarithmic_accumulation - shifted accumulation of cycles
2083 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds, allowing for an O(log)
 * accumulation loop.
2087 *
2088 * Returns the unconsumed cycles.
2089 */
2090static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
2091 u32 shift, unsigned int *clock_set)
2092{
2093 u64 interval = tk->cycle_interval << shift;
2094 u64 snsec_per_sec;
2095
2096 /* If the offset is smaller than a shifted interval, do nothing */
2097 if (offset < interval)
2098 return offset;
2099
2100 /* Accumulate one shifted interval */
2101 offset -= interval;
2102 tk->tkr_mono.cycle_last += interval;
2103 tk->tkr_raw.cycle_last += interval;
2104
2105 tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
2106 *clock_set |= accumulate_nsecs_to_secs(tk);
2107
2108 /* Accumulate raw time */
2109 tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
2110 snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
2111 while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
2112 tk->tkr_raw.xtime_nsec -= snsec_per_sec;
2113 tk->raw_sec++;
2114 }
2115
2116 /* Accumulate error between NTP and clock interval */
2117 tk->ntp_error += tk->ntp_tick << shift;
2118 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
2119 (tk->ntp_error_shift + shift);
2120
2121 return offset;
2122}
2123
2124/*
2125 * timekeeping_advance - Updates the timekeeper to the current time and
2126 * current NTP tick length
2127 */
2128static void timekeeping_advance(enum timekeeping_adv_mode mode)
2129{
2130 struct timekeeper *real_tk = &tk_core.timekeeper;
2131 struct timekeeper *tk = &shadow_timekeeper;
2132 u64 offset;
2133 int shift = 0, maxshift;
2134 unsigned int clock_set = 0;
2135 unsigned long flags;
2136
2137 raw_spin_lock_irqsave(&timekeeper_lock, flags);
2138
2139 /* Make sure we're fully resumed: */
2140 if (unlikely(timekeeping_suspended))
2141 goto out;
2142
2143 offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
2144 tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
2145
2146 /* Check if there's really nothing to do */
2147 if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
2148 goto out;
2149
2150 /* Do some additional sanity checking */
2151 timekeeping_check_update(tk, offset);
2152
2153 /*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest power-of-two multiple of cycle_interval
	 * that does not exceed the offset. We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
2160 */
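	/*
	 * Worked example with made-up numbers: if the offset amounts to 37
	 * cycle_intervals, shift evaluates to (roughly) 5, so the loop below
	 * first consumes a chunk of 32 intervals, then, as shift is stepped
	 * down, chunks of 4 and 1, i.e. three productive accumulation steps
	 * instead of 37.
	 */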
2161 shift = ilog2(offset) - ilog2(tk->cycle_interval);
2162 shift = max(0, shift);
2163 /* Bound shift to one less than what overflows tick_length */
2164 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
2165 shift = min(shift, maxshift);
2166 while (offset >= tk->cycle_interval) {
2167 offset = logarithmic_accumulation(tk, offset, shift,
2168 &clock_set);
2169 if (offset < tk->cycle_interval<<shift)
2170 shift--;
2171 }
2172
2173 /* Adjust the multiplier to correct NTP error */
2174 timekeeping_adjust(tk, offset);
2175
2176 /*
2177 * Finally, make sure that after the rounding
2178 * xtime_nsec isn't larger than NSEC_PER_SEC
2179 */
2180 clock_set |= accumulate_nsecs_to_secs(tk);
2181
2182 write_seqcount_begin(&tk_core.seq);
2183 /*
2184 * Update the real timekeeper.
2185 *
2186 * We could avoid this memcpy by switching pointers, but that
2187 * requires changes to all other timekeeper usage sites as
2188 * well, i.e. move the timekeeper pointer getter into the
2189 * spinlocked/seqcount protected sections. And we trade this
2190 * memcpy under the tk_core.seq against one before we start
2191 * updating.
2192 */
2193 timekeeping_update(tk, clock_set);
2194 memcpy(real_tk, tk, sizeof(*tk));
2195 /* The memcpy must come last. Do not put anything here! */
2196 write_seqcount_end(&tk_core.seq);
2197out:
2198 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2199 if (clock_set)
		/* Have to call the _delayed version, since we're in irq context */
2201 clock_was_set_delayed();
2202}
2203
2204/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 */
2208void update_wall_time(void)
2209{
2210 timekeeping_advance(TK_ADV_TICK);
2211}
2212
2213/**
2214 * getboottime64 - Return the real time of system boot.
2215 * @ts: pointer to the timespec64 to be set
2216 *
2217 * Returns the wall-time of boot in a timespec64.
2218 *
2219 * This is based on the wall_to_monotonic offset and the total suspend
2220 * time. Calls to settimeofday will affect the value returned (which
2221 * basically means that however wrong your real time clock is at boot time,
2222 * you get the right time here).
2223 */
2224void getboottime64(struct timespec64 *ts)
2225{
2226 struct timekeeper *tk = &tk_core.timekeeper;
2227 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2228
2229 *ts = ktime_to_timespec64(t);
2230}
2231EXPORT_SYMBOL_GPL(getboottime64);
2232
2233void ktime_get_coarse_real_ts64(struct timespec64 *ts)
2234{
2235 struct timekeeper *tk = &tk_core.timekeeper;
2236 unsigned int seq;
2237
2238 do {
2239 seq = read_seqcount_begin(&tk_core.seq);
2240
2241 *ts = tk_xtime(tk);
2242 } while (read_seqcount_retry(&tk_core.seq, seq));
2243}
2244EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
2245
2246void ktime_get_coarse_ts64(struct timespec64 *ts)
2247{
2248 struct timekeeper *tk = &tk_core.timekeeper;
2249 struct timespec64 now, mono;
2250 unsigned int seq;
2251
2252 do {
2253 seq = read_seqcount_begin(&tk_core.seq);
2254
2255 now = tk_xtime(tk);
2256 mono = tk->wall_to_monotonic;
2257 } while (read_seqcount_retry(&tk_core.seq, seq));
2258
2259 set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
2260 now.tv_nsec + mono.tv_nsec);
2261}
2262EXPORT_SYMBOL(ktime_get_coarse_ts64);
2263
2264/*
2265 * Must hold jiffies_lock
2266 */
2267void do_timer(unsigned long ticks)
2268{
2269 jiffies_64 += ticks;
2270 calc_global_load();
2271}
2272
2273/**
2274 * ktime_get_update_offsets_now - hrtimer helper
2275 * @cwsseq: pointer to check and store the clock was set sequence number
2276 * @offs_real: pointer to storage for monotonic -> realtime offset
2277 * @offs_boot: pointer to storage for monotonic -> boottime offset
2278 * @offs_tai: pointer to storage for monotonic -> clock tai offset
2279 *
2280 * Returns current monotonic time and updates the offsets if the
2281 * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
2282 * different.
2283 *
2284 * Called from hrtimer_interrupt() or retrigger_next_event()
2285 */
2286ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2287 ktime_t *offs_boot, ktime_t *offs_tai)
2288{
2289 struct timekeeper *tk = &tk_core.timekeeper;
2290 unsigned int seq;
2291 ktime_t base;
2292 u64 nsecs;
2293
2294 do {
2295 seq = read_seqcount_begin(&tk_core.seq);
2296
2297 base = tk->tkr_mono.base;
2298 nsecs = timekeeping_get_ns(&tk->tkr_mono);
2299 base = ktime_add_ns(base, nsecs);
2300
2301 if (*cwsseq != tk->clock_was_set_seq) {
2302 *cwsseq = tk->clock_was_set_seq;
2303 *offs_real = tk->offs_real;
2304 *offs_boot = tk->offs_boot;
2305 *offs_tai = tk->offs_tai;
2306 }
2307
2308 /* Handle leapsecond insertion adjustments */
2309 if (unlikely(base >= tk->next_leap_ktime))
2310 *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
2311
2312 } while (read_seqcount_retry(&tk_core.seq, seq));
2313
2314 return base;
2315}
2316
2317/*
2318 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
2319 */
2320static int timekeeping_validate_timex(const struct __kernel_timex *txc)
2321{
2322 if (txc->modes & ADJ_ADJTIME) {
2323 /* singleshot must not be used with any other mode bits */
2324 if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
2325 return -EINVAL;
2326 if (!(txc->modes & ADJ_OFFSET_READONLY) &&
2327 !capable(CAP_SYS_TIME))
2328 return -EPERM;
2329 } else {
2330 /* In order to modify anything, you gotta be super-user! */
2331 if (txc->modes && !capable(CAP_SYS_TIME))
2332 return -EPERM;
2333 /*
2334 * if the quartz is off by more than 10% then
2335 * something is VERY wrong!
2336 */
2337 if (txc->modes & ADJ_TICK &&
2338 (txc->tick < 900000/USER_HZ ||
2339 txc->tick > 1100000/USER_HZ))
2340 return -EINVAL;
2341 }
2342
2343 if (txc->modes & ADJ_SETOFFSET) {
2344 /* In order to inject time, you gotta be super-user! */
2345 if (!capable(CAP_SYS_TIME))
2346 return -EPERM;
2347
2348 /*
		 * Validate that a timespec/timeval used to inject a time
		 * offset is well formed. Offsets can be positive or negative, so
		 * we don't check tv_sec. The value of the timeval/timespec
		 * is the sum of its fields, but *NOTE*:
2353 * The field tv_usec/tv_nsec must always be non-negative and
2354 * we can't have more nanoseconds/microseconds than a second.
2355 */
2356 if (txc->time.tv_usec < 0)
2357 return -EINVAL;
2358
2359 if (txc->modes & ADJ_NANO) {
2360 if (txc->time.tv_usec >= NSEC_PER_SEC)
2361 return -EINVAL;
2362 } else {
2363 if (txc->time.tv_usec >= USEC_PER_SEC)
2364 return -EINVAL;
2365 }
2366 }
2367
2368 /*
2369 * Check for potential multiplication overflows that can
2370 * only happen on 64-bit systems:
2371 */
2372 if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
2373 if (LLONG_MIN / PPM_SCALE > txc->freq)
2374 return -EINVAL;
2375 if (LLONG_MAX / PPM_SCALE < txc->freq)
2376 return -EINVAL;
2377 }
2378
2379 return 0;
2380}
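
/*
 * Illustrative userspace view (hypothetical values, fields as in the
 * userspace struct timex): injecting an offset of -0.5s with nanosecond
 * resolution must keep the sub-second field non-negative and below one
 * second, e.g.:
 *
 *	struct timex tx = {
 *		.modes = ADJ_SETOFFSET | ADJ_NANO,
 *		.time  = { .tv_sec = -1, .tv_usec = 500000000 },
 *	};
 *	adjtimex(&tx);
 */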
2381
2382
2383/**
2384 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
2385 */
2386int do_adjtimex(struct __kernel_timex *txc)
2387{
2388 struct timekeeper *tk = &tk_core.timekeeper;
2389 struct audit_ntp_data ad;
2390 unsigned long flags;
2391 struct timespec64 ts;
2392 s32 orig_tai, tai;
2393 int ret;
2394
2395 /* Validate the data before disabling interrupts */
2396 ret = timekeeping_validate_timex(txc);
2397 if (ret)
2398 return ret;
2399
2400 if (txc->modes & ADJ_SETOFFSET) {
2401 struct timespec64 delta;
2402 delta.tv_sec = txc->time.tv_sec;
2403 delta.tv_nsec = txc->time.tv_usec;
2404 if (!(txc->modes & ADJ_NANO))
2405 delta.tv_nsec *= 1000;
2406 ret = timekeeping_inject_offset(&delta);
2407 if (ret)
2408 return ret;
2409
2410 audit_tk_injoffset(delta);
2411 }
2412
2413 audit_ntp_init(&ad);
2414
2415 ktime_get_real_ts64(&ts);
2416
2417 raw_spin_lock_irqsave(&timekeeper_lock, flags);
2418 write_seqcount_begin(&tk_core.seq);
2419
2420 orig_tai = tai = tk->tai_offset;
2421 ret = __do_adjtimex(txc, &ts, &tai, &ad);
2422
2423 if (tai != orig_tai) {
2424 __timekeeping_set_tai_offset(tk, tai);
2425 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
2426 }
2427 tk_update_leap_state(tk);
2428
2429 write_seqcount_end(&tk_core.seq);
2430 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2431
2432 audit_ntp_log(&ad);
2433
2434 /* Update the multiplier immediately if frequency was set directly */
2435 if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
2436 timekeeping_advance(TK_ADV_FREQ);
2437
2438 if (tai != orig_tai)
2439 clock_was_set();
2440
2441 ntp_notify_cmos_timer();
2442
2443 return ret;
2444}
2445
2446#ifdef CONFIG_NTP_PPS
2447/**
2448 * hardpps() - Accessor function to NTP __hardpps function
2449 */
2450void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
2451{
2452 unsigned long flags;
2453
2454 raw_spin_lock_irqsave(&timekeeper_lock, flags);
2455 write_seqcount_begin(&tk_core.seq);
2456
2457 __hardpps(phase_ts, raw_ts);
2458
2459 write_seqcount_end(&tk_core.seq);
2460 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2461}
2462EXPORT_SYMBOL(hardpps);
2463#endif /* CONFIG_NTP_PPS */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Kernel timekeeping code and accessor functions. Based on code from
4 * timer.c, moved in commit 8524070b7982.
5 */
6#include <linux/timekeeper_internal.h>
7#include <linux/module.h>
8#include <linux/interrupt.h>
9#include <linux/percpu.h>
10#include <linux/init.h>
11#include <linux/mm.h>
12#include <linux/nmi.h>
13#include <linux/sched.h>
14#include <linux/sched/loadavg.h>
15#include <linux/sched/clock.h>
16#include <linux/syscore_ops.h>
17#include <linux/clocksource.h>
18#include <linux/jiffies.h>
19#include <linux/time.h>
20#include <linux/timex.h>
21#include <linux/tick.h>
22#include <linux/stop_machine.h>
23#include <linux/pvclock_gtod.h>
24#include <linux/compiler.h>
25#include <linux/audit.h>
26#include <linux/random.h>
27
28#include "tick-internal.h"
29#include "ntp_internal.h"
30#include "timekeeping_internal.h"
31
32#define TK_CLEAR_NTP (1 << 0)
33#define TK_CLOCK_WAS_SET (1 << 1)
34
35#define TK_UPDATE_ALL (TK_CLEAR_NTP | TK_CLOCK_WAS_SET)
36
37enum timekeeping_adv_mode {
38 /* Update timekeeper when a tick has passed */
39 TK_ADV_TICK,
40
41 /* Update timekeeper on a direct frequency change */
42 TK_ADV_FREQ
43};
44
45/*
46 * The most important data for readout fits into a single 64 byte
47 * cache line.
48 */
49struct tk_data {
50 seqcount_raw_spinlock_t seq;
51 struct timekeeper timekeeper;
52 struct timekeeper shadow_timekeeper;
53 raw_spinlock_t lock;
54} ____cacheline_aligned;
55
56static struct tk_data tk_core;
57
58/* flag for if timekeeping is suspended */
59int __read_mostly timekeeping_suspended;
60
61/**
62 * struct tk_fast - NMI safe timekeeper
63 * @seq: Sequence counter for protecting updates. The lowest bit
64 * is the index for the tk_read_base array
65 * @base: tk_read_base array. Access is indexed by the lowest bit of
66 * @seq.
67 *
68 * See @update_fast_timekeeper() below.
69 */
70struct tk_fast {
71 seqcount_latch_t seq;
72 struct tk_read_base base[2];
73};
74
75/* Suspend-time cycles value for halted fast timekeeper. */
76static u64 cycles_at_suspend;
77
78static u64 dummy_clock_read(struct clocksource *cs)
79{
80 if (timekeeping_suspended)
81 return cycles_at_suspend;
82 return local_clock();
83}
84
85static struct clocksource dummy_clock = {
86 .read = dummy_clock_read,
87};
88
89/*
90 * Boot time initialization which allows local_clock() to be utilized
91 * during early boot when clocksources are not available. local_clock()
92 * returns nanoseconds already so no conversion is required, hence mult=1
93 * and shift=0. When the first proper clocksource is installed then
94 * the fast time keepers are updated with the correct values.
95 */
96#define FAST_TK_INIT \
97 { \
98 .clock = &dummy_clock, \
99 .mask = CLOCKSOURCE_MASK(64), \
100 .mult = 1, \
101 .shift = 0, \
102 }
103
104static struct tk_fast tk_fast_mono ____cacheline_aligned = {
105 .seq = SEQCNT_LATCH_ZERO(tk_fast_mono.seq),
106 .base[0] = FAST_TK_INIT,
107 .base[1] = FAST_TK_INIT,
108};
109
110static struct tk_fast tk_fast_raw ____cacheline_aligned = {
111 .seq = SEQCNT_LATCH_ZERO(tk_fast_raw.seq),
112 .base[0] = FAST_TK_INIT,
113 .base[1] = FAST_TK_INIT,
114};
115
116unsigned long timekeeper_lock_irqsave(void)
117{
118 unsigned long flags;
119
120 raw_spin_lock_irqsave(&tk_core.lock, flags);
121 return flags;
122}
123
124void timekeeper_unlock_irqrestore(unsigned long flags)
125{
126 raw_spin_unlock_irqrestore(&tk_core.lock, flags);
127}
128
129/*
130 * Multigrain timestamps require tracking the latest fine-grained timestamp
131 * that has been issued, and never returning a coarse-grained timestamp that is
132 * earlier than that value.
133 *
134 * mg_floor represents the latest fine-grained time that has been handed out as
135 * a file timestamp on the system. This is tracked as a monotonic ktime_t, and
136 * converted to a realtime clock value on an as-needed basis.
137 *
138 * Maintaining mg_floor ensures the multigrain interfaces never issue a
139 * timestamp earlier than one that has been previously issued.
140 *
141 * The exception to this rule is when there is a backward realtime clock jump. If
142 * such an event occurs, a timestamp can appear to be earlier than a previous one.
143 */
144static __cacheline_aligned_in_smp atomic64_t mg_floor;
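/*
 * Minimal sketch (illustrative only, not the complete multigrain
 * implementation): a fine-grained timestamp could advance the floor with an
 * atomic compare-and-exchange so that it only ever moves forward:
 *
 *	ktime_t old = atomic64_read(&mg_floor);
 *	ktime_t mono = ktime_get();
 *
 *	if (ktime_after(mono, old))
 *		atomic64_try_cmpxchg(&mg_floor, &old, mono);
 */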
145
146static inline void tk_normalize_xtime(struct timekeeper *tk)
147{
148 while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
149 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
150 tk->xtime_sec++;
151 }
152 while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
153 tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
154 tk->raw_sec++;
155 }
156}
157
158static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
159{
160 struct timespec64 ts;
161
162 ts.tv_sec = tk->xtime_sec;
163 ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
164 return ts;
165}
166
167static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
168{
169 tk->xtime_sec = ts->tv_sec;
170 tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
171}
172
173static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
174{
175 tk->xtime_sec += ts->tv_sec;
176 tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
177 tk_normalize_xtime(tk);
178}
179
180static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
181{
182 struct timespec64 tmp;
183
184 /*
185 * Verify consistency of: offset_real = -wall_to_monotonic
186 * before modifying anything
187 */
188 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
189 -tk->wall_to_monotonic.tv_nsec);
190 WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
191 tk->wall_to_monotonic = wtm;
192 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
193 /* Paired with READ_ONCE() in ktime_mono_to_any() */
194 WRITE_ONCE(tk->offs_real, timespec64_to_ktime(tmp));
195 WRITE_ONCE(tk->offs_tai, ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0)));
196}
197
198static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
199{
200 /* Paired with READ_ONCE() in ktime_mono_to_any() */
201 WRITE_ONCE(tk->offs_boot, ktime_add(tk->offs_boot, delta));
202 /*
203 * Timespec representation for VDSO update to avoid 64bit division
204 * on every update.
205 */
206 tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
207}
208
209/*
210 * tk_clock_read - atomic clocksource read() helper
211 *
 * This helper is necessary in the read paths because, while the seqcount
 * ensures we don't return a bad value while structures are updated, it
 * doesn't protect against potential crashes. There is the possibility that
 * the tkr's clocksource may change between the read reference and the
 * clock reference passed to the read function. This can cause crashes if
 * the wrong clocksource is passed to the wrong read function.
 * This helper isn't needed when holding tk_core.lock or when reading
 * the fast-timekeeper tkrs (which are protected by their own locking
 * and update logic).
221 */
222static inline u64 tk_clock_read(const struct tk_read_base *tkr)
223{
224 struct clocksource *clock = READ_ONCE(tkr->clock);
225
226 return clock->read(clock);
227}
228
229/**
230 * tk_setup_internals - Set up internals to use clocksource clock.
231 *
232 * @tk: The target timekeeper to setup.
233 * @clock: Pointer to clocksource.
234 *
235 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
236 * pair and interval request.
237 *
238 * Unless you're the timekeeping code, you should not be using this!
239 */
240static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
241{
242 u64 interval;
243 u64 tmp, ntpinterval;
244 struct clocksource *old_clock;
245
246 ++tk->cs_was_changed_seq;
247 old_clock = tk->tkr_mono.clock;
248 tk->tkr_mono.clock = clock;
249 tk->tkr_mono.mask = clock->mask;
250 tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
251
252 tk->tkr_raw.clock = clock;
253 tk->tkr_raw.mask = clock->mask;
254 tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
255
256 /* Do the ns -> cycle conversion first, using original mult */
257 tmp = NTP_INTERVAL_LENGTH;
258 tmp <<= clock->shift;
259 ntpinterval = tmp;
260 tmp += clock->mult/2;
261 do_div(tmp, clock->mult);
262 if (tmp == 0)
263 tmp = 1;
264
265 interval = (u64) tmp;
266 tk->cycle_interval = interval;
267
268 /* Go back from cycles -> shifted ns */
269 tk->xtime_interval = interval * clock->mult;
270 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
271 tk->raw_interval = interval * clock->mult;
272
273 /* if changing clocks, convert xtime_nsec shift units */
274 if (old_clock) {
275 int shift_change = clock->shift - old_clock->shift;
276 if (shift_change < 0) {
277 tk->tkr_mono.xtime_nsec >>= -shift_change;
278 tk->tkr_raw.xtime_nsec >>= -shift_change;
279 } else {
280 tk->tkr_mono.xtime_nsec <<= shift_change;
281 tk->tkr_raw.xtime_nsec <<= shift_change;
282 }
283 }
284
285 tk->tkr_mono.shift = clock->shift;
286 tk->tkr_raw.shift = clock->shift;
287
288 tk->ntp_error = 0;
289 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
290 tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
291
292 /*
293 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
295 * to counteract clock drifting.
296 */
297 tk->tkr_mono.mult = clock->mult;
298 tk->tkr_raw.mult = clock->mult;
299 tk->ntp_err_mult = 0;
300 tk->skip_second_overflow = 0;
301}
302
303/* Timekeeper helper functions. */
304static noinline u64 delta_to_ns_safe(const struct tk_read_base *tkr, u64 delta)
305{
306 return mul_u64_u32_add_u64_shr(delta, tkr->mult, tkr->xtime_nsec, tkr->shift);
307}
308
309static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
310{
311 /* Calculate the delta since the last update_wall_time() */
312 u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask;
313
314 /*
315 * This detects both negative motion and the case where the delta
316 * overflows the multiplication with tkr->mult.
317 */
318 if (unlikely(delta > tkr->clock->max_cycles)) {
319 /*
320 * Handle clocksource inconsistency between CPUs to prevent
321 * time from going backwards by checking for the MSB of the
322 * mask being set in the delta.
323 */
324 if (delta & ~(mask >> 1))
325 return tkr->xtime_nsec >> tkr->shift;
326
327 return delta_to_ns_safe(tkr, delta);
328 }
329
330 return ((delta * tkr->mult) + tkr->xtime_nsec) >> tkr->shift;
331}
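
/*
 * Example with a hypothetical 8-bit wide counter for brevity: with
 * mask = 0xff, a readout that is two cycles *behind* cycle_last yields
 * delta = 0xfe. That has the MSB of the mask set (delta & ~(mask >> 1)
 * is non-zero), so the last known time (xtime_nsec >> shift) is returned
 * instead of letting time jump backwards.
 */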
332
333static __always_inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
334{
335 return timekeeping_cycles_to_ns(tkr, tk_clock_read(tkr));
336}
337
338/**
339 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
340 * @tkr: Timekeeping readout base from which we take the update
341 * @tkf: Pointer to NMI safe timekeeper
342 *
343 * We want to use this from any context including NMI and tracing /
344 * instrumenting the timekeeping code itself.
345 *
346 * Employ the latch technique; see @write_seqcount_latch.
347 *
 * So if an NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
350 * slightly wrong timestamp (a few nanoseconds). See
351 * @ktime_get_mono_fast_ns.
352 */
353static void update_fast_timekeeper(const struct tk_read_base *tkr,
354 struct tk_fast *tkf)
355{
356 struct tk_read_base *base = tkf->base;
357
358 /* Force readers off to base[1] */
359 write_seqcount_latch_begin(&tkf->seq);
360
361 /* Update base[0] */
362 memcpy(base, tkr, sizeof(*base));
363
364 /* Force readers back to base[0] */
365 write_seqcount_latch(&tkf->seq);
366
367 /* Update base[1] */
368 memcpy(base + 1, base, sizeof(*base));
369
370 write_seqcount_latch_end(&tkf->seq);
371}
372
373static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
374{
375 struct tk_read_base *tkr;
376 unsigned int seq;
377 u64 now;
378
379 do {
380 seq = read_seqcount_latch(&tkf->seq);
381 tkr = tkf->base + (seq & 0x01);
382 now = ktime_to_ns(tkr->base);
383 now += timekeeping_get_ns(tkr);
384 } while (read_seqcount_latch_retry(&tkf->seq, seq));
385
386 return now;
387}
388
389/**
390 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
391 *
392 * This timestamp is not guaranteed to be monotonic across an update.
393 * The timestamp is calculated by:
394 *
395 * now = base_mono + clock_delta * slope
396 *
397 * So if the update lowers the slope, readers who are forced to the
398 * not yet updated second array are still using the old steeper slope.
399 *
400 * tmono
401 * ^
402 * | o n
403 * | o n
404 * | u
405 * | o
406 * |o
407 * |12345678---> reader order
408 *
409 * o = old slope
410 * u = update
411 * n = new slope
412 *
413 * So reader 6 will observe time going backwards versus reader 5.
414 *
415 * While other CPUs are likely to be able to observe that, the only way
416 * for a CPU local observation is when an NMI hits in the middle of
417 * the update. Timestamps taken from that NMI context might be ahead
418 * of the following timestamps. Callers need to be aware of that and
419 * deal with it.
420 */
421u64 notrace ktime_get_mono_fast_ns(void)
422{
423 return __ktime_get_fast_ns(&tk_fast_mono);
424}
425EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
426
427/**
428 * ktime_get_raw_fast_ns - Fast NMI safe access to clock monotonic raw
429 *
430 * Contrary to ktime_get_mono_fast_ns() this is always correct because the
431 * conversion factor is not affected by NTP/PTP correction.
432 */
433u64 notrace ktime_get_raw_fast_ns(void)
434{
435 return __ktime_get_fast_ns(&tk_fast_raw);
436}
437EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
438
439/**
440 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
441 *
442 * To keep it NMI safe since we're accessing from tracing, we're not using a
443 * separate timekeeper with updates to monotonic clock and boot offset
444 * protected with seqcounts. This has the following minor side effects:
445 *
 * (1) It's possible that a timestamp is taken after the boot offset is updated
 * but before the timekeeper is updated. If this happens, the new boot offset
 * is added to the old timekeeping, making the clock appear to update slightly
449 * earlier:
450 * CPU 0 CPU 1
451 * timekeeping_inject_sleeptime64()
452 * __timekeeping_inject_sleeptime(tk, delta);
453 * timestamp();
454 * timekeeping_update_staged(tkd, TK_CLEAR_NTP...);
455 *
456 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
 * partially updated. Since tk->offs_boot updates are rare, this should be
 * an infrequent occurrence which postprocessing should be able to handle.
459 *
460 * The caveats vs. timestamp ordering as documented for ktime_get_mono_fast_ns()
461 * apply as well.
462 */
463u64 notrace ktime_get_boot_fast_ns(void)
464{
465 struct timekeeper *tk = &tk_core.timekeeper;
466
467 return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_boot)));
468}
469EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
470
471/**
472 * ktime_get_tai_fast_ns - NMI safe and fast access to tai clock.
473 *
474 * The same limitations as described for ktime_get_boot_fast_ns() apply. The
475 * mono time and the TAI offset are not read atomically which may yield wrong
 * readouts. However, an update of the TAI offset is a rare event, e.g. caused
477 * by settime or adjtimex with an offset. The user of this function has to deal
478 * with the possibility of wrong timestamps in post processing.
479 */
480u64 notrace ktime_get_tai_fast_ns(void)
481{
482 struct timekeeper *tk = &tk_core.timekeeper;
483
484 return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_tai)));
485}
486EXPORT_SYMBOL_GPL(ktime_get_tai_fast_ns);
487
488static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
489{
490 struct tk_read_base *tkr;
491 u64 basem, baser, delta;
492 unsigned int seq;
493
494 do {
495 seq = raw_read_seqcount_latch(&tkf->seq);
496 tkr = tkf->base + (seq & 0x01);
497 basem = ktime_to_ns(tkr->base);
498 baser = ktime_to_ns(tkr->base_real);
499 delta = timekeeping_get_ns(tkr);
500 } while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
501
502 if (mono)
503 *mono = basem + delta;
504 return baser + delta;
505}
506
507/**
 * ktime_get_real_fast_ns - NMI safe and fast access to clock realtime.
509 *
510 * See ktime_get_mono_fast_ns() for documentation of the time stamp ordering.
511 */
512u64 ktime_get_real_fast_ns(void)
513{
514 return __ktime_get_real_fast(&tk_fast_mono, NULL);
515}
516EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
517
518/**
 * ktime_get_fast_timestamps - NMI safe timestamps
520 * @snapshot: Pointer to timestamp storage
521 *
522 * Stores clock monotonic, boottime and realtime timestamps.
523 *
524 * Boot time is a racy access on 32bit systems if the sleep time injection
525 * happens late during resume and not in timekeeping_resume(). That could
526 * be avoided by expanding struct tk_read_base with boot offset for 32bit
 * and adding more overhead to the update. As this is a hard-to-observe,
 * once-per-resume event which can be filtered with reasonable effort using
 * the accurate mono/real timestamps, it's probably not worth the trouble.
 *
 * Aside from that, it might be possible on 32 and 64 bit to observe the
532 * following when the sleep time injection happens late:
533 *
534 * CPU 0 CPU 1
535 * timekeeping_resume()
536 * ktime_get_fast_timestamps()
537 * mono, real = __ktime_get_real_fast()
538 * inject_sleep_time()
539 * update boot offset
540 * boot = mono + bootoffset;
541 *
542 * That means that boot time already has the sleep time adjustment, but
543 * real time does not. On the next readout both are in sync again.
544 *
545 * Preventing this for 64bit is not really feasible without destroying the
546 * careful cache layout of the timekeeper because the sequence count and
547 * struct tk_read_base would then need two cache lines instead of one.
548 *
 * Access to the timekeeper clock source is disabled across the innermost
 * steps of suspend/resume. The accessors still work, but the timestamps
 * are frozen until timekeeping is resumed, which happens very early.
552 *
553 * For regular suspend/resume there is no observable difference vs. sched
554 * clock, but it might affect some of the nasty low level debug printks.
555 *
556 * OTOH, access to sched clock is not guaranteed across suspend/resume on
557 * all systems either so it depends on the hardware in use.
558 *
559 * If that turns out to be a real problem then this could be mitigated by
560 * using sched clock in a similar way as during early boot. But it's not as
561 * trivial as on early boot because it needs some careful protection
562 * against the clock monotonic timestamp jumping backwards on resume.
563 */
564void ktime_get_fast_timestamps(struct ktime_timestamps *snapshot)
565{
566 struct timekeeper *tk = &tk_core.timekeeper;
567
568 snapshot->real = __ktime_get_real_fast(&tk_fast_mono, &snapshot->mono);
569 snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot));
570}
571
572/**
573 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
574 * @tk: Timekeeper to snapshot.
575 *
576 * It generally is unsafe to access the clocksource after timekeeping has been
577 * suspended, so take a snapshot of the readout base of @tk and use it as the
578 * fast timekeeper's readout base while suspended. It will return the same
579 * number of cycles every time until timekeeping is resumed at which time the
580 * proper readout base for the fast timekeeper will be restored automatically.
581 */
582static void halt_fast_timekeeper(const struct timekeeper *tk)
583{
584 static struct tk_read_base tkr_dummy;
585 const struct tk_read_base *tkr = &tk->tkr_mono;
586
587 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
588 cycles_at_suspend = tk_clock_read(tkr);
589 tkr_dummy.clock = &dummy_clock;
590 tkr_dummy.base_real = tkr->base + tk->offs_real;
591 update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
592
593 tkr = &tk->tkr_raw;
594 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
595 tkr_dummy.clock = &dummy_clock;
596 update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
597}
598
599static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
600
601static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
602{
603 raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
604}
605
606/**
607 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
608 * @nb: Pointer to the notifier block to register
609 */
610int pvclock_gtod_register_notifier(struct notifier_block *nb)
611{
612 struct timekeeper *tk = &tk_core.timekeeper;
613 int ret;
614
615 guard(raw_spinlock_irqsave)(&tk_core.lock);
616 ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
617 update_pvclock_gtod(tk, true);
618
619 return ret;
620}
621EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
622
623/**
624 * pvclock_gtod_unregister_notifier - unregister a pvclock
625 * timedata update listener
626 * @nb: Pointer to the notifier block to unregister
627 */
628int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
629{
630 guard(raw_spinlock_irqsave)(&tk_core.lock);
631 return raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
632}
633EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
634
635/*
636 * tk_update_leap_state - helper to update the next_leap_ktime
637 */
638static inline void tk_update_leap_state(struct timekeeper *tk)
639{
640 tk->next_leap_ktime = ntp_get_next_leap();
641 if (tk->next_leap_ktime != KTIME_MAX)
642 /* Convert to monotonic time */
643 tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
644}
645
646/*
647 * Leap state update for both shadow and the real timekeeper
648 * Separate to spare a full memcpy() of the timekeeper.
649 */
650static void tk_update_leap_state_all(struct tk_data *tkd)
651{
652 write_seqcount_begin(&tkd->seq);
653 tk_update_leap_state(&tkd->shadow_timekeeper);
654 tkd->timekeeper.next_leap_ktime = tkd->shadow_timekeeper.next_leap_ktime;
655 write_seqcount_end(&tkd->seq);
656}
657
658/*
659 * Update the ktime_t based scalar nsec members of the timekeeper
660 */
661static inline void tk_update_ktime_data(struct timekeeper *tk)
662{
663 u64 seconds;
664 u32 nsec;
665
666 /*
667 * The xtime based monotonic readout is:
668 * nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
669 * The ktime based monotonic readout is:
670 * nsec = base_mono + now();
671 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
672 */
673 seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
674 nsec = (u32) tk->wall_to_monotonic.tv_nsec;
675 tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
676
677 /*
678 * The sum of the nanoseconds portions of xtime and
679 * wall_to_monotonic can be greater/equal one second. Take
680 * this into account before updating tk->ktime_sec.
681 */
682 nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
683 if (nsec >= NSEC_PER_SEC)
684 seconds++;
685 tk->ktime_sec = seconds;
686
687 /* Update the monotonic raw base */
688 tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
689}
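
/*
 * Example with made-up numbers: xtime_sec = 100 and
 * wall_to_monotonic = { .tv_sec = -40, .tv_nsec = 0 } yield
 * base_mono = 60 * NSEC_PER_SEC, so the ktime based monotonic readout
 * becomes 60 seconds plus whatever now() contributes.
 */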
690
691/*
692 * Restore the shadow timekeeper from the real timekeeper.
693 */
694static void timekeeping_restore_shadow(struct tk_data *tkd)
695{
696 lockdep_assert_held(&tkd->lock);
697 memcpy(&tkd->shadow_timekeeper, &tkd->timekeeper, sizeof(tkd->timekeeper));
698}
699
700static void timekeeping_update_from_shadow(struct tk_data *tkd, unsigned int action)
701{
702 struct timekeeper *tk = &tk_core.shadow_timekeeper;
703
704 lockdep_assert_held(&tkd->lock);
705
706 /*
707 * Block out readers before running the updates below because that
708 * updates VDSO and other time related infrastructure. Not blocking
709 * the readers might let a reader see time going backwards when
710 * reading from the VDSO after the VDSO update and then reading in
711 * the kernel from the timekeeper before that got updated.
712 */
713 write_seqcount_begin(&tkd->seq);
714
715 if (action & TK_CLEAR_NTP) {
716 tk->ntp_error = 0;
717 ntp_clear();
718 }
719
720 tk_update_leap_state(tk);
721 tk_update_ktime_data(tk);
722
723 update_vsyscall(tk);
724 update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
725
726 tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
727 update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
728 update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw);
729
730 if (action & TK_CLOCK_WAS_SET)
731 tk->clock_was_set_seq++;
732
733 /*
734 * Update the real timekeeper.
735 *
736 * We could avoid this memcpy() by switching pointers, but that has
	 * the downside that the reader side no longer benefits from
738 * the cacheline optimized data layout of the timekeeper and requires
739 * another indirection.
740 */
741 memcpy(&tkd->timekeeper, tk, sizeof(*tk));
742 write_seqcount_end(&tkd->seq);
743}
744
745/**
746 * timekeeping_forward_now - update clock to the current time
747 * @tk: Pointer to the timekeeper to update
748 *
749 * Forward the current clock to update its state since the last call to
750 * update_wall_time(). This is useful before significant clock changes,
751 * as it avoids having to deal with this time offset explicitly.
752 */
753static void timekeeping_forward_now(struct timekeeper *tk)
754{
755 u64 cycle_now, delta;
756
757 cycle_now = tk_clock_read(&tk->tkr_mono);
758 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
759 tk->tkr_mono.clock->max_raw_delta);
760 tk->tkr_mono.cycle_last = cycle_now;
761 tk->tkr_raw.cycle_last = cycle_now;
762
763 while (delta > 0) {
764 u64 max = tk->tkr_mono.clock->max_cycles;
765 u64 incr = delta < max ? delta : max;
766
767 tk->tkr_mono.xtime_nsec += incr * tk->tkr_mono.mult;
768 tk->tkr_raw.xtime_nsec += incr * tk->tkr_raw.mult;
769 tk_normalize_xtime(tk);
770 delta -= incr;
771 }
772}
773
774/**
775 * ktime_get_real_ts64 - Returns the time of day in a timespec64.
776 * @ts: pointer to the timespec to be set
777 *
778 * Returns the time of day in a timespec64 (WARN if suspended).
779 */
780void ktime_get_real_ts64(struct timespec64 *ts)
781{
782 struct timekeeper *tk = &tk_core.timekeeper;
783 unsigned int seq;
784 u64 nsecs;
785
786 WARN_ON(timekeeping_suspended);
787
788 do {
789 seq = read_seqcount_begin(&tk_core.seq);
790
791 ts->tv_sec = tk->xtime_sec;
792 nsecs = timekeeping_get_ns(&tk->tkr_mono);
793
794 } while (read_seqcount_retry(&tk_core.seq, seq));
795
796 ts->tv_nsec = 0;
797 timespec64_add_ns(ts, nsecs);
798}
799EXPORT_SYMBOL(ktime_get_real_ts64);
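
/*
 * Example usage (illustrative): log the current wall clock time from process
 * context:
 *
 *	struct timespec64 ts;
 *
 *	ktime_get_real_ts64(&ts);
 *	pr_info("now: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
 */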
800
801ktime_t ktime_get(void)
802{
803 struct timekeeper *tk = &tk_core.timekeeper;
804 unsigned int seq;
805 ktime_t base;
806 u64 nsecs;
807
808 WARN_ON(timekeeping_suspended);
809
810 do {
811 seq = read_seqcount_begin(&tk_core.seq);
812 base = tk->tkr_mono.base;
813 nsecs = timekeeping_get_ns(&tk->tkr_mono);
814
815 } while (read_seqcount_retry(&tk_core.seq, seq));
816
817 return ktime_add_ns(base, nsecs);
818}
819EXPORT_SYMBOL_GPL(ktime_get);
820
821u32 ktime_get_resolution_ns(void)
822{
823 struct timekeeper *tk = &tk_core.timekeeper;
824 unsigned int seq;
825 u32 nsecs;
826
827 WARN_ON(timekeeping_suspended);
828
829 do {
830 seq = read_seqcount_begin(&tk_core.seq);
831 nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
832 } while (read_seqcount_retry(&tk_core.seq, seq));
833
834 return nsecs;
835}
836EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
837
838static ktime_t *offsets[TK_OFFS_MAX] = {
839 [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
840 [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
841 [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
842};
843
844ktime_t ktime_get_with_offset(enum tk_offsets offs)
845{
846 struct timekeeper *tk = &tk_core.timekeeper;
847 unsigned int seq;
848 ktime_t base, *offset = offsets[offs];
849 u64 nsecs;
850
851 WARN_ON(timekeeping_suspended);
852
853 do {
854 seq = read_seqcount_begin(&tk_core.seq);
855 base = ktime_add(tk->tkr_mono.base, *offset);
856 nsecs = timekeeping_get_ns(&tk->tkr_mono);
857
858 } while (read_seqcount_retry(&tk_core.seq, seq));
859
860 return ktime_add_ns(base, nsecs);
861
862}
863EXPORT_SYMBOL_GPL(ktime_get_with_offset);
864
865ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
866{
867 struct timekeeper *tk = &tk_core.timekeeper;
868 unsigned int seq;
869 ktime_t base, *offset = offsets[offs];
870 u64 nsecs;
871
872 WARN_ON(timekeeping_suspended);
873
874 do {
875 seq = read_seqcount_begin(&tk_core.seq);
876 base = ktime_add(tk->tkr_mono.base, *offset);
877 nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
878
879 } while (read_seqcount_retry(&tk_core.seq, seq));
880
881 return ktime_add_ns(base, nsecs);
882}
883EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
884
885/**
886 * ktime_mono_to_any() - convert monotonic time to any other time
887 * @tmono: time to convert.
888 * @offs: which offset to use
889 */
890ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
891{
892 ktime_t *offset = offsets[offs];
893 unsigned int seq;
894 ktime_t tconv;
895
896 if (IS_ENABLED(CONFIG_64BIT)) {
897 /*
898 * Paired with WRITE_ONCE()s in tk_set_wall_to_mono() and
899 * tk_update_sleep_time().
900 */
901 return ktime_add(tmono, READ_ONCE(*offset));
902 }
903
904 do {
905 seq = read_seqcount_begin(&tk_core.seq);
906 tconv = ktime_add(tmono, *offset);
907 } while (read_seqcount_retry(&tk_core.seq, seq));
908
909 return tconv;
910}
911EXPORT_SYMBOL_GPL(ktime_mono_to_any);
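
/*
 * Example usage (illustrative): convert a CLOCK_MONOTONIC timestamp into the
 * corresponding CLOCK_BOOTTIME value:
 *
 *	ktime_t mono = ktime_get();
 *	ktime_t boot = ktime_mono_to_any(mono, TK_OFFS_BOOT);
 */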
912
913/**
914 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
915 */
916ktime_t ktime_get_raw(void)
917{
918 struct timekeeper *tk = &tk_core.timekeeper;
919 unsigned int seq;
920 ktime_t base;
921 u64 nsecs;
922
923 do {
924 seq = read_seqcount_begin(&tk_core.seq);
925 base = tk->tkr_raw.base;
926 nsecs = timekeeping_get_ns(&tk->tkr_raw);
927
928 } while (read_seqcount_retry(&tk_core.seq, seq));
929
930 return ktime_add_ns(base, nsecs);
931}
932EXPORT_SYMBOL_GPL(ktime_get_raw);
933
934/**
935 * ktime_get_ts64 - get the monotonic clock in timespec64 format
936 * @ts: pointer to timespec variable
937 *
938 * The function calculates the monotonic clock from the realtime
939 * clock and the wall_to_monotonic offset and stores the result
940 * in normalized timespec64 format in the variable pointed to by @ts.
941 */
942void ktime_get_ts64(struct timespec64 *ts)
943{
944 struct timekeeper *tk = &tk_core.timekeeper;
945 struct timespec64 tomono;
946 unsigned int seq;
947 u64 nsec;
948
949 WARN_ON(timekeeping_suspended);
950
951 do {
952 seq = read_seqcount_begin(&tk_core.seq);
953 ts->tv_sec = tk->xtime_sec;
954 nsec = timekeeping_get_ns(&tk->tkr_mono);
955 tomono = tk->wall_to_monotonic;
956
957 } while (read_seqcount_retry(&tk_core.seq, seq));
958
959 ts->tv_sec += tomono.tv_sec;
960 ts->tv_nsec = 0;
961 timespec64_add_ns(ts, nsec + tomono.tv_nsec);
962}
963EXPORT_SYMBOL_GPL(ktime_get_ts64);
964
965/**
966 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
967 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single
 * non-serialized read. tk->ktime_sec is of type 'unsigned long' so this
970 * works on both 32 and 64 bit systems. On 32 bit systems the readout
971 * covers ~136 years of uptime which should be enough to prevent
972 * premature wrap arounds.
973 */
974time64_t ktime_get_seconds(void)
975{
976 struct timekeeper *tk = &tk_core.timekeeper;
977
978 WARN_ON(timekeeping_suspended);
979 return tk->ktime_sec;
980}
981EXPORT_SYMBOL_GPL(ktime_get_seconds);
982
983/**
984 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
985 *
986 * Returns the wall clock seconds since 1970.
987 *
988 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
989 * 32bit systems the access must be protected with the sequence
990 * counter to provide "atomic" access to the 64bit tk->xtime_sec
991 * value.
992 */
993time64_t ktime_get_real_seconds(void)
994{
995 struct timekeeper *tk = &tk_core.timekeeper;
996 time64_t seconds;
997 unsigned int seq;
998
999 if (IS_ENABLED(CONFIG_64BIT))
1000 return tk->xtime_sec;
1001
1002 do {
1003 seq = read_seqcount_begin(&tk_core.seq);
1004 seconds = tk->xtime_sec;
1005
1006 } while (read_seqcount_retry(&tk_core.seq, seq));
1007
1008 return seconds;
1009}
1010EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
1011
1012/**
 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
 * but without the sequence counter protection. This internal function
 * is only called while the timekeeping lock is already held.
1016 */
1017noinstr time64_t __ktime_get_real_seconds(void)
1018{
1019 struct timekeeper *tk = &tk_core.timekeeper;
1020
1021 return tk->xtime_sec;
1022}
1023
1024/**
1025 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
1026 * @systime_snapshot: pointer to struct receiving the system time snapshot
1027 */
1028void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
1029{
1030 struct timekeeper *tk = &tk_core.timekeeper;
1031 unsigned int seq;
1032 ktime_t base_raw;
1033 ktime_t base_real;
1034 ktime_t base_boot;
1035 u64 nsec_raw;
1036 u64 nsec_real;
1037 u64 now;
1038
1039 WARN_ON_ONCE(timekeeping_suspended);
1040
1041 do {
1042 seq = read_seqcount_begin(&tk_core.seq);
1043 now = tk_clock_read(&tk->tkr_mono);
1044 systime_snapshot->cs_id = tk->tkr_mono.clock->id;
1045 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
1046 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
1047 base_real = ktime_add(tk->tkr_mono.base,
1048 tk_core.timekeeper.offs_real);
1049 base_boot = ktime_add(tk->tkr_mono.base,
1050 tk_core.timekeeper.offs_boot);
1051 base_raw = tk->tkr_raw.base;
1052 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
1053 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
1054 } while (read_seqcount_retry(&tk_core.seq, seq));
1055
1056 systime_snapshot->cycles = now;
1057 systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
1058 systime_snapshot->boot = ktime_add_ns(base_boot, nsec_real);
1059 systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
1060}
1061EXPORT_SYMBOL_GPL(ktime_get_snapshot);
1062
1063/* Scale base by mult/div checking for overflow */
1064static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
1065{
1066 u64 tmp, rem;
1067
1068 tmp = div64_u64_rem(*base, div, &rem);
1069
1070 if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
1071 ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
1072 return -EOVERFLOW;
1073 tmp *= mult;
1074
1075 rem = div64_u64(rem * mult, div);
1076 *base = tmp + rem;
1077 return 0;
1078}
1079
1080/**
1081 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
1082 * @history: Snapshot representing start of history
1083 * @partial_history_cycles: Cycle offset into history (fractional part)
1084 * @total_history_cycles: Total history length in cycles
1085 * @discontinuity: True indicates the clock was set during the history period
1086 * @ts: Cross timestamp that should be adjusted using
1087 * partial/total ratio
1088 *
1089 * Helper function used by get_device_system_crosststamp() to correct the
1090 * crosstimestamp corresponding to the start of the current interval to the
1091 * system counter value (timestamp point) provided by the driver. The
1092 * total_history_* quantities are the total history starting at the provided
1093 * reference point and ending at the start of the current interval. The cycle
1094 * count between the driver timestamp point and the start of the current
1095 * interval is partial_history_cycles.
1096 */
1097static int adjust_historical_crosststamp(struct system_time_snapshot *history,
1098 u64 partial_history_cycles,
1099 u64 total_history_cycles,
1100 bool discontinuity,
1101 struct system_device_crosststamp *ts)
1102{
1103 struct timekeeper *tk = &tk_core.timekeeper;
1104 u64 corr_raw, corr_real;
1105 bool interp_forward;
1106 int ret;
1107
1108 if (total_history_cycles == 0 || partial_history_cycles == 0)
1109 return 0;
1110
1111 /* Interpolate shortest distance from beginning or end of history */
1112 interp_forward = partial_history_cycles > total_history_cycles / 2;
1113 partial_history_cycles = interp_forward ?
1114 total_history_cycles - partial_history_cycles :
1115 partial_history_cycles;
1116
1117 /*
1118 * Scale the monotonic raw time delta by:
1119 * partial_history_cycles / total_history_cycles
1120 */
1121 corr_raw = (u64)ktime_to_ns(
1122 ktime_sub(ts->sys_monoraw, history->raw));
1123 ret = scale64_check_overflow(partial_history_cycles,
1124 total_history_cycles, &corr_raw);
1125 if (ret)
1126 return ret;
1127
1128 /*
1129 * If there is a discontinuity in the history, scale monotonic raw
1130 * correction by:
1131 * mult(real)/mult(raw) yielding the realtime correction
1132 * Otherwise, calculate the realtime correction similar to monotonic
1133 * raw calculation
1134 */
1135 if (discontinuity) {
1136 corr_real = mul_u64_u32_div
1137 (corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
1138 } else {
1139 corr_real = (u64)ktime_to_ns(
1140 ktime_sub(ts->sys_realtime, history->real));
1141 ret = scale64_check_overflow(partial_history_cycles,
1142 total_history_cycles, &corr_real);
1143 if (ret)
1144 return ret;
1145 }
1146
1147 /* Fixup monotonic raw and real time time values */
1148 if (interp_forward) {
1149 ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
1150 ts->sys_realtime = ktime_add_ns(history->real, corr_real);
1151 } else {
1152 ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
1153 ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
1154 }
1155
1156 return 0;
1157}
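
/*
 * Worked example, illustrative numbers only: with total_history_cycles =
 * 1000 and a device timestamp taken partial_history_cycles = 300 cycles
 * before the start of the current interval, interp_forward is false, the
 * raw/real deltas over the history are scaled by 300/1000 and subtracted
 * from the crosstimestamp taken at the end of the history. Had the
 * partial value been 800, interp_forward would be true, the ratio would
 * become (1000 - 800)/1000 = 200/1000, and the scaled corrections would
 * instead be added on top of the history snapshot.
 */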
1158
1159/*
1160 * timestamp_in_interval - true if ts is chronologically in [start, end]
1161 *
1162 * True if ts occurs chronologically at or after start, and before or at end.
1163 */
1164static bool timestamp_in_interval(u64 start, u64 end, u64 ts)
1165{
1166 if (ts >= start && ts <= end)
1167 return true;
1168 if (start > end && (ts >= start || ts <= end))
1169 return true;
1170 return false;
1171}
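
/*
 * Illustrative example, assuming 16-bit counter values for brevity: with a
 * wrapping counter, start = 0xff00 and end = 0x0100 describe an interval
 * that crosses the wrap point. ts = 0xff80 and ts = 0x0080 both satisfy
 * the second test above (start > end with ts >= start, respectively
 * ts <= end) and are therefore inside the interval, while ts = 0x8000
 * is not.
 */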
1172
1173static bool convert_clock(u64 *val, u32 numerator, u32 denominator)
1174{
1175 u64 rem, res;
1176
1177 if (!numerator || !denominator)
1178 return false;
1179
1180 res = div64_u64_rem(*val, denominator, &rem) * numerator;
1181 *val = res + div_u64(rem * numerator, denominator);
1182 return true;
1183}
1184
1185static bool convert_base_to_cs(struct system_counterval_t *scv)
1186{
1187 struct clocksource *cs = tk_core.timekeeper.tkr_mono.clock;
1188 struct clocksource_base *base;
1189 u32 num, den;
1190
1191 /* The timestamp was taken from the time keeper clock source */
1192 if (cs->id == scv->cs_id)
1193 return true;
1194
1195 /*
1196 * Check whether cs_id matches the base clock. Prevent the compiler from
1197 * re-evaluating @base as the clocksource might change concurrently.
1198 */
1199 base = READ_ONCE(cs->base);
1200 if (!base || base->id != scv->cs_id)
1201 return false;
1202
1203 num = scv->use_nsecs ? cs->freq_khz : base->numerator;
1204 den = scv->use_nsecs ? USEC_PER_SEC : base->denominator;
1205
1206 if (!convert_clock(&scv->cycles, num, den))
1207 return false;
1208
1209 scv->cycles += base->offset;
1210 return true;
1211}
1212
1213static bool convert_cs_to_base(u64 *cycles, enum clocksource_ids base_id)
1214{
1215 struct clocksource *cs = tk_core.timekeeper.tkr_mono.clock;
1216 struct clocksource_base *base;
1217
1218 /*
1219 * Check whether base_id matches the base clock. Prevent the compiler from
1220 * re-evaluating @base as the clocksource might change concurrently.
1221 */
1222 base = READ_ONCE(cs->base);
1223 if (!base || base->id != base_id)
1224 return false;
1225
1226 *cycles -= base->offset;
1227 if (!convert_clock(cycles, base->denominator, base->numerator))
1228 return false;
1229 return true;
1230}
1231
1232static bool convert_ns_to_cs(u64 *delta)
1233{
1234 struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
1235
1236 if (BITS_TO_BYTES(fls64(*delta) + tkr->shift) >= sizeof(*delta))
1237 return false;
1238
1239 *delta = div_u64((*delta << tkr->shift) - tkr->xtime_nsec, tkr->mult);
1240 return true;
1241}
1242
1243/**
1244 * ktime_real_to_base_clock() - Convert CLOCK_REALTIME timestamp to a base clock timestamp
1245 * @treal: CLOCK_REALTIME timestamp to convert
1246 * @base_id: base clocksource id
1247 * @cycles: pointer to store the converted base clock timestamp
1248 *
1249 * Converts a supplied, future realtime clock value to the corresponding base clock value.
1250 *
1251 * Return: true if the conversion is successful, false otherwise.
1252 */
1253bool ktime_real_to_base_clock(ktime_t treal, enum clocksource_ids base_id, u64 *cycles)
1254{
1255 struct timekeeper *tk = &tk_core.timekeeper;
1256 unsigned int seq;
1257 u64 delta;
1258
1259 do {
1260 seq = read_seqcount_begin(&tk_core.seq);
1261 if ((u64)treal < tk->tkr_mono.base_real)
1262 return false;
1263 delta = (u64)treal - tk->tkr_mono.base_real;
1264 if (!convert_ns_to_cs(&delta))
1265 return false;
1266 *cycles = tk->tkr_mono.cycle_last + delta;
1267 if (!convert_cs_to_base(cycles, base_id))
1268 return false;
1269 } while (read_seqcount_retry(&tk_core.seq, seq));
1270
1271 return true;
1272}
1273EXPORT_SYMBOL_GPL(ktime_real_to_base_clock);
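
/*
 * Illustrative sketch, not part of this file: a driver arming hardware
 * against a base clock could convert a future CLOCK_REALTIME deadline to
 * base clock cycles like this. The CSID_X86_ART id and the
 * program_hw_timer() helper are assumptions for illustration only.
 *
 *	ktime_t deadline = ktime_add_ms(ktime_get_real(), 10);
 *	u64 hw_cycles;
 *
 *	if (ktime_real_to_base_clock(deadline, CSID_X86_ART, &hw_cycles))
 *		program_hw_timer(hw_cycles);
 */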
1274
1275/**
1276 * get_device_system_crosststamp - Synchronously capture system/device timestamp
1277 * @get_time_fn: Callback to get simultaneous device time and
1278 * system counter from the device driver
1279 * @ctx: Context passed to get_time_fn()
1280 * @history_begin: Historical reference point used to interpolate system
1281 * time when counter provided by the driver is before the current interval
1282 * @xtstamp: Receives simultaneously captured system and device time
1283 *
1284 * Reads a timestamp from a device and correlates it to system time
1285 */
1286int get_device_system_crosststamp(int (*get_time_fn)
1287 (ktime_t *device_time,
1288 struct system_counterval_t *sys_counterval,
1289 void *ctx),
1290 void *ctx,
1291 struct system_time_snapshot *history_begin,
1292 struct system_device_crosststamp *xtstamp)
1293{
1294 struct system_counterval_t system_counterval;
1295 struct timekeeper *tk = &tk_core.timekeeper;
1296 u64 cycles, now, interval_start;
1297 unsigned int clock_was_set_seq = 0;
1298 ktime_t base_real, base_raw;
1299 u64 nsec_real, nsec_raw;
1300 u8 cs_was_changed_seq;
1301 unsigned int seq;
1302 bool do_interp;
1303 int ret;
1304
1305 do {
1306 seq = read_seqcount_begin(&tk_core.seq);
1307 /*
1308 * Try to synchronously capture device time and a system
1309 * counter value calling back into the device driver
1310 */
1311 ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
1312 if (ret)
1313 return ret;
1314
1315 /*
1316 * Verify that the clocksource ID associated with the captured
1317 * system counter value is the same as for the currently
1318 * installed timekeeper clocksource
1319 */
1320 if (system_counterval.cs_id == CSID_GENERIC ||
1321 !convert_base_to_cs(&system_counterval))
1322 return -ENODEV;
1323 cycles = system_counterval.cycles;
1324
1325 /*
1326 * Check whether the system counter value provided by the
1327 * device driver is on the current timekeeping interval.
1328 */
1329 now = tk_clock_read(&tk->tkr_mono);
1330 interval_start = tk->tkr_mono.cycle_last;
1331 if (!timestamp_in_interval(interval_start, now, cycles)) {
1332 clock_was_set_seq = tk->clock_was_set_seq;
1333 cs_was_changed_seq = tk->cs_was_changed_seq;
1334 cycles = interval_start;
1335 do_interp = true;
1336 } else {
1337 do_interp = false;
1338 }
1339
1340 base_real = ktime_add(tk->tkr_mono.base,
1341 tk_core.timekeeper.offs_real);
1342 base_raw = tk->tkr_raw.base;
1343
1344 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles);
1345 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles);
1346 } while (read_seqcount_retry(&tk_core.seq, seq));
1347
1348 xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
1349 xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);
1350
1351 /*
1352 * Interpolate if necessary, adjusting back from the start of the
1353 * current interval
1354 */
1355 if (do_interp) {
1356 u64 partial_history_cycles, total_history_cycles;
1357 bool discontinuity;
1358
1359 /*
1360 * Check that the counter value is not before the provided
1361 * history reference and that the history doesn't cross a
1362 * clocksource change
1363 */
1364 if (!history_begin ||
1365 !timestamp_in_interval(history_begin->cycles,
1366 cycles, system_counterval.cycles) ||
1367 history_begin->cs_was_changed_seq != cs_was_changed_seq)
1368 return -EINVAL;
1369 partial_history_cycles = cycles - system_counterval.cycles;
1370 total_history_cycles = cycles - history_begin->cycles;
1371 discontinuity =
1372 history_begin->clock_was_set_seq != clock_was_set_seq;
1373
1374 ret = adjust_historical_crosststamp(history_begin,
1375 partial_history_cycles,
1376 total_history_cycles,
1377 discontinuity, xtstamp);
1378 if (ret)
1379 return ret;
1380 }
1381
1382 return 0;
1383}
1384EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
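
/*
 * Illustrative sketch, not part of this file: a driver supplying the
 * get_time_fn() callback and requesting a crosstimestamp. The
 * my_hw_read_devtime()/my_hw_read_counter() accessors and the use of
 * CSID_X86_ART are assumptions for illustration only.
 *
 *	static int my_get_time_fn(ktime_t *device_time,
 *				  struct system_counterval_t *cv, void *ctx)
 *	{
 *		// Read the device time and the correlated system counter
 *		// value as close together as the hardware allows.
 *		*device_time = my_hw_read_devtime(ctx);
 *		cv->cycles = my_hw_read_counter(ctx);
 *		cv->cs_id = CSID_X86_ART;
 *		cv->use_nsecs = false;
 *		return 0;
 *	}
 *
 *	struct system_device_crosststamp xt;
 *	int err = get_device_system_crosststamp(my_get_time_fn, ctx, NULL, &xt);
 */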
1385
1386/**
1387 * timekeeping_clocksource_has_base - Check whether the current clocksource
1388 * is based on a given base clock
1389 * @id: base clocksource ID
1390 *
1391 * Note: The return value is a snapshot which can become invalid right
1392 * after the function returns.
1393 *
1394 * Return: true if the timekeeper clocksource has a base clock with @id,
1395 * false otherwise
1396 */
1397bool timekeeping_clocksource_has_base(enum clocksource_ids id)
1398{
1399 /*
1400 * This is a snapshot, so no point in using the sequence
1401 * count. Just prevent the compiler from re-evaluating @base as the
1402 * clocksource might change concurrently.
1403 */
1404 struct clocksource_base *base = READ_ONCE(tk_core.timekeeper.tkr_mono.clock->base);
1405
1406 return base ? base->id == id : false;
1407}
1408EXPORT_SYMBOL_GPL(timekeeping_clocksource_has_base);
1409
1410/**
1411 * do_settimeofday64 - Sets the time of day.
1412 * @ts: pointer to the timespec64 variable containing the new time
1413 *
1414 * Sets the time of day to the new time, then updates NTP and notifies hrtimers
1415 */
1416int do_settimeofday64(const struct timespec64 *ts)
1417{
1418 struct timespec64 ts_delta, xt;
1419
1420 if (!timespec64_valid_settod(ts))
1421 return -EINVAL;
1422
1423 scoped_guard (raw_spinlock_irqsave, &tk_core.lock) {
1424 struct timekeeper *tks = &tk_core.shadow_timekeeper;
1425
1426 timekeeping_forward_now(tks);
1427
1428 xt = tk_xtime(tks);
1429 ts_delta = timespec64_sub(*ts, xt);
1430
1431 if (timespec64_compare(&tks->wall_to_monotonic, &ts_delta) > 0) {
1432 timekeeping_restore_shadow(&tk_core);
1433 return -EINVAL;
1434 }
1435
1436 tk_set_wall_to_mono(tks, timespec64_sub(tks->wall_to_monotonic, ts_delta));
1437 tk_set_xtime(tks, ts);
1438 timekeeping_update_from_shadow(&tk_core, TK_UPDATE_ALL);
1439 }
1440
1441 /* Signal hrtimers about time change */
1442 clock_was_set(CLOCK_SET_WALL);
1443
1444 audit_tk_injoffset(ts_delta);
1445 add_device_randomness(ts, sizeof(*ts));
1446 return 0;
1447}
1448EXPORT_SYMBOL(do_settimeofday64);
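
/*
 * Illustrative sketch, not part of this file: setting the wall clock from
 * kernel code, e.g. after reading an RTC. The new_sec value is assumed to
 * come from elsewhere.
 *
 *	struct timespec64 ts = { .tv_sec = new_sec, .tv_nsec = 0 };
 *
 *	if (do_settimeofday64(&ts))
 *		pr_warn("setting the time of day failed\n");
 */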
1449
1450/**
1451 * timekeeping_inject_offset - Adds or subtracts from the current time.
1452 * @ts: Pointer to the timespec variable containing the offset
1453 *
1454 * Adds or subtracts an offset value from the current time.
1455 */
1456static int timekeeping_inject_offset(const struct timespec64 *ts)
1457{
1458 if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
1459 return -EINVAL;
1460
1461 scoped_guard (raw_spinlock_irqsave, &tk_core.lock) {
1462 struct timekeeper *tks = &tk_core.shadow_timekeeper;
1463 struct timespec64 tmp;
1464
1465 timekeeping_forward_now(tks);
1466
1467 /* Make sure the proposed value is valid */
1468 tmp = timespec64_add(tk_xtime(tks), *ts);
1469 if (timespec64_compare(&tks->wall_to_monotonic, ts) > 0 ||
1470 !timespec64_valid_settod(&tmp)) {
1471 timekeeping_restore_shadow(&tk_core);
1472 return -EINVAL;
1473 }
1474
1475 tk_xtime_add(tks, ts);
1476 tk_set_wall_to_mono(tks, timespec64_sub(tks->wall_to_monotonic, *ts));
1477 timekeeping_update_from_shadow(&tk_core, TK_UPDATE_ALL);
1478 }
1479
1480 /* Signal hrtimers about time change */
1481 clock_was_set(CLOCK_SET_WALL);
1482 return 0;
1483}
1484
1485/*
1486 * Indicates if there is an offset between the system clock and the hardware
1487 * clock/persistent clock/rtc.
1488 */
1489int persistent_clock_is_local;
1490
1491/*
1492 * Adjust the time obtained from the CMOS to be UTC time instead of
1493 * local time.
1494 *
1495 * This is ugly, but preferable to the alternatives. Otherwise we
1496 * would either need to write a program to do it in /etc/rc (and risk
1497 * confusion if the program gets run more than once; it would also be
1498 * hard to make the program warp the clock precisely n hours) or
1499 * compile in the timezone information into the kernel. Bad, bad....
1500 *
1501 * - TYT, 1992-01-01
1502 *
1503 * The best thing to do is to keep the CMOS clock in universal time (UTC)
1504 * as real UNIX machines always do it. This avoids all headaches about
1505 * daylight saving times and warping kernel clocks.
1506 */
1507void timekeeping_warp_clock(void)
1508{
1509 if (sys_tz.tz_minuteswest != 0) {
1510 struct timespec64 adjust;
1511
1512 persistent_clock_is_local = 1;
1513 adjust.tv_sec = sys_tz.tz_minuteswest * 60;
1514 adjust.tv_nsec = 0;
1515 timekeeping_inject_offset(&adjust);
1516 }
1517}
1518
1519/*
1520 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
1521 */
1522static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
1523{
1524 tk->tai_offset = tai_offset;
1525 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
1526}
1527
1528/*
1529 * change_clocksource - Swaps clocksources if a new one is available
1530 *
1531 * Accumulates current time interval and initializes new clocksource
1532 */
1533static int change_clocksource(void *data)
1534{
1535 struct clocksource *new = data, *old = NULL;
1536
1537 /*
1538 * If the clocksource is in a module, get a module reference.
1539 * Succeeds for built-in code (owner == NULL) as well. Abort if the
1540 * reference can't be acquired.
1541 */
1542 if (!try_module_get(new->owner))
1543 return 0;
1544
1545 /* Abort if the device can't be enabled */
1546 if (new->enable && new->enable(new) != 0) {
1547 module_put(new->owner);
1548 return 0;
1549 }
1550
1551 scoped_guard (raw_spinlock_irqsave, &tk_core.lock) {
1552 struct timekeeper *tks = &tk_core.shadow_timekeeper;
1553
1554 timekeeping_forward_now(tks);
1555 old = tks->tkr_mono.clock;
1556 tk_setup_internals(tks, new);
1557 timekeeping_update_from_shadow(&tk_core, TK_UPDATE_ALL);
1558 }
1559
1560 if (old) {
1561 if (old->disable)
1562 old->disable(old);
1563 module_put(old->owner);
1564 }
1565
1566 return 0;
1567}
1568
1569/**
1570 * timekeeping_notify - Install a new clock source
1571 * @clock: pointer to the clock source
1572 *
1573 * This function is called from clocksource.c after a new, better clock
1574 * source has been registered. The caller holds the clocksource_mutex.
1575 */
1576int timekeeping_notify(struct clocksource *clock)
1577{
1578 struct timekeeper *tk = &tk_core.timekeeper;
1579
1580 if (tk->tkr_mono.clock == clock)
1581 return 0;
1582 stop_machine(change_clocksource, clock, NULL);
1583 tick_clock_notify();
1584 return tk->tkr_mono.clock == clock ? 0 : -1;
1585}
1586
1587/**
1588 * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec
1589 * @ts: pointer to the timespec64 to be set
1590 *
1591 * Returns the raw monotonic time (completely un-modified by ntp)
1592 */
1593void ktime_get_raw_ts64(struct timespec64 *ts)
1594{
1595 struct timekeeper *tk = &tk_core.timekeeper;
1596 unsigned int seq;
1597 u64 nsecs;
1598
1599 do {
1600 seq = read_seqcount_begin(&tk_core.seq);
1601 ts->tv_sec = tk->raw_sec;
1602 nsecs = timekeeping_get_ns(&tk->tkr_raw);
1603
1604 } while (read_seqcount_retry(&tk_core.seq, seq));
1605
1606 ts->tv_nsec = 0;
1607 timespec64_add_ns(ts, nsecs);
1608}
1609EXPORT_SYMBOL(ktime_get_raw_ts64);
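
/*
 * Illustrative sketch, not part of this file: measuring an interval with
 * the raw monotonic clock, which is never adjusted by NTP frequency
 * corrections. do_something() stands in for the measured work.
 *
 *	struct timespec64 t1, t2, delta;
 *
 *	ktime_get_raw_ts64(&t1);
 *	do_something();
 *	ktime_get_raw_ts64(&t2);
 *	delta = timespec64_sub(t2, t1);
 */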
1610
1611
1612/**
1613 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
1614 */
1615int timekeeping_valid_for_hres(void)
1616{
1617 struct timekeeper *tk = &tk_core.timekeeper;
1618 unsigned int seq;
1619 int ret;
1620
1621 do {
1622 seq = read_seqcount_begin(&tk_core.seq);
1623
1624 ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
1625
1626 } while (read_seqcount_retry(&tk_core.seq, seq));
1627
1628 return ret;
1629}
1630
1631/**
1632 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
1633 */
1634u64 timekeeping_max_deferment(void)
1635{
1636 struct timekeeper *tk = &tk_core.timekeeper;
1637 unsigned int seq;
1638 u64 ret;
1639
1640 do {
1641 seq = read_seqcount_begin(&tk_core.seq);
1642
1643 ret = tk->tkr_mono.clock->max_idle_ns;
1644
1645 } while (read_seqcount_retry(&tk_core.seq, seq));
1646
1647 return ret;
1648}
1649
1650/**
1651 * read_persistent_clock64 - Return time from the persistent clock.
1652 * @ts: Pointer to the storage for the readout value
1653 *
1654 * Weak dummy function for arches that do not yet support it.
1655 * Reads the time from the battery backed persistent clock.
1656 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
1657 *
1658 * XXX - Do be sure to remove it once all arches implement it.
1659 */
1660void __weak read_persistent_clock64(struct timespec64 *ts)
1661{
1662 ts->tv_sec = 0;
1663 ts->tv_nsec = 0;
1664}
1665
1666/**
1667 * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
1668 * from the boot.
1669 * @wall_time: current time as returned by persistent clock
1670 * @boot_offset: offset that is defined as wall_time - boot_time
1671 *
1672 * Weak dummy function for arches that do not yet support it.
1673 *
1674 * The default function calculates offset based on the current value of
1675 * local_clock(). This way architectures that support sched_clock() but don't
1676 * support dedicated boot time clock will provide the best estimate of the
1677 * boot time.
1678 */
1679void __weak __init
1680read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
1681 struct timespec64 *boot_offset)
1682{
1683 read_persistent_clock64(wall_time);
1684 *boot_offset = ns_to_timespec64(local_clock());
1685}
1686
1687static __init void tkd_basic_setup(struct tk_data *tkd)
1688{
1689 raw_spin_lock_init(&tkd->lock);
1690 seqcount_raw_spinlock_init(&tkd->seq, &tkd->lock);
1691}
1692
1693/*
1694 * Flag reflecting whether timekeeping_resume() has injected sleeptime.
1695 *
1696 * The flag starts off false and is only set once a suspend reaches
1697 * timekeeping_suspend(). timekeeping_resume() sets it back to false when
1698 * the timekeeper clocksource did not stop across suspend and has been
1699 * used to update the sleep time. If the timekeeper clocksource stopped,
1700 * the flag stays true and is used by the RTC resume code to decide
1701 * whether sleep time must be injected; if so, the flag is cleared there.
1702 *
1703 * If a suspend fails before reaching timekeeping_resume() then the flag
1704 * stays false and prevents erroneous sleeptime injection.
1705 */
1706static bool suspend_timing_needed;
1707
1708/* Flag for if there is a persistent clock on this platform */
1709static bool persistent_clock_exists;
1710
1711/*
1712 * timekeeping_init - Initializes the clocksource and common timekeeping values
1713 */
1714void __init timekeeping_init(void)
1715{
1716 struct timespec64 wall_time, boot_offset, wall_to_mono;
1717 struct timekeeper *tks = &tk_core.shadow_timekeeper;
1718 struct clocksource *clock;
1719
1720 tkd_basic_setup(&tk_core);
1721
1722 read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
1723 if (timespec64_valid_settod(&wall_time) &&
1724 timespec64_to_ns(&wall_time) > 0) {
1725 persistent_clock_exists = true;
1726 } else if (timespec64_to_ns(&wall_time) != 0) {
1727 pr_warn("Persistent clock returned invalid value");
1728 wall_time = (struct timespec64){0};
1729 }
1730
1731 if (timespec64_compare(&wall_time, &boot_offset) < 0)
1732 boot_offset = (struct timespec64){0};
1733
1734 /*
1735 * We want to set wall_to_mono, so the following is true:
1736 * wall time + wall_to_mono = boot time
1737 */
1738 wall_to_mono = timespec64_sub(boot_offset, wall_time);
1739
1740 guard(raw_spinlock_irqsave)(&tk_core.lock);
1741
1742 ntp_init();
1743
1744 clock = clocksource_default_clock();
1745 if (clock->enable)
1746 clock->enable(clock);
1747 tk_setup_internals(tks, clock);
1748
1749 tk_set_xtime(tks, &wall_time);
1750 tks->raw_sec = 0;
1751
1752 tk_set_wall_to_mono(tks, wall_to_mono);
1753
1754 timekeeping_update_from_shadow(&tk_core, TK_CLOCK_WAS_SET);
1755}
1756
1757/* time in seconds when suspend began for persistent clock */
1758static struct timespec64 timekeeping_suspend_time;
1759
1760/**
1761 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1762 * @tk: Pointer to the timekeeper to be updated
1763 * @delta: Pointer to the delta value in timespec64 format
1764 *
1765 * Takes a timespec offset measuring a suspend interval and properly
1766 * adds the sleep offset to the timekeeping variables.
1767 */
1768static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1769 const struct timespec64 *delta)
1770{
1771 if (!timespec64_valid_strict(delta)) {
1772 printk_deferred(KERN_WARNING
1773 "__timekeeping_inject_sleeptime: Invalid "
1774 "sleep delta value!\n");
1775 return;
1776 }
1777 tk_xtime_add(tk, delta);
1778 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1779 tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1780 tk_debug_account_sleep_time(delta);
1781}
1782
1783#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
1784/*
1785 * We have three kinds of time sources to use for sleep time
1786 * injection, the preference order is:
1787 * 1) non-stop clocksource
1788 * 2) persistent clock (ie: RTC accessible when irqs are off)
1789 * 3) RTC
1790 *
1791 * 1) and 2) are used by timekeeping, 3) by RTC subsystem.
1792 * If the system has neither 1) nor 2), 3) will be used as the fallback.
1793 *
1794 *
1795 * If timekeeping has injected sleeptime via either 1) or 2),
1796 * 3) becomes needless, so in this case we don't need to call
1797 * rtc_resume(), and this is what timekeeping_rtc_skipresume()
1798 * means.
1799 */
1800bool timekeeping_rtc_skipresume(void)
1801{
1802 return !suspend_timing_needed;
1803}
1804
1805/*
1806 * Whether 1) can be used is only known once timekeeping_resume() runs,
1807 * which is invoked after rtc_suspend(), so rtc_suspend() cannot safely
1808 * be skipped when the system has 1).
1809 *
1810 * But if the system has 2), 2) will definitely be used, so in that
1811 * case rtc_suspend() does not need to be called, and that is what
1812 * timekeeping_rtc_skipsuspend() means.
1813 */
1814bool timekeeping_rtc_skipsuspend(void)
1815{
1816 return persistent_clock_exists;
1817}
1818
1819/**
1820 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1821 * @delta: pointer to a timespec64 delta value
1822 *
1823 * This hook is for architectures that cannot support read_persistent_clock64
1824 * because their RTC/persistent clock is only accessible when irqs are enabled,
1825 * and which also don't have an effective nonstop clocksource.
1826 *
1827 * This function should only be called by rtc_resume(), and allows
1828 * a suspend offset to be injected into the timekeeping values.
1829 */
1830void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
1831{
1832 scoped_guard(raw_spinlock_irqsave, &tk_core.lock) {
1833 struct timekeeper *tks = &tk_core.shadow_timekeeper;
1834
1835 suspend_timing_needed = false;
1836 timekeeping_forward_now(tks);
1837 __timekeeping_inject_sleeptime(tks, delta);
1838 timekeeping_update_from_shadow(&tk_core, TK_UPDATE_ALL);
1839 }
1840
1841 /* Signal hrtimers about time change */
1842 clock_was_set(CLOCK_SET_WALL | CLOCK_SET_BOOT);
1843}
1844#endif
1845
1846/**
1847 * timekeeping_resume - Resumes the generic timekeeping subsystem.
1848 */
1849void timekeeping_resume(void)
1850{
1851 struct timekeeper *tks = &tk_core.shadow_timekeeper;
1852 struct clocksource *clock = tks->tkr_mono.clock;
1853 struct timespec64 ts_new, ts_delta;
1854 bool inject_sleeptime = false;
1855 u64 cycle_now, nsec;
1856 unsigned long flags;
1857
1858 read_persistent_clock64(&ts_new);
1859
1860 clockevents_resume();
1861 clocksource_resume();
1862
1863 raw_spin_lock_irqsave(&tk_core.lock, flags);
1864
1865 /*
1866 * After system resumes, we need to calculate the suspended time and
1867 * compensate it for the OS time. There are 3 sources that could be
1868 * used: Nonstop clocksource during suspend, persistent clock and rtc
1869 * device.
1870 *
1871 * One specific platform may have 1 or 2 or all of them, and the
1872 * preference will be:
1873 * suspend-nonstop clocksource -> persistent clock -> rtc
1874 * The less preferred source will only be tried if there is no better
1875 * usable source. The rtc part is handled separately in rtc core code.
1876 */
1877 cycle_now = tk_clock_read(&tks->tkr_mono);
1878 nsec = clocksource_stop_suspend_timing(clock, cycle_now);
1879 if (nsec > 0) {
1880 ts_delta = ns_to_timespec64(nsec);
1881 inject_sleeptime = true;
1882 } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1883 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1884 inject_sleeptime = true;
1885 }
1886
1887 if (inject_sleeptime) {
1888 suspend_timing_needed = false;
1889 __timekeeping_inject_sleeptime(tks, &ts_delta);
1890 }
1891
1892 /* Re-base the last cycle value */
1893 tks->tkr_mono.cycle_last = cycle_now;
1894 tks->tkr_raw.cycle_last = cycle_now;
1895
1896 tks->ntp_error = 0;
1897 timekeeping_suspended = 0;
1898 timekeeping_update_from_shadow(&tk_core, TK_CLOCK_WAS_SET);
1899 raw_spin_unlock_irqrestore(&tk_core.lock, flags);
1900
1901 touch_softlockup_watchdog();
1902
1903 /* Resume the clockevent device(s) and hrtimers */
1904 tick_resume();
1905 /* Notify timerfd as resume is equivalent to clock_was_set() */
1906 timerfd_resume();
1907}
1908
1909int timekeeping_suspend(void)
1910{
1911 struct timekeeper *tks = &tk_core.shadow_timekeeper;
1912 struct timespec64 delta, delta_delta;
1913 static struct timespec64 old_delta;
1914 struct clocksource *curr_clock;
1915 unsigned long flags;
1916 u64 cycle_now;
1917
1918 read_persistent_clock64(&timekeeping_suspend_time);
1919
1920 /*
1921 * On some systems the persistent_clock can not be detected at
1922 * timekeeping_init by its return value, so if we see a valid
1923 * value returned, update the persistent_clock_exists flag.
1924 */
1925 if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1926 persistent_clock_exists = true;
1927
1928 suspend_timing_needed = true;
1929
1930 raw_spin_lock_irqsave(&tk_core.lock, flags);
1931 timekeeping_forward_now(tks);
1932 timekeeping_suspended = 1;
1933
1934 /*
1935 * Since we've called forward_now, cycle_last stores the value
1936 * just read from the current clocksource. Save this to potentially
1937 * use in suspend timing.
1938 */
1939 curr_clock = tks->tkr_mono.clock;
1940 cycle_now = tks->tkr_mono.cycle_last;
1941 clocksource_start_suspend_timing(curr_clock, cycle_now);
1942
1943 if (persistent_clock_exists) {
1944 /*
1945 * To avoid drift caused by repeated suspend/resumes,
1946 * which each can add ~1 second drift error,
1947 * try to compensate so the difference in system time
1948 * and persistent_clock time stays close to constant.
1949 */
1950 delta = timespec64_sub(tk_xtime(tks), timekeeping_suspend_time);
1951 delta_delta = timespec64_sub(delta, old_delta);
1952 if (abs(delta_delta.tv_sec) >= 2) {
1953 /*
1954 * if delta_delta is too large, assume time correction
1955 * has occurred and set old_delta to the current delta.
1956 */
1957 old_delta = delta;
1958 } else {
1959 /* Otherwise try to adjust old_system to compensate */
1960 timekeeping_suspend_time =
1961 timespec64_add(timekeeping_suspend_time, delta_delta);
1962 }
1963 }
1964
1965 timekeeping_update_from_shadow(&tk_core, 0);
1966 halt_fast_timekeeper(tks);
1967 raw_spin_unlock_irqrestore(&tk_core.lock, flags);
1968
1969 tick_suspend();
1970 clocksource_suspend();
1971 clockevents_suspend();
1972
1973 return 0;
1974}
1975
1976/* sysfs resume/suspend bits for timekeeping */
1977static struct syscore_ops timekeeping_syscore_ops = {
1978 .resume = timekeeping_resume,
1979 .suspend = timekeeping_suspend,
1980};
1981
1982static int __init timekeeping_init_ops(void)
1983{
1984 register_syscore_ops(&timekeeping_syscore_ops);
1985 return 0;
1986}
1987device_initcall(timekeeping_init_ops);
1988
1989/*
1990 * Apply a multiplier adjustment to the timekeeper
1991 */
1992static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1993 s64 offset,
1994 s32 mult_adj)
1995{
1996 s64 interval = tk->cycle_interval;
1997
1998 if (mult_adj == 0) {
1999 return;
2000 } else if (mult_adj == -1) {
2001 interval = -interval;
2002 offset = -offset;
2003 } else if (mult_adj != 1) {
2004 interval *= mult_adj;
2005 offset *= mult_adj;
2006 }
2007
2008 /*
2009 * So the following can be confusing.
2010 *
2011 * To keep things simple, let's assume mult_adj == 1 for now.
2012 *
2013 * When mult_adj != 1, remember that the interval and offset values
2014 * have been appropriately scaled so the math is the same.
2015 *
2016 * The basic idea here is that we're increasing the multiplier
2017 * by one, this causes the xtime_interval to be incremented by
2018 * one cycle_interval. This is because:
2019 * xtime_interval = cycle_interval * mult
2020 * So if mult is being incremented by one:
2021 * xtime_interval = cycle_interval * (mult + 1)
2022 * It's the same as:
2023 * xtime_interval = (cycle_interval * mult) + cycle_interval
2024 * Which can be shortened to:
2025 * xtime_interval += cycle_interval
2026 *
2027 * So offset stores the non-accumulated cycles. Thus the current
2028 * time (in shifted nanoseconds) is:
2029 * now = (offset * adj) + xtime_nsec
2030 * Now, even though we're adjusting the clock frequency, we have
2031 * to keep time consistent. In other words, we can't jump back
2032 * in time, and we also want to avoid jumping forward in time.
2033 *
2034 * So given the same offset value, we need the time to be the same
2035 * both before and after the freq adjustment.
2036 * now = (offset * adj_1) + xtime_nsec_1
2037 * now = (offset * adj_2) + xtime_nsec_2
2038 * So:
2039 * (offset * adj_1) + xtime_nsec_1 =
2040 * (offset * adj_2) + xtime_nsec_2
2041 * And we know:
2042 * adj_2 = adj_1 + 1
2043 * So:
2044 * (offset * adj_1) + xtime_nsec_1 =
2045 * (offset * (adj_1+1)) + xtime_nsec_2
2046 * (offset * adj_1) + xtime_nsec_1 =
2047 * (offset * adj_1) + offset + xtime_nsec_2
2048 * Canceling the sides:
2049 * xtime_nsec_1 = offset + xtime_nsec_2
2050 * Which gives us:
2051 * xtime_nsec_2 = xtime_nsec_1 - offset
2052 * Which simplifies to:
2053 * xtime_nsec -= offset
2054 */
2055 if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
2056 /* NTP adjustment caused clocksource mult overflow */
2057 WARN_ON_ONCE(1);
2058 return;
2059 }
2060
2061 tk->tkr_mono.mult += mult_adj;
2062 tk->xtime_interval += interval;
2063 tk->tkr_mono.xtime_nsec -= offset;
2064}
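
/*
 * Worked example, illustrative numbers only: with offset = 1000 unconsumed
 * cycles and mult_adj = +1, the (shifted) readout before the adjustment is
 *	now = 1000 * mult + xtime_nsec
 * and afterwards
 *	now = 1000 * (mult + 1) + (xtime_nsec - 1000)
 * which is the same value, i.e. the frequency change neither jumps time
 * backwards nor forwards for the cycles that have elapsed but have not
 * been accumulated yet.
 */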
2065
2066/*
2067 * Adjust the timekeeper's multiplier to the correct frequency
2068 * and also to reduce the accumulated error value.
2069 */
2070static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
2071{
2072 u64 ntp_tl = ntp_tick_length();
2073 u32 mult;
2074
2075 /*
2076 * Determine the multiplier from the current NTP tick length.
2077 * Avoid expensive division when the tick length doesn't change.
2078 */
2079 if (likely(tk->ntp_tick == ntp_tl)) {
2080 mult = tk->tkr_mono.mult - tk->ntp_err_mult;
2081 } else {
2082 tk->ntp_tick = ntp_tl;
2083 mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
2084 tk->xtime_remainder, tk->cycle_interval);
2085 }
2086
2087 /*
2088 * If the clock is behind the NTP time, increase the multiplier by 1
2089 * to catch up with it. If it's ahead and there was a remainder in the
2090 * tick division, the clock will slow down. Otherwise it will stay
2091 * ahead until the tick length changes to a non-divisible value.
2092 */
2093 tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
2094 mult += tk->ntp_err_mult;
2095
2096 timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);
2097
2098 if (unlikely(tk->tkr_mono.clock->maxadj &&
2099 (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
2100 > tk->tkr_mono.clock->maxadj))) {
2101 printk_once(KERN_WARNING
2102 "Adjusting %s more than 11%% (%ld vs %ld)\n",
2103 tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
2104 (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
2105 }
2106
2107 /*
2108 * It may be possible that when we entered this function, xtime_nsec
2109 * was very small. Further, if we're slightly speeding the clocksource
2110 * in the code above, it's possible the required corrective factor to
2111 * xtime_nsec could cause it to underflow.
2112 *
2113 * Now, since we have already accumulated the second and the NTP
2114 * subsystem has been notified via second_overflow(), we need to skip
2115 * the next update.
2116 */
2117 if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
2118 tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
2119 tk->tkr_mono.shift;
2120 tk->xtime_sec--;
2121 tk->skip_second_overflow = 1;
2122 }
2123}
2124
2125/*
2126 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
2127 *
2128 * Helper function that accumulates the nsecs greater than a second
2129 * from the xtime_nsec field to the xtime_secs field.
2130 * It also calls into the NTP code to handle leapsecond processing.
2131 */
2132static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
2133{
2134 u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
2135 unsigned int clock_set = 0;
2136
2137 while (tk->tkr_mono.xtime_nsec >= nsecps) {
2138 int leap;
2139
2140 tk->tkr_mono.xtime_nsec -= nsecps;
2141 tk->xtime_sec++;
2142
2143 /*
2144 * Skip NTP update if this second was accumulated before,
2145 * i.e. xtime_nsec underflowed in timekeeping_adjust()
2146 */
2147 if (unlikely(tk->skip_second_overflow)) {
2148 tk->skip_second_overflow = 0;
2149 continue;
2150 }
2151
2152 /* Figure out if it's a leap second and apply it if needed */
2153 leap = second_overflow(tk->xtime_sec);
2154 if (unlikely(leap)) {
2155 struct timespec64 ts;
2156
2157 tk->xtime_sec += leap;
2158
2159 ts.tv_sec = leap;
2160 ts.tv_nsec = 0;
2161 tk_set_wall_to_mono(tk,
2162 timespec64_sub(tk->wall_to_monotonic, ts));
2163
2164 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
2165
2166 clock_set = TK_CLOCK_WAS_SET;
2167 }
2168 }
2169 return clock_set;
2170}
2171
2172/*
2173 * logarithmic_accumulation - shifted accumulation of cycles
2174 *
2175 * This function accumulates a shifted interval of cycles into
2176 * a shifted interval of nanoseconds, allowing for an O(log) accumulation
2177 * loop.
2178 *
2179 * Returns the unconsumed cycles.
2180 */
2181static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
2182 u32 shift, unsigned int *clock_set)
2183{
2184 u64 interval = tk->cycle_interval << shift;
2185 u64 snsec_per_sec;
2186
2187 /* If the offset is smaller than a shifted interval, do nothing */
2188 if (offset < interval)
2189 return offset;
2190
2191 /* Accumulate one shifted interval */
2192 offset -= interval;
2193 tk->tkr_mono.cycle_last += interval;
2194 tk->tkr_raw.cycle_last += interval;
2195
2196 tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
2197 *clock_set |= accumulate_nsecs_to_secs(tk);
2198
2199 /* Accumulate raw time */
2200 tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
2201 snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
2202 while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
2203 tk->tkr_raw.xtime_nsec -= snsec_per_sec;
2204 tk->raw_sec++;
2205 }
2206
2207 /* Accumulate error between NTP and clock interval */
2208 tk->ntp_error += tk->ntp_tick << shift;
2209 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
2210 (tk->ntp_error_shift + shift);
2211
2212 return offset;
2213}
2214
2215/*
2216 * timekeeping_advance - Updates the timekeeper to the current time and
2217 * current NTP tick length
2218 */
2219static bool timekeeping_advance(enum timekeeping_adv_mode mode)
2220{
2221 struct timekeeper *tk = &tk_core.shadow_timekeeper;
2222 struct timekeeper *real_tk = &tk_core.timekeeper;
2223 unsigned int clock_set = 0;
2224 int shift = 0, maxshift;
2225 u64 offset;
2226
2227 guard(raw_spinlock_irqsave)(&tk_core.lock);
2228
2229 /* Make sure we're fully resumed: */
2230 if (unlikely(timekeeping_suspended))
2231 return false;
2232
2233 offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
2234 tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
2235 tk->tkr_mono.clock->max_raw_delta);
2236
2237 /* Check if there's really nothing to do */
2238 if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
2239 return false;
2240
2241 /*
2242 * With NO_HZ we may have to accumulate many cycle_intervals
2243 * (think "ticks") worth of time at once. To do this efficiently,
2244 * we calculate the largest doubling multiple of cycle_intervals
2245 * that is smaller than the offset. We then accumulate that
2246 * chunk in one go, and then try to consume the next smaller
2247 * doubled multiple.
2248 */
2249 shift = ilog2(offset) - ilog2(tk->cycle_interval);
2250 shift = max(0, shift);
2251 /* Bound shift to one less than what overflows tick_length */
2252 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
2253 shift = min(shift, maxshift);
2254 while (offset >= tk->cycle_interval) {
2255 offset = logarithmic_accumulation(tk, offset, shift, &clock_set);
2256 if (offset < tk->cycle_interval<<shift)
2257 shift--;
2258 }
2259
2260 /* Adjust the multiplier to correct NTP error */
2261 timekeeping_adjust(tk, offset);
2262
2263 /*
2264 * Finally, make sure that after the rounding
2265 * xtime_nsec isn't larger than NSEC_PER_SEC
2266 */
2267 clock_set |= accumulate_nsecs_to_secs(tk);
2268
2269 timekeeping_update_from_shadow(&tk_core, clock_set);
2270
2271 return !!clock_set;
2272}
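
/*
 * Worked example, illustrative numbers only: if NO_HZ left the clock idle
 * for roughly 1000 cycle_intervals, then
 *	shift = ilog2(offset) - ilog2(cycle_interval)
 * is about 9 (capped by maxshift), so the loop above first accumulates a
 * chunk of 2^9 = 512 intervals, then 256, 128, ... down to single
 * intervals: O(log(idle time)) iterations instead of ~1000 per-tick
 * accumulations.
 */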
2273
2274/**
2275 * update_wall_time - Uses the current clocksource to increment the wall time
2276 *
2277 */
2278void update_wall_time(void)
2279{
2280 if (timekeeping_advance(TK_ADV_TICK))
2281 clock_was_set_delayed();
2282}
2283
2284/**
2285 * getboottime64 - Return the real time of system boot.
2286 * @ts: pointer to the timespec64 to be set
2287 *
2288 * Returns the wall-time of boot in a timespec64.
2289 *
2290 * This is based on the wall_to_monotonic offset and the total suspend
2291 * time. Calls to settimeofday will affect the value returned (which
2292 * basically means that however wrong your real time clock is at boot time,
2293 * you get the right time here).
2294 */
2295void getboottime64(struct timespec64 *ts)
2296{
2297 struct timekeeper *tk = &tk_core.timekeeper;
2298 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2299
2300 *ts = ktime_to_timespec64(t);
2301}
2302EXPORT_SYMBOL_GPL(getboottime64);
2303
2304void ktime_get_coarse_real_ts64(struct timespec64 *ts)
2305{
2306 struct timekeeper *tk = &tk_core.timekeeper;
2307 unsigned int seq;
2308
2309 do {
2310 seq = read_seqcount_begin(&tk_core.seq);
2311
2312 *ts = tk_xtime(tk);
2313 } while (read_seqcount_retry(&tk_core.seq, seq));
2314}
2315EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
2316
2317/**
2318 * ktime_get_coarse_real_ts64_mg - return latter of coarse grained time or floor
2319 * @ts: timespec64 to be filled
2320 *
2321 * Fetch the global mg_floor value, convert it to realtime and compare it
2322 * to the current coarse-grained time. Fill @ts with whichever is
2323 * latest. Note that this is a filesystem-specific interface and should be
2324 * avoided outside of that context.
2325 */
2326void ktime_get_coarse_real_ts64_mg(struct timespec64 *ts)
2327{
2328 struct timekeeper *tk = &tk_core.timekeeper;
2329 u64 floor = atomic64_read(&mg_floor);
2330 ktime_t f_real, offset, coarse;
2331 unsigned int seq;
2332
2333 do {
2334 seq = read_seqcount_begin(&tk_core.seq);
2335 *ts = tk_xtime(tk);
2336 offset = tk_core.timekeeper.offs_real;
2337 } while (read_seqcount_retry(&tk_core.seq, seq));
2338
2339 coarse = timespec64_to_ktime(*ts);
2340 f_real = ktime_add(floor, offset);
2341 if (ktime_after(f_real, coarse))
2342 *ts = ktime_to_timespec64(f_real);
2343}
2344
2345/**
2346 * ktime_get_real_ts64_mg - attempt to update floor value and return result
2347 * @ts: pointer to the timespec to be set
2348 *
2349 * Get a monotonic fine-grained time value and attempt to swap it into
2350 * mg_floor. If that succeeds then accept the new floor value. If it fails
2351 * then another task raced in during the interim time and updated the
2352 * floor. Since any update to the floor must be later than the previous
2353 * floor, either outcome is acceptable.
2354 *
2355 * Typically this will be called after calling ktime_get_coarse_real_ts64_mg(),
2356 * and determining that the resulting coarse-grained timestamp did not effect
2357 * a change in ctime. Any more recent floor value would effect a change to
2358 * ctime, so there is no need to retry the atomic64_try_cmpxchg() on failure.
2359 *
2360 * @ts will be filled with the latest floor value, regardless of the outcome of
2361 * the cmpxchg. Note that this is a filesystem specific interface and should be
2362 * avoided outside of that context.
2363 */
2364void ktime_get_real_ts64_mg(struct timespec64 *ts)
2365{
2366 struct timekeeper *tk = &tk_core.timekeeper;
2367 ktime_t old = atomic64_read(&mg_floor);
2368 ktime_t offset, mono;
2369 unsigned int seq;
2370 u64 nsecs;
2371
2372 do {
2373 seq = read_seqcount_begin(&tk_core.seq);
2374
2375 ts->tv_sec = tk->xtime_sec;
2376 mono = tk->tkr_mono.base;
2377 nsecs = timekeeping_get_ns(&tk->tkr_mono);
2378 offset = tk_core.timekeeper.offs_real;
2379 } while (read_seqcount_retry(&tk_core.seq, seq));
2380
2381 mono = ktime_add_ns(mono, nsecs);
2382
2383 /*
2384 * Attempt to update the floor with the new time value. As any
2385 * update must be later than the existing floor, and would effect
2386 * a change to ctime from the perspective of the current task,
2387 * accept the resulting floor value regardless of the outcome of
2388 * the swap.
2389 */
2390 if (atomic64_try_cmpxchg(&mg_floor, &old, mono)) {
2391 ts->tv_nsec = 0;
2392 timespec64_add_ns(ts, nsecs);
2393 timekeeping_inc_mg_floor_swaps();
2394 } else {
2395 /*
2396 * Another task changed mg_floor since "old" was fetched.
2397 * "old" has been updated with the latest value of "mg_floor".
2398 * That value is newer than the previous floor value, which
2399 * is enough to effect a change to ctime. Accept it.
2400 */
2401 *ts = ktime_to_timespec64(ktime_add(old, offset));
2402 }
2403}
2404
2405void ktime_get_coarse_ts64(struct timespec64 *ts)
2406{
2407 struct timekeeper *tk = &tk_core.timekeeper;
2408 struct timespec64 now, mono;
2409 unsigned int seq;
2410
2411 do {
2412 seq = read_seqcount_begin(&tk_core.seq);
2413
2414 now = tk_xtime(tk);
2415 mono = tk->wall_to_monotonic;
2416 } while (read_seqcount_retry(&tk_core.seq, seq));
2417
2418 set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
2419 now.tv_nsec + mono.tv_nsec);
2420}
2421EXPORT_SYMBOL(ktime_get_coarse_ts64);
2422
2423/*
2424 * Must hold jiffies_lock
2425 */
2426void do_timer(unsigned long ticks)
2427{
2428 jiffies_64 += ticks;
2429 calc_global_load();
2430}
2431
2432/**
2433 * ktime_get_update_offsets_now - hrtimer helper
2434 * @cwsseq: pointer to check and store the clock was set sequence number
2435 * @offs_real: pointer to storage for monotonic -> realtime offset
2436 * @offs_boot: pointer to storage for monotonic -> boottime offset
2437 * @offs_tai: pointer to storage for monotonic -> clock tai offset
2438 *
2439 * Returns current monotonic time and updates the offsets if the
2440 * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
2441 * different.
2442 *
2443 * Called from hrtimer_interrupt() or retrigger_next_event()
2444 */
2445ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2446 ktime_t *offs_boot, ktime_t *offs_tai)
2447{
2448 struct timekeeper *tk = &tk_core.timekeeper;
2449 unsigned int seq;
2450 ktime_t base;
2451 u64 nsecs;
2452
2453 do {
2454 seq = read_seqcount_begin(&tk_core.seq);
2455
2456 base = tk->tkr_mono.base;
2457 nsecs = timekeeping_get_ns(&tk->tkr_mono);
2458 base = ktime_add_ns(base, nsecs);
2459
2460 if (*cwsseq != tk->clock_was_set_seq) {
2461 *cwsseq = tk->clock_was_set_seq;
2462 *offs_real = tk->offs_real;
2463 *offs_boot = tk->offs_boot;
2464 *offs_tai = tk->offs_tai;
2465 }
2466
2467 /* Handle leapsecond insertion adjustments */
2468 if (unlikely(base >= tk->next_leap_ktime))
2469 *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
2470
2471 } while (read_seqcount_retry(&tk_core.seq, seq));
2472
2473 return base;
2474}
2475
2476/*
2477 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
2478 */
2479static int timekeeping_validate_timex(const struct __kernel_timex *txc)
2480{
2481 if (txc->modes & ADJ_ADJTIME) {
2482 /* singleshot must not be used with any other mode bits */
2483 if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
2484 return -EINVAL;
2485 if (!(txc->modes & ADJ_OFFSET_READONLY) &&
2486 !capable(CAP_SYS_TIME))
2487 return -EPERM;
2488 } else {
2489 /* In order to modify anything, you gotta be super-user! */
2490 if (txc->modes && !capable(CAP_SYS_TIME))
2491 return -EPERM;
2492 /*
2493 * if the quartz is off by more than 10% then
2494 * something is VERY wrong!
2495 */
2496 if (txc->modes & ADJ_TICK &&
2497 (txc->tick < 900000/USER_HZ ||
2498 txc->tick > 1100000/USER_HZ))
2499 return -EINVAL;
2500 }
2501
2502 if (txc->modes & ADJ_SETOFFSET) {
2503 /* In order to inject time, you gotta be super-user! */
2504 if (!capable(CAP_SYS_TIME))
2505 return -EPERM;
2506
2507 /*
2508 * Validate if a timespec/timeval used to inject a time
2509 * offset is valid. Offsets can be positive or negative, so
2510 * we don't check tv_sec. The value of the timeval/timespec
2511 * is the sum of its fields, but *NOTE*:
2512 * The field tv_usec/tv_nsec must always be non-negative and
2513 * we can't have more nanoseconds/microseconds than a second.
2514 */
2515 if (txc->time.tv_usec < 0)
2516 return -EINVAL;
2517
2518 if (txc->modes & ADJ_NANO) {
2519 if (txc->time.tv_usec >= NSEC_PER_SEC)
2520 return -EINVAL;
2521 } else {
2522 if (txc->time.tv_usec >= USEC_PER_SEC)
2523 return -EINVAL;
2524 }
2525 }
2526
2527 /*
2528 * Check for potential multiplication overflows that can
2529 * only happen on 64-bit systems:
2530 */
2531 if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
2532 if (LLONG_MIN / PPM_SCALE > txc->freq)
2533 return -EINVAL;
2534 if (LLONG_MAX / PPM_SCALE < txc->freq)
2535 return -EINVAL;
2536 }
2537
2538 return 0;
2539}
2540
2541/**
2542 * random_get_entropy_fallback - Returns the raw clock source value,
2543 * used by random.c for platforms with no valid random_get_entropy().
2544 */
2545unsigned long random_get_entropy_fallback(void)
2546{
2547 struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
2548 struct clocksource *clock = READ_ONCE(tkr->clock);
2549
2550 if (unlikely(timekeeping_suspended || !clock))
2551 return 0;
2552 return clock->read(clock);
2553}
2554EXPORT_SYMBOL_GPL(random_get_entropy_fallback);
2555
2556/**
2557 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
2558 * @txc: Pointer to kernel_timex structure containing NTP parameters
2559 */
2560int do_adjtimex(struct __kernel_timex *txc)
2561{
2562 struct audit_ntp_data ad;
2563 bool offset_set = false;
2564 bool clock_set = false;
2565 struct timespec64 ts;
2566 int ret;
2567
2568 /* Validate the data before disabling interrupts */
2569 ret = timekeeping_validate_timex(txc);
2570 if (ret)
2571 return ret;
2572 add_device_randomness(txc, sizeof(*txc));
2573
2574 if (txc->modes & ADJ_SETOFFSET) {
2575 struct timespec64 delta;
2576
2577 delta.tv_sec = txc->time.tv_sec;
2578 delta.tv_nsec = txc->time.tv_usec;
2579 if (!(txc->modes & ADJ_NANO))
2580 delta.tv_nsec *= 1000;
2581 ret = timekeeping_inject_offset(&delta);
2582 if (ret)
2583 return ret;
2584
2585 offset_set = delta.tv_sec != 0;
2586 audit_tk_injoffset(delta);
2587 }
2588
2589 audit_ntp_init(&ad);
2590
2591 ktime_get_real_ts64(&ts);
2592 add_device_randomness(&ts, sizeof(ts));
2593
2594 scoped_guard (raw_spinlock_irqsave, &tk_core.lock) {
2595 struct timekeeper *tks = &tk_core.shadow_timekeeper;
2596 s32 orig_tai, tai;
2597
2598 orig_tai = tai = tks->tai_offset;
2599 ret = __do_adjtimex(txc, &ts, &tai, &ad);
2600
2601 if (tai != orig_tai) {
2602 __timekeeping_set_tai_offset(tks, tai);
2603 timekeeping_update_from_shadow(&tk_core, TK_CLOCK_WAS_SET);
2604 clock_set = true;
2605 } else {
2606 tk_update_leap_state_all(&tk_core);
2607 }
2608 }
2609
2610 audit_ntp_log(&ad);
2611
2612 /* Update the multiplier immediately if frequency was set directly */
2613 if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
2614 clock_set |= timekeeping_advance(TK_ADV_FREQ);
2615
2616 if (clock_set)
2617 clock_was_set(CLOCK_SET_WALL);
2618
2619 ntp_notify_cmos_timer(offset_set);
2620
2621 return ret;
2622}
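
/*
 * Illustrative sketch, not part of this file: an in-kernel caller stepping
 * the clock forward by 500 microseconds through the ADJ_SETOFFSET path
 * validated by timekeeping_validate_timex(). With ADJ_NANO set,
 * time.tv_usec carries nanoseconds.
 *
 *	struct __kernel_timex txc = {
 *		.modes	= ADJ_SETOFFSET | ADJ_NANO,
 *		.time	= { .tv_sec = 0, .tv_usec = 500000 },
 *	};
 *	int err = do_adjtimex(&txc);
 */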
2623
2624#ifdef CONFIG_NTP_PPS
2625/**
2626 * hardpps() - Accessor function to NTP __hardpps function
2627 * @phase_ts: Pointer to timespec64 structure representing phase timestamp
2628 * @raw_ts: Pointer to timespec64 structure representing raw timestamp
2629 */
2630void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
2631{
2632 guard(raw_spinlock_irqsave)(&tk_core.lock);
2633 __hardpps(phase_ts, raw_ts);
2634}
2635EXPORT_SYMBOL(hardpps);
2636#endif /* CONFIG_NTP_PPS */