/*
 * linux/kernel/time/timekeeping.c
 *
 * Kernel timekeeping code and accessor functions
 *
 * This code was moved from linux/kernel/timer.c.
 * Please see that file for copyright and history logs.
 *
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP            (1 << 0)
#define TK_MIRROR               (1 << 1)
#define TK_CLOCK_WAS_SET        (1 << 2)

static struct timekeeper timekeeper;
static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static seqcount_t timekeeper_seq;
static struct timekeeper shadow_timekeeper;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* Flag for if there is a persistent clock on this platform */
bool __read_mostly persistent_clock_exist = false;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
        while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
                tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
                tk->xtime_sec++;
        }
}

static inline struct timespec tk_xtime(struct timekeeper *tk)
{
        struct timespec ts;

        ts.tv_sec = tk->xtime_sec;
        ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
        return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
        tk->xtime_sec = ts->tv_sec;
        tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
        tk->xtime_sec += ts->tv_sec;
        tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
        tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
{
        struct timespec tmp;

        /*
         * Verify consistency of: offset_real = -wall_to_monotonic
         * before modifying anything
         */
        set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
                                -tk->wall_to_monotonic.tv_nsec);
        WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
        tk->wall_to_monotonic = wtm;
        set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
        tk->offs_real = timespec_to_ktime(tmp);
        tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
{
        /* Verify consistency before modifying */
        WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);

        tk->total_sleep_time = t;
        tk->offs_boot = timespec_to_ktime(t);
}

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:         The target timekeeper to setup.
 * @clock:      Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
        cycle_t interval;
        u64 tmp, ntpinterval;
        struct clocksource *old_clock;

        old_clock = tk->clock;
        tk->clock = clock;
        tk->cycle_last = clock->cycle_last = clock->read(clock);

        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
        tmp <<= clock->shift;
        ntpinterval = tmp;
        tmp += clock->mult/2;
        do_div(tmp, clock->mult);
        if (tmp == 0)
                tmp = 1;

        interval = (cycle_t) tmp;
        tk->cycle_interval = interval;

        /* Go back from cycles -> shifted ns */
        tk->xtime_interval = (u64) interval * clock->mult;
        tk->xtime_remainder = ntpinterval - tk->xtime_interval;
        tk->raw_interval =
                ((u64) interval * clock->mult) >> clock->shift;

        /* if changing clocks, convert xtime_nsec shift units */
        if (old_clock) {
                int shift_change = clock->shift - old_clock->shift;
                if (shift_change < 0)
                        tk->xtime_nsec >>= -shift_change;
                else
                        tk->xtime_nsec <<= shift_change;
        }
        tk->shift = clock->shift;

        tk->ntp_error = 0;
        tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

        /*
         * The timekeeper keeps its own mult values for the currently
         * active clocksource. These values will be adjusted via NTP
         * to counteract clock drifting.
         */
        tk->mult = clock->mult;
}
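
/*
 * Worked example for tk_setup_internals() (illustrative only; the numbers
 * assume HZ=100, so NTP_INTERVAL_LENGTH = 10,000,000 ns, and a hypothetical
 * 10 MHz clocksource with shift = 24 and mult = (10^9 << 24) / 10^7):
 *
 *      tmp            = 10^7 << 24             (NTP interval in shifted ns)
 *      tmp / mult     = 10^7 * 10^7 / 10^9     = 100,000
 *      cycle_interval = 100,000 cycles         (one NTP tick at 10 MHz)
 *      xtime_interval = 100,000 * mult         = 10^7 << 24 shifted ns
 *
 * i.e. accumulating one cycle_interval advances xtime by exactly one NTP
 * interval (10 ms), modulo the rounding captured in xtime_remainder.
 */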

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
u32 (*arch_gettimeoffset)(void);

u32 get_arch_timeoffset(void)
{
        if (likely(arch_gettimeoffset))
                return arch_gettimeoffset();
        return 0;
}
#else
static inline u32 get_arch_timeoffset(void) { return 0; }
#endif

static inline s64 timekeeping_get_ns(struct timekeeper *tk)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        /* read clocksource: */
        clock = tk->clock;
        cycle_now = clock->read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        nsec = cycle_delta * tk->mult + tk->xtime_nsec;
        nsec >>= tk->shift;

        /* If arch requires, add in get_arch_timeoffset() */
        return nsec + get_arch_timeoffset();
}

static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        /* read clocksource: */
        clock = tk->clock;
        cycle_now = clock->read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        /* convert delta to nanoseconds. */
        nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);

        /* If arch requires, add in get_arch_timeoffset() */
        return nsec + get_arch_timeoffset();
}
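
/*
 * Note on the (cycle_now - cycle_last) & mask idiom used above: it handles
 * counters narrower than 64 bits that wrap. Illustrative example for a
 * 32-bit counter (mask = 0xffffffff): with cycle_last = 0xfffffff0 and
 * cycle_now = 0x10 after a wrap, the masked subtraction yields
 * (0x10 - 0xfffffff0) & 0xffffffff = 0x20, i.e. the correct 32 elapsed
 * cycles, which clocksource_cyc2ns() then converts via
 * (delta * mult) >> shift.
 */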

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
        raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
        update_pvclock_gtod(tk, true);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
        if (action & TK_CLEAR_NTP) {
                tk->ntp_error = 0;
                ntp_clear();
        }
        update_vsyscall(tk);
        update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

        if (action & TK_MIRROR)
                memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        clock = tk->clock;
        cycle_now = clock->read(clock);
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
        tk->cycle_last = clock->cycle_last = cycle_now;

        tk->xtime_nsec += cycle_delta * tk->mult;

        /* If arch requires, add in get_arch_timeoffset() */
        tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift;

        tk_normalize_xtime(tk);

        nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
        timespec_add_ns(&tk->raw_time, nsec);
}

/**
 * __getnstimeofday - Returns the time of day in a timespec.
 * @ts:         pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or a negative error code when suspended
 * (in which case the timespec will be undefined).
 */
int __getnstimeofday(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        s64 nsecs = 0;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                ts->tv_sec = tk->xtime_sec;
                nsecs = timekeeping_get_ns(tk);

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsecs);

        /*
         * Do not bail out early, in case there were callers still using
         * the value, even in the face of the WARN_ON.
         */
        if (unlikely(timekeeping_suspended))
                return -EAGAIN;
        return 0;
}
EXPORT_SYMBOL(__getnstimeofday);

/**
 * getnstimeofday - Returns the time of day in a timespec.
 * @ts:         pointer to the timespec to be set
 *
 * Returns the time of day in a timespec (WARN if suspended).
 */
void getnstimeofday(struct timespec *ts)
{
        WARN_ON(__getnstimeofday(ts));
}
EXPORT_SYMBOL(getnstimeofday);
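
#if 0
/*
 * Illustrative sketch (not part of the build): how a caller would read
 * wall-clock time with the accessor above. The function name is made up
 * for the example; getnstimeofday() and the seqcount retry loop it wraps
 * are the real interfaces.
 */
static void example_stamp_event(void)
{
        struct timespec now;

        getnstimeofday(&now);
        pr_info("event at %ld.%09ld\n", now.tv_sec, now.tv_nsec);
}
#endif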

ktime_t ktime_get(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned int seq;
        s64 secs, nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&timekeeper_seq);
                secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
                nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;

        } while (read_seqcount_retry(&timekeeper_seq, seq));
        /*
         * Use ktime_set/ktime_add_ns to create a proper ktime on
         * 32-bit architectures without CONFIG_KTIME_SCALAR.
         */
        return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:         pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec tomono;
        s64 nsec;
        unsigned int seq;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&timekeeper_seq);
                ts->tv_sec = tk->xtime_sec;
                nsec = timekeeping_get_ns(tk);
                tomono = tk->wall_to_monotonic;

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        ts->tv_sec += tomono.tv_sec;
        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/**
 * timekeeping_clocktai - Returns the TAI time of day in a timespec
 * @ts:         pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void timekeeping_clocktai(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        u64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                ts->tv_sec = tk->xtime_sec + tk->tai_offset;
                nsecs = timekeeping_get_ns(tk);

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(timekeeping_clocktai);

/**
 * ktime_get_clocktai - Returns the TAI time of day in a ktime
 *
 * Returns the time of day in a ktime.
 */
ktime_t ktime_get_clocktai(void)
{
        struct timespec ts;

        timekeeping_clocktai(&ts);
        return timespec_to_ktime(ts);
}
EXPORT_SYMBOL(ktime_get_clocktai);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:     pointer to the timespec to be set to raw monotonic time
 * @ts_real:    pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        s64 nsecs_raw, nsecs_real;

        WARN_ON_ONCE(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                *ts_raw = tk->raw_time;
                ts_real->tv_sec = tk->xtime_sec;
                ts_real->tv_nsec = 0;

                nsecs_raw = timekeeping_get_ns_raw(tk);
                nsecs_real = timekeeping_get_ns(tk);

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        timespec_add_ns(ts_raw, nsecs_raw);
        timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:         pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
        struct timespec now;

        getnstimeofday(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv:         pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec ts_delta, xt;
        unsigned long flags;

        if (!timespec_valid_strict(tv))
                return -EINVAL;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        timekeeping_forward_now(tk);

        xt = tk_xtime(tk);
        ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
        ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

        tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));

        tk_set_xtime(tk, tv);

        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return 0;
}
EXPORT_SYMBOL(do_settimeofday);
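
#if 0
/*
 * Illustrative sketch (not part of the build): stepping the clock with
 * do_settimeofday(). The timestamp is arbitrary; note that the strict
 * validation above rejects out-of-range values with -EINVAL.
 */
static int example_step_clock(void)
{
        struct timespec ts = { .tv_sec = 1000000000, .tv_nsec = 0 };

        return do_settimeofday(&ts);
}
#endif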

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:         pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        struct timespec tmp;
        int ret = 0;

        if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        timekeeping_forward_now(tk);

        /* Make sure the proposed value is valid */
        tmp = timespec_add(tk_xtime(tk), *ts);
        if (!timespec_valid_strict(&tmp)) {
                ret = -EINVAL;
                goto error;
        }

        tk_xtime_add(tk, ts);
        tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);
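
#if 0
/*
 * Illustrative sketch (not part of the build): shifting the clock by a
 * relative amount, e.g. -500 ms, as the ADJ_SETOFFSET path in
 * do_adjtimex() below does. tv_nsec must stay in [0, NSEC_PER_SEC).
 */
static int example_shift_clock_back_500ms(void)
{
        struct timespec delta = { .tv_sec = -1, .tv_nsec = 500000000 };

        return timekeeping_inject_offset(&delta);
}
#endif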

/**
 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
 *
 */
s32 timekeeping_get_tai_offset(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned int seq;
        s32 ret;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);
                ret = tk->tai_offset;
        } while (read_seqcount_retry(&timekeeper_seq, seq));

        return ret;
}

/**
 * __timekeeping_set_tai_offset - Lock free worker function
 *
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
        tk->tai_offset = tai_offset;
        tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
 *
 */
void timekeeping_set_tai_offset(s32 tai_offset)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);
        __timekeeping_set_tai_offset(tk, tai_offset);
        timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
        clock_was_set();
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
        struct timekeeper *tk = &timekeeper;
        struct clocksource *new, *old;
        unsigned long flags;

        new = (struct clocksource *) data;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        timekeeping_forward_now(tk);
        /*
         * If the clocksource is in a module, get a module reference.
         * Succeeds for built-in code (owner == NULL) as well.
         */
        if (try_module_get(new->owner)) {
                if (!new->enable || new->enable(new) == 0) {
                        old = tk->clock;
                        tk_setup_internals(tk, new);
                        if (old->disable)
                                old->disable(old);
                        module_put(old->owner);
                } else {
                        module_put(new->owner);
                }
        }
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:      pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
        struct timekeeper *tk = &timekeeper;

        if (tk->clock == clock)
                return 0;
        stop_machine(change_clocksource, clock, NULL);
        tick_clock_notify();
        return tk->clock == clock ? 0 : -1;
}
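
#if 0
/*
 * Illustrative sketch (not part of the build): how a driver-side
 * clocksource ends up here. Registering a higher-rated clocksource via
 * clocksource_register_hz() leads the clocksource core to call
 * timekeeping_notify(), which swaps clocks under stop_machine(). The
 * counter read-out below is a made-up placeholder.
 */
static cycle_t example_cs_read(struct clocksource *cs)
{
        return 0;       /* would read a free-running hardware counter */
}

static struct clocksource example_cs = {
        .name   = "example",
        .rating = 300,
        .read   = example_cs_read,
        .mask   = CLOCKSOURCE_MASK(32),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_cs_init(void)
{
        return clocksource_register_hz(&example_cs, 10000000); /* 10 MHz */
}
#endif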

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
        struct timespec now;

        getnstimeofday(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:         pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        s64 nsecs;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);
                nsecs = timekeeping_get_ns_raw(tk);
                *ts = tk->raw_time;

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        int ret;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        u64 ret;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                ret = tk->clock->max_idle_ns;

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        return ret;
}

/**
 * read_persistent_clock - Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_persistent_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/**
 * read_boot_clock - Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_boot_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}
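
#if 0
/*
 * Illustrative sketch (not part of the build): an architecture overriding
 * the weak read_persistent_clock() above. example_rtc_read_seconds() is a
 * hypothetical board helper standing in for real RTC access.
 */
void read_persistent_clock(struct timespec *ts)
{
        ts->tv_sec = example_rtc_read_seconds();        /* hypothetical */
        ts->tv_nsec = 0;
}
#endif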

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
        struct timekeeper *tk = &timekeeper;
        struct clocksource *clock;
        unsigned long flags;
        struct timespec now, boot, tmp;

        read_persistent_clock(&now);

        if (!timespec_valid_strict(&now)) {
                pr_warn("WARNING: Persistent clock returned invalid value!\n"
                        "         Check your CMOS/BIOS settings.\n");
                now.tv_sec = 0;
                now.tv_nsec = 0;
        } else if (now.tv_sec || now.tv_nsec)
                persistent_clock_exist = true;

        read_boot_clock(&boot);
        if (!timespec_valid_strict(&boot)) {
                pr_warn("WARNING: Boot clock returned invalid value!\n"
                        "         Check your CMOS/BIOS settings.\n");
                boot.tv_sec = 0;
                boot.tv_nsec = 0;
        }

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);
        ntp_init();

        clock = clocksource_default_clock();
        if (clock->enable)
                clock->enable(clock);
        tk_setup_internals(tk, clock);

        tk_set_xtime(tk, &now);
        tk->raw_time.tv_sec = 0;
        tk->raw_time.tv_nsec = 0;
        if (boot.tv_sec == 0 && boot.tv_nsec == 0)
                boot = tk_xtime(tk);

        set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
        tk_set_wall_to_mono(tk, tmp);

        tmp.tv_sec = 0;
        tmp.tv_nsec = 0;
        tk_set_sleep_time(tk, tmp);

        memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));

        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
                                           struct timespec *delta)
{
        if (!timespec_valid_strict(delta)) {
                printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
                       "sleep delta value!\n");
                return;
        }
        tk_xtime_add(tk, delta);
        tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
        tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
        tk_debug_account_sleep_time(delta);
}

/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;

        /*
         * Make sure we don't set the clock twice, as timekeeping_resume()
         * already did it
         */
        if (has_persistent_clock())
                return;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        timekeeping_forward_now(tk);

        __timekeeping_inject_sleeptime(tk, delta);

        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();
}

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
        struct timekeeper *tk = &timekeeper;
        struct clocksource *clock = tk->clock;
        unsigned long flags;
        struct timespec ts_new, ts_delta;
        cycle_t cycle_now, cycle_delta;
        bool suspendtime_found = false;

        read_persistent_clock(&ts_new);

        clockevents_resume();
        clocksource_resume();

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        /*
         * After the system resumes, we need to calculate the suspended
         * time and compensate for it in the OS time. There are 3 sources
         * that could be used: the nonstop clocksource during suspend, the
         * persistent clock and the rtc device.
         *
         * One specific platform may have 1 or 2 or all of them, and the
         * preference will be:
         *      suspend-nonstop clocksource -> persistent clock -> rtc
         * The less preferred source will only be tried if there is no better
         * usable source. The rtc part is handled separately in rtc core code.
         */
        cycle_now = clock->read(clock);
        if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
            cycle_now > clock->cycle_last) {
                u64 num, max = ULLONG_MAX;
                u32 mult = clock->mult;
                u32 shift = clock->shift;
                s64 nsec = 0;

                cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

                /*
                 * "cycle_delta * mult" may overflow 64 bits if the
                 * suspended time is too long. In that case we need to do
                 * the 64-bit math carefully.
                 */
                do_div(max, mult);
                if (cycle_delta > max) {
                        num = div64_u64(cycle_delta, max);
                        nsec = (((u64) max * mult) >> shift) * num;
                        cycle_delta -= num * max;
                }
                nsec += ((u64) cycle_delta * mult) >> shift;

                ts_delta = ns_to_timespec(nsec);
                suspendtime_found = true;
        } else if (timespec_compare(&ts_new, &timekeeping_suspend_time) > 0) {
                ts_delta = timespec_sub(ts_new, timekeeping_suspend_time);
                suspendtime_found = true;
        }

        if (suspendtime_found)
                __timekeeping_inject_sleeptime(tk, &ts_delta);

        /* Re-base the last cycle value */
        tk->cycle_last = clock->cycle_last = cycle_now;
        tk->ntp_error = 0;
        timekeeping_suspended = 0;
        timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        touch_softlockup_watchdog();

        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

        /* Resume hrtimers */
        hrtimers_resume();
}
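
/*
 * Worked example for the overflow-safe conversion above (illustrative
 * numbers): with mult = 4,000,000,000 a direct cycle_delta * mult product
 * overflows u64 once cycle_delta exceeds max = ULLONG_MAX / mult, i.e.
 * roughly 4.6 * 10^9 cycles. The code therefore converts whole chunks of
 * "max" cycles first:
 *
 *      num  = cycle_delta / max                (full chunks)
 *      nsec = ((max * mult) >> shift) * num
 *
 * and only then converts the remaining cycle_delta < max directly, so no
 * intermediate product can wrap.
 */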

static int timekeeping_suspend(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        struct timespec delta, delta_delta;
        static struct timespec old_delta;

        read_persistent_clock(&timekeeping_suspend_time);

        /*
         * On some systems the persistent_clock can not be detected at
         * timekeeping_init by its return value, so if we see a valid
         * value returned, update the persistent_clock_exist flag.
         */
        if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
                persistent_clock_exist = true;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);
        timekeeping_forward_now(tk);
        timekeeping_suspended = 1;

        /*
         * To avoid drift caused by repeated suspend/resumes,
         * which each can add ~1 second drift error,
         * try to compensate so the difference in system time
         * and persistent_clock time stays close to constant.
         */
        delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
        delta_delta = timespec_sub(delta, old_delta);
        if (abs(delta_delta.tv_sec) >= 2) {
                /*
                 * if delta_delta is too large, assume time correction
                 * has occurred and set old_delta to the current delta.
                 */
                old_delta = delta;
        } else {
                /* Otherwise try to adjust old_system to compensate */
                timekeeping_suspend_time =
                        timespec_add(timekeeping_suspend_time, delta_delta);
        }

        timekeeping_update(tk, TK_MIRROR);
        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
        clocksource_suspend();
        clockevents_suspend();

        return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
        .resume         = timekeeping_resume,
        .suspend        = timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
        register_syscore_ops(&timekeeping_syscore_ops);
        return 0;
}

device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
                                                 s64 error, s64 *interval,
                                                 s64 *offset)
{
        s64 tick_error, i;
        u32 look_ahead, adj;
        s32 error2, mult;

        /*
         * Use the current error value to determine how much to look ahead.
         * The larger the error the slower we adjust for it to avoid problems
         * with losing too many ticks, otherwise we would overadjust and
         * produce an even larger error. The smaller the adjustment the
         * faster we try to adjust for it, as lost ticks can do less harm
         * here. This is tuned so that an error of about 1 msec is adjusted
         * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
         */
        error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
        error2 = abs(error2);
        for (look_ahead = 0; error2 > 0; look_ahead++)
                error2 >>= 2;

        /*
         * Now calculate the error in (1 << look_ahead) ticks, but first
         * remove the single look ahead already included in the error.
         */
        tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
        tick_error -= tk->xtime_interval >> 1;
        error = ((error - tick_error) >> look_ahead) + tick_error;

        /* Finally calculate the adjustment shift value. */
        i = *interval;
        mult = 1;
        if (error < 0) {
                error = -error;
                *interval = -*interval;
                *offset = -*offset;
                mult = -1;
        }
        for (adj = 0; error > i; adj++)
                error >>= 1;

        *interval <<= adj;
        *offset <<= adj;
        return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
        s64 error, interval = tk->cycle_interval;
        int adj;

        /*
         * The point of this is to check if the error is greater than half
         * an interval.
         *
         * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
         *
         * Note we subtract one in the shift, so that error is really error*2.
         * This "saves" dividing(shifting) interval twice, but keeps the
         * (error > interval) comparison as still measuring if error is
         * larger than half an interval.
         *
         * Note: It does not "save" on aggravation when reading the code.
         */
        error = tk->ntp_error >> (tk->ntp_error_shift - 1);
        if (error > interval) {
                /*
                 * We now divide error by 4 (via shift), which checks if
                 * the error is greater than twice the interval.
                 * If it is greater, we need a bigadjust, if it's smaller,
                 * we can adjust by 1.
                 */
                error >>= 2;
                if (likely(error <= interval))
                        adj = 1;
                else
                        adj = timekeeping_bigadjust(tk, error, &interval, &offset);
        } else {
                if (error < -interval) {
                        /* See comment above, this is just switched for the negative */
                        error >>= 2;
                        if (likely(error >= -interval)) {
                                adj = -1;
                                interval = -interval;
                                offset = -offset;
                        } else {
                                adj = timekeeping_bigadjust(tk, error, &interval, &offset);
                        }
                } else {
                        goto out_adjust;
                }
        }

        if (unlikely(tk->clock->maxadj &&
                     (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
                printk_once(KERN_WARNING
                        "Adjusting %s more than 11%% (%ld vs %ld)\n",
                        tk->clock->name, (long)tk->mult + adj,
                        (long)tk->clock->mult + tk->clock->maxadj);
        }
        /*
         * So the following can be confusing.
         *
         * To keep things simple, lets assume adj == 1 for now.
         *
         * When adj != 1, remember that the interval and offset values
         * have been appropriately scaled so the math is the same.
         *
         * The basic idea here is that we're increasing the multiplier
         * by one, this causes the xtime_interval to be incremented by
         * one cycle_interval. This is because:
         *      xtime_interval = cycle_interval * mult
         * So if mult is being incremented by one:
         *      xtime_interval = cycle_interval * (mult + 1)
         * It's the same as:
         *      xtime_interval = (cycle_interval * mult) + cycle_interval
         * Which can be shortened to:
         *      xtime_interval += cycle_interval
         *
         * So offset stores the non-accumulated cycles. Thus the current
         * time (in shifted nanoseconds) is:
         *      now = (offset * adj) + xtime_nsec
         * Now, even though we're adjusting the clock frequency, we have
         * to keep time consistent. In other words, we can't jump back
         * in time, and we also want to avoid jumping forward in time.
         *
         * So given the same offset value, we need the time to be the same
         * both before and after the freq adjustment.
         *      now = (offset * adj_1) + xtime_nsec_1
         *      now = (offset * adj_2) + xtime_nsec_2
         * So:
         *      (offset * adj_1) + xtime_nsec_1 =
         *              (offset * adj_2) + xtime_nsec_2
         * And we know:
         *      adj_2 = adj_1 + 1
         * So:
         *      (offset * adj_1) + xtime_nsec_1 =
         *              (offset * (adj_1+1)) + xtime_nsec_2
         *      (offset * adj_1) + xtime_nsec_1 =
         *              (offset * adj_1) + offset + xtime_nsec_2
         * Canceling the sides:
         *      xtime_nsec_1 = offset + xtime_nsec_2
         * Which gives us:
         *      xtime_nsec_2 = xtime_nsec_1 - offset
         * Which simplifies to:
         *      xtime_nsec -= offset
         *
         * XXX - TODO: Doc ntp_error calculation.
         */
        tk->mult += adj;
        tk->xtime_interval += interval;
        tk->xtime_nsec -= offset;
        tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;

out_adjust:
        /*
         * It may be possible that when we entered this function, xtime_nsec
         * was very small. Further, if we're slightly speeding the clocksource
         * in the code above, it's possible the required corrective factor to
         * xtime_nsec could cause it to underflow.
         *
         * Now, since we already accumulated the second, we cannot simply
         * roll the accumulated second back, since the NTP subsystem has
         * been notified via second_overflow(). So instead we push
         * xtime_nsec forward by the amount we underflowed, and add that
         * amount into the error.
         *
         * We'll correct this error next time through this function, when
         * xtime_nsec is not as small.
         */
        if (unlikely((s64)tk->xtime_nsec < 0)) {
                s64 neg = -(s64)tk->xtime_nsec;
                tk->xtime_nsec = 0;
                tk->ntp_error += neg << tk->ntp_error_shift;
        }
}
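
/*
 * Tiny numeric check of the invariant derived above (illustrative values):
 * with offset = 1000 unaccumulated cycles, adj = 1 and xtime_nsec = 5000,
 *
 *      before: now = 1000 * mult       + 5000 = 1000*mult + 5000
 *      after:  now = 1000 * (mult + 1) + 4000 = 1000*mult + 5000
 *
 * so bumping mult while subtracting offset from xtime_nsec leaves the
 * readout unchanged for the not-yet-accumulated cycles.
 */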

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_sec field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
        u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
        unsigned int clock_set = 0;

        while (tk->xtime_nsec >= nsecps) {
                int leap;

                tk->xtime_nsec -= nsecps;
                tk->xtime_sec++;

                /* Figure out if it's a leap sec and apply if needed */
                leap = second_overflow(tk->xtime_sec);
                if (unlikely(leap)) {
                        struct timespec ts;

                        tk->xtime_sec += leap;

                        ts.tv_sec = leap;
                        ts.tv_nsec = 0;
                        tk_set_wall_to_mono(tk,
                                timespec_sub(tk->wall_to_monotonic, ts));

                        __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

                        clock_set = TK_CLOCK_WAS_SET;
                }
        }
        return clock_set;
}

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
                                        u32 shift,
                                        unsigned int *clock_set)
{
        cycle_t interval = tk->cycle_interval << shift;
        u64 raw_nsecs;

        /* If the offset is smaller than a shifted interval, do nothing */
        if (offset < interval)
                return offset;

        /* Accumulate one shifted interval */
        offset -= interval;
        tk->cycle_last += interval;

        tk->xtime_nsec += tk->xtime_interval << shift;
        *clock_set |= accumulate_nsecs_to_secs(tk);

        /* Accumulate raw time */
        raw_nsecs = (u64)tk->raw_interval << shift;
        raw_nsecs += tk->raw_time.tv_nsec;
        if (raw_nsecs >= NSEC_PER_SEC) {
                u64 raw_secs = raw_nsecs;
                raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
                tk->raw_time.tv_sec += raw_secs;
        }
        tk->raw_time.tv_nsec = raw_nsecs;

        /* Accumulate error between NTP and clock interval */
        tk->ntp_error += ntp_tick_length() << shift;
        tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
                                (tk->ntp_error_shift + shift);

        return offset;
}
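
/*
 * Worked example of the logarithmic accumulation driven by
 * update_wall_time() below (illustrative): suppose 11 cycle_intervals are
 * pending. The shift computed from ilog2() starts at 3, so the loop
 * consumes 8 intervals in one call, then 2 at shift = 1, then the last
 * one at shift = 0: three accumulation steps instead of 11
 * single-interval steps.
 */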

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
        s64 remainder;

        /*
         * Store only full nanoseconds into xtime_nsec after rounding
         * it up and add the remainder to the error difference.
         * XXX - This is necessary to avoid small 1ns inconsistencies caused
         * by truncating the remainder in vsyscalls. However, it causes
         * additional work to be done in timekeeping_adjust(). Once
         * the vsyscall implementations are converted to use xtime_nsec
         * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
         * users are removed, this can be killed.
         */
        remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
        tk->xtime_nsec -= remainder;
        tk->xtime_nsec += 1ULL << tk->shift;
        tk->ntp_error += remainder << tk->ntp_error_shift;
        tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
#endif

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
void update_wall_time(void)
{
        struct clocksource *clock;
        struct timekeeper *real_tk = &timekeeper;
        struct timekeeper *tk = &shadow_timekeeper;
        cycle_t offset;
        int shift = 0, maxshift;
        unsigned int clock_set = 0;
        unsigned long flags;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);

        /* Make sure we're fully resumed: */
        if (unlikely(timekeeping_suspended))
                goto out;

        clock = real_tk->clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
        offset = real_tk->cycle_interval;
#else
        offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif

        /* Check if there's really nothing to do */
        if (offset < real_tk->cycle_interval)
                goto out;

        /*
         * With NO_HZ we may have to accumulate many cycle_intervals
         * (think "ticks") worth of time at once. To do this efficiently,
         * we calculate the largest doubling multiple of cycle_intervals
         * that is smaller than the offset. We then accumulate that
         * chunk in one go, and then try to consume the next smaller
         * doubled multiple.
         */
        shift = ilog2(offset) - ilog2(tk->cycle_interval);
        shift = max(0, shift);
        /* Bound shift to one less than what overflows tick_length */
        maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
        shift = min(shift, maxshift);
        while (offset >= tk->cycle_interval) {
                offset = logarithmic_accumulation(tk, offset, shift,
                                                  &clock_set);
                if (offset < tk->cycle_interval<<shift)
                        shift--;
        }

        /* correct the clock when NTP error is too big */
        timekeeping_adjust(tk, offset);

        /*
         * XXX This can be killed once everyone converts
         * to the new update_vsyscall.
         */
        old_vsyscall_fixup(tk);

        /*
         * Finally, make sure that after the rounding
         * xtime_nsec isn't larger than NSEC_PER_SEC
         */
        clock_set |= accumulate_nsecs_to_secs(tk);

        write_seqcount_begin(&timekeeper_seq);
        /* Update clock->cycle_last with the new value */
        clock->cycle_last = tk->cycle_last;
        /*
         * Update the real timekeeper.
         *
         * We could avoid this memcpy by switching pointers, but that
         * requires changes to all other timekeeper usage sites as
         * well, i.e. move the timekeeper pointer getter into the
         * spinlocked/seqcount protected sections. And we trade this
         * memcpy under the timekeeper_seq against one before we start
         * updating.
         */
        memcpy(real_tk, tk, sizeof(*tk));
        timekeeping_update(real_tk, clock_set);
        write_seqcount_end(&timekeeper_seq);
out:
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
        if (clock_set)
                /* Have to call _delayed version, since in irq context*/
                clock_was_set_delayed();
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:         pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec boottime = {
                .tv_sec = tk->wall_to_monotonic.tv_sec +
                                tk->total_sleep_time.tv_sec,
                .tv_nsec = tk->wall_to_monotonic.tv_nsec +
                                tk->total_sleep_time.tv_nsec
        };

        set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);

/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts:         pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec tomono, sleep;
        s64 nsec;
        unsigned int seq;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqcount_begin(&timekeeper_seq);
                ts->tv_sec = tk->xtime_sec;
                nsec = timekeeping_get_ns(tk);
                tomono = tk->wall_to_monotonic;
                sleep = tk->total_sleep_time;

        } while (read_seqcount_retry(&timekeeper_seq, seq));

        ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);

/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
        struct timespec ts;

        get_monotonic_boottime(&ts);
        return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:         pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;

        *ts = timespec_add(*ts, tk->total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
        struct timekeeper *tk = &timekeeper;

        return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
        struct timekeeper *tk = &timekeeper;

        return tk_xtime(tk);
}

struct timespec current_kernel_time(void)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec now;
        unsigned long seq;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                now = tk_xtime(tk);
        } while (read_seqcount_retry(&timekeeper_seq, seq));

        return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec now, mono;
        unsigned long seq;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                now = tk_xtime(tk);
                mono = tk->wall_to_monotonic;
        } while (read_seqcount_retry(&timekeeper_seq, seq));

        set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
                                now.tv_nsec + mono.tv_nsec);
        return now;
}

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
        jiffies_64 += ticks;
        calc_global_load(ticks);
}

/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:       pointer to timespec to be set with xtime
 * @wtom:       pointer to timespec to be set with wall_to_monotonic
 * @sleep:      pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
                                struct timespec *wtom, struct timespec *sleep)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);
                *xtim = tk_xtime(tk);
                *wtom = tk->wall_to_monotonic;
                *sleep = tk->total_sleep_time;
        } while (read_seqcount_retry(&timekeeper_seq, seq));
}

#ifdef CONFIG_HIGH_RES_TIMERS
/**
 * ktime_get_update_offsets - hrtimer helper
 * @offs_real:  pointer to storage for monotonic -> realtime offset
 * @offs_boot:  pointer to storage for monotonic -> boottime offset
 * @offs_tai:   pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
                                 ktime_t *offs_tai)
{
        struct timekeeper *tk = &timekeeper;
        ktime_t now;
        unsigned int seq;
        u64 secs, nsecs;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);

                secs = tk->xtime_sec;
                nsecs = timekeeping_get_ns(tk);

                *offs_real = tk->offs_real;
                *offs_boot = tk->offs_boot;
                *offs_tai = tk->offs_tai;
        } while (read_seqcount_retry(&timekeeper_seq, seq));

        now = ktime_add_ns(ktime_set(secs, 0), nsecs);
        now = ktime_sub(now, *offs_real);
        return now;
}
#endif

/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        struct timespec wtom;

        do {
                seq = read_seqcount_begin(&timekeeper_seq);
                wtom = tk->wall_to_monotonic;
        } while (read_seqcount_retry(&timekeeper_seq, seq));

        return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);

/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        struct timespec ts;
        s32 orig_tai, tai;
        int ret;

        /* Validate the data before disabling interrupts */
        ret = ntp_validate_timex(txc);
        if (ret)
                return ret;

        if (txc->modes & ADJ_SETOFFSET) {
                struct timespec delta;
                delta.tv_sec  = txc->time.tv_sec;
                delta.tv_nsec = txc->time.tv_usec;
                if (!(txc->modes & ADJ_NANO))
                        delta.tv_nsec *= 1000;
                ret = timekeeping_inject_offset(&delta);
                if (ret)
                        return ret;
        }

        getnstimeofday(&ts);

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        orig_tai = tai = tk->tai_offset;
        ret = __do_adjtimex(txc, &ts, &tai);

        if (tai != orig_tai) {
                __timekeeping_set_tai_offset(tk, tai);
                timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
        }
        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        if (tai != orig_tai)
                clock_was_set();

        ntp_notify_cmos_timer();

        return ret;
}

#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        __hardpps(phase_ts, raw_ts);

        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:      number of ticks that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
        write_seqlock(&jiffies_lock);
        do_timer(ticks);
        write_sequnlock(&jiffies_lock);
        update_wall_time();
}
35/*
36 * The most important data for readout fits into a single 64 byte
37 * cache line.
38 */
39static struct {
40 seqcount_t seq;
41 struct timekeeper timekeeper;
42} tk_core ____cacheline_aligned;
43
44static DEFINE_RAW_SPINLOCK(timekeeper_lock);
45static struct timekeeper shadow_timekeeper;
46
47/**
48 * struct tk_fast - NMI safe timekeeper
49 * @seq: Sequence counter for protecting updates. The lowest bit
50 * is the index for the tk_read_base array
51 * @base: tk_read_base array. Access is indexed by the lowest bit of
52 * @seq.
53 *
54 * See @update_fast_timekeeper() below.
55 */
56struct tk_fast {
57 seqcount_t seq;
58 struct tk_read_base base[2];
59};
60
61static struct tk_fast tk_fast_mono ____cacheline_aligned;
62static struct tk_fast tk_fast_raw ____cacheline_aligned;
63
64/* flag for if timekeeping is suspended */
65int __read_mostly timekeeping_suspended;
66
67static inline void tk_normalize_xtime(struct timekeeper *tk)
68{
69 while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
70 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
71 tk->xtime_sec++;
72 }
73}
74
75static inline struct timespec64 tk_xtime(struct timekeeper *tk)
76{
77 struct timespec64 ts;
78
79 ts.tv_sec = tk->xtime_sec;
80 ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
81 return ts;
82}
83
84static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
85{
86 tk->xtime_sec = ts->tv_sec;
87 tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
88}
89
90static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
91{
92 tk->xtime_sec += ts->tv_sec;
93 tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
94 tk_normalize_xtime(tk);
95}
96
97static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
98{
99 struct timespec64 tmp;
100
101 /*
102 * Verify consistency of: offset_real = -wall_to_monotonic
103 * before modifying anything
104 */
105 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
106 -tk->wall_to_monotonic.tv_nsec);
107 WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
108 tk->wall_to_monotonic = wtm;
109 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
110 tk->offs_real = timespec64_to_ktime(tmp);
111 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
112}
113
114static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
115{
116 tk->offs_boot = ktime_add(tk->offs_boot, delta);
117}
118
119#ifdef CONFIG_DEBUG_TIMEKEEPING
120#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
121
122static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
123{
124
125 cycle_t max_cycles = tk->tkr_mono.clock->max_cycles;
126 const char *name = tk->tkr_mono.clock->name;
127
128 if (offset > max_cycles) {
129 printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
130 offset, name, max_cycles);
131 printk_deferred(" timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
132 } else {
133 if (offset > (max_cycles >> 1)) {
134 printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
135 offset, name, max_cycles >> 1);
136 printk_deferred(" timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
137 }
138 }
139
140 if (tk->underflow_seen) {
141 if (jiffies - tk->last_warning > WARNING_FREQ) {
142 printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
143 printk_deferred(" Please report this, consider using a different clocksource, if possible.\n");
144 printk_deferred(" Your kernel is probably still fine.\n");
145 tk->last_warning = jiffies;
146 }
147 tk->underflow_seen = 0;
148 }
149
150 if (tk->overflow_seen) {
151 if (jiffies - tk->last_warning > WARNING_FREQ) {
152 printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
153 printk_deferred(" Please report this, consider using a different clocksource, if possible.\n");
154 printk_deferred(" Your kernel is probably still fine.\n");
155 tk->last_warning = jiffies;
156 }
157 tk->overflow_seen = 0;
158 }
159}
160
161static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
162{
163 struct timekeeper *tk = &tk_core.timekeeper;
164 cycle_t now, last, mask, max, delta;
165 unsigned int seq;
166
167 /*
168 * Since we're called holding a seqlock, the data may shift
169 * under us while we're doing the calculation. This can cause
170 * false positives, since we'd note a problem but throw the
171 * results away. So nest another seqcount read section here to
172 * atomically grab a consistent snapshot of the values we check.
173 */
174 do {
175 seq = read_seqcount_begin(&tk_core.seq);
176 now = tkr->read(tkr->clock);
177 last = tkr->cycle_last;
178 mask = tkr->mask;
179 max = tkr->clock->max_cycles;
180 } while (read_seqcount_retry(&tk_core.seq, seq));
181
182 delta = clocksource_delta(now, last, mask);
183
184 /*
185 * Try to catch underflows by checking if we are seeing small
186 * mask-relative negative values.
187 */
188 if (unlikely((~delta & mask) < (mask >> 3))) {
189 tk->underflow_seen = 1;
190 delta = 0;
191 }
192
193 /* Cap the delta value to max_cycles to avoid mult overflows */
194 if (unlikely(delta > max)) {
195 tk->overflow_seen = 1;
196 delta = tkr->clock->max_cycles;
197 }
198
199 return delta;
200}
201#else
202static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
203{
204}
205static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
206{
207 cycle_t cycle_now, delta;
208
209 /* read clocksource */
210 cycle_now = tkr->read(tkr->clock);
211
212 /* calculate the delta since the last update_wall_time */
213 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
214
215 return delta;
216}
217#endif
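/*
 * Editorial sketch: in both variants above, clocksource_delta() is
 * effectively (now - last) & mask, so a wrapping counter still yields
 * the right delta. For a hypothetical 16-bit clocksource:
 *
 *	cycle_t mask = 0xffff;
 *	cycle_t delta = (0x0005 - 0xfffe) & mask;	// == 0x0007
 */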
218
219/**
220 * tk_setup_internals - Set up internals to use clocksource clock.
221 *
222 * @tk: The target timekeeper to setup.
223 * @clock: Pointer to clocksource.
224 *
225 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
226 * pair and interval request.
227 *
228 * Unless you're the timekeeping code, you should not be using this!
229 */
230static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
231{
232 cycle_t interval;
233 u64 tmp, ntpinterval;
234 struct clocksource *old_clock;
235
236 ++tk->cs_was_changed_seq;
237 old_clock = tk->tkr_mono.clock;
238 tk->tkr_mono.clock = clock;
239 tk->tkr_mono.read = clock->read;
240 tk->tkr_mono.mask = clock->mask;
241 tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
242
243 tk->tkr_raw.clock = clock;
244 tk->tkr_raw.read = clock->read;
245 tk->tkr_raw.mask = clock->mask;
246 tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
247
248 /* Do the ns -> cycle conversion first, using original mult */
249 tmp = NTP_INTERVAL_LENGTH;
250 tmp <<= clock->shift;
251 ntpinterval = tmp;
252 tmp += clock->mult/2;
253 do_div(tmp, clock->mult);
254 if (tmp == 0)
255 tmp = 1;
256
257 interval = (cycle_t) tmp;
258 tk->cycle_interval = interval;
259
260 /* Go back from cycles -> shifted ns */
261 tk->xtime_interval = (u64) interval * clock->mult;
262 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
263 tk->raw_interval =
264 ((u64) interval * clock->mult) >> clock->shift;
265
266 /* if changing clocks, convert xtime_nsec shift units */
267 if (old_clock) {
268 int shift_change = clock->shift - old_clock->shift;
269 if (shift_change < 0)
270 tk->tkr_mono.xtime_nsec >>= -shift_change;
271 else
272 tk->tkr_mono.xtime_nsec <<= shift_change;
273 }
274 tk->tkr_raw.xtime_nsec = 0;
275
276 tk->tkr_mono.shift = clock->shift;
277 tk->tkr_raw.shift = clock->shift;
278
279 tk->ntp_error = 0;
280 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
281 tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
282
283 /*
284 * The timekeeper keeps its own mult values for the currently
285 * active clocksource. These values will be adjusted via NTP
286 * to counteract clock drifting.
287 */
288 tk->tkr_mono.mult = clock->mult;
289 tk->tkr_raw.mult = clock->mult;
290 tk->ntp_err_mult = 0;
291}
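/*
 * Editorial worked example with made-up numbers: assume HZ == 100, so
 * NTP_INTERVAL_LENGTH == 10,000,000 ns, and a hypothetical 1 MHz
 * clocksource with mult == 4000 and shift == 2 (one cycle is
 * (1 * 4000) >> 2 == 1000 ns). The conversion above then gives:
 *
 *	tmp = (10000000ULL << 2) + 4000 / 2;	// 40002000
 *	do_div(tmp, 4000);			// cycle_interval == 10000
 *	// i.e. one NTP interval is 10000 cycles of the 1 MHz clock,
 *	// and xtime_interval == 10000 * 4000 shifted nanoseconds.
 */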
292
293/* Timekeeper helper functions. */
294
295#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
296static u32 default_arch_gettimeoffset(void) { return 0; }
297u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
298#else
299static inline u32 arch_gettimeoffset(void) { return 0; }
300#endif
301
302static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
303 cycle_t delta)
304{
305 s64 nsec;
306
307 nsec = delta * tkr->mult + tkr->xtime_nsec;
308 nsec >>= tkr->shift;
309
310 /* If arch requires, add in arch_gettimeoffset() */
311 return nsec + arch_gettimeoffset();
312}
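/*
 * Editorial sketch, reusing the hypothetical mult == 4000 / shift == 2
 * clocksource from above: a delta of 5 cycles with xtime_nsec == 0
 * converts to nanoseconds as:
 *
 *	s64 nsec = (5 * 4000 + 0) >> 2;		// 5000 ns, i.e. 5 us
 */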
313
314static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
315{
316 cycle_t delta;
317
318 delta = timekeeping_get_delta(tkr);
319 return timekeeping_delta_to_ns(tkr, delta);
320}
321
322static inline s64 timekeeping_cycles_to_ns(struct tk_read_base *tkr,
323 cycle_t cycles)
324{
325 cycle_t delta;
326
327 /* calculate the delta since the last update_wall_time */
328 delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
329 return timekeeping_delta_to_ns(tkr, delta);
330}
331
332/**
333 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
334 * @tkr: Timekeeping readout base from which we take the update
335 *
336 * We want to use this from any context including NMI and tracing /
337 * instrumenting the timekeeping code itself.
338 *
339 * Employ the latch technique; see @raw_write_seqcount_latch.
340 *
341 * So if a NMI hits the update of base[0] then it will use base[1]
342 * which is still consistent. In the worst case this can result in a
343 * slightly wrong timestamp (a few nanoseconds). See
344 * @ktime_get_mono_fast_ns.
345 */
346static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
347{
348 struct tk_read_base *base = tkf->base;
349
350 /* Force readers off to base[1] */
351 raw_write_seqcount_latch(&tkf->seq);
352
353 /* Update base[0] */
354 memcpy(base, tkr, sizeof(*base));
355
356 /* Force readers back to base[0] */
357 raw_write_seqcount_latch(&tkf->seq);
358
359 /* Update base[1] */
360 memcpy(base + 1, base, sizeof(*base));
361}
362
363/**
364 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
365 *
366 * This timestamp is not guaranteed to be monotonic across an update.
367 * The timestamp is calculated by:
368 *
369 * now = base_mono + clock_delta * slope
370 *
371 * So if the update lowers the slope, readers who are forced to the
372 * not yet updated second array are still using the old steeper slope.
373 *
374 * tmono
375 * ^
376 * | o n
377 * | o n
378 * | u
379 * | o
380 * |o
381 * |12345678---> reader order
382 *
383 * o = old slope
384 * u = update
385 * n = new slope
386 *
387 * So reader 6 will observe time going backwards versus reader 5.
388 *
389 * While other CPUs are likely to be able to observe that, the only way
390 * for a CPU local observation is when an NMI hits in the middle of
391 * the update. Timestamps taken from that NMI context might be ahead
392 * of the following timestamps. Callers need to be aware of that and
393 * deal with it.
394 */
395static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
396{
397 struct tk_read_base *tkr;
398 unsigned int seq;
399 u64 now;
400
401 do {
402 seq = raw_read_seqcount_latch(&tkf->seq);
403 tkr = tkf->base + (seq & 0x01);
404 now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
405 } while (read_seqcount_retry(&tkf->seq, seq));
406
407 return now;
408}
409
410u64 ktime_get_mono_fast_ns(void)
411{
412 return __ktime_get_fast_ns(&tk_fast_mono);
413}
414EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
415
416u64 ktime_get_raw_fast_ns(void)
417{
418 return __ktime_get_fast_ns(&tk_fast_raw);
419}
420EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
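/*
 * Editorial usage sketch: these accessors are NMI safe, unlike
 * ktime_get(), which can live-lock if an NMI interrupts the seqcount
 * writer. my_nmi_handler() is hypothetical:
 *
 *	static void my_nmi_handler(void)
 *	{
 *		u64 ts = ktime_get_mono_fast_ns();
 *
 *		trace_printk("NMI at %llu ns\n", (unsigned long long)ts);
 *	}
 */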
421
422/* Suspend-time cycles value for halted fast timekeeper. */
423static cycle_t cycles_at_suspend;
424
425static cycle_t dummy_clock_read(struct clocksource *cs)
426{
427 return cycles_at_suspend;
428}
429
430/**
431 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
432 * @tk: Timekeeper to snapshot.
433 *
434 * It is generally unsafe to access the clocksource after timekeeping has been
435 * suspended, so take a snapshot of the readout base of @tk and use it as the
436 * fast timekeeper's readout base while suspended. It will return the same
437 * number of cycles every time until timekeeping is resumed at which time the
438 * proper readout base for the fast timekeeper will be restored automatically.
439 */
440static void halt_fast_timekeeper(struct timekeeper *tk)
441{
442 static struct tk_read_base tkr_dummy;
443 struct tk_read_base *tkr = &tk->tkr_mono;
444
445 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
446 cycles_at_suspend = tkr->read(tkr->clock);
447 tkr_dummy.read = dummy_clock_read;
448 update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
449
450 tkr = &tk->tkr_raw;
451 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
452 tkr_dummy.read = dummy_clock_read;
453 update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
454}
455
456#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
457
458static inline void update_vsyscall(struct timekeeper *tk)
459{
460 struct timespec xt, wm;
461
462 xt = timespec64_to_timespec(tk_xtime(tk));
463 wm = timespec64_to_timespec(tk->wall_to_monotonic);
464 update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
465 tk->tkr_mono.cycle_last);
466}
467
468static inline void old_vsyscall_fixup(struct timekeeper *tk)
469{
470 s64 remainder;
471
472 /*
473 * Store only full nanoseconds into xtime_nsec after rounding
474 * it up and add the remainder to the error difference.
475 * XXX - This is necessary to avoid small 1ns inconsistencies caused
476 * by truncating the remainder in vsyscalls. However, it causes
477 * additional work to be done in timekeeping_adjust(). Once
478 * the vsyscall implementations are converted to use xtime_nsec
479 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
480 * users are removed, this can be killed.
481 */
482 remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
483 tk->tkr_mono.xtime_nsec -= remainder;
484 tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
485 tk->ntp_error += remainder << tk->ntp_error_shift;
486 tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
487}
488#else
489#define old_vsyscall_fixup(tk)
490#endif
491
492static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
493
494static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
495{
496 raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
497}
498
499/**
500 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
501 */
502int pvclock_gtod_register_notifier(struct notifier_block *nb)
503{
504 struct timekeeper *tk = &tk_core.timekeeper;
505 unsigned long flags;
506 int ret;
507
508 raw_spin_lock_irqsave(&timekeeper_lock, flags);
509 ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
510 update_pvclock_gtod(tk, true);
511 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
512
513 return ret;
514}
515EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
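/*
 * Editorial sketch of a registration, e.g. from paravirt clock code;
 * my_gtod_notify() and my_nb are hypothetical:
 *
 *	static int my_gtod_notify(struct notifier_block *nb,
 *				  unsigned long was_set, void *priv)
 *	{
 *		struct timekeeper *tk = priv;
 *
 *		// propagate tk's new time data to the hypervisor here
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_gtod_notify,
 *	};
 *
 *	pvclock_gtod_register_notifier(&my_nb);
 */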
516
517/**
518 * pvclock_gtod_unregister_notifier - unregister a pvclock
519 * timedata update listener
520 */
521int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
522{
523 unsigned long flags;
524 int ret;
525
526 raw_spin_lock_irqsave(&timekeeper_lock, flags);
527 ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
528 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
529
530 return ret;
531}
532EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
533
534/*
535 * tk_update_leap_state - helper to update the next_leap_ktime
536 */
537static inline void tk_update_leap_state(struct timekeeper *tk)
538{
539 tk->next_leap_ktime = ntp_get_next_leap();
540 if (tk->next_leap_ktime.tv64 != KTIME_MAX)
541 /* Convert to monotonic time */
542 tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
543}
544
545/*
546 * Update the ktime_t based scalar nsec members of the timekeeper
547 */
548static inline void tk_update_ktime_data(struct timekeeper *tk)
549{
550 u64 seconds;
551 u32 nsec;
552
553 /*
554 * The xtime based monotonic readout is:
555 * nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
556 * The ktime based monotonic readout is:
557 * nsec = base_mono + now();
558 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
559 */
560 seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
561 nsec = (u32) tk->wall_to_monotonic.tv_nsec;
562 tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
563
564 /* Update the monotonic raw base */
565 tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);
566
567 /*
568 * The sum of the nanoseconds portions of xtime and
569 * wall_to_monotonic can be greater than or equal to one
570 * second. Take this into account before updating tk->ktime_sec.
571 */
572 nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
573 if (nsec >= NSEC_PER_SEC)
574 seconds++;
575 tk->ktime_sec = seconds;
576}
577
578/* must hold timekeeper_lock */
579static void timekeeping_update(struct timekeeper *tk, unsigned int action)
580{
581 if (action & TK_CLEAR_NTP) {
582 tk->ntp_error = 0;
583 ntp_clear();
584 }
585
586 tk_update_leap_state(tk);
587 tk_update_ktime_data(tk);
588
589 update_vsyscall(tk);
590 update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
591
592 update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
593 update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw);
594
595 if (action & TK_CLOCK_WAS_SET)
596 tk->clock_was_set_seq++;
597 /*
598 * The mirroring of the data to the shadow-timekeeper needs
599 * to happen last here to ensure we don't overwrite the
600 * timekeeper structure on the next update with stale data
601 */
602 if (action & TK_MIRROR)
603 memcpy(&shadow_timekeeper, &tk_core.timekeeper,
604 sizeof(tk_core.timekeeper));
605}
606
607/**
608 * timekeeping_forward_now - update clock to the current time
609 *
610 * Forward the current clock to update its state since the last call to
611 * update_wall_time(). This is useful before significant clock changes,
612 * as it avoids having to deal with this time offset explicitly.
613 */
614static void timekeeping_forward_now(struct timekeeper *tk)
615{
616 struct clocksource *clock = tk->tkr_mono.clock;
617 cycle_t cycle_now, delta;
618 s64 nsec;
619
620 cycle_now = tk->tkr_mono.read(clock);
621 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
622 tk->tkr_mono.cycle_last = cycle_now;
623 tk->tkr_raw.cycle_last = cycle_now;
624
625 tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
626
627 /* If arch requires, add in arch_gettimeoffset() */
628 tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
629
630 tk_normalize_xtime(tk);
631
632 nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
633 timespec64_add_ns(&tk->raw_time, nsec);
634}
635
636/**
637 * __getnstimeofday64 - Returns the time of day in a timespec64.
638 * @ts: pointer to the timespec to be set
639 *
640 * Updates the time of day in the timespec.
641 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
642 */
643int __getnstimeofday64(struct timespec64 *ts)
644{
645 struct timekeeper *tk = &tk_core.timekeeper;
646 unsigned long seq;
647 s64 nsecs = 0;
648
649 do {
650 seq = read_seqcount_begin(&tk_core.seq);
651
652 ts->tv_sec = tk->xtime_sec;
653 nsecs = timekeeping_get_ns(&tk->tkr_mono);
654
655 } while (read_seqcount_retry(&tk_core.seq, seq));
656
657 ts->tv_nsec = 0;
658 timespec64_add_ns(ts, nsecs);
659
660 /*
661 * Do not bail out early, in case there were callers still using
662 * the value, even in the face of the WARN_ON.
663 */
664 if (unlikely(timekeeping_suspended))
665 return -EAGAIN;
666 return 0;
667}
668EXPORT_SYMBOL(__getnstimeofday64);
669
670/**
671 * getnstimeofday64 - Returns the time of day in a timespec64.
672 * @ts: pointer to the timespec64 to be set
673 *
674 * Returns the time of day in a timespec64 (WARN if suspended).
675 */
676void getnstimeofday64(struct timespec64 *ts)
677{
678 WARN_ON(__getnstimeofday64(ts));
679}
680EXPORT_SYMBOL(getnstimeofday64);
681
682ktime_t ktime_get(void)
683{
684 struct timekeeper *tk = &tk_core.timekeeper;
685 unsigned int seq;
686 ktime_t base;
687 s64 nsecs;
688
689 WARN_ON(timekeeping_suspended);
690
691 do {
692 seq = read_seqcount_begin(&tk_core.seq);
693 base = tk->tkr_mono.base;
694 nsecs = timekeeping_get_ns(&tk->tkr_mono);
695
696 } while (read_seqcount_retry(&tk_core.seq, seq));
697
698 return ktime_add_ns(base, nsecs);
699}
700EXPORT_SYMBOL_GPL(ktime_get);
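/*
 * Editorial usage sketch: measuring an elapsed interval with the
 * monotonic clock; do_work() is hypothetical:
 *
 *	ktime_t start = ktime_get();
 *
 *	do_work();
 *	pr_info("took %lld ns\n",
 *		(long long)ktime_to_ns(ktime_sub(ktime_get(), start)));
 */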
701
702u32 ktime_get_resolution_ns(void)
703{
704 struct timekeeper *tk = &tk_core.timekeeper;
705 unsigned int seq;
706 u32 nsecs;
707
708 WARN_ON(timekeeping_suspended);
709
710 do {
711 seq = read_seqcount_begin(&tk_core.seq);
712 nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
713 } while (read_seqcount_retry(&tk_core.seq, seq));
714
715 return nsecs;
716}
717EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
718
719static ktime_t *offsets[TK_OFFS_MAX] = {
720 [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
721 [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
722 [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
723};
724
725ktime_t ktime_get_with_offset(enum tk_offsets offs)
726{
727 struct timekeeper *tk = &tk_core.timekeeper;
728 unsigned int seq;
729 ktime_t base, *offset = offsets[offs];
730 s64 nsecs;
731
732 WARN_ON(timekeeping_suspended);
733
734 do {
735 seq = read_seqcount_begin(&tk_core.seq);
736 base = ktime_add(tk->tkr_mono.base, *offset);
737 nsecs = timekeeping_get_ns(&tk->tkr_mono);
738
739 } while (read_seqcount_retry(&tk_core.seq, seq));
740
741 return ktime_add_ns(base, nsecs);
742
743}
744EXPORT_SYMBOL_GPL(ktime_get_with_offset);
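/*
 * Editorial note: the familiar clock accessors are expected to be thin
 * inline wrappers around this helper, along the lines of:
 *
 *	static inline ktime_t ktime_get_real(void)
 *	{
 *		return ktime_get_with_offset(TK_OFFS_REAL);
 *	}
 */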
745
746/**
747 * ktime_mono_to_any() - convert monotonic time to any other time
748 * @tmono: time to convert.
749 * @offs: which offset to use
750 */
751ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
752{
753 ktime_t *offset = offsets[offs];
754 unsigned long seq;
755 ktime_t tconv;
756
757 do {
758 seq = read_seqcount_begin(&tk_core.seq);
759 tconv = ktime_add(tmono, *offset);
760 } while (read_seqcount_retry(&tk_core.seq, seq));
761
762 return tconv;
763}
764EXPORT_SYMBOL_GPL(ktime_mono_to_any);
765
766/**
767 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
768 */
769ktime_t ktime_get_raw(void)
770{
771 struct timekeeper *tk = &tk_core.timekeeper;
772 unsigned int seq;
773 ktime_t base;
774 s64 nsecs;
775
776 do {
777 seq = read_seqcount_begin(&tk_core.seq);
778 base = tk->tkr_raw.base;
779 nsecs = timekeeping_get_ns(&tk->tkr_raw);
780
781 } while (read_seqcount_retry(&tk_core.seq, seq));
782
783 return ktime_add_ns(base, nsecs);
784}
785EXPORT_SYMBOL_GPL(ktime_get_raw);
786
787/**
788 * ktime_get_ts64 - get the monotonic clock in timespec64 format
789 * @ts: pointer to timespec variable
790 *
791 * The function calculates the monotonic clock from the realtime
792 * clock and the wall_to_monotonic offset and stores the result
793 * in normalized timespec64 format in the variable pointed to by @ts.
794 */
795void ktime_get_ts64(struct timespec64 *ts)
796{
797 struct timekeeper *tk = &tk_core.timekeeper;
798 struct timespec64 tomono;
799 s64 nsec;
800 unsigned int seq;
801
802 WARN_ON(timekeeping_suspended);
803
804 do {
805 seq = read_seqcount_begin(&tk_core.seq);
806 ts->tv_sec = tk->xtime_sec;
807 nsec = timekeeping_get_ns(&tk->tkr_mono);
808 tomono = tk->wall_to_monotonic;
809
810 } while (read_seqcount_retry(&tk_core.seq, seq));
811
812 ts->tv_sec += tomono.tv_sec;
813 ts->tv_nsec = 0;
814 timespec64_add_ns(ts, nsec + tomono.tv_nsec);
815}
816EXPORT_SYMBOL_GPL(ktime_get_ts64);
817
818/**
819 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
820 *
821 * Returns the seconds portion of CLOCK_MONOTONIC with a single
822 * non-serialized read. tk->ktime_sec is of type 'unsigned long' so this
823 * works on both 32 and 64 bit systems. On 32 bit systems the readout
824 * covers ~136 years of uptime, which should be enough to prevent
825 * premature wraparounds.
826 */
827time64_t ktime_get_seconds(void)
828{
829 struct timekeeper *tk = &tk_core.timekeeper;
830
831 WARN_ON(timekeeping_suspended);
832 return tk->ktime_sec;
833}
834EXPORT_SYMBOL_GPL(ktime_get_seconds);
835
836/**
837 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
838 *
839 * Returns the wall clock seconds since 1970. This replaces the
840 * get_seconds() interface which is not y2038 safe on 32bit systems.
841 *
842 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
843 * 32bit systems the access must be protected with the sequence
844 * counter to provide "atomic" access to the 64bit tk->xtime_sec
845 * value.
846 */
847time64_t ktime_get_real_seconds(void)
848{
849 struct timekeeper *tk = &tk_core.timekeeper;
850 time64_t seconds;
851 unsigned int seq;
852
853 if (IS_ENABLED(CONFIG_64BIT))
854 return tk->xtime_sec;
855
856 do {
857 seq = read_seqcount_begin(&tk_core.seq);
858 seconds = tk->xtime_sec;
859
860 } while (read_seqcount_retry(&tk_core.seq, seq));
861
862 return seconds;
863}
864EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
865
866/**
867 * __ktime_get_real_seconds - The same as ktime_get_real_seconds,
868 * but without the sequence counter protection. This internal function
869 * is only called when the timekeeping lock is already held.
870 */
871time64_t __ktime_get_real_seconds(void)
872{
873 struct timekeeper *tk = &tk_core.timekeeper;
874
875 return tk->xtime_sec;
876}
877
878/**
879 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
880 * @systime_snapshot: pointer to struct receiving the system time snapshot
881 */
882void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
883{
884 struct timekeeper *tk = &tk_core.timekeeper;
885 unsigned long seq;
886 ktime_t base_raw;
887 ktime_t base_real;
888 s64 nsec_raw;
889 s64 nsec_real;
890 cycle_t now;
891
892 WARN_ON_ONCE(timekeeping_suspended);
893
894 do {
895 seq = read_seqcount_begin(&tk_core.seq);
896
897 now = tk->tkr_mono.read(tk->tkr_mono.clock);
898 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
899 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
900 base_real = ktime_add(tk->tkr_mono.base,
901 tk_core.timekeeper.offs_real);
902 base_raw = tk->tkr_raw.base;
903 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
904 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
905 } while (read_seqcount_retry(&tk_core.seq, seq));
906
907 systime_snapshot->cycles = now;
908 systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
909 systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
910}
911EXPORT_SYMBOL_GPL(ktime_get_snapshot);
912
913/* Scale base by mult/div checking for overflow */
914static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
915{
916 u64 tmp, rem;
917
918 tmp = div64_u64_rem(*base, div, &rem);
919
920 if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
921 ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
922 return -EOVERFLOW;
923 tmp *= mult;
924 rem *= mult;
925
926 do_div(rem, div);
927 *base = tmp + rem;
928 return 0;
929}
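/*
 * Editorial worked example with made-up numbers: scaling *base == 10
 * by mult/div == 3/4 splits the division so neither product can
 * overflow 64 bits:
 *
 *	tmp = 10 / 4;		// 2, rem == 2
 *	tmp *= 3;		// 6
 *	rem = (2 * 3) / 4;	// 1
 *	*base = 6 + 1;		// 7 (exact value 7.5, truncated)
 */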
930
931/**
932 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
933 * @history: Snapshot representing start of history
934 * @partial_history_cycles: Cycle offset into history (fractional part)
935 * @total_history_cycles: Total history length in cycles
936 * @discontinuity: True indicates the clock was set during the history period
937 * @ts: Cross timestamp that should be adjusted using
938 * partial/total ratio
939 *
940 * Helper function used by get_device_system_crosststamp() to correct the
941 * crosstimestamp corresponding to the start of the current interval to the
942 * system counter value (timestamp point) provided by the driver. The
943 * total_history_* quantities are the total history starting at the provided
944 * reference point and ending at the start of the current interval. The cycle
945 * count between the driver timestamp point and the start of the current
946 * interval is partial_history_cycles.
947 */
948static int adjust_historical_crosststamp(struct system_time_snapshot *history,
949 cycle_t partial_history_cycles,
950 cycle_t total_history_cycles,
951 bool discontinuity,
952 struct system_device_crosststamp *ts)
953{
954 struct timekeeper *tk = &tk_core.timekeeper;
955 u64 corr_raw, corr_real;
956 bool interp_forward;
957 int ret;
958
959 if (total_history_cycles == 0 || partial_history_cycles == 0)
960 return 0;
961
962 /* Interpolate shortest distance from beginning or end of history */
963 interp_forward = partial_history_cycles >
964 total_history_cycles/2;
965 partial_history_cycles = interp_forward ?
966 total_history_cycles - partial_history_cycles :
967 partial_history_cycles;
968
969 /*
970 * Scale the monotonic raw time delta by:
971 * partial_history_cycles / total_history_cycles
972 */
973 corr_raw = (u64)ktime_to_ns(
974 ktime_sub(ts->sys_monoraw, history->raw));
975 ret = scale64_check_overflow(partial_history_cycles,
976 total_history_cycles, &corr_raw);
977 if (ret)
978 return ret;
979
980 /*
981 * If there is a discontinuity in the history, scale monotonic raw
982 * correction by:
983 * mult(real)/mult(raw) yielding the realtime correction
984 * Otherwise, calculate the realtime correction similar to monotonic
985 * raw calculation
986 */
987 if (discontinuity) {
988 corr_real = mul_u64_u32_div
989 (corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
990 } else {
991 corr_real = (u64)ktime_to_ns(
992 ktime_sub(ts->sys_realtime, history->real));
993 ret = scale64_check_overflow(partial_history_cycles,
994 total_history_cycles, &corr_real);
995 if (ret)
996 return ret;
997 }
998
999 /* Fix up the monotonic raw and real time values */
1000 if (interp_forward) {
1001 ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
1002 ts->sys_realtime = ktime_add_ns(history->real, corr_real);
1003 } else {
1004 ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
1005 ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
1006 }
1007
1008 return 0;
1009}
1010
1011/*
1012 * cycle_between - true if test occurs chronologically between before and after
1013 */
1014static bool cycle_between(cycle_t before, cycle_t test, cycle_t after)
1015{
1016 if (test > before && test < after)
1017 return true;
1018 if (test < before && before > after)
1019 return true;
1020 return false;
1021}
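/*
 * Editorial example: the second condition covers a counter that wrapped
 * between the reference reads. On a hypothetical 8-bit counter with
 * before == 250 and after == 10, a test value of 3 satisfies
 * (3 < 250 && 250 > 10), so cycle_between(250, 3, 10) == true.
 */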
1022
1023/**
1024 * get_device_system_crosststamp - Synchronously capture system/device timestamp
1025 * @get_time_fn: Callback to get simultaneous device time and
1026 * system counter from the device driver
1027 * @ctx: Context passed to get_time_fn()
1028 * @history_begin: Historical reference point used to interpolate system
1029 * time when the counter provided by the driver is before the current interval
1030 * @xtstamp: Receives simultaneously captured system and device time
1031 *
1032 * Reads a timestamp from a device and correlates it to system time
1033 */
1034int get_device_system_crosststamp(int (*get_time_fn)
1035 (ktime_t *device_time,
1036 struct system_counterval_t *sys_counterval,
1037 void *ctx),
1038 void *ctx,
1039 struct system_time_snapshot *history_begin,
1040 struct system_device_crosststamp *xtstamp)
1041{
1042 struct system_counterval_t system_counterval;
1043 struct timekeeper *tk = &tk_core.timekeeper;
1044 cycle_t cycles, now, interval_start;
1045 unsigned int clock_was_set_seq = 0;
1046 ktime_t base_real, base_raw;
1047 s64 nsec_real, nsec_raw;
1048 u8 cs_was_changed_seq;
1049 unsigned long seq;
1050 bool do_interp;
1051 int ret;
1052
1053 do {
1054 seq = read_seqcount_begin(&tk_core.seq);
1055 /*
1056 * Try to synchronously capture device time and a system
1057 * counter value calling back into the device driver
1058 */
1059 ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
1060 if (ret)
1061 return ret;
1062
1063 /*
1064 * Verify that the clocksource associated with the captured
1065 * system counter value is the same as the currently installed
1066 * timekeeper clocksource
1067 */
1068 if (tk->tkr_mono.clock != system_counterval.cs)
1069 return -ENODEV;
1070 cycles = system_counterval.cycles;
1071
1072 /*
1073 * Check whether the system counter value provided by the
1074 * device driver is on the current timekeeping interval.
1075 */
1076 now = tk->tkr_mono.read(tk->tkr_mono.clock);
1077 interval_start = tk->tkr_mono.cycle_last;
1078 if (!cycle_between(interval_start, cycles, now)) {
1079 clock_was_set_seq = tk->clock_was_set_seq;
1080 cs_was_changed_seq = tk->cs_was_changed_seq;
1081 cycles = interval_start;
1082 do_interp = true;
1083 } else {
1084 do_interp = false;
1085 }
1086
1087 base_real = ktime_add(tk->tkr_mono.base,
1088 tk_core.timekeeper.offs_real);
1089 base_raw = tk->tkr_raw.base;
1090
1091 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
1092 system_counterval.cycles);
1093 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
1094 system_counterval.cycles);
1095 } while (read_seqcount_retry(&tk_core.seq, seq));
1096
1097 xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
1098 xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);
1099
1100 /*
1101 * Interpolate if necessary, adjusting back from the start of the
1102 * current interval
1103 */
1104 if (do_interp) {
1105 cycle_t partial_history_cycles, total_history_cycles;
1106 bool discontinuity;
1107
1108 /*
1109 * Check that the counter value occurs after the provided
1110 * history reference and that the history doesn't cross a
1111 * clocksource change
1112 */
1113 if (!history_begin ||
1114 !cycle_between(history_begin->cycles,
1115 system_counterval.cycles, cycles) ||
1116 history_begin->cs_was_changed_seq != cs_was_changed_seq)
1117 return -EINVAL;
1118 partial_history_cycles = cycles - system_counterval.cycles;
1119 total_history_cycles = cycles - history_begin->cycles;
1120 discontinuity =
1121 history_begin->clock_was_set_seq != clock_was_set_seq;
1122
1123 ret = adjust_historical_crosststamp(history_begin,
1124 partial_history_cycles,
1125 total_history_cycles,
1126 discontinuity, xtstamp);
1127 if (ret)
1128 return ret;
1129 }
1130
1131 return 0;
1132}
1133EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
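/*
 * Editorial sketch of a caller: the driver supplies a callback that
 * captures its device clock and the matching system counter value in
 * one shot. Everything prefixed my_ below is hypothetical:
 *
 *	static int my_get_time(ktime_t *device_time,
 *			       struct system_counterval_t *sys_counterval,
 *			       void *ctx)
 *	{
 *		struct my_dev *dev = ctx;
 *
 *		*device_time = my_read_device_clock(dev);
 *		*sys_counterval = my_read_system_counter(dev);
 *		return 0;
 *	}
 *
 *	err = get_device_system_crosststamp(my_get_time, dev, NULL, &xtstamp);
 */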
1134
1135/**
1136 * do_gettimeofday - Returns the time of day in a timeval
1137 * @tv: pointer to the timeval to be set
1138 *
1139 * NOTE: Users should be converted to using getnstimeofday64()
1140 */
1141void do_gettimeofday(struct timeval *tv)
1142{
1143 struct timespec64 now;
1144
1145 getnstimeofday64(&now);
1146 tv->tv_sec = now.tv_sec;
1147 tv->tv_usec = now.tv_nsec/1000;
1148}
1149EXPORT_SYMBOL(do_gettimeofday);
1150
1151/**
1152 * do_settimeofday64 - Sets the time of day.
1153 * @ts: pointer to the timespec64 variable containing the new time
1154 *
1155 * Sets the time of day to the new time, updates NTP and notifies hrtimers
1156 */
1157int do_settimeofday64(const struct timespec64 *ts)
1158{
1159 struct timekeeper *tk = &tk_core.timekeeper;
1160 struct timespec64 ts_delta, xt;
1161 unsigned long flags;
1162 int ret = 0;
1163
1164 if (!timespec64_valid_strict(ts))
1165 return -EINVAL;
1166
1167 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1168 write_seqcount_begin(&tk_core.seq);
1169
1170 timekeeping_forward_now(tk);
1171
1172 xt = tk_xtime(tk);
1173 ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
1174 ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
1175
1176 if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
1177 ret = -EINVAL;
1178 goto out;
1179 }
1180
1181 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
1182
1183 tk_set_xtime(tk, ts);
1184out:
1185 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1186
1187 write_seqcount_end(&tk_core.seq);
1188 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1189
1190 /* signal hrtimers about time change */
1191 clock_was_set();
1192
1193 return ret;
1194}
1195EXPORT_SYMBOL(do_settimeofday64);
1196
1197/**
1198 * timekeeping_inject_offset - Adds or subtracts from the current time.
1199 * @ts: pointer to the timespec variable containing the offset
1200 *
1201 * Adds or subtracts an offset value from the current time.
1202 */
1203int timekeeping_inject_offset(struct timespec *ts)
1204{
1205 struct timekeeper *tk = &tk_core.timekeeper;
1206 unsigned long flags;
1207 struct timespec64 ts64, tmp;
1208 int ret = 0;
1209
1210 if (!timespec_inject_offset_valid(ts))
1211 return -EINVAL;
1212
1213 ts64 = timespec_to_timespec64(*ts);
1214
1215 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1216 write_seqcount_begin(&tk_core.seq);
1217
1218 timekeeping_forward_now(tk);
1219
1220 /* Make sure the proposed value is valid */
1221 tmp = timespec64_add(tk_xtime(tk), ts64);
1222 if (timespec64_compare(&tk->wall_to_monotonic, &ts64) > 0 ||
1223 !timespec64_valid_strict(&tmp)) {
1224 ret = -EINVAL;
1225 goto error;
1226 }
1227
1228 tk_xtime_add(tk, &ts64);
1229 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));
1230
1231error: /* even if we error out, we forwarded the time, so call update */
1232 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1233
1234 write_seqcount_end(&tk_core.seq);
1235 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1236
1237 /* signal hrtimers about time change */
1238 clock_was_set();
1239
1240 return ret;
1241}
1242EXPORT_SYMBOL(timekeeping_inject_offset);
1243
1244
1245/**
1246 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
1247 *
1248 */
1249s32 timekeeping_get_tai_offset(void)
1250{
1251 struct timekeeper *tk = &tk_core.timekeeper;
1252 unsigned int seq;
1253 s32 ret;
1254
1255 do {
1256 seq = read_seqcount_begin(&tk_core.seq);
1257 ret = tk->tai_offset;
1258 } while (read_seqcount_retry(&tk_core.seq, seq));
1259
1260 return ret;
1261}
1262
1263/**
1264 * __timekeeping_set_tai_offset - Lock free worker function
1265 *
1266 */
1267static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
1268{
1269 tk->tai_offset = tai_offset;
1270 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
1271}
1272
1273/**
1274 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
1275 *
1276 */
1277void timekeeping_set_tai_offset(s32 tai_offset)
1278{
1279 struct timekeeper *tk = &tk_core.timekeeper;
1280 unsigned long flags;
1281
1282 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1283 write_seqcount_begin(&tk_core.seq);
1284 __timekeeping_set_tai_offset(tk, tai_offset);
1285 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1286 write_seqcount_end(&tk_core.seq);
1287 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1288 clock_was_set();
1289}
1290
1291/**
1292 * change_clocksource - Swaps clocksources if a new one is available
1293 *
1294 * Accumulates current time interval and initializes new clocksource
1295 */
1296static int change_clocksource(void *data)
1297{
1298 struct timekeeper *tk = &tk_core.timekeeper;
1299 struct clocksource *new, *old;
1300 unsigned long flags;
1301
1302 new = (struct clocksource *) data;
1303
1304 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1305 write_seqcount_begin(&tk_core.seq);
1306
1307 timekeeping_forward_now(tk);
1308 /*
1309 * If the cs is in a module, get a module reference. Succeeds
1310 * for built-in code (owner == NULL) as well.
1311 */
1312 if (try_module_get(new->owner)) {
1313 if (!new->enable || new->enable(new) == 0) {
1314 old = tk->tkr_mono.clock;
1315 tk_setup_internals(tk, new);
1316 if (old->disable)
1317 old->disable(old);
1318 module_put(old->owner);
1319 } else {
1320 module_put(new->owner);
1321 }
1322 }
1323 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1324
1325 write_seqcount_end(&tk_core.seq);
1326 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1327
1328 return 0;
1329}
1330
1331/**
1332 * timekeeping_notify - Install a new clock source
1333 * @clock: pointer to the clock source
1334 *
1335 * This function is called from clocksource.c after a new, better clock
1336 * source has been registered. The caller holds the clocksource_mutex.
1337 */
1338int timekeeping_notify(struct clocksource *clock)
1339{
1340 struct timekeeper *tk = &tk_core.timekeeper;
1341
1342 if (tk->tkr_mono.clock == clock)
1343 return 0;
1344 stop_machine(change_clocksource, clock, NULL);
1345 tick_clock_notify();
1346 return tk->tkr_mono.clock == clock ? 0 : -1;
1347}
1348
1349/**
1350 * getrawmonotonic64 - Returns the raw monotonic time in a timespec64
1351 * @ts: pointer to the timespec64 to be set
1352 *
1353 * Returns the raw monotonic time (completely un-modified by ntp)
1354 */
1355void getrawmonotonic64(struct timespec64 *ts)
1356{
1357 struct timekeeper *tk = &tk_core.timekeeper;
1358 struct timespec64 ts64;
1359 unsigned long seq;
1360 s64 nsecs;
1361
1362 do {
1363 seq = read_seqcount_begin(&tk_core.seq);
1364 nsecs = timekeeping_get_ns(&tk->tkr_raw);
1365 ts64 = tk->raw_time;
1366
1367 } while (read_seqcount_retry(&tk_core.seq, seq));
1368
1369 timespec64_add_ns(&ts64, nsecs);
1370 *ts = ts64;
1371}
1372EXPORT_SYMBOL(getrawmonotonic64);
1373
1374
1375/**
1376 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
1377 */
1378int timekeeping_valid_for_hres(void)
1379{
1380 struct timekeeper *tk = &tk_core.timekeeper;
1381 unsigned long seq;
1382 int ret;
1383
1384 do {
1385 seq = read_seqcount_begin(&tk_core.seq);
1386
1387 ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
1388
1389 } while (read_seqcount_retry(&tk_core.seq, seq));
1390
1391 return ret;
1392}
1393
1394/**
1395 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
1396 */
1397u64 timekeeping_max_deferment(void)
1398{
1399 struct timekeeper *tk = &tk_core.timekeeper;
1400 unsigned long seq;
1401 u64 ret;
1402
1403 do {
1404 seq = read_seqcount_begin(&tk_core.seq);
1405
1406 ret = tk->tkr_mono.clock->max_idle_ns;
1407
1408 } while (read_seqcount_retry(&tk_core.seq, seq));
1409
1410 return ret;
1411}
1412
1413/**
1414 * read_persistent_clock - Return time from the persistent clock.
1415 *
1416 * Weak dummy function for arches that do not yet support it.
1417 * Reads the time from the battery-backed persistent clock.
1418 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
1419 *
1420 * XXX - Be sure to remove this once all arches implement it.
1421 */
1422void __weak read_persistent_clock(struct timespec *ts)
1423{
1424 ts->tv_sec = 0;
1425 ts->tv_nsec = 0;
1426}
1427
1428void __weak read_persistent_clock64(struct timespec64 *ts64)
1429{
1430 struct timespec ts;
1431
1432 read_persistent_clock(&ts);
1433 *ts64 = timespec_to_timespec64(ts);
1434}
1435
1436/**
1437 * read_boot_clock64 - Return time of the system start.
1438 *
1439 * Weak dummy function for arches that do not yet support it.
1440 * Function to read the exact time the system has been started.
1441 * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
1442 *
1443 * XXX - Be sure to remove this once all arches implement it.
1444 */
1445void __weak read_boot_clock64(struct timespec64 *ts)
1446{
1447 ts->tv_sec = 0;
1448 ts->tv_nsec = 0;
1449}
1450
1451/* Flag for if timekeeping_resume() has injected sleeptime */
1452static bool sleeptime_injected;
1453
1454/* Flag for if there is a persistent clock on this platform */
1455static bool persistent_clock_exists;
1456
1457/*
1458 * timekeeping_init - Initializes the clocksource and common timekeeping values
1459 */
1460void __init timekeeping_init(void)
1461{
1462 struct timekeeper *tk = &tk_core.timekeeper;
1463 struct clocksource *clock;
1464 unsigned long flags;
1465 struct timespec64 now, boot, tmp;
1466
1467 read_persistent_clock64(&now);
1468 if (!timespec64_valid_strict(&now)) {
1469 pr_warn("WARNING: Persistent clock returned invalid value!\n"
1470 " Check your CMOS/BIOS settings.\n");
1471 now.tv_sec = 0;
1472 now.tv_nsec = 0;
1473 } else if (now.tv_sec || now.tv_nsec)
1474 persistent_clock_exists = true;
1475
1476 read_boot_clock64(&boot);
1477 if (!timespec64_valid_strict(&boot)) {
1478 pr_warn("WARNING: Boot clock returned invalid value!\n"
1479 " Check your CMOS/BIOS settings.\n");
1480 boot.tv_sec = 0;
1481 boot.tv_nsec = 0;
1482 }
1483
1484 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1485 write_seqcount_begin(&tk_core.seq);
1486 ntp_init();
1487
1488 clock = clocksource_default_clock();
1489 if (clock->enable)
1490 clock->enable(clock);
1491 tk_setup_internals(tk, clock);
1492
1493 tk_set_xtime(tk, &now);
1494 tk->raw_time.tv_sec = 0;
1495 tk->raw_time.tv_nsec = 0;
1496 if (boot.tv_sec == 0 && boot.tv_nsec == 0)
1497 boot = tk_xtime(tk);
1498
1499 set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
1500 tk_set_wall_to_mono(tk, tmp);
1501
1502 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1503
1504 write_seqcount_end(&tk_core.seq);
1505 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1506}
1507
1508/* time in seconds when suspend began for persistent clock */
1509static struct timespec64 timekeeping_suspend_time;
1510
1511/**
1512 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1513 * @delta: pointer to a timespec delta value
1514 *
1515 * Takes a timespec offset measuring a suspend interval and properly
1516 * adds the sleep offset to the timekeeping variables.
1517 */
1518static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1519 struct timespec64 *delta)
1520{
1521 if (!timespec64_valid_strict(delta)) {
1522 printk_deferred(KERN_WARNING
1523 "__timekeeping_inject_sleeptime: Invalid "
1524 "sleep delta value!\n");
1525 return;
1526 }
1527 tk_xtime_add(tk, delta);
1528 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1529 tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1530 tk_debug_account_sleep_time(delta);
1531}
1532
1533#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
1534/**
1535 * We have three kinds of time sources to use for sleep time
1536 * injection, the preference order is:
1537 * 1) non-stop clocksource
1538 * 2) persistent clock (ie: RTC accessible when irqs are off)
1539 * 3) RTC
1540 *
1541 * 1) and 2) are used by timekeeping, 3) by the RTC subsystem.
1542 * If the system has neither 1) nor 2), 3) is used as the fallback.
1543 *
1544 *
1545 * If timekeeping has injected sleeptime via either 1) or 2),
1546 * 3) becomes needless, so in this case we don't need to call
1547 * rtc_resume(), and this is what timekeeping_rtc_skipresume()
1548 * means.
1549 */
1550bool timekeeping_rtc_skipresume(void)
1551{
1552 return sleeptime_injected;
1553}
1554
1555/**
1556 * Whether 1) can be used is determined only in timekeeping_resume(),
1557 * which is invoked after rtc_suspend(), so we can't reliably skip
1558 * rtc_suspend() if the system has 1).
1559 *
1560 * But if the system has 2), 2) will definitely be used, so in this
1561 * case we don't need to call rtc_suspend(), and this is what
1562 * timekeeping_rtc_skipsuspend() means.
1563 */
1564bool timekeeping_rtc_skipsuspend(void)
1565{
1566 return persistent_clock_exists;
1567}
1568
1569/**
1570 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1571 * @delta: pointer to a timespec64 delta value
1572 *
1573 * This hook is for architectures that cannot support read_persistent_clock64
1574 * because their RTC/persistent clock is only accessible when irqs are enabled,
1575 * and that also don't have an effective nonstop clocksource.
1576 *
1577 * This function should only be called by rtc_resume(), and allows
1578 * a suspend offset to be injected into the timekeeping values.
1579 */
1580void timekeeping_inject_sleeptime64(struct timespec64 *delta)
1581{
1582 struct timekeeper *tk = &tk_core.timekeeper;
1583 unsigned long flags;
1584
1585 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1586 write_seqcount_begin(&tk_core.seq);
1587
1588 timekeeping_forward_now(tk);
1589
1590 __timekeeping_inject_sleeptime(tk, delta);
1591
1592 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1593
1594 write_seqcount_end(&tk_core.seq);
1595 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1596
1597 /* signal hrtimers about time change */
1598 clock_was_set();
1599}
1600#endif
1601
1602/**
1603 * timekeeping_resume - Resumes the generic timekeeping subsystem.
1604 */
1605void timekeeping_resume(void)
1606{
1607 struct timekeeper *tk = &tk_core.timekeeper;
1608 struct clocksource *clock = tk->tkr_mono.clock;
1609 unsigned long flags;
1610 struct timespec64 ts_new, ts_delta;
1611 cycle_t cycle_now, cycle_delta;
1612
1613 sleeptime_injected = false;
1614 read_persistent_clock64(&ts_new);
1615
1616 clockevents_resume();
1617 clocksource_resume();
1618
1619 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1620 write_seqcount_begin(&tk_core.seq);
1621
1622 /*
1623 * After the system resumes, we need to calculate the suspended time and
1624 * compensate the OS time for it. There are 3 sources that could be
1625 * used: Nonstop clocksource during suspend, persistent clock and rtc
1626 * device.
1627 *
1628 * One specific platform may have 1 or 2 or all of them, and the
1629 * preference will be:
1630 * suspend-nonstop clocksource -> persistent clock -> rtc
1631 * The less preferred source will only be tried if there is no better
1632 * usable source. The rtc part is handled separately in rtc core code.
1633 */
1634 cycle_now = tk->tkr_mono.read(clock);
1635 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
1636 cycle_now > tk->tkr_mono.cycle_last) {
1637 u64 num, max = ULLONG_MAX;
1638 u32 mult = clock->mult;
1639 u32 shift = clock->shift;
1640 s64 nsec = 0;
1641
1642 cycle_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
1643 tk->tkr_mono.mask);
1644
1645 /*
1646 * "cycle_delta * mutl" may cause 64 bits overflow, if the
1647 * suspended time is too long. In that case we need do the
1648 * 64 bits math carefully
1649 */
1650 do_div(max, mult);
1651 if (cycle_delta > max) {
1652 num = div64_u64(cycle_delta, max);
1653 nsec = (((u64) max * mult) >> shift) * num;
1654 cycle_delta -= num * max;
1655 }
1656 nsec += ((u64) cycle_delta * mult) >> shift;
1657
1658 ts_delta = ns_to_timespec64(nsec);
1659 sleeptime_injected = true;
1660 } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1661 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1662 sleeptime_injected = true;
1663 }
1664
1665 if (sleeptime_injected)
1666 __timekeeping_inject_sleeptime(tk, &ts_delta);
1667
1668 /* Re-base the last cycle value */
1669 tk->tkr_mono.cycle_last = cycle_now;
1670 tk->tkr_raw.cycle_last = cycle_now;
1671
1672 tk->ntp_error = 0;
1673 timekeeping_suspended = 0;
1674 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1675 write_seqcount_end(&tk_core.seq);
1676 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1677
1678 touch_softlockup_watchdog();
1679
1680 tick_resume();
1681 hrtimers_resume();
1682}
1683
1684int timekeeping_suspend(void)
1685{
1686 struct timekeeper *tk = &tk_core.timekeeper;
1687 unsigned long flags;
1688 struct timespec64 delta, delta_delta;
1689 static struct timespec64 old_delta;
1690
1691 read_persistent_clock64(&timekeeping_suspend_time);
1692
1693 /*
1694 * On some systems the persistent_clock cannot be detected at
1695 * timekeeping_init by its return value, so if we see a valid
1696 * value returned, update the persistent_clock_exists flag.
1697 */
1698 if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1699 persistent_clock_exists = true;
1700
1701 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1702 write_seqcount_begin(&tk_core.seq);
1703 timekeeping_forward_now(tk);
1704 timekeeping_suspended = 1;
1705
1706 if (persistent_clock_exists) {
1707 /*
1708 * To avoid drift caused by repeated suspend/resumes,
1709 * each of which can add ~1 second of drift error,
1710 * try to compensate so the difference between system time
1711 * and persistent_clock time stays close to constant.
1712 */
1713 delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1714 delta_delta = timespec64_sub(delta, old_delta);
1715 if (abs(delta_delta.tv_sec) >= 2) {
1716 /*
1717 * if delta_delta is too large, assume time correction
1718 * has occurred and set old_delta to the current delta.
1719 */
1720 old_delta = delta;
1721 } else {
1722 /* Otherwise try to adjust the suspend time to compensate */
1723 timekeeping_suspend_time =
1724 timespec64_add(timekeeping_suspend_time, delta_delta);
1725 }
1726 }
1727
1728 timekeeping_update(tk, TK_MIRROR);
1729 halt_fast_timekeeper(tk);
1730 write_seqcount_end(&tk_core.seq);
1731 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1732
1733 tick_suspend();
1734 clocksource_suspend();
1735 clockevents_suspend();
1736
1737 return 0;
1738}
1739
1740/* sysfs resume/suspend bits for timekeeping */
1741static struct syscore_ops timekeeping_syscore_ops = {
1742 .resume = timekeeping_resume,
1743 .suspend = timekeeping_suspend,
1744};
1745
1746static int __init timekeeping_init_ops(void)
1747{
1748 register_syscore_ops(&timekeeping_syscore_ops);
1749 return 0;
1750}
1751device_initcall(timekeeping_init_ops);
1752
1753/*
1754 * Apply a multiplier adjustment to the timekeeper
1755 */
1756static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1757 s64 offset,
1758 bool negative,
1759 int adj_scale)
1760{
1761 s64 interval = tk->cycle_interval;
1762 s32 mult_adj = 1;
1763
1764 if (negative) {
1765 mult_adj = -mult_adj;
1766 interval = -interval;
1767 offset = -offset;
1768 }
1769 mult_adj <<= adj_scale;
1770 interval <<= adj_scale;
1771 offset <<= adj_scale;
1772
1773 /*
1774 * So the following can be confusing.
1775 *
1776 * To keep things simple, let's assume mult_adj == 1 for now.
1777 *
1778 * When mult_adj != 1, remember that the interval and offset values
1779 * have been appropriately scaled so the math is the same.
1780 *
1781 * The basic idea here is that we're increasing the multiplier
1782 * by one; this causes the xtime_interval to be incremented by
1783 * one cycle_interval. This is because:
1784 * xtime_interval = cycle_interval * mult
1785 * So if mult is being incremented by one:
1786 * xtime_interval = cycle_interval * (mult + 1)
1787 * It's the same as:
1788 * xtime_interval = (cycle_interval * mult) + cycle_interval
1789 * Which can be shortened to:
1790 * xtime_interval += cycle_interval
1791 *
1792 * So offset stores the non-accumulated cycles. Thus the current
1793 * time (in shifted nanoseconds) is:
1794 * now = (offset * adj) + xtime_nsec
1795 * Now, even though we're adjusting the clock frequency, we have
1796 * to keep time consistent. In other words, we can't jump back
1797 * in time, and we also want to avoid jumping forward in time.
1798 *
1799 * So given the same offset value, we need the time to be the same
1800 * both before and after the freq adjustment.
1801 * now = (offset * adj_1) + xtime_nsec_1
1802 * now = (offset * adj_2) + xtime_nsec_2
1803 * So:
1804 * (offset * adj_1) + xtime_nsec_1 =
1805 * (offset * adj_2) + xtime_nsec_2
1806 * And we know:
1807 * adj_2 = adj_1 + 1
1808 * So:
1809 * (offset * adj_1) + xtime_nsec_1 =
1810 * (offset * (adj_1+1)) + xtime_nsec_2
1811 * (offset * adj_1) + xtime_nsec_1 =
1812 * (offset * adj_1) + offset + xtime_nsec_2
1813 * Canceling the sides:
1814 * xtime_nsec_1 = offset + xtime_nsec_2
1815 * Which gives us:
1816 * xtime_nsec_2 = xtime_nsec_1 - offset
1817 * Which simplifies to:
1818 * xtime_nsec -= offset
1819 *
1820 * XXX - TODO: Doc ntp_error calculation.
1821 */
1822 if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
1823 /* NTP adjustment caused clocksource mult overflow */
1824 WARN_ON_ONCE(1);
1825 return;
1826 }
1827
1828 tk->tkr_mono.mult += mult_adj;
1829 tk->xtime_interval += interval;
1830 tk->tkr_mono.xtime_nsec -= offset;
1831 tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
1832}
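/*
 * Editorial worked example with made-up numbers: suppose offset == 100
 * unaccumulated cycles and mult_adj == 1. Readers compute time as
 * (offset * mult + xtime_nsec) >> shift, so bumping mult by one while
 * subtracting offset from xtime_nsec leaves that value unchanged:
 *
 *	// before: 100 * mult       + xtime_nsec
 *	// after:  100 * (mult + 1) + (xtime_nsec - 100)
 *	//      == 100 * mult + xtime_nsec
 */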
1833
1834/*
1835 * Calculate the multiplier adjustment needed to match the frequency
1836 * specified by NTP
1837 */
1838static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
1839 s64 offset)
1840{
1841 s64 interval = tk->cycle_interval;
1842 s64 xinterval = tk->xtime_interval;
1843 u32 base = tk->tkr_mono.clock->mult;
1844 u32 max = tk->tkr_mono.clock->maxadj;
1845 u32 cur_adj = tk->tkr_mono.mult;
1846 s64 tick_error;
1847 bool negative;
1848 u32 adj_scale;
1849
1850 /* Remove any current error adj from freq calculation */
1851 if (tk->ntp_err_mult)
1852 xinterval -= tk->cycle_interval;
1853
1854 tk->ntp_tick = ntp_tick_length();
1855
1856 /* Calculate current error per tick */
1857 tick_error = ntp_tick_length() >> tk->ntp_error_shift;
1858 tick_error -= (xinterval + tk->xtime_remainder);
1859
1860 /* Don't worry about correcting it if it's small */
1861 if (likely((tick_error >= 0) && (tick_error <= interval)))
1862 return;
1863
1864 /* preserve the direction of correction */
1865 negative = (tick_error < 0);
1866
1867 /* If any adjustment would pass the max, just return */
1868 if (negative && (cur_adj - 1) <= (base - max))
1869 return;
1870 if (!negative && (cur_adj + 1) >= (base + max))
1871 return;
1872 /*
1873 * Sort out the magnitude of the correction, but
1874 * avoid making so large a correction that we go
1875 * over the max adjustment.
1876 */
1877 adj_scale = 0;
1878 tick_error = abs(tick_error);
1879 while (tick_error > interval) {
1880 u32 adj = 1 << (adj_scale + 1);
1881
1882 /* Check if adjustment gets us within 1 unit from the max */
1883 if (negative && (cur_adj - adj) <= (base - max))
1884 break;
1885 if (!negative && (cur_adj + adj) >= (base + max))
1886 break;
1887
1888 adj_scale++;
1889 tick_error >>= 1;
1890 }
1891
1892 /* scale the corrections */
1893 timekeeping_apply_adjustment(tk, offset, negative, adj_scale);
1894}
1895
1896/*
1897 * Adjust the timekeeper's multiplier to the correct frequency
1898 * and also to reduce the accumulated error value.
1899 */
1900static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1901{
1902 /* Correct for the current frequency error */
1903 timekeeping_freqadjust(tk, offset);
1904
1905 /* Next make a small adjustment to fix any cumulative error */
1906 if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
1907 tk->ntp_err_mult = 1;
1908 timekeeping_apply_adjustment(tk, offset, 0, 0);
1909 } else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
1910 /* Undo any existing error adjustment */
1911 timekeeping_apply_adjustment(tk, offset, 1, 0);
1912 tk->ntp_err_mult = 0;
1913 }
1914
1915 if (unlikely(tk->tkr_mono.clock->maxadj &&
1916 (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
1917 > tk->tkr_mono.clock->maxadj))) {
1918 printk_once(KERN_WARNING
1919 "Adjusting %s more than 11%% (%ld vs %ld)\n",
1920 tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
1921 (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
1922 }
1923
1924 /*
1925 * It may be possible that when we entered this function, xtime_nsec
1926 * was very small. Further, if we're slightly speeding the clocksource
1927 * in the code above, it's possible the required corrective factor to
1928 * xtime_nsec could cause it to underflow.
1929 *
1930 * Now, since we already accumulated the second, we cannot simply roll
1931 * the accumulated second back, since the NTP subsystem has been
1932 * notified via second_overflow. So instead we push xtime_nsec forward
1933 * by the amount we underflowed, and add that amount into the error.
1934 *
1935 * We'll correct this error next time through this function, when
1936 * xtime_nsec is not as small.
1937 */
1938 if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
1939 s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
1940 tk->tkr_mono.xtime_nsec = 0;
1941 tk->ntp_error += neg << tk->ntp_error_shift;
1942 }
1943}
1944
1945/**
1946 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
1947 *
1948 * Helper function that accumulates the nsecs greater than a second
1949 * from the xtime_nsec field to the xtime_sec field.
1950 * It also calls into the NTP code to handle leapsecond processing.
1951 *
1952 */
1953static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
1954{
1955 u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
1956 unsigned int clock_set = 0;
1957
1958 while (tk->tkr_mono.xtime_nsec >= nsecps) {
1959 int leap;
1960
1961 tk->tkr_mono.xtime_nsec -= nsecps;
1962 tk->xtime_sec++;
1963
1964 /* Figure out if it's a leap sec and apply it if needed */
1965 leap = second_overflow(tk->xtime_sec);
1966 if (unlikely(leap)) {
1967 struct timespec64 ts;
1968
1969 tk->xtime_sec += leap;
1970
1971 ts.tv_sec = leap;
1972 ts.tv_nsec = 0;
1973 tk_set_wall_to_mono(tk,
1974 timespec64_sub(tk->wall_to_monotonic, ts));
1975
1976 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
1977
1978 clock_set = TK_CLOCK_WAS_SET;
1979 }
1980 }
1981 return clock_set;
1982}
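
/*
 * Note on the shifted representation used above (shift value assumed
 * for illustration): xtime_nsec holds nanoseconds left-shifted by
 * tkr_mono.shift so that sub-nanosecond remainders survive multiplier
 * adjustments. With shift == 8, one full second is
 *
 *	u64 nsecps = (u64)NSEC_PER_SEC << 8;	(= 256,000,000,000)
 *
 * and the loop peels off whole seconds in that scaled unit; dividing
 * by 2^8 only when converting out keeps intermediate arithmetic exact.
 */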

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds, allowing for an O(log)
 * accumulation loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
					u32 shift,
					unsigned int *clock_set)
{
	cycle_t interval = tk->cycle_interval << shift;
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < interval)
		return offset;

	/* Accumulate one shifted interval */
	offset -= interval;
	tk->tkr_mono.cycle_last += interval;
	tk->tkr_raw.cycle_last += interval;

	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
	*clock_set |= accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = (u64)tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += tk->ntp_tick << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
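
/*
 * Illustrative trace (assumed numbers): with cycle_interval == 1000
 * and 7500 cycles pending, update_wall_time() computes a starting
 * shift of ilog2(7500) - ilog2(1000) = 12 - 9 = 3, and the loop there
 * proceeds:
 *
 *	shift 3: interval = 8000 > 7500, nothing consumed, shift drops
 *	shift 2: interval = 4000, offset 7500 -> 3500
 *	shift 1: interval = 2000, offset 3500 -> 1500
 *	shift 0: interval = 1000, offset 1500 ->  500
 *
 * Seven tick intervals are consumed in three accumulating passes
 * rather than seven single-interval ones - hence the O(log) claim.
 */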

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
void update_wall_time(void)
{
	struct timekeeper *real_tk = &tk_core.timekeeper;
	struct timekeeper *tk = &shadow_timekeeper;
	cycle_t offset;
	int shift = 0, maxshift;
	unsigned int clock_set = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = real_tk->cycle_interval;
#else
	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
#endif

	/* Check if there's really nothing to do */
	if (offset < real_tk->cycle_interval)
		goto out;

	/* Do some additional sanity checking */
	timekeeping_check_update(real_tk, offset);

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset. We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length()) + 1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift,
							&clock_set);
		if (offset < tk->cycle_interval << shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * XXX This can be killed once everyone converts
	 * to the new update_vsyscall.
	 */
	old_vsyscall_fixup(tk);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	clock_set |= accumulate_nsecs_to_secs(tk);

	write_seqcount_begin(&tk_core.seq);
	/*
	 * Update the real timekeeper.
	 *
	 * We could avoid this memcpy by switching pointers, but that
	 * requires changes to all other timekeeper usage sites as
	 * well, i.e. move the timekeeper pointer getter into the
	 * spinlocked/seqcount protected sections. And we trade this
	 * memcpy under the tk_core.seq against one before we start
	 * updating.
	 */
	timekeeping_update(tk, clock_set);
	memcpy(real_tk, tk, sizeof(*tk));
	/* The memcpy must come last. Do not put anything here! */
	write_seqcount_end(&tk_core.seq);
out:
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	if (clock_set)
		/* Have to call the _delayed version, since we're in irq context */
		clock_was_set_delayed();
}
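
/*
 * Sizing sketch for the maxshift bound above (assumes HZ == 1000 and
 * the usual 32-bit NTP scaling): ntp_tick_length() is then roughly
 * 10^6 ns << 32, i.e. just under 2^52, giving
 * maxshift = (64 - 52) - 1 = 11. A NO_HZ idle stretch of ~2^11 ticks
 * (about two seconds) is therefore folded in with on the order of a
 * dozen loop iterations instead of thousands.
 */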

/**
 * getboottime64 - Return the real time of system boot.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the wall time of boot in a timespec64.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday() will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);

	*ts = ktime_to_timespec64(t);
}
EXPORT_SYMBOL_GPL(getboottime64);
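
/*
 * Usage sketch (illustrative; boottime_stamp is a hypothetical value
 * taken from a boottime clock, not an API in this file):
 *
 *	struct timespec64 boot, wall;
 *
 *	getboottime64(&boot);
 *	wall = timespec64_add(boot, boottime_stamp);
 *
 * As noted above, settimeofday() calls change the result, so the sum
 * tracks the corrected wall time rather than whatever the RTC said at
 * boot.
 */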

unsigned long get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return timespec64_to_timespec(tk_xtime(tk));
}
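
/*
 * Note: unlike current_kernel_time64() below, this variant takes no
 * seqcount and may observe a torn update; it is presumably only safe
 * for callers that either hold the timekeeper writer side or can
 * tolerate an occasionally inconsistent snapshot.
 */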

struct timespec64 current_kernel_time64(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time64);

struct timespec64 get_monotonic_coarse64(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now, mono;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
				  now.tv_nsec + mono.tv_nsec);

	return now;
}
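
/*
 * The read side above (and in current_kernel_time64()) is the standard
 * seqcount pattern: sample the sequence, copy the fields, retry if a
 * writer ran concurrently. A minimal sketch of the same idiom for a
 * single field looks like:
 *
 *	unsigned long seq;
 *	u64 sec;
 *
 *	do {
 *		seq = read_seqcount_begin(&tk_core.seq);
 *		sec = tk_core.timekeeper.xtime_sec;
 *	} while (read_seqcount_retry(&tk_core.seq, seq));
 *
 * Readers never block the writer; they simply redo the cheap copy.
 */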

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	calc_global_load(ticks);
}

/**
 * ktime_get_update_offsets_now - hrtimer helper
 * @cwsseq:	pointer to check and store the clock was set sequence number
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets if the
 * sequence number in @cwsseq differs from timekeeper.clock_was_set_seq.
 *
 * Called from hrtimer_interrupt() or retrigger_next_event().
 */
ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
				     ktime_t *offs_boot, ktime_t *offs_tai)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);
		base = ktime_add_ns(base, nsecs);

		if (*cwsseq != tk->clock_was_set_seq) {
			*cwsseq = tk->clock_was_set_seq;
			*offs_real = tk->offs_real;
			*offs_boot = tk->offs_boot;
			*offs_tai = tk->offs_tai;
		}

		/* Handle leapsecond insertion adjustments */
		if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64))
			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return base;
}
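
/*
 * On the leapsecond clamp above (behavioural sketch): next_leap_ktime
 * marks the pending leap edge. Once the freshly computed monotonic
 * base crosses it, offs_real is reported one second smaller, so
 * CLOCK_REALTIME hrtimers are evaluated against the post-leap offset
 * immediately rather than waiting for the next timekeeping update,
 * avoiding expiry a second too early across the edge.
 */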

/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts;
	s32 orig_tai, tai;
	int ret;

	/* Validate the data before disabling interrupts */
	ret = ntp_validate_timex(txc);
	if (ret)
		return ret;

	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec delta;
		delta.tv_sec  = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;
	}

	getnstimeofday64(&ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	orig_tai = tai = tk->tai_offset;
	ret = __do_adjtimex(txc, &ts, &tai);

	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	}
	tk_update_leap_state(tk);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	if (tai != orig_tai)
		clock_was_set();

	ntp_notify_cmos_timer();

	return ret;
}
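
/*
 * Userspace-facing sketch (illustrative): the ADJ_SETOFFSET branch
 * above services calls such as
 *
 *	struct timex txc = {
 *		.modes = ADJ_SETOFFSET | ADJ_NANO,
 *		.time  = { .tv_sec = 0, .tv_usec = 500000000 },
 *	};
 *	adjtimex(&txc);
 *
 * which steps CLOCK_REALTIME forward by half a second. Without
 * ADJ_NANO, .tv_usec is taken as microseconds and multiplied by 1000
 * before the offset is injected.
 */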

#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	__hardpps(phase_ts, raw_ts);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks that have elapsed since the last call
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}