// SPDX-License-Identifier: GPL-2.0+
/*
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult: pointer to mult variable
 * @shift: pointer to shift variable
 * @from: frequency to convert from
 * @to: frequency to convert to
 * @maxsec: guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * event @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
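
/*
 * Illustrative example (not part of the original file): for a
 * hypothetical 19.2 MHz counter converted to nanoseconds with a
 * guaranteed range of 600 seconds, the loop settles on shift == 24 and
 * mult == ((10^9 << 24) + 19200000 / 2) / 19200000 == 873813333, the
 * largest pair whose conversion cannot overflow 64 bits in that range:
 */
#if 0
	u32 mult, shift;

	clocks_calc_mult_shift(&mult, &shift, 19200000, NSEC_PER_SEC, 600);
	/* One second worth of cycles converts back to ~10^9 ns: */
	pr_info("1s -> %lld ns\n", clocksource_cyc2ns(19200000, mult, shift));
#endif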

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	spin_lock_irqsave(&watchdog_lock, *flags);
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&watchdog_lock, *flags);
}

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
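
/*
 * Illustrative arithmetic (not part of the original file): with
 * HZ == 1000 the watchdog timer fires every HZ >> 1 == 500 jiffies,
 * i.e. every 0.5s, and a clocksource is declared unstable once it
 * deviates from the watchdog by more than NSEC_PER_SEC >> 4 ==
 * 62500000 ns (0.0625s) over one such interval.
 */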

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * We cannot directly run clocksource_watchdog_kthread() here, because
	 * clocksource_select() calls timekeeping_notify() which uses
	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
	 * to lock inversions wrt CPU hotplug.
	 *
	 * Also, we only ever run this work once or twice during the lifetime
	 * of the kernel, so there is no point in creating a more permanent
	 * kthread for this.
	 *
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 * If the clocksource is registered clocksource_watchdog_kthread() will
	 * re-rate and re-select.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);

	/* kick clocksource_watchdog_kthread() */
	if (finished_booting)
		schedule_work(&watchdog_work);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs: clocksource to be marked unstable
 *
 * This function is called by the x86 TSC code to mark clocksources as unstable;
 * it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_watchdog(struct timer_list *unused)
{
	struct clocksource *cs;
	u64 csnow, wdnow, cslast, wdlast, delta;
	int64_t wd_nsec, cs_nsec;
	int next_cpu, reset_pending;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
					     watchdog->shift);

		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn("    '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wdnow, wdlast, watchdog->mask);
			pr_warn("    '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, csnow, cslast, cs->mask);
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending, when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}
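
/*
 * A minimal sketch (not part of the original file) of the
 * wraparound-safe delta computation that clocksource_delta() performs
 * above: the subtraction is done modulo the counter width, so a counter
 * that wraps within one 0.5s interval still yields the elapsed cycles.
 * For a hypothetical 16-bit counter:
 */
#if 0
	u64 mask = CLOCKSOURCE_MASK(16);	/* 0xffff */
	u64 last = 0xfff0, now = 0x0010;	/* counter wrapped around */

	/* (0x0010 - 0xfff0) & 0xffff == 0x0020: 32 cycles elapsed */
	pr_info("delta: %llx\n", (now - last) & mask);
#endif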

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}

static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
}

static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			__clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}

static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip the clocksource which will be stopped in suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * The nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces to suspend the nonstop clocksource when system suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}

/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback: if true, select a fallback clocksource instead of the current one
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}

/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs: current clocksource from timekeeping
 * @start_cycles: current cycles from timekeeping
 *
 * This function will save the start cycle values of suspend timer to calculate
 * the suspend time when resuming system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * which means processes are frozen, non-boot cpus and interrupts are disabled
 * now. It is therefore possible to start the suspend timer without taking the
 * clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value as suspend_start to avoid same reading
	 * from suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}

/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs: current clocksource from timekeeping
 * @cycle_now: current cycles from timekeeping
 *
 * This function will calculate the suspend time from suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * that means there is only one cpu, no processes are running and the interrupts
 * are disabled. It is therefore possible to stop the suspend timer without
 * taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, delta, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value from timekeeping as current cycle to
	 * avoid same reading from suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	if (now > suspend_start) {
		delta = clocksource_delta(now, suspend_start,
					  suspend_clocksource->mask);
		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
				       suspend_clocksource->shift);
	}

	/*
	 * Disable the suspend timer to save power if current clocksource is
	 * not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}
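
/*
 * Illustrative numbers (not part of the original file): for a
 * hypothetical suspend clocksource with mult == 873813333 and
 * shift == 24 (the 19.2 MHz example after clocks_calc_mult_shift()
 * above), a delta of 1152000000 cycles gives
 * mul_u64_u32_shr(1152000000, 873813333, 24) ~= 60000000000 ns,
 * i.e. roughly 60 seconds of measured suspend time.
 */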

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs: Pointer to clocksource
 *
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;
	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}

/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult: cycle to nanosecond multiplier
 * @shift: cycle to nanosecond divisor (power of two)
 * @maxadj: maximum adjustment value to mult (~11%)
 * @mask: bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc: maximum cycle value before potential overflow (does not include
 *	any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult+maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}
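
/*
 * A worked example under assumed values (not part of the original
 * file): with mult == 873813333, shift == 24, maxadj == 96119466 (11%
 * of mult) and a 32-bit mask, ULLONG_MAX / (mult + maxadj) ~= 1.9e10
 * cycles, so the 32-bit mask is the limiting factor. 0xffffffff cycles
 * converted with (mult - maxadj) is ~199 seconds, and the 50% safety
 * margin brings the returned max_nsecs down to ~99.5 seconds:
 */
#if 0
	u64 max_cycles;
	u64 max_nsecs = clocks_calc_max_nsecs(873813333, 24, 96119466,
					      CLOCKSOURCE_MASK(32), &max_cycles);
	/* max_cycles == 0xffffffff, max_nsecs ~= 99.5 * NSEC_PER_SEC */
#endif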

/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs: Pointer to clocksource to be updated
 *
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);
}

#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET

static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}

static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	if (!strlen(override_name))
		goto found;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot be currently verified.
				 * Deferring to let the watchdog check.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}

found:
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	__clocksource_select(false);
}

static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}

#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
static inline void clocksource_select(void) { }
static inline void clocksource_select_fallback(void) { }

#endif

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;
	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list) {
		/* Keep track of the place, where to insert */
		if (tmp->rating < cs->rating)
			break;
		entry = &tmp->list;
	}
	list_add(&cs->list, entry);
}

/**
 * __clocksource_update_freq_scale - Used to update the clocksource with a new freq
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}
	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);

/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	unsigned long flags;

	clocksource_arch_init(cs);

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);

	clocksource_watchdog_lock(&flags);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	__clocksource_suspend_select(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
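
/*
 * A minimal sketch (not part of the original file) of how a driver
 * typically reaches this function, via the clocksource_register_hz()
 * wrapper. The device, its 25 MHz rate, example_mmio_base and
 * EXAMPLE_COUNTER_OFF below are hypothetical:
 */
#if 0
static u64 example_cs_read(struct clocksource *cs)
{
	return readl_relaxed(example_mmio_base + EXAMPLE_COUNTER_OFF);
}

static struct clocksource example_cs = {
	.name	= "example-counter",
	.rating	= 200,
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_cs_init(void)
{
	/* Computes mult/shift for 25 MHz, enqueues and maybe selects it. */
	return clocksource_register_hz(&example_cs, 25000000);
}
#endif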

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs: clocksource to be changed
 * @rating: new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	mutex_lock(&clocksource_mutex);
	clocksource_watchdog_lock(&flags);
	__clocksource_change_rating(cs, rating);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	clocksource_suspend_select(false);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	unsigned long flags;

	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	if (clocksource_is_suspend(cs)) {
		/*
		 * Select and try to install a replacement suspend clocksource.
		 * If no replacement suspend clocksource, we will just let the
		 * clocksource go and have no suspend clocksource.
		 */
		clocksource_suspend_select(true);
	}

	clocksource_watchdog_lock(&flags);
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	clocksource_watchdog_unlock(&flags);

	return 0;
}

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs: clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * current_clocksource_show - sysfs interface for current clocksource
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t current_clocksource_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off \n: */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}

/**
 * current_clocksource_store - interface for manually overriding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: name of override clocksource
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t current_clocksource_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
static DEVICE_ATTR_RW(current_clocksource);

/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: unused
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t unbind_clocksource_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_clocksource);

/**
 * available_clocksource_show - sysfs interface for listing clocksource
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t available_clocksource_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
static DEVICE_ATTR_RO(available_clocksource);

static struct attribute *clocksource_attrs[] = {
	&dev_attr_current_clocksource.attr,
	&dev_attr_unbind_clocksource.attr,
	&dev_attr_available_clocksource.attr,
	NULL
};
ATTRIBUTE_GROUPS(clocksource);

static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
	.groups	= clocksource_groups,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);

	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */
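
/*
 * Note (not part of the original file): the attributes above appear
 * under /sys/devices/system/clocksource/clocksource0/. Reading
 * available_clocksource lists the registered clocksources, and writing
 * a name into current_clocksource overrides the selection, e.g.
 * "echo hpet > current_clocksource", assuming an HRT-compatible hpet
 * clocksource is registered.
 */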

/**
 * boot_override_clocksource - boot clock override
 * @str: override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
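
/*
 * Usage example (not part of the original file): booting with
 * "clocksource=acpi_pm" on the kernel command line makes the ACPI PM
 * timer the override, so it is preferred once it registers, provided
 * it is usable in the current tick mode.
 */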

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str: override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * This file contains the functions which manage clocksource drivers.
4 *
5 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10#include <linux/device.h>
11#include <linux/clocksource.h>
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
15#include <linux/tick.h>
16#include <linux/kthread.h>
17#include <linux/prandom.h>
18#include <linux/cpu.h>
19
20#include "tick-internal.h"
21#include "timekeeping_internal.h"
22
23static void clocksource_enqueue(struct clocksource *cs);
24
25static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
26{
27 u64 delta = clocksource_delta(end, start, cs->mask, cs->max_raw_delta);
28
29 if (likely(delta < cs->max_cycles))
30 return clocksource_cyc2ns(delta, cs->mult, cs->shift);
31
32 return mul_u64_u32_shr(delta, cs->mult, cs->shift);
33}
34
35/**
36 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
37 * @mult: pointer to mult variable
38 * @shift: pointer to shift variable
39 * @from: frequency to convert from
40 * @to: frequency to convert to
41 * @maxsec: guaranteed runtime conversion range in seconds
42 *
43 * The function evaluates the shift/mult pair for the scaled math
44 * operations of clocksources and clockevents.
45 *
46 * @to and @from are frequency values in HZ. For clock sources @to is
47 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
48 * event @to is the counter frequency and @from is NSEC_PER_SEC.
49 *
50 * The @maxsec conversion range argument controls the time frame in
51 * seconds which must be covered by the runtime conversion with the
52 * calculated mult and shift factors. This guarantees that no 64bit
53 * overflow happens when the input value of the conversion is
54 * multiplied with the calculated mult factor. Larger ranges may
55 * reduce the conversion accuracy by choosing smaller mult and shift
56 * factors.
57 */
58void
59clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
60{
61 u64 tmp;
62 u32 sft, sftacc= 32;
63
64 /*
65 * Calculate the shift factor which is limiting the conversion
66 * range:
67 */
68 tmp = ((u64)maxsec * from) >> 32;
69 while (tmp) {
70 tmp >>=1;
71 sftacc--;
72 }
73
74 /*
75 * Find the conversion shift/mult pair which has the best
76 * accuracy and fits the maxsec conversion range:
77 */
78 for (sft = 32; sft > 0; sft--) {
79 tmp = (u64) to << sft;
80 tmp += from / 2;
81 do_div(tmp, from);
82 if ((tmp >> sftacc) == 0)
83 break;
84 }
85 *mult = tmp;
86 *shift = sft;
87}
88EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
89
90/*[Clocksource internal variables]---------
91 * curr_clocksource:
92 * currently selected clocksource.
93 * suspend_clocksource:
94 * used to calculate the suspend time.
95 * clocksource_list:
96 * linked list with the registered clocksources
97 * clocksource_mutex:
98 * protects manipulations to curr_clocksource and the clocksource_list
99 * override_name:
100 * Name of the user-specified clocksource.
101 */
102static struct clocksource *curr_clocksource;
103static struct clocksource *suspend_clocksource;
104static LIST_HEAD(clocksource_list);
105static DEFINE_MUTEX(clocksource_mutex);
106static char override_name[CS_NAME_LEN];
107static int finished_booting;
108static u64 suspend_start;
109
110/*
111 * Interval: 0.5sec.
112 */
113#define WATCHDOG_INTERVAL (HZ >> 1)
114#define WATCHDOG_INTERVAL_MAX_NS ((2 * WATCHDOG_INTERVAL) * (NSEC_PER_SEC / HZ))
115
116/*
117 * Threshold: 0.0312s, when doubled: 0.0625s.
118 */
119#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 5)
120
121/*
122 * Maximum permissible delay between two readouts of the watchdog
123 * clocksource surrounding a read of the clocksource being validated.
124 * This delay could be due to SMIs, NMIs, or to VCPU preemptions. Used as
125 * a lower bound for cs->uncertainty_margin values when registering clocks.
126 *
127 * The default of 500 parts per million is based on NTP's limits.
128 * If a clocksource is good enough for NTP, it is good enough for us!
129 *
130 * In other words, by default, even if a clocksource is extremely
131 * precise (for example, with a sub-nanosecond period), the maximum
132 * permissible skew between the clocksource watchdog and the clocksource
133 * under test is not permitted to go below the 500ppm minimum defined
134 * by MAX_SKEW_USEC. This 500ppm minimum may be overridden using the
135 * CLOCKSOURCE_WATCHDOG_MAX_SKEW_US Kconfig option.
136 */
137#ifdef CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
138#define MAX_SKEW_USEC CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
139#else
140#define MAX_SKEW_USEC (125 * WATCHDOG_INTERVAL / HZ)
141#endif
142
143/*
144 * Default for maximum permissible skew when cs->uncertainty_margin is
145 * not specified, and the lower bound even when cs->uncertainty_margin
146 * is specified. This is also the default that is used when registering
147 * clocks with unspecifed cs->uncertainty_margin, so this macro is used
148 * even in CONFIG_CLOCKSOURCE_WATCHDOG=n kernels.
149 */
150#define WATCHDOG_MAX_SKEW (MAX_SKEW_USEC * NSEC_PER_USEC)
151
152#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
153static void clocksource_watchdog_work(struct work_struct *work);
154static void clocksource_select(void);
155
156static LIST_HEAD(watchdog_list);
157static struct clocksource *watchdog;
158static struct timer_list watchdog_timer;
159static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
160static DEFINE_SPINLOCK(watchdog_lock);
161static int watchdog_running;
162static atomic_t watchdog_reset_pending;
163static int64_t watchdog_max_interval;
164
165static inline void clocksource_watchdog_lock(unsigned long *flags)
166{
167 spin_lock_irqsave(&watchdog_lock, *flags);
168}
169
170static inline void clocksource_watchdog_unlock(unsigned long *flags)
171{
172 spin_unlock_irqrestore(&watchdog_lock, *flags);
173}
174
175static int clocksource_watchdog_kthread(void *data);
176
177static void clocksource_watchdog_work(struct work_struct *work)
178{
179 /*
180 * We cannot directly run clocksource_watchdog_kthread() here, because
181 * clocksource_select() calls timekeeping_notify() which uses
182 * stop_machine(). One cannot use stop_machine() from a workqueue() due
183 * lock inversions wrt CPU hotplug.
184 *
185 * Also, we only ever run this work once or twice during the lifetime
186 * of the kernel, so there is no point in creating a more permanent
187 * kthread for this.
188 *
189 * If kthread_run fails the next watchdog scan over the
190 * watchdog_list will find the unstable clock again.
191 */
192 kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
193}
194
195static void clocksource_change_rating(struct clocksource *cs, int rating)
196{
197 list_del(&cs->list);
198 cs->rating = rating;
199 clocksource_enqueue(cs);
200}
201
202static void __clocksource_unstable(struct clocksource *cs)
203{
204 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
205 cs->flags |= CLOCK_SOURCE_UNSTABLE;
206
207 /*
208 * If the clocksource is registered clocksource_watchdog_kthread() will
209 * re-rate and re-select.
210 */
211 if (list_empty(&cs->list)) {
212 cs->rating = 0;
213 return;
214 }
215
216 if (cs->mark_unstable)
217 cs->mark_unstable(cs);
218
219 /* kick clocksource_watchdog_kthread() */
220 if (finished_booting)
221 schedule_work(&watchdog_work);
222}
223
224/**
225 * clocksource_mark_unstable - mark clocksource unstable via watchdog
226 * @cs: clocksource to be marked unstable
227 *
228 * This function is called by the x86 TSC code to mark clocksources as unstable;
229 * it defers demotion and re-selection to a kthread.
230 */
231void clocksource_mark_unstable(struct clocksource *cs)
232{
233 unsigned long flags;
234
235 spin_lock_irqsave(&watchdog_lock, flags);
236 if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
237 if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
238 list_add(&cs->wd_list, &watchdog_list);
239 __clocksource_unstable(cs);
240 }
241 spin_unlock_irqrestore(&watchdog_lock, flags);
242}
243
244static int verify_n_cpus = 8;
245module_param(verify_n_cpus, int, 0644);
246
247enum wd_read_status {
248 WD_READ_SUCCESS,
249 WD_READ_UNSTABLE,
250 WD_READ_SKIP
251};
252
253static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
254{
255 int64_t md = 2 * watchdog->uncertainty_margin;
256 unsigned int nretries, max_retries;
257 int64_t wd_delay, wd_seq_delay;
258 u64 wd_end, wd_end2;
259
260 max_retries = clocksource_get_max_watchdog_retry();
261 for (nretries = 0; nretries <= max_retries; nretries++) {
262 local_irq_disable();
263 *wdnow = watchdog->read(watchdog);
264 *csnow = cs->read(cs);
265 wd_end = watchdog->read(watchdog);
266 wd_end2 = watchdog->read(watchdog);
267 local_irq_enable();
268
269 wd_delay = cycles_to_nsec_safe(watchdog, *wdnow, wd_end);
270 if (wd_delay <= md + cs->uncertainty_margin) {
271 if (nretries > 1 && nretries >= max_retries) {
272 pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
273 smp_processor_id(), watchdog->name, nretries);
274 }
275 return WD_READ_SUCCESS;
276 }
277
278 /*
279 * Now compute delay in consecutive watchdog read to see if
280 * there is too much external interferences that cause
281 * significant delay in reading both clocksource and watchdog.
282 *
283 * If consecutive WD read-back delay > md, report
284 * system busy, reinit the watchdog and skip the current
285 * watchdog test.
286 */
287 wd_seq_delay = cycles_to_nsec_safe(watchdog, wd_end, wd_end2);
288 if (wd_seq_delay > md)
289 goto skip_test;
290 }
291
292 pr_warn("timekeeping watchdog on CPU%d: wd-%s-wd excessive read-back delay of %lldns vs. limit of %ldns, wd-wd read-back delay only %lldns, attempt %d, marking %s unstable\n",
293 smp_processor_id(), cs->name, wd_delay, WATCHDOG_MAX_SKEW, wd_seq_delay, nretries, cs->name);
294 return WD_READ_UNSTABLE;
295
296skip_test:
297 pr_info("timekeeping watchdog on CPU%d: %s wd-wd read-back delay of %lldns\n",
298 smp_processor_id(), watchdog->name, wd_seq_delay);
299 pr_info("wd-%s-wd read-back delay of %lldns, clock-skew test skipped!\n",
300 cs->name, wd_delay);
301 return WD_READ_SKIP;
302}
303
304static u64 csnow_mid;
305static cpumask_t cpus_ahead;
306static cpumask_t cpus_behind;
307static cpumask_t cpus_chosen;
308
309static void clocksource_verify_choose_cpus(void)
310{
311 int cpu, i, n = verify_n_cpus;
312
313 if (n < 0) {
314 /* Check all of the CPUs. */
315 cpumask_copy(&cpus_chosen, cpu_online_mask);
316 cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
317 return;
318 }
319
320 /* If no checking desired, or no other CPU to check, leave. */
321 cpumask_clear(&cpus_chosen);
322 if (n == 0 || num_online_cpus() <= 1)
323 return;
324
325 /* Make sure to select at least one CPU other than the current CPU. */
326 cpu = cpumask_first(cpu_online_mask);
327 if (cpu == smp_processor_id())
328 cpu = cpumask_next(cpu, cpu_online_mask);
329 if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
330 return;
331 cpumask_set_cpu(cpu, &cpus_chosen);
332
333 /* Force a sane value for the boot parameter. */
334 if (n > nr_cpu_ids)
335 n = nr_cpu_ids;
336
337 /*
338 * Randomly select the specified number of CPUs. If the same
339 * CPU is selected multiple times, that CPU is checked only once,
340 * and no replacement CPU is selected. This gracefully handles
341 * situations where verify_n_cpus is greater than the number of
342 * CPUs that are currently online.
343 */
344 for (i = 1; i < n; i++) {
345 cpu = get_random_u32_below(nr_cpu_ids);
346 cpu = cpumask_next(cpu - 1, cpu_online_mask);
347 if (cpu >= nr_cpu_ids)
348 cpu = cpumask_first(cpu_online_mask);
349 if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
350 cpumask_set_cpu(cpu, &cpus_chosen);
351 }
352
353 /* Don't verify ourselves. */
354 cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
355}
356
357static void clocksource_verify_one_cpu(void *csin)
358{
359 struct clocksource *cs = (struct clocksource *)csin;
360
361 csnow_mid = cs->read(cs);
362}
363
364void clocksource_verify_percpu(struct clocksource *cs)
365{
366 int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
367 u64 csnow_begin, csnow_end;
368 int cpu, testcpu;
369 s64 delta;
370
371 if (verify_n_cpus == 0)
372 return;
373 cpumask_clear(&cpus_ahead);
374 cpumask_clear(&cpus_behind);
375 cpus_read_lock();
376 migrate_disable();
377 clocksource_verify_choose_cpus();
378 if (cpumask_empty(&cpus_chosen)) {
379 migrate_enable();
380 cpus_read_unlock();
381 pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
382 return;
383 }
384 testcpu = smp_processor_id();
385 pr_info("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n",
386 cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
387 preempt_disable();
388 for_each_cpu(cpu, &cpus_chosen) {
389 if (cpu == testcpu)
390 continue;
391 csnow_begin = cs->read(cs);
392 smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
393 csnow_end = cs->read(cs);
394 delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
395 if (delta < 0)
396 cpumask_set_cpu(cpu, &cpus_behind);
397 delta = (csnow_end - csnow_mid) & cs->mask;
398 if (delta < 0)
399 cpumask_set_cpu(cpu, &cpus_ahead);
400 cs_nsec = cycles_to_nsec_safe(cs, csnow_begin, csnow_end);
401 if (cs_nsec > cs_nsec_max)
402 cs_nsec_max = cs_nsec;
403 if (cs_nsec < cs_nsec_min)
404 cs_nsec_min = cs_nsec;
405 }
406 preempt_enable();
407 migrate_enable();
408 cpus_read_unlock();
409 if (!cpumask_empty(&cpus_ahead))
410 pr_warn(" CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
411 cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
412 if (!cpumask_empty(&cpus_behind))
413 pr_warn(" CPUs %*pbl behind CPU %d for clocksource %s.\n",
414 cpumask_pr_args(&cpus_behind), testcpu, cs->name);
415 if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
416 pr_warn(" CPU %d check durations %lldns - %lldns for clocksource %s.\n",
417 testcpu, cs_nsec_min, cs_nsec_max, cs->name);
418}
419EXPORT_SYMBOL_GPL(clocksource_verify_percpu);
420
421static inline void clocksource_reset_watchdog(void)
422{
423 struct clocksource *cs;
424
425 list_for_each_entry(cs, &watchdog_list, wd_list)
426 cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
427}
428
429
430static void clocksource_watchdog(struct timer_list *unused)
431{
432 int64_t wd_nsec, cs_nsec, interval;
433 u64 csnow, wdnow, cslast, wdlast;
434 int next_cpu, reset_pending;
435 struct clocksource *cs;
436 enum wd_read_status read_ret;
437 unsigned long extra_wait = 0;
438 u32 md;
439
440 spin_lock(&watchdog_lock);
441 if (!watchdog_running)
442 goto out;
443
444 reset_pending = atomic_read(&watchdog_reset_pending);
445
446 list_for_each_entry(cs, &watchdog_list, wd_list) {
447
448 /* Clocksource already marked unstable? */
449 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
450 if (finished_booting)
451 schedule_work(&watchdog_work);
452 continue;
453 }
454
455 read_ret = cs_watchdog_read(cs, &csnow, &wdnow);
456
457 if (read_ret == WD_READ_UNSTABLE) {
458 /* Clock readout unreliable, so give it up. */
459 __clocksource_unstable(cs);
460 continue;
461 }
462
463 /*
464 * When WD_READ_SKIP is returned, it means the system is likely
465 * under very heavy load, where the latency of reading
466 * watchdog/clocksource is very big, and affect the accuracy of
467 * watchdog check. So give system some space and suspend the
468 * watchdog check for 5 minutes.
469 */
470 if (read_ret == WD_READ_SKIP) {
471 /*
472 * As the watchdog timer will be suspended, and
473 * cs->last could keep unchanged for 5 minutes, reset
474 * the counters.
475 */
476 clocksource_reset_watchdog();
477 extra_wait = HZ * 300;
478 break;
479 }
480
481 /* Clocksource initialized ? */
482 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
483 atomic_read(&watchdog_reset_pending)) {
484 cs->flags |= CLOCK_SOURCE_WATCHDOG;
485 cs->wd_last = wdnow;
486 cs->cs_last = csnow;
487 continue;
488 }
489
490 wd_nsec = cycles_to_nsec_safe(watchdog, cs->wd_last, wdnow);
491 cs_nsec = cycles_to_nsec_safe(cs, cs->cs_last, csnow);
492 wdlast = cs->wd_last; /* save these in case we print them */
493 cslast = cs->cs_last;
494 cs->cs_last = csnow;
495 cs->wd_last = wdnow;
496
497 if (atomic_read(&watchdog_reset_pending))
498 continue;
499
500 /*
501 * The processing of timer softirqs can get delayed (usually
502 * on account of ksoftirqd not getting to run in a timely
503 * manner), which causes the watchdog interval to stretch.
504 * Skew detection may fail for longer watchdog intervals
505 * on account of fixed margins being used.
506 * Some clocksources, e.g. acpi_pm, cannot tolerate
507 * watchdog intervals longer than a few seconds.
508 */
509 interval = max(cs_nsec, wd_nsec);
510 if (unlikely(interval > WATCHDOG_INTERVAL_MAX_NS)) {
511 if (system_state > SYSTEM_SCHEDULING &&
512 interval > 2 * watchdog_max_interval) {
513 watchdog_max_interval = interval;
514 pr_warn("Long readout interval, skipping watchdog check: cs_nsec: %lld wd_nsec: %lld\n",
515 cs_nsec, wd_nsec);
516 }
517 watchdog_timer.expires = jiffies;
518 continue;
519 }
520
521 /* Check the deviation from the watchdog clocksource. */
522 md = cs->uncertainty_margin + watchdog->uncertainty_margin;
523 if (abs(cs_nsec - wd_nsec) > md) {
524 s64 cs_wd_msec;
525 s64 wd_msec;
526 u32 wd_rem;
527
528 pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
529 smp_processor_id(), cs->name);
530 pr_warn(" '%s' wd_nsec: %lld wd_now: %llx wd_last: %llx mask: %llx\n",
531 watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);
532 pr_warn(" '%s' cs_nsec: %lld cs_now: %llx cs_last: %llx mask: %llx\n",
533 cs->name, cs_nsec, csnow, cslast, cs->mask);
534 cs_wd_msec = div_s64_rem(cs_nsec - wd_nsec, 1000 * 1000, &wd_rem);
535 wd_msec = div_s64_rem(wd_nsec, 1000 * 1000, &wd_rem);
536 pr_warn(" Clocksource '%s' skewed %lld ns (%lld ms) over watchdog '%s' interval of %lld ns (%lld ms)\n",
537 cs->name, cs_nsec - wd_nsec, cs_wd_msec, watchdog->name, wd_nsec, wd_msec);
538 if (curr_clocksource == cs)
539 pr_warn(" '%s' is current clocksource.\n", cs->name);
540 else if (curr_clocksource)
541 pr_warn(" '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
542 else
543 pr_warn(" No current clocksource.\n");
544 __clocksource_unstable(cs);
545 continue;
546 }
547
548 if (cs == curr_clocksource && cs->tick_stable)
549 cs->tick_stable(cs);
550
551 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
552 (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
553 (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
554 /* Mark it valid for high-res. */
555 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
556
557 /*
558 * clocksource_done_booting() will sort it if
559 * finished_booting is not set yet.
560 */
561 if (!finished_booting)
562 continue;
563
564 /*
565 * If this is not the current clocksource let
566 * the watchdog thread reselect it. Due to the
567 * change to high res this clocksource might
568 * be preferred now. If it is the current
569 * clocksource let the tick code know about
570 * that change.
571 */
572 if (cs != curr_clocksource) {
573 cs->flags |= CLOCK_SOURCE_RESELECT;
574 schedule_work(&watchdog_work);
575 } else {
576 tick_clock_notify();
577 }
578 }
579 }

	/*
	 * Only clear watchdog_reset_pending when a full cycle through
	 * all clocksources has been completed.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	/*
	 * Arm the timer if not already pending: could race with a concurrent
	 * clocksource_stop_watchdog()/clocksource_start_watchdog() pair.
	 */
	if (!timer_pending(&watchdog_timer)) {
		watchdog_timer.expires += WATCHDOG_INTERVAL + extra_wait;
		add_timer_on(&watchdog_timer, next_cpu);
	}
out:
	spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}

static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback, restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
}

static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	/* Do any required per-CPU skew verification. */
	if (curr_clocksource &&
	    curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
	    curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
		clocksource_verify_percpu(curr_clocksource);

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}

static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip clocksources which will be stopped in the suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * A nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces which would stop it when the system suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}

/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback: whether to select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}

/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs: current clocksource from timekeeping
 * @start_cycles: current cycles from timekeeping
 *
 * This function saves the start cycle value of the suspend timer so that
 * the suspend time can be calculated when the system resumes.
 *
 * This function is called late in the suspend process from
 * timekeeping_suspend(), which means processes are frozen and non-boot
 * CPUs and interrupts are disabled. It is therefore possible to start
 * the suspend timer without taking the clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If the current clocksource is the suspend timer, use the
	 * tkr_mono.cycle_last value as suspend_start to avoid a duplicate
	 * reading of the suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}

/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs: current clocksource from timekeeping
 * @cycle_now: current cycles from timekeeping
 *
 * This function calculates the suspend time from the suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend
 * clocksource is available.
 *
 * This function is called early in the resume process from
 * timekeeping_resume(), which means only one CPU is online, no processes
 * are running and interrupts are disabled. It is therefore possible to
 * stop the suspend timer without taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If the current clocksource is the suspend timer, use the
	 * tkr_mono.cycle_last value from timekeeping as the current cycle
	 * to avoid a duplicate reading of the suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	if (now > suspend_start)
		nsec = cycles_to_nsec_safe(suspend_clocksource, suspend_start, now);

	/*
	 * Disable the suspend timer to save power if the current clocksource
	 * is not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs: Pointer to clocksource
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;
	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
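	 *
	 * For example (illustrative numbers only), mult = 0x01000000
	 * (16777216) yields maxadj = 16777216 * 11 / 100 = 1845493
	 * (0x1c28f5).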
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}

/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult: cycle to nanosecond multiplier
 * @shift: cycle to nanosecond divisor (power of two)
 * @maxadj: maximum adjustment value to mult (~11%)
 * @mask: bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc: maximum cycle value before potential overflow (does not include
 *	any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult + maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}
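
/*
 * Worked example (illustrative numbers only): for a 32-bit counter with
 * mult = 1000 and shift = 0 (1000 ns per cycle, i.e. a 1 MHz clock),
 * clocksource_max_adjustment() gives maxadj = 110, so:
 *
 *	max_cycles = min(ULLONG_MAX / 1110, 0xffffffff) = 0xffffffff
 *	max_nsecs  = (0xffffffff * (1000 - 110)) >> 0   ~= 3823 s
 *
 * which is then halved to ~1911 s, i.e. timekeeping must be updated at
 * least roughly every half hour on such a clocksource.
 */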

/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs: Pointer to clocksource to be updated
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);

	/*
	 * Threshold for detecting negative motion in clocksource_delta().
	 *
	 * Allow for 0.875 of the counter width so that overly long idle
	 * sleeps, which go slightly over mask/2, do not trigger the
	 * negative motion detection.
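	 *
	 * For a 32-bit mask, for example, this threshold works out to
	 * (mask >> 1) + (mask >> 2) + (mask >> 3) = 0xdffffffd, i.e.
	 * ~0.875 * 2^32 counter increments.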
	 */
	cs->max_raw_delta = (cs->mask >> 1) + (cs->mask >> 2) + (cs->mask >> 3);
}

static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}

static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	if (!strlen(override_name))
		goto found;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz).
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot currently be verified.
				 * Defer to let the watchdog check it.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else {
			/* Override clocksource can be used. */
			best = cs;
		}
		break;
	}

found:
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which was selected by the userspace override.
 */
static void clocksource_select(void)
{
	__clocksource_select(false);
}

static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;
	/*
	 * Run the watchdog first to eliminate unstable clock sources.
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list) {
		/* Keep track of the place where to insert */
		if (tmp->rating < cs->rating)
			break;
		entry = &tmp->list;
	}
	list_add(&cs->list, entry);
}

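/*
 * For example (illustrative ratings), registering clocksources rated
 * 100, then 300, then 250 leaves clocksource_list ordered 300, 250, 100.
 * On equal ratings the new entry is placed after the existing ones, so
 * insertion is stable with respect to registration order.
 */
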
/**
 * __clocksource_update_freq_scale - Used to update the clocksource with a new freq
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}

	/*
	 * If the uncertainty margin is not specified, calculate it. If
	 * both scale and freq are non-zero, calculate the clock period, but
	 * bound below at 2*WATCHDOG_MAX_SKEW, that is, 500ppm by default.
	 * However, if either of scale or freq is zero, be very conservative
	 * and take the tens-of-milliseconds WATCHDOG_THRESHOLD value
	 * for the uncertainty margin. Allow stupidly small uncertainty
	 * margins to be specified by the caller for testing purposes,
	 * but warn to discourage production use of this capability.
	 *
	 * Bottom line: The sum of the uncertainty margins of the
	 * watchdog clocksource and the clocksource under test will be at
	 * least 500ppm by default. For more information, please see the
	 * comment preceding CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US above.
	 */
	if (scale && freq && !cs->uncertainty_margin) {
		cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
		if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW)
			cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW;
	} else if (!cs->uncertainty_margin) {
		cs->uncertainty_margin = WATCHDOG_THRESHOLD;
	}
	WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW);

	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
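
/*
 * Example (hypothetical driver, not part of this file): a clocksource
 * whose counter frequency is only known once the clock tree is up can
 * defer the mult/shift setup to its ->enable() callback; "my_get_rate()"
 * is an assumed helper returning the rate in Hz:
 *
 *	static int my_cs_enable(struct clocksource *cs)
 *	{
 *		__clocksource_update_freq_hz(cs, my_get_rate());
 *		return 0;
 *	}
 */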

/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	unsigned long flags;

	clocksource_arch_init(cs);

	if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX))
		cs->id = CSID_GENERIC;
	if (cs->vdso_clock_mode < 0 ||
	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
			cs->name, cs->vdso_clock_mode);
		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
	}

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);

	clocksource_watchdog_lock(&flags);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	__clocksource_suspend_select(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
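
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver with a free-running 32-bit counter at 24 MHz could register it
 * as follows; "my_timer_read", "my_counter_read" and the 24000000 rate
 * are assumptions made for the sake of the example:
 *
 *	static u64 my_timer_read(struct clocksource *cs)
 *	{
 *		return my_counter_read();
 *	}
 *
 *	static struct clocksource my_cs = {
 *		.name	= "my_timer",
 *		.rating	= 200,
 *		.read	= my_timer_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	ret = clocksource_register_hz(&my_cs, 24000000);
 *
 * clocksource_register_hz() expands to __clocksource_register_scale(cs,
 * 1, hz); the _khz variant passes scale == 1000. On teardown the driver
 * would call clocksource_unregister(&my_cs).
 */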

/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	unsigned long flags;

	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	if (clocksource_is_suspend(cs)) {
		/*
		 * Select and try to install a replacement suspend clocksource.
		 * If there is no replacement, just let the clocksource go and
		 * have no suspend clocksource.
		 */
		clocksource_suspend_select(true);
	}

	clocksource_watchdog_lock(&flags);
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	clocksource_watchdog_unlock(&flags);

	return 0;
}

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs: clocksource to be unregistered
 *
 * Returns 0 on success, -EBUSY when the clocksource is still in use and
 * no replacement can be installed.
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * current_clocksource_show - sysfs interface for current clocksource
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with the current clocksource name
 *
 * Provides sysfs interface for showing the current clocksource.
 */
static ssize_t current_clocksource_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = sysfs_emit(buf, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off the trailing \n: */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}

/**
 * current_clocksource_store - interface for manually overriding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: name of override clocksource
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t current_clocksource_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
static DEVICE_ATTR_RW(current_clocksource);
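
/*
 * From user space the current clocksource can be inspected and, assuming
 * the requested name is registered and usable, overridden via sysfs;
 * "tsc" and "hpet" below are illustrative names:
 *
 *	# cat /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	tsc
 *	# echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 */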

/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: name of clocksource to unbind
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t unbind_clocksource_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_clocksource);
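
/*
 * Example: forcibly drop a registered clocksource from user space (the
 * name must match an entry in available_clocksource; "acpi_pm" is just
 * an illustrative name):
 *
 *	# echo acpi_pm > /sys/devices/system/clocksource/clocksource0/unbind_clocksource
 */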

/**
 * available_clocksource_show - sysfs interface for listing clocksources
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources.
 */
static ssize_t available_clocksource_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksources if the tick code is
		 * in one shot mode (highres=on or nohz=on).
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
					  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
					  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
static DEVICE_ATTR_RO(available_clocksource);

static struct attribute *clocksource_attrs[] = {
	&dev_attr_current_clocksource.attr,
	&dev_attr_unbind_clocksource.attr,
	&dev_attr_available_clocksource.attr,
	NULL
};
ATTRIBUTE_GROUPS(clocksource);

static const struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
	.groups	= clocksource_groups,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);

	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str: override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char *str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strscpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
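
/*
 * Example: booting with "clocksource=hpet" on the kernel command line
 * ("hpet" is just an illustrative name) records "hpet" as the override,
 * so it is preferred over higher-rated clocksources for as long as it
 * remains usable.
 */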

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str: override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clock(char *str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);