1// SPDX-License-Identifier: GPL-2.0-only
2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4#include <linux/kernel.h>
5#include <linux/sched.h>
6#include <linux/sched/clock.h>
7#include <linux/init.h>
8#include <linux/export.h>
9#include <linux/timer.h>
10#include <linux/acpi_pmtmr.h>
11#include <linux/cpufreq.h>
12#include <linux/delay.h>
13#include <linux/clocksource.h>
14#include <linux/percpu.h>
15#include <linux/timex.h>
16#include <linux/static_key.h>
17#include <linux/static_call.h>
18
19#include <asm/hpet.h>
20#include <asm/timer.h>
21#include <asm/vgtod.h>
22#include <asm/time.h>
23#include <asm/delay.h>
24#include <asm/hypervisor.h>
25#include <asm/nmi.h>
26#include <asm/x86_init.h>
27#include <asm/geode.h>
28#include <asm/apic.h>
29#include <asm/cpu_device_id.h>
30#include <asm/i8259.h>
31#include <asm/topology.h>
32#include <asm/uv/uv.h>
33
34unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
35EXPORT_SYMBOL(cpu_khz);
36
37unsigned int __read_mostly tsc_khz;
38EXPORT_SYMBOL(tsc_khz);
39
40#define KHZ 1000
41
42/*
43 * TSC can be unstable due to cpufreq or due to unsynced TSCs
44 */
45static int __read_mostly tsc_unstable;
46static unsigned int __initdata tsc_early_khz;
47
48static DEFINE_STATIC_KEY_FALSE_RO(__use_tsc);
49
50int tsc_clocksource_reliable;
51
52static int __read_mostly tsc_force_recalibrate;
53
54static struct clocksource_base art_base_clk = {
55 .id = CSID_X86_ART,
56};
57static bool have_art;
58
59struct cyc2ns {
60 struct cyc2ns_data data[2]; /* 0 + 2*16 = 32 */
61 seqcount_latch_t seq; /* 32 + 4 = 36 */
62
63}; /* fits one cacheline */
64
65static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
66
67static int __init tsc_early_khz_setup(char *buf)
68{
69 return kstrtouint(buf, 0, &tsc_early_khz);
70}
71early_param("tsc_early_khz", tsc_early_khz_setup);
72
73__always_inline void __cyc2ns_read(struct cyc2ns_data *data)
74{
75 int seq, idx;
76
77 do {
78 seq = this_cpu_read(cyc2ns.seq.seqcount.sequence);
79 idx = seq & 1;
80
81 data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
82 data->cyc2ns_mul = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
83 data->cyc2ns_shift = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
84
85 } while (unlikely(seq != this_cpu_read(cyc2ns.seq.seqcount.sequence)));
86}
87
88__always_inline void cyc2ns_read_begin(struct cyc2ns_data *data)
89{
90 preempt_disable_notrace();
91 __cyc2ns_read(data);
92}
93
94__always_inline void cyc2ns_read_end(void)
95{
96 preempt_enable_notrace();
97}
98
99/*
100 * Accelerators for sched_clock()
101 * convert from cycles(64bits) => nanoseconds (64bits)
102 * basic equation:
103 * ns = cycles / (freq / ns_per_sec)
104 * ns = cycles * (ns_per_sec / freq)
105 * ns = cycles * (10^9 / (cpu_khz * 10^3))
106 * ns = cycles * (10^6 / cpu_khz)
107 *
108 * Then we use scaling math (suggested by george@mvista.com) to get:
109 * ns = cycles * (10^6 * SC / cpu_khz) / SC
110 * ns = cycles * cyc2ns_scale / SC
111 *
112 * And since SC is a constant power of two, we can convert the div
113 * into a shift. The larger SC is, the more accurate the conversion, but
114 * cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
115 * (64-bit result) can be used.
116 *
117 * We can use khz divisor instead of mhz to keep a better precision.
118 * (mathieu.desnoyers@polymtl.ca)
119 *
120 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
121 */
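/*
 * A hedged worked example with made-up numbers (not taken from any
 * particular machine): for a hypothetical 2 GHz TSC, cpu_khz = 2000000,
 * and with SC = 2^10 the scale factor is
 *	cyc2ns_scale = 10^6 * 1024 / 2000000 = 512
 * so ns = (cycles * 512) >> 10 = cycles / 2, which matches the expected
 * 2 cycles per nanosecond. clocks_calc_mult_shift() below chooses a
 * larger shift for better precision, but the arithmetic is the same.
 */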
122
123static __always_inline unsigned long long __cycles_2_ns(unsigned long long cyc)
124{
125 struct cyc2ns_data data;
126 unsigned long long ns;
127
128 __cyc2ns_read(&data);
129
130 ns = data.cyc2ns_offset;
131 ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
132
133 return ns;
134}
135
136static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
137{
138 unsigned long long ns;
139 preempt_disable_notrace();
140 ns = __cycles_2_ns(cyc);
141 preempt_enable_notrace();
142 return ns;
143}
144
145static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
146{
147 unsigned long long ns_now;
148 struct cyc2ns_data data;
149 struct cyc2ns *c2n;
150
151 ns_now = cycles_2_ns(tsc_now);
152
153 /*
154 * Compute a new multiplier as per the above comment and ensure our
155 * time function is continuous; see the comment near struct
156 * cyc2ns_data.
157 */
158 clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
159 NSEC_PER_MSEC, 0);
160
161 /*
162 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
163 * not expected to be greater than 31 due to the original published
164 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
165 * value) - refer perf_event_mmap_page documentation in perf_event.h.
166 */
167 if (data.cyc2ns_shift == 32) {
168 data.cyc2ns_shift = 31;
169 data.cyc2ns_mul >>= 1;
170 }
171
172 data.cyc2ns_offset = ns_now -
173 mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);
174
175 c2n = per_cpu_ptr(&cyc2ns, cpu);
176
177 write_seqcount_latch_begin(&c2n->seq);
178 c2n->data[0] = data;
179 write_seqcount_latch(&c2n->seq);
180 c2n->data[1] = data;
181 write_seqcount_latch_end(&c2n->seq);
182}
183
184static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
185{
186 unsigned long flags;
187
188 local_irq_save(flags);
189 sched_clock_idle_sleep_event();
190
191 if (khz)
192 __set_cyc2ns_scale(khz, cpu, tsc_now);
193
194 sched_clock_idle_wakeup_event();
195 local_irq_restore(flags);
196}
197
198/*
199 * Initialize cyc2ns for boot cpu
200 */
201static void __init cyc2ns_init_boot_cpu(void)
202{
203 struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
204
205 seqcount_latch_init(&c2n->seq);
206 __set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
207}
208
209/*
210 * Secondary CPUs do not run through tsc_init(), so set up
211 * all the scale factors for all CPUs, assuming the same
212 * speed as the bootup CPU.
213 */
214static void __init cyc2ns_init_secondary_cpus(void)
215{
216 unsigned int cpu, this_cpu = smp_processor_id();
217 struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
218 struct cyc2ns_data *data = c2n->data;
219
220 for_each_possible_cpu(cpu) {
221 if (cpu != this_cpu) {
222 seqcount_latch_init(&c2n->seq);
223 c2n = per_cpu_ptr(&cyc2ns, cpu);
224 c2n->data[0] = data[0];
225 c2n->data[1] = data[1];
226 }
227 }
228}
229
230/*
231 * Scheduler clock - returns current time in nanosec units.
232 */
233noinstr u64 native_sched_clock(void)
234{
235 if (static_branch_likely(&__use_tsc)) {
236 u64 tsc_now = rdtsc();
237
238 /* return the value in ns */
239 return __cycles_2_ns(tsc_now);
240 }
241
242 /*
243 * Fall back to jiffies if there's no TSC available:
244 * ( But note that we still use it if the TSC is marked
245 * unstable. We do this because unlike Time Of Day,
246 * the scheduler clock tolerates small errors and it's
247 * very important for it to be as fast as the platform
248 * can achieve it. )
249 */
250
251 /* No locking but a rare wrong value is not a big deal: */
252 return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
253}
254
255/*
256 * Generate a sched_clock if you already have a TSC value.
257 */
258u64 native_sched_clock_from_tsc(u64 tsc)
259{
260 return cycles_2_ns(tsc);
261}
262
263/* We need to define a real function for sched_clock, to override the
264 weak default version */
265#ifdef CONFIG_PARAVIRT
266noinstr u64 sched_clock_noinstr(void)
267{
268 return paravirt_sched_clock();
269}
270
271bool using_native_sched_clock(void)
272{
273 return static_call_query(pv_sched_clock) == native_sched_clock;
274}
275#else
276u64 sched_clock_noinstr(void) __attribute__((alias("native_sched_clock")));
277
278bool using_native_sched_clock(void) { return true; }
279#endif
280
281notrace u64 sched_clock(void)
282{
283 u64 now;
284 preempt_disable_notrace();
285 now = sched_clock_noinstr();
286 preempt_enable_notrace();
287 return now;
288}
289
290int check_tsc_unstable(void)
291{
292 return tsc_unstable;
293}
294EXPORT_SYMBOL_GPL(check_tsc_unstable);
295
296#ifdef CONFIG_X86_TSC
297int __init notsc_setup(char *str)
298{
299 mark_tsc_unstable("boot parameter notsc");
300 return 1;
301}
302#else
303/*
304 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
305 * in cpu/common.c
306 */
307int __init notsc_setup(char *str)
308{
309 setup_clear_cpu_cap(X86_FEATURE_TSC);
310 return 1;
311}
312#endif
313
314__setup("notsc", notsc_setup);
315
316static int no_sched_irq_time;
317static int no_tsc_watchdog;
318static int tsc_as_watchdog;
319
320static int __init tsc_setup(char *str)
321{
322 if (!strcmp(str, "reliable"))
323 tsc_clocksource_reliable = 1;
324 if (!strncmp(str, "noirqtime", 9))
325 no_sched_irq_time = 1;
326 if (!strcmp(str, "unstable"))
327 mark_tsc_unstable("boot parameter");
328 if (!strcmp(str, "nowatchdog")) {
329 no_tsc_watchdog = 1;
330 if (tsc_as_watchdog)
331 pr_alert("%s: Overriding earlier tsc=watchdog with tsc=nowatchdog\n",
332 __func__);
333 tsc_as_watchdog = 0;
334 }
335 if (!strcmp(str, "recalibrate"))
336 tsc_force_recalibrate = 1;
337 if (!strcmp(str, "watchdog")) {
338 if (no_tsc_watchdog)
339 pr_alert("%s: tsc=watchdog overridden by earlier tsc=nowatchdog\n",
340 __func__);
341 else
342 tsc_as_watchdog = 1;
343 }
344 return 1;
345}
346
347__setup("tsc=", tsc_setup);
348
349#define MAX_RETRIES 5
350#define TSC_DEFAULT_THRESHOLD 0x20000
351
352/*
353 * Read TSC and the reference counters. Take care of any disturbances
354 */
355static u64 tsc_read_refs(u64 *p, int hpet)
356{
357 u64 t1, t2;
358 u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
359 int i;
360
361 for (i = 0; i < MAX_RETRIES; i++) {
362 t1 = get_cycles();
363 if (hpet)
364 *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
365 else
366 *p = acpi_pm_read_early();
367 t2 = get_cycles();
368 if ((t2 - t1) < thresh)
369 return t2;
370 }
371 return ULLONG_MAX;
372}
373
374/*
375 * Calculate the TSC frequency from HPET reference
376 */
377static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
378{
379 u64 tmp;
380
381 if (hpet2 < hpet1)
382 hpet2 += 0x100000000ULL;
383 hpet2 -= hpet1;
384 tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
385 do_div(tmp, 1000000);
386 deltatsc = div64_u64(deltatsc, tmp);
387
388 return (unsigned long) deltatsc;
389}
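/*
 * Rough sanity check of the units above (illustrative, assumed values):
 * HPET_PERIOD is in femtoseconds per tick, so hpet_ticks * HPET_PERIOD
 * / 10^6 is the elapsed time in nanoseconds. The caller passes deltatsc
 * as TSC cycles * 10^6, so dividing by the elapsed nanoseconds yields
 * cycles/ns * 10^6, i.e. kHz. For example, a hypothetical 14.318 MHz
 * HPET (HPET_PERIOD ~= 69841279 fs) advancing ~716000 ticks (~50 ms)
 * while the TSC advances 150 million cycles works out to roughly
 * 3000000 kHz (3 GHz).
 */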
390
391/*
392 * Calculate the TSC frequency from PMTimer reference
393 */
394static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
395{
396 u64 tmp;
397
398 if (!pm1 && !pm2)
399 return ULONG_MAX;
400
401 if (pm2 < pm1)
402 pm2 += (u64)ACPI_PM_OVRRUN;
403 pm2 -= pm1;
404 tmp = pm2 * 1000000000LL;
405 do_div(tmp, PMTMR_TICKS_PER_SEC);
406 do_div(deltatsc, tmp);
407
408 return (unsigned long) deltatsc;
409}
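/*
 * Same idea as calc_hpet_ref() above, just with the ACPI PM timer as the
 * reference: pm ticks * 10^9 / PMTMR_TICKS_PER_SEC (3.579545 MHz) gives
 * the elapsed nanoseconds, and deltatsc (TSC cycles * 10^6) divided by
 * that is again a frequency in kHz.
 */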
410
411#define CAL_MS 10
412#define CAL_LATCH (PIT_TICK_RATE / (1000 / CAL_MS))
413#define CAL_PIT_LOOPS 1000
414
415#define CAL2_MS 50
416#define CAL2_LATCH (PIT_TICK_RATE / (1000 / CAL2_MS))
417#define CAL2_PIT_LOOPS 5000
418
419
420/*
421 * Try to calibrate the TSC against the Programmable
422 * Interrupt Timer and return the frequency of the TSC
423 * in kHz.
424 *
425 * Return ULONG_MAX on failure to calibrate.
426 */
427static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
428{
429 u64 tsc, t1, t2, delta;
430 unsigned long tscmin, tscmax;
431 int pitcnt;
432
433 if (!has_legacy_pic()) {
434 /*
435 * Relies on tsc_early_delay_calibrate() to have given us semi
436 * usable udelay(), wait for the same 50ms we would have with
437 * the PIT loop below.
438 */
439 udelay(10 * USEC_PER_MSEC);
440 udelay(10 * USEC_PER_MSEC);
441 udelay(10 * USEC_PER_MSEC);
442 udelay(10 * USEC_PER_MSEC);
443 udelay(10 * USEC_PER_MSEC);
444 return ULONG_MAX;
445 }
446
447 /* Set the Gate high, disable speaker */
448 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
449
450 /*
451 * Setup CTC channel 2* for mode 0, (interrupt on terminal
452 * count mode), binary count. Set the latch register to 50ms
453 * (LSB then MSB) to begin countdown.
454 */
455 outb(0xb0, 0x43);
456 outb(latch & 0xff, 0x42);
457 outb(latch >> 8, 0x42);
458
459 tsc = t1 = t2 = get_cycles();
460
461 pitcnt = 0;
462 tscmax = 0;
463 tscmin = ULONG_MAX;
464 while ((inb(0x61) & 0x20) == 0) {
465 t2 = get_cycles();
466 delta = t2 - tsc;
467 tsc = t2;
468 if ((unsigned long) delta < tscmin)
469 tscmin = (unsigned int) delta;
470 if ((unsigned long) delta > tscmax)
471 tscmax = (unsigned int) delta;
472 pitcnt++;
473 }
474
475 /*
476 * Sanity checks:
477 *
478 * If we were not able to read the PIT more than loopmin
479 * times, then we have been hit by a massive SMI
480 *
481 * If the maximum is 10 times larger than the minimum,
482 * then we got hit by an SMI as well.
483 */
484 if (pitcnt < loopmin || tscmax > 10 * tscmin)
485 return ULONG_MAX;
486
487 /* Calculate the PIT value */
488 delta = t2 - t1;
489 do_div(delta, ms);
490 return delta;
491}
492
493/*
494 * This reads the current MSB of the PIT counter, and
495 * checks if we are running on sufficiently fast and
496 * non-virtualized hardware.
497 *
498 * Our expectations are:
499 *
500 * - the PIT is running at roughly 1.19MHz
501 *
502 * - each IO is going to take about 1us on real hardware,
503 * but we allow it to be much faster (by a factor of 10) or
504 * _slightly_ slower (ie we allow up to a 2us read+counter
505 * update - anything else implies a unacceptably slow CPU
506 * or PIT for the fast calibration to work.
507 *
508 * - with 256 PIT ticks to read the value, we have 214us to
509 * see the same MSB (and overhead like doing a single TSC
510 * read per MSB value etc).
511 *
512 * - We're doing 2 reads per loop (LSB, MSB), and we expect
513 * them each to take about a microsecond on real hardware.
514 * So we expect a count value of around 100. But we'll be
515 * generous, and accept anything over 50.
516 *
517 * - if the PIT is stuck, and we see *many* more reads, we
518 * return early (and the next caller of pit_expect_msb()
519 * then consider it a failure when they don't see the
520 * next expected value).
521 *
522 * These expectations mean that we know that we have seen the
523 * transition from one expected value to another with a fairly
524 * high accuracy, and we didn't miss any events. We can thus
525 * use the TSC value at the transitions to calculate a pretty
526 * good value for the TSC frequency.
527 */
528static inline int pit_verify_msb(unsigned char val)
529{
530 /* Ignore LSB */
531 inb(0x42);
532 return inb(0x42) == val;
533}
534
535static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
536{
537 int count;
538 u64 tsc = 0, prev_tsc = 0;
539
540 for (count = 0; count < 50000; count++) {
541 if (!pit_verify_msb(val))
542 break;
543 prev_tsc = tsc;
544 tsc = get_cycles();
545 }
546 *deltap = get_cycles() - prev_tsc;
547 *tscp = tsc;
548
549 /*
550 * We require _some_ success, but the quality control
551 * will be based on the error terms on the TSC values.
552 */
553 return count > 5;
554}
555
556/*
557 * How many MSB values do we want to see? We aim for
558 * a maximum error rate of 500ppm (in practice the
559 * real error is much smaller), but refuse to spend
560 * more than 50ms on it.
561 */
562#define MAX_QUICK_PIT_MS 50
563#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
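/* With PIT_TICK_RATE = 1193182 Hz this works out to roughly 233 MSB steps. */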
564
565static unsigned long quick_pit_calibrate(void)
566{
567 int i;
568 u64 tsc, delta;
569 unsigned long d1, d2;
570
571 if (!has_legacy_pic())
572 return 0;
573
574 /* Set the Gate high, disable speaker */
575 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
576
577 /*
578 * Counter 2, mode 0 (one-shot), binary count
579 *
580 * NOTE! Mode 2 decrements by two (and then the
581 * output is flipped each time, giving the same
582 * final output frequency as a decrement-by-one),
583 * so mode 0 is much better when looking at the
584 * individual counts.
585 */
586 outb(0xb0, 0x43);
587
588 /* Start at 0xffff */
589 outb(0xff, 0x42);
590 outb(0xff, 0x42);
591
592 /*
593 * The PIT starts counting at the next edge, so we
594 * need to delay for a microsecond. The easiest way
595 * to do that is to just read back the 16-bit counter
596 * once from the PIT.
597 */
598 pit_verify_msb(0);
599
600 if (pit_expect_msb(0xff, &tsc, &d1)) {
601 for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
602 if (!pit_expect_msb(0xff-i, &delta, &d2))
603 break;
604
605 delta -= tsc;
606
607 /*
608 * Extrapolate the error and fail fast if the error will
609 * never be below 500 ppm.
610 */
611 if (i == 1 &&
612 d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
613 return 0;
614
615 /*
616 * Iterate until the error is less than 500 ppm
617 */
618 if (d1+d2 >= delta >> 11)
619 continue;
620
621 /*
622 * Check the PIT one more time to verify that
623 * all TSC reads were stable wrt the PIT.
624 *
625 * This also guarantees serialization of the
626 * last cycle read ('d2') in pit_expect_msb.
627 */
628 if (!pit_verify_msb(0xfe - i))
629 break;
630 goto success;
631 }
632 }
633 pr_info("Fast TSC calibration failed\n");
634 return 0;
635
636success:
637 /*
638 * Ok, if we get here, then we've seen the
639 * MSB of the PIT decrement 'i' times, and the
640 * error has shrunk to less than 500 ppm.
641 *
642 * As a result, we can depend on there not being
643 * any odd delays anywhere, and the TSC reads are
644 * reliable (within the error).
645 *
646 * kHz = ticks / time-in-seconds / 1000;
647 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
648 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
649 */
650 delta *= PIT_TICK_RATE;
651 do_div(delta, i*256*1000);
652 pr_info("Fast TSC calibration using PIT\n");
653 return delta;
654}
655
656/**
657 * native_calibrate_tsc - determine TSC frequency
658 * Determine TSC frequency via CPUID, else return 0.
659 */
660unsigned long native_calibrate_tsc(void)
661{
662 unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
663 unsigned int crystal_khz;
664
665 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
666 return 0;
667
668 if (boot_cpu_data.cpuid_level < 0x15)
669 return 0;
670
671 eax_denominator = ebx_numerator = ecx_hz = edx = 0;
672
673 /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
674 cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
675
676 if (ebx_numerator == 0 || eax_denominator == 0)
677 return 0;
678
679 crystal_khz = ecx_hz / 1000;
680
681 /*
682 * Denverton SoCs don't report crystal clock, and also don't support
683 * CPUID.0x16 for the calculation below, so hardcode the 25MHz crystal
684 * clock.
685 */
686 if (crystal_khz == 0 &&
687 boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT_D)
688 crystal_khz = 25000;
689
690 /*
691 * TSC frequency reported directly by CPUID is a "hardware reported"
692 * frequency and is the most accurate one so far we have. This
693 * is considered a known frequency.
694 */
695 if (crystal_khz != 0)
696 setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
697
698 /*
699 * Some Intel SoCs like Skylake and Kabylake don't report the crystal
700 * clock, but we can easily calculate it to a high degree of accuracy
701 * by considering the crystal ratio and the CPU speed.
702 */
703 if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= 0x16) {
704 unsigned int eax_base_mhz, ebx, ecx, edx;
705
706 cpuid(0x16, &eax_base_mhz, &ebx, &ecx, &edx);
707 crystal_khz = eax_base_mhz * 1000 *
708 eax_denominator / ebx_numerator;
709 }
710
711 if (crystal_khz == 0)
712 return 0;
713
714 /*
715 * For Atom SoCs TSC is the only reliable clocksource.
716 * Mark TSC reliable so no watchdog on it.
717 */
718 if (boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT)
719 setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
720
721#ifdef CONFIG_X86_LOCAL_APIC
722 /*
723 * The local APIC appears to be fed by the core crystal clock
724 * (which sounds entirely sensible). We can set the global
725 * lapic_timer_period here to avoid having to calibrate the APIC
726 * timer later.
727 */
728 lapic_timer_period = crystal_khz * 1000 / HZ;
729#endif
730
731 return crystal_khz * ebx_numerator / eax_denominator;
732}
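/*
 * Illustrative CPUID 0x15 example (hypothetical values, not from real
 * hardware): eax = 2, ebx = 176 and ecx = 24000000 Hz would give
 * crystal_khz = 24000 and a TSC frequency of 24000 * 176 / 2 =
 * 2112000 kHz, i.e. 2.112 GHz.
 */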
733
734static unsigned long cpu_khz_from_cpuid(void)
735{
736 unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;
737
738 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
739 return 0;
740
741 if (boot_cpu_data.cpuid_level < 0x16)
742 return 0;
743
744 eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;
745
746 cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);
747
748 return eax_base_mhz * 1000;
749}
750
751/*
752 * calibrate cpu using pit, hpet, and ptimer methods. They are available
753 * later in boot after acpi is initialized.
754 */
755static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
756{
757 u64 tsc1, tsc2, delta, ref1, ref2;
758 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
759 unsigned long flags, latch, ms;
760 int hpet = is_hpet_enabled(), i, loopmin;
761
762 /*
763 * Run 5 calibration loops to get the lowest frequency value
764 * (the best estimate). We use two different calibration modes
765 * here:
766 *
767 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
768 * load a timeout of 50ms. We read the time right after we
769 * started the timer and wait until the PIT count down reaches
770 * zero. In each wait loop iteration we read the TSC and check
771 * the delta to the previous read. We keep track of the min
772 * and max values of that delta. The delta is mostly defined
773 * by the IO time of the PIT access, so we can detect when
774 * any disturbance happened between the two reads. If the
775 * maximum time is significantly larger than the minimum time,
776 * then we discard the result and have another try.
777 *
778 * 2) Reference counter. If available we use the HPET or the
779 * PMTIMER as a reference to check the sanity of that value.
780 * We use separate TSC readouts and check inside of the
781 * reference read for any possible disturbance. We discard
782 * disturbed values here as well. We do that around the PIT
783 * calibration delay loop as we have to wait for a certain
784 * amount of time anyway.
785 */
786
787 /* Preset PIT loop values */
788 latch = CAL_LATCH;
789 ms = CAL_MS;
790 loopmin = CAL_PIT_LOOPS;
791
792 for (i = 0; i < 3; i++) {
793 unsigned long tsc_pit_khz;
794
795 /*
796 * Read the start value and the reference count of
797 * hpet/pmtimer when available. Then do the PIT
798 * calibration, which will take at least 50ms, and
799 * read the end value.
800 */
801 local_irq_save(flags);
802 tsc1 = tsc_read_refs(&ref1, hpet);
803 tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
804 tsc2 = tsc_read_refs(&ref2, hpet);
805 local_irq_restore(flags);
806
807 /* Pick the lowest PIT TSC calibration so far */
808 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
809
810 /* hpet or pmtimer available ? */
811 if (ref1 == ref2)
812 continue;
813
814 /* Check, whether the sampling was disturbed */
815 if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
816 continue;
817
818 tsc2 = (tsc2 - tsc1) * 1000000LL;
819 if (hpet)
820 tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
821 else
822 tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);
823
824 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
825
826 /* Check the reference deviation */
827 delta = ((u64) tsc_pit_min) * 100;
828 do_div(delta, tsc_ref_min);
829
830 /*
831 * If both calibration results are inside a 10% window
832 * then we can be sure, that the calibration
833 * succeeded. We break out of the loop right away. We
834 * use the reference value, as it is more precise.
835 */
836 if (delta >= 90 && delta <= 110) {
837 pr_info("PIT calibration matches %s. %d loops\n",
838 hpet ? "HPET" : "PMTIMER", i + 1);
839 return tsc_ref_min;
840 }
841
842 /*
843 * Check whether PIT failed more than once. This
844 * happens in virtualized environments. We need to
845 * give the virtual PC a slightly longer timeframe for
846 * the HPET/PMTIMER to make the result precise.
847 */
848 if (i == 1 && tsc_pit_min == ULONG_MAX) {
849 latch = CAL2_LATCH;
850 ms = CAL2_MS;
851 loopmin = CAL2_PIT_LOOPS;
852 }
853 }
854
855 /*
856 * Now check the results.
857 */
858 if (tsc_pit_min == ULONG_MAX) {
859 /* PIT gave no useful value */
860 pr_warn("Unable to calibrate against PIT\n");
861
862 /* We don't have an alternative source, disable TSC */
863 if (!hpet && !ref1 && !ref2) {
864 pr_notice("No reference (HPET/PMTIMER) available\n");
865 return 0;
866 }
867
868 /* The alternative source failed as well, disable TSC */
869 if (tsc_ref_min == ULONG_MAX) {
870 pr_warn("HPET/PMTIMER calibration failed\n");
871 return 0;
872 }
873
874 /* Use the alternative source */
875 pr_info("using %s reference calibration\n",
876 hpet ? "HPET" : "PMTIMER");
877
878 return tsc_ref_min;
879 }
880
881 /* We don't have an alternative source, use the PIT calibration value */
882 if (!hpet && !ref1 && !ref2) {
883 pr_info("Using PIT calibration value\n");
884 return tsc_pit_min;
885 }
886
887 /* The alternative source failed, use the PIT calibration value */
888 if (tsc_ref_min == ULONG_MAX) {
889 pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
890 return tsc_pit_min;
891 }
892
893 /*
894 * The calibration values differ too much. In doubt, we use
895 * the PIT value as we know that there are PMTIMERs around
896 * running at double speed. At least we let the user know:
897 */
898 pr_warn("PIT calibration deviates from %s: %lu %lu\n",
899 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
900 pr_info("Using PIT calibration value\n");
901 return tsc_pit_min;
902}
903
904/**
905 * native_calibrate_cpu_early - can calibrate the cpu early in boot
906 */
907unsigned long native_calibrate_cpu_early(void)
908{
909 unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();
910
911 if (!fast_calibrate)
912 fast_calibrate = cpu_khz_from_msr();
913 if (!fast_calibrate) {
914 local_irq_save(flags);
915 fast_calibrate = quick_pit_calibrate();
916 local_irq_restore(flags);
917 }
918 return fast_calibrate;
919}
920
921
922/**
923 * native_calibrate_cpu - calibrate the cpu
924 */
925static unsigned long native_calibrate_cpu(void)
926{
927 unsigned long tsc_freq = native_calibrate_cpu_early();
928
929 if (!tsc_freq)
930 tsc_freq = pit_hpet_ptimer_calibrate_cpu();
931
932 return tsc_freq;
933}
934
935void recalibrate_cpu_khz(void)
936{
937#ifndef CONFIG_SMP
938 unsigned long cpu_khz_old = cpu_khz;
939
940 if (!boot_cpu_has(X86_FEATURE_TSC))
941 return;
942
943 cpu_khz = x86_platform.calibrate_cpu();
944 tsc_khz = x86_platform.calibrate_tsc();
945 if (tsc_khz == 0)
946 tsc_khz = cpu_khz;
947 else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
948 cpu_khz = tsc_khz;
949 cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
950 cpu_khz_old, cpu_khz);
951#endif
952}
953EXPORT_SYMBOL_GPL(recalibrate_cpu_khz);
954
955
956static unsigned long long cyc2ns_suspend;
957
958void tsc_save_sched_clock_state(void)
959{
960 if (!sched_clock_stable())
961 return;
962
963 cyc2ns_suspend = sched_clock();
964}
965
966/*
967 * Even on processors with invariant TSC, TSC gets reset in some of the
968 * ACPI system sleep states. And in some systems the BIOS seems to reinit TSC to
969 * an arbitrary value (still sync'd across CPUs) during resume from such sleep
970 * states. To cope with this, recompute the cyc2ns_offset for each cpu so
971 * that sched_clock() continues from the point where it was left off during
972 * suspend.
973 */
974void tsc_restore_sched_clock_state(void)
975{
976 unsigned long long offset;
977 unsigned long flags;
978 int cpu;
979
980 if (!sched_clock_stable())
981 return;
982
983 local_irq_save(flags);
984
985 /*
986 * We're coming out of suspend, there's no concurrency yet; don't
987 * bother being nice about the RCU stuff, just write to both
988 * data fields.
989 */
990
991 this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
992 this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);
993
994 offset = cyc2ns_suspend - sched_clock();
995
996 for_each_possible_cpu(cpu) {
997 per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
998 per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
999 }
1000
1001 local_irq_restore(flags);
1002}
1003
1004#ifdef CONFIG_CPU_FREQ
1005/*
1006 * Frequency scaling support. Adjust the TSC based timer when the CPU frequency
1007 * changes.
1008 *
1009 * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
1010 * as unstable and give up in those cases.
1011 *
1012 * Should fix up last_tsc too. Currently gettimeofday in the
1013 * first tick after the change will be slightly wrong.
1014 */
1015
1016static unsigned int ref_freq;
1017static unsigned long loops_per_jiffy_ref;
1018static unsigned long tsc_khz_ref;
1019
1020static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
1021 void *data)
1022{
1023 struct cpufreq_freqs *freq = data;
1024
1025 if (num_online_cpus() > 1) {
1026 mark_tsc_unstable("cpufreq changes on SMP");
1027 return 0;
1028 }
1029
1030 if (!ref_freq) {
1031 ref_freq = freq->old;
1032 loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
1033 tsc_khz_ref = tsc_khz;
1034 }
1035
1036 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
1037 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
1038 boot_cpu_data.loops_per_jiffy =
1039 cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
1040
1041 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
1042 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
1043 mark_tsc_unstable("cpufreq changes");
1044
1045 set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
1046 }
1047
1048 return 0;
1049}
1050
1051static struct notifier_block time_cpufreq_notifier_block = {
1052 .notifier_call = time_cpufreq_notifier
1053};
1054
1055static int __init cpufreq_register_tsc_scaling(void)
1056{
1057 if (!boot_cpu_has(X86_FEATURE_TSC))
1058 return 0;
1059 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1060 return 0;
1061 cpufreq_register_notifier(&time_cpufreq_notifier_block,
1062 CPUFREQ_TRANSITION_NOTIFIER);
1063 return 0;
1064}
1065
1066core_initcall(cpufreq_register_tsc_scaling);
1067
1068#endif /* CONFIG_CPU_FREQ */
1069
1070#define ART_CPUID_LEAF (0x15)
1071#define ART_MIN_DENOMINATOR (1)
1072
1073
1074/*
1075 * If ART is present detect the numerator:denominator to convert to TSC
1076 */
1077static void __init detect_art(void)
1078{
1079 unsigned int unused;
1080
1081 if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
1082 return;
1083
1084 /*
1085 * Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required,
1086 * and the TSC counter resets must not occur asynchronously.
1087 */
1088 if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
1089 !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
1090 !boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
1091 tsc_async_resets)
1092 return;
1093
1094 cpuid(ART_CPUID_LEAF, &art_base_clk.denominator,
1095 &art_base_clk.numerator, &art_base_clk.freq_khz, &unused);
1096
1097 art_base_clk.freq_khz /= KHZ;
1098 if (art_base_clk.denominator < ART_MIN_DENOMINATOR)
1099 return;
1100
1101 rdmsrl(MSR_IA32_TSC_ADJUST, art_base_clk.offset);
1102
1103 /* Make this sticky over multiple CPU init calls */
1104 setup_force_cpu_cap(X86_FEATURE_ART);
1105}
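/*
 * With the ratio and offset captured above, an ART timestamp relates to
 * the TSC as (per the CPUID leaf 0x15 definition):
 *
 *	TSC_value ~= ART_value * numerator / denominator + offset
 *
 * which is what consumers correlating ART-based hardware timestamps
 * (e.g. PTP) with the TSC rely on.
 */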
1106
1107
1108/* clocksource code */
1109
1110static void tsc_resume(struct clocksource *cs)
1111{
1112 tsc_verify_tsc_adjust(true);
1113}
1114
1115/*
1116 * We used to compare the TSC to the cycle_last value in the clocksource
1117 * structure to avoid a nasty time-warp. This can be observed in a
1118 * very small window right after one CPU updated cycle_last under
1119 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
1120 * is smaller than the cycle_last reference value due to a TSC which
1121 * is slightly behind. This delta is nowhere else observable, but in
1122 * that case it results in a forward time jump in the range of hours
1123 * due to the unsigned delta calculation of the time keeping core
1124 * code, which is necessary to support wrapping clocksources like pm
1125 * timer.
1126 *
1127 * This sanity check is now done in the core timekeeping code,
1128 * checking the result of read_tsc() - cycle_last for being negative.
1129 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
1130 */
1131static u64 read_tsc(struct clocksource *cs)
1132{
1133 return (u64)rdtsc_ordered();
1134}
1135
1136static void tsc_cs_mark_unstable(struct clocksource *cs)
1137{
1138 if (tsc_unstable)
1139 return;
1140
1141 tsc_unstable = 1;
1142 if (using_native_sched_clock())
1143 clear_sched_clock_stable();
1144 disable_sched_clock_irqtime();
1145 pr_info("Marking TSC unstable due to clocksource watchdog\n");
1146}
1147
1148static void tsc_cs_tick_stable(struct clocksource *cs)
1149{
1150 if (tsc_unstable)
1151 return;
1152
1153 if (using_native_sched_clock())
1154 sched_clock_tick_stable();
1155}
1156
1157static int tsc_cs_enable(struct clocksource *cs)
1158{
1159 vclocks_set_used(VDSO_CLOCKMODE_TSC);
1160 return 0;
1161}
1162
1163/*
1164 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
1165 */
1166static struct clocksource clocksource_tsc_early = {
1167 .name = "tsc-early",
1168 .rating = 299,
1169 .uncertainty_margin = 32 * NSEC_PER_MSEC,
1170 .read = read_tsc,
1171 .mask = CLOCKSOURCE_MASK(64),
1172 .flags = CLOCK_SOURCE_IS_CONTINUOUS |
1173 CLOCK_SOURCE_MUST_VERIFY,
1174 .id = CSID_X86_TSC_EARLY,
1175 .vdso_clock_mode = VDSO_CLOCKMODE_TSC,
1176 .enable = tsc_cs_enable,
1177 .resume = tsc_resume,
1178 .mark_unstable = tsc_cs_mark_unstable,
1179 .tick_stable = tsc_cs_tick_stable,
1180 .list = LIST_HEAD_INIT(clocksource_tsc_early.list),
1181};
1182
1183/*
1184 * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
1185 * this one will immediately take over. We will only register if TSC has
1186 * been found good.
1187 */
1188static struct clocksource clocksource_tsc = {
1189 .name = "tsc",
1190 .rating = 300,
1191 .read = read_tsc,
1192 .mask = CLOCKSOURCE_MASK(64),
1193 .flags = CLOCK_SOURCE_IS_CONTINUOUS |
1194 CLOCK_SOURCE_VALID_FOR_HRES |
1195 CLOCK_SOURCE_MUST_VERIFY |
1196 CLOCK_SOURCE_VERIFY_PERCPU,
1197 .id = CSID_X86_TSC,
1198 .vdso_clock_mode = VDSO_CLOCKMODE_TSC,
1199 .enable = tsc_cs_enable,
1200 .resume = tsc_resume,
1201 .mark_unstable = tsc_cs_mark_unstable,
1202 .tick_stable = tsc_cs_tick_stable,
1203 .list = LIST_HEAD_INIT(clocksource_tsc.list),
1204};
1205
1206void mark_tsc_unstable(char *reason)
1207{
1208 if (tsc_unstable)
1209 return;
1210
1211 tsc_unstable = 1;
1212 if (using_native_sched_clock())
1213 clear_sched_clock_stable();
1214 disable_sched_clock_irqtime();
1215 pr_info("Marking TSC unstable due to %s\n", reason);
1216
1217 clocksource_mark_unstable(&clocksource_tsc_early);
1218 clocksource_mark_unstable(&clocksource_tsc);
1219}
1220
1221EXPORT_SYMBOL_GPL(mark_tsc_unstable);
1222
1223static void __init tsc_disable_clocksource_watchdog(void)
1224{
1225 clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1226 clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1227}
1228
1229bool tsc_clocksource_watchdog_disabled(void)
1230{
1231 return !(clocksource_tsc.flags & CLOCK_SOURCE_MUST_VERIFY) &&
1232 tsc_as_watchdog && !no_tsc_watchdog;
1233}
1234
1235static void __init check_system_tsc_reliable(void)
1236{
1237#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
1238 if (is_geode_lx()) {
1239 /* RTSC counts during suspend */
1240#define RTSC_SUSP 0x100
1241 unsigned long res_low, res_high;
1242
1243 rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
1244 /* Geode_LX - the OLPC CPU has a very reliable TSC */
1245 if (res_low & RTSC_SUSP)
1246 tsc_clocksource_reliable = 1;
1247 }
1248#endif
1249 if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
1250 tsc_clocksource_reliable = 1;
1251
1252 /*
1253 * Disable the clocksource watchdog when the system has:
1254 * - TSC running at constant frequency
1255 * - TSC which does not stop in C-States
1256 * - the TSC_ADJUST register which allows to detect even minimal
1257 * modifications
1258 * - not more than four packages
1259 */
1260 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
1261 boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
1262 boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
1263 topology_max_packages() <= 4)
1264 tsc_disable_clocksource_watchdog();
1265}
1266
1267/*
1268 * Make an educated guess if the TSC is trustworthy and synchronized
1269 * over all CPUs.
1270 */
1271int unsynchronized_tsc(void)
1272{
1273 if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
1274 return 1;
1275
1276#ifdef CONFIG_SMP
1277 if (apic_is_clustered_box())
1278 return 1;
1279#endif
1280
1281 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1282 return 0;
1283
1284 if (tsc_clocksource_reliable)
1285 return 0;
1286 /*
1287 * Intel systems are normally all synchronized.
1288 * Exceptions must mark TSC as unstable:
1289 */
1290 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1291 /* assume multi socket systems are not synchronized: */
1292 if (topology_max_packages() > 1)
1293 return 1;
1294 }
1295
1296 return 0;
1297}
1298
1299static void tsc_refine_calibration_work(struct work_struct *work);
1300static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
1301/**
1302 * tsc_refine_calibration_work - Further refine tsc freq calibration
1303 * @work: ignored.
1304 *
1305 * This function uses delayed work over a period of a
1306 * second to further refine the TSC freq value. Since this is
1307 * timer based, instead of loop based, we don't block the boot
1308 * process while this longer calibration is done.
1309 *
1310 * If there are any calibration anomalies (too many SMIs, etc),
1311 * or the refined calibration is off by more than 1% from the fast early
1312 * calibration, we throw out the new calibration and use the
1313 * early calibration.
1314 */
1315static void tsc_refine_calibration_work(struct work_struct *work)
1316{
1317 static u64 tsc_start = ULLONG_MAX, ref_start;
1318 static int hpet;
1319 u64 tsc_stop, ref_stop, delta;
1320 unsigned long freq;
1321 int cpu;
1322
1323 /* Don't bother refining TSC on unstable systems */
1324 if (tsc_unstable)
1325 goto unreg;
1326
1327 /*
1328 * Since the work is started early in boot, we may be
1329 * delayed the first time we expire. So set the workqueue
1330 * again once we know timers are working.
1331 */
1332 if (tsc_start == ULLONG_MAX) {
1333restart:
1334 /*
1335 * Only set hpet once, to avoid mixing hardware
1336 * if the hpet becomes enabled later.
1337 */
1338 hpet = is_hpet_enabled();
1339 tsc_start = tsc_read_refs(&ref_start, hpet);
1340 schedule_delayed_work(&tsc_irqwork, HZ);
1341 return;
1342 }
1343
1344 tsc_stop = tsc_read_refs(&ref_stop, hpet);
1345
1346 /* hpet or pmtimer available ? */
1347 if (ref_start == ref_stop)
1348 goto out;
1349
1350 /* Check, whether the sampling was disturbed */
1351 if (tsc_stop == ULLONG_MAX)
1352 goto restart;
1353
1354 delta = tsc_stop - tsc_start;
1355 delta *= 1000000LL;
1356 if (hpet)
1357 freq = calc_hpet_ref(delta, ref_start, ref_stop);
1358 else
1359 freq = calc_pmtimer_ref(delta, ref_start, ref_stop);
1360
1361 /* Will hit this only if tsc_force_recalibrate has been set */
1362 if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
1363
1364 /* Warn if the deviation exceeds 500 ppm */
1365 if (abs(tsc_khz - freq) > (tsc_khz >> 11)) {
1366 pr_warn("Warning: TSC freq calibrated by CPUID/MSR differs from what is calibrated by HW timer, please check with vendor!!\n");
1367 pr_info("Previous calibrated TSC freq:\t %lu.%03lu MHz\n",
1368 (unsigned long)tsc_khz / 1000,
1369 (unsigned long)tsc_khz % 1000);
1370 }
1371
1372 pr_info("TSC freq recalibrated by [%s]:\t %lu.%03lu MHz\n",
1373 hpet ? "HPET" : "PM_TIMER",
1374 (unsigned long)freq / 1000,
1375 (unsigned long)freq % 1000);
1376
1377 return;
1378 }
1379
1380 /* Make sure we're within 1% */
1381 if (abs(tsc_khz - freq) > tsc_khz/100)
1382 goto out;
1383
1384 tsc_khz = freq;
1385 pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
1386 (unsigned long)tsc_khz / 1000,
1387 (unsigned long)tsc_khz % 1000);
1388
1389 /* Inform the TSC deadline clockevent devices about the recalibration */
1390 lapic_update_tsc_freq();
1391
1392 /* Update the sched_clock() rate to match the clocksource one */
1393 for_each_possible_cpu(cpu)
1394 set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
1395
1396out:
1397 if (tsc_unstable)
1398 goto unreg;
1399
1400 if (boot_cpu_has(X86_FEATURE_ART)) {
1401 have_art = true;
1402 clocksource_tsc.base = &art_base_clk;
1403 }
1404 clocksource_register_khz(&clocksource_tsc, tsc_khz);
1405unreg:
1406 clocksource_unregister(&clocksource_tsc_early);
1407}
1408
1409
1410static int __init init_tsc_clocksource(void)
1411{
1412 if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
1413 return 0;
1414
1415 if (tsc_unstable) {
1416 clocksource_unregister(&clocksource_tsc_early);
1417 return 0;
1418 }
1419
1420 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
1421 clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
1422
1423 /*
1424 * When TSC frequency is known (retrieved via MSR or CPUID), we skip
1425 * the refined calibration and directly register it as a clocksource.
1426 */
1427 if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
1428 if (boot_cpu_has(X86_FEATURE_ART)) {
1429 have_art = true;
1430 clocksource_tsc.base = &art_base_clk;
1431 }
1432 clocksource_register_khz(&clocksource_tsc, tsc_khz);
1433 clocksource_unregister(&clocksource_tsc_early);
1434
1435 if (!tsc_force_recalibrate)
1436 return 0;
1437 }
1438
1439 schedule_delayed_work(&tsc_irqwork, 0);
1440 return 0;
1441}
1442/*
1443 * We use device_initcall here, to ensure we run after the hpet
1444 * is fully initialized, which may occur at fs_initcall time.
1445 */
1446device_initcall(init_tsc_clocksource);
1447
1448static bool __init determine_cpu_tsc_frequencies(bool early)
1449{
1450 /* Make sure that cpu and tsc are not already calibrated */
1451 WARN_ON(cpu_khz || tsc_khz);
1452
1453 if (early) {
1454 cpu_khz = x86_platform.calibrate_cpu();
1455 if (tsc_early_khz) {
1456 tsc_khz = tsc_early_khz;
1457 } else {
1458 tsc_khz = x86_platform.calibrate_tsc();
1459 clocksource_tsc.freq_khz = tsc_khz;
1460 }
1461 } else {
1462 /* We should not be here with non-native cpu calibration */
1463 WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
1464 cpu_khz = pit_hpet_ptimer_calibrate_cpu();
1465 }
1466
1467 /*
1468 * Trust non-zero tsc_khz as authoritative,
1469 * and use it to sanity check cpu_khz,
1470 * which will be off if system timer is off.
1471 */
1472 if (tsc_khz == 0)
1473 tsc_khz = cpu_khz;
1474 else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
1475 cpu_khz = tsc_khz;
1476
1477 if (tsc_khz == 0)
1478 return false;
1479
1480 pr_info("Detected %lu.%03lu MHz processor\n",
1481 (unsigned long)cpu_khz / KHZ,
1482 (unsigned long)cpu_khz % KHZ);
1483
1484 if (cpu_khz != tsc_khz) {
1485 pr_info("Detected %lu.%03lu MHz TSC",
1486 (unsigned long)tsc_khz / KHZ,
1487 (unsigned long)tsc_khz % KHZ);
1488 }
1489 return true;
1490}
1491
1492static unsigned long __init get_loops_per_jiffy(void)
1493{
1494 u64 lpj = (u64)tsc_khz * KHZ;
1495
1496 do_div(lpj, HZ);
1497 return lpj;
1498}
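/*
 * Quick illustration with assumed numbers: tsc_khz = 2112000 and
 * HZ = 250 gives loops_per_jiffy = 2112000 * 1000 / 250 = 8448000,
 * i.e. the TSC ticks about 8.45 million times per timer tick.
 */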
1499
1500static void __init tsc_enable_sched_clock(void)
1501{
1502 loops_per_jiffy = get_loops_per_jiffy();
1503 use_tsc_delay();
1504
1505 /* Sanitize TSC ADJUST before cyc2ns gets initialized */
1506 tsc_store_and_check_tsc_adjust(true);
1507 cyc2ns_init_boot_cpu();
1508 static_branch_enable(&__use_tsc);
1509}
1510
1511void __init tsc_early_init(void)
1512{
1513 if (!boot_cpu_has(X86_FEATURE_TSC))
1514 return;
1515 /* Don't change UV TSC multi-chassis synchronization */
1516 if (is_early_uv_system())
1517 return;
1518 if (!determine_cpu_tsc_frequencies(true))
1519 return;
1520 tsc_enable_sched_clock();
1521}
1522
1523void __init tsc_init(void)
1524{
1525 if (!cpu_feature_enabled(X86_FEATURE_TSC)) {
1526 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1527 return;
1528 }
1529
1530 /*
1531 * native_calibrate_cpu_early can only calibrate using methods that are
1532 * available early in boot.
1533 */
1534 if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
1535 x86_platform.calibrate_cpu = native_calibrate_cpu;
1536
1537 if (!tsc_khz) {
1538 /* We failed to determine frequencies earlier, try again */
1539 if (!determine_cpu_tsc_frequencies(false)) {
1540 mark_tsc_unstable("could not calculate TSC khz");
1541 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1542 return;
1543 }
1544 tsc_enable_sched_clock();
1545 }
1546
1547 cyc2ns_init_secondary_cpus();
1548
1549 if (!no_sched_irq_time)
1550 enable_sched_clock_irqtime();
1551
1552 lpj_fine = get_loops_per_jiffy();
1553
1554 check_system_tsc_reliable();
1555
1556 if (unsynchronized_tsc()) {
1557 mark_tsc_unstable("TSCs unsynchronized");
1558 return;
1559 }
1560
1561 if (tsc_clocksource_reliable || no_tsc_watchdog)
1562 tsc_disable_clocksource_watchdog();
1563
1564 clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
1565 detect_art();
1566}
1567
1568#ifdef CONFIG_SMP
1569/*
1570 * Check whether existing calibration data can be reused.
1571 */
1572unsigned long calibrate_delay_is_known(void)
1573{
1574 int sibling, cpu = smp_processor_id();
1575 int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
1576 const struct cpumask *mask = topology_core_cpumask(cpu);
1577
1578 /*
1579 * If TSC has constant frequency and TSC is synchronized across
1580 * sockets then reuse CPU0 calibration.
1581 */
1582 if (constant_tsc && !tsc_unstable)
1583 return cpu_data(0).loops_per_jiffy;
1584
1585 /*
1586 * If TSC has constant frequency and TSC is not synchronized across
1587 * sockets and this is not the first CPU in the socket, then reuse
1588 * the calibration value of an already online CPU on that socket.
1589 *
1590 * This assumes that CONSTANT_TSC is consistent for all CPUs in a
1591 * socket.
1592 */
1593 if (!constant_tsc || !mask)
1594 return 0;
1595
1596 sibling = cpumask_any_but(mask, cpu);
1597 if (sibling < nr_cpu_ids)
1598 return cpu_data(sibling).loops_per_jiffy;
1599 return 0;
1600}
1601#endif
1// SPDX-License-Identifier: GPL-2.0-only
2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4#include <linux/kernel.h>
5#include <linux/sched.h>
6#include <linux/sched/clock.h>
7#include <linux/init.h>
8#include <linux/export.h>
9#include <linux/timer.h>
10#include <linux/acpi_pmtmr.h>
11#include <linux/cpufreq.h>
12#include <linux/delay.h>
13#include <linux/clocksource.h>
14#include <linux/percpu.h>
15#include <linux/timex.h>
16#include <linux/static_key.h>
17
18#include <asm/hpet.h>
19#include <asm/timer.h>
20#include <asm/vgtod.h>
21#include <asm/time.h>
22#include <asm/delay.h>
23#include <asm/hypervisor.h>
24#include <asm/nmi.h>
25#include <asm/x86_init.h>
26#include <asm/geode.h>
27#include <asm/apic.h>
28#include <asm/intel-family.h>
29#include <asm/i8259.h>
30#include <asm/uv/uv.h>
31
32unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
33EXPORT_SYMBOL(cpu_khz);
34
35unsigned int __read_mostly tsc_khz;
36EXPORT_SYMBOL(tsc_khz);
37
38#define KHZ 1000
39
40/*
41 * TSC can be unstable due to cpufreq or due to unsynced TSCs
42 */
43static int __read_mostly tsc_unstable;
44static unsigned int __initdata tsc_early_khz;
45
46static DEFINE_STATIC_KEY_FALSE(__use_tsc);
47
48int tsc_clocksource_reliable;
49
50static u32 art_to_tsc_numerator;
51static u32 art_to_tsc_denominator;
52static u64 art_to_tsc_offset;
53struct clocksource *art_related_clocksource;
54
55struct cyc2ns {
56 struct cyc2ns_data data[2]; /* 0 + 2*16 = 32 */
57 seqcount_t seq; /* 32 + 4 = 36 */
58
59}; /* fits one cacheline */
60
61static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
62
63static int __init tsc_early_khz_setup(char *buf)
64{
65 return kstrtouint(buf, 0, &tsc_early_khz);
66}
67early_param("tsc_early_khz", tsc_early_khz_setup);
68
69__always_inline void cyc2ns_read_begin(struct cyc2ns_data *data)
70{
71 int seq, idx;
72
73 preempt_disable_notrace();
74
75 do {
76 seq = this_cpu_read(cyc2ns.seq.sequence);
77 idx = seq & 1;
78
79 data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
80 data->cyc2ns_mul = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
81 data->cyc2ns_shift = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
82
83 } while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
84}
85
86__always_inline void cyc2ns_read_end(void)
87{
88 preempt_enable_notrace();
89}
90
91/*
92 * Accelerators for sched_clock()
93 * convert from cycles(64bits) => nanoseconds (64bits)
94 * basic equation:
95 * ns = cycles / (freq / ns_per_sec)
96 * ns = cycles * (ns_per_sec / freq)
97 * ns = cycles * (10^9 / (cpu_khz * 10^3))
98 * ns = cycles * (10^6 / cpu_khz)
99 *
100 * Then we use scaling math (suggested by george@mvista.com) to get:
101 * ns = cycles * (10^6 * SC / cpu_khz) / SC
102 * ns = cycles * cyc2ns_scale / SC
103 *
104 * And since SC is a constant power of two, we can convert the div
105 * into a shift. The larger SC is, the more accurate the conversion, but
106 * cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
107 * (64-bit result) can be used.
108 *
109 * We can use khz divisor instead of mhz to keep a better precision.
110 * (mathieu.desnoyers@polymtl.ca)
111 *
112 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
113 */
114
115static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
116{
117 struct cyc2ns_data data;
118 unsigned long long ns;
119
120 cyc2ns_read_begin(&data);
121
122 ns = data.cyc2ns_offset;
123 ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
124
125 cyc2ns_read_end();
126
127 return ns;
128}
129
130static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
131{
132 unsigned long long ns_now;
133 struct cyc2ns_data data;
134 struct cyc2ns *c2n;
135
136 ns_now = cycles_2_ns(tsc_now);
137
138 /*
139 * Compute a new multiplier as per the above comment and ensure our
140 * time function is continuous; see the comment near struct
141 * cyc2ns_data.
142 */
143 clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
144 NSEC_PER_MSEC, 0);
145
146 /*
147 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
148 * not expected to be greater than 31 due to the original published
149 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
150 * value) - refer perf_event_mmap_page documentation in perf_event.h.
151 */
152 if (data.cyc2ns_shift == 32) {
153 data.cyc2ns_shift = 31;
154 data.cyc2ns_mul >>= 1;
155 }
156
157 data.cyc2ns_offset = ns_now -
158 mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);
159
160 c2n = per_cpu_ptr(&cyc2ns, cpu);
161
162 raw_write_seqcount_latch(&c2n->seq);
163 c2n->data[0] = data;
164 raw_write_seqcount_latch(&c2n->seq);
165 c2n->data[1] = data;
166}
167
168static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
169{
170 unsigned long flags;
171
172 local_irq_save(flags);
173 sched_clock_idle_sleep_event();
174
175 if (khz)
176 __set_cyc2ns_scale(khz, cpu, tsc_now);
177
178 sched_clock_idle_wakeup_event();
179 local_irq_restore(flags);
180}
181
182/*
183 * Initialize cyc2ns for boot cpu
184 */
185static void __init cyc2ns_init_boot_cpu(void)
186{
187 struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
188
189 seqcount_init(&c2n->seq);
190 __set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
191}
192
193/*
194 * Secondary CPUs do not run through tsc_init(), so set up
195 * all the scale factors for all CPUs, assuming the same
196 * speed as the bootup CPU.
197 */
198static void __init cyc2ns_init_secondary_cpus(void)
199{
200 unsigned int cpu, this_cpu = smp_processor_id();
201 struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
202 struct cyc2ns_data *data = c2n->data;
203
204 for_each_possible_cpu(cpu) {
205 if (cpu != this_cpu) {
206 seqcount_init(&c2n->seq);
207 c2n = per_cpu_ptr(&cyc2ns, cpu);
208 c2n->data[0] = data[0];
209 c2n->data[1] = data[1];
210 }
211 }
212}
213
214/*
215 * Scheduler clock - returns current time in nanosec units.
216 */
217u64 native_sched_clock(void)
218{
219 if (static_branch_likely(&__use_tsc)) {
220 u64 tsc_now = rdtsc();
221
222 /* return the value in ns */
223 return cycles_2_ns(tsc_now);
224 }
225
226 /*
227 * Fall back to jiffies if there's no TSC available:
228 * ( But note that we still use it if the TSC is marked
229 * unstable. We do this because unlike Time Of Day,
230 * the scheduler clock tolerates small errors and it's
231 * very important for it to be as fast as the platform
232 * can achieve it. )
233 */
234
235 /* No locking but a rare wrong value is not a big deal: */
236 return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
237}
238
239/*
240 * Generate a sched_clock if you already have a TSC value.
241 */
242u64 native_sched_clock_from_tsc(u64 tsc)
243{
244 return cycles_2_ns(tsc);
245}
246
247/* We need to define a real function for sched_clock, to override the
248 weak default version */
249#ifdef CONFIG_PARAVIRT
250unsigned long long sched_clock(void)
251{
252 return paravirt_sched_clock();
253}
254
255bool using_native_sched_clock(void)
256{
257 return pv_ops.time.sched_clock == native_sched_clock;
258}
259#else
260unsigned long long
261sched_clock(void) __attribute__((alias("native_sched_clock")));
262
263bool using_native_sched_clock(void) { return true; }
264#endif
265
266int check_tsc_unstable(void)
267{
268 return tsc_unstable;
269}
270EXPORT_SYMBOL_GPL(check_tsc_unstable);
271
272#ifdef CONFIG_X86_TSC
273int __init notsc_setup(char *str)
274{
275 mark_tsc_unstable("boot parameter notsc");
276 return 1;
277}
278#else
279/*
280 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
281 * in cpu/common.c
282 */
283int __init notsc_setup(char *str)
284{
285 setup_clear_cpu_cap(X86_FEATURE_TSC);
286 return 1;
287}
288#endif
289
290__setup("notsc", notsc_setup);
291
292static int no_sched_irq_time;
293static int no_tsc_watchdog;
294
295static int __init tsc_setup(char *str)
296{
297 if (!strcmp(str, "reliable"))
298 tsc_clocksource_reliable = 1;
299 if (!strncmp(str, "noirqtime", 9))
300 no_sched_irq_time = 1;
301 if (!strcmp(str, "unstable"))
302 mark_tsc_unstable("boot parameter");
303 if (!strcmp(str, "nowatchdog"))
304 no_tsc_watchdog = 1;
305 return 1;
306}
307
308__setup("tsc=", tsc_setup);
309
310#define MAX_RETRIES 5
311#define TSC_DEFAULT_THRESHOLD 0x20000
312
313/*
314 * Read TSC and the reference counters. Take care of any disturbances
315 */
316static u64 tsc_read_refs(u64 *p, int hpet)
317{
318 u64 t1, t2;
319 u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
320 int i;
321
322 for (i = 0; i < MAX_RETRIES; i++) {
323 t1 = get_cycles();
324 if (hpet)
325 *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
326 else
327 *p = acpi_pm_read_early();
328 t2 = get_cycles();
329 if ((t2 - t1) < thresh)
330 return t2;
331 }
332 return ULLONG_MAX;
333}
334
335/*
336 * Calculate the TSC frequency from HPET reference
337 */
338static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
339{
340 u64 tmp;
341
342 if (hpet2 < hpet1)
343 hpet2 += 0x100000000ULL;
344 hpet2 -= hpet1;
345 tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
346 do_div(tmp, 1000000);
347 deltatsc = div64_u64(deltatsc, tmp);
348
349 return (unsigned long) deltatsc;
350}
351
352/*
353 * Calculate the TSC frequency from PMTimer reference
354 */
355static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
356{
357 u64 tmp;
358
359 if (!pm1 && !pm2)
360 return ULONG_MAX;
361
362 if (pm2 < pm1)
363 pm2 += (u64)ACPI_PM_OVRRUN;
364 pm2 -= pm1;
365 tmp = pm2 * 1000000000LL;
366 do_div(tmp, PMTMR_TICKS_PER_SEC);
367 do_div(deltatsc, tmp);
368
369 return (unsigned long) deltatsc;
370}
371
372#define CAL_MS 10
373#define CAL_LATCH (PIT_TICK_RATE / (1000 / CAL_MS))
374#define CAL_PIT_LOOPS 1000
375
376#define CAL2_MS 50
377#define CAL2_LATCH (PIT_TICK_RATE / (1000 / CAL2_MS))
378#define CAL2_PIT_LOOPS 5000
379
380
381/*
382 * Try to calibrate the TSC against the Programmable
383 * Interrupt Timer and return the frequency of the TSC
384 * in kHz.
385 *
386 * Return ULONG_MAX on failure to calibrate.
387 */
388static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
389{
390 u64 tsc, t1, t2, delta;
391 unsigned long tscmin, tscmax;
392 int pitcnt;
393
394 if (!has_legacy_pic()) {
395 /*
396 * Relies on tsc_early_delay_calibrate() to have given us semi
397 * usable udelay(), wait for the same 50ms we would have with
398 * the PIT loop below.
399 */
400 udelay(10 * USEC_PER_MSEC);
401 udelay(10 * USEC_PER_MSEC);
402 udelay(10 * USEC_PER_MSEC);
403 udelay(10 * USEC_PER_MSEC);
404 udelay(10 * USEC_PER_MSEC);
405 return ULONG_MAX;
406 }
407
408 /* Set the Gate high, disable speaker */
409 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
410
411 /*
412 * Setup CTC channel 2* for mode 0, (interrupt on terminal
413 * count mode), binary count. Set the latch register to 50ms
414 * (LSB then MSB) to begin countdown.
415 */
416 outb(0xb0, 0x43);
417 outb(latch & 0xff, 0x42);
418 outb(latch >> 8, 0x42);
419
420 tsc = t1 = t2 = get_cycles();
421
422 pitcnt = 0;
423 tscmax = 0;
424 tscmin = ULONG_MAX;
425 while ((inb(0x61) & 0x20) == 0) {
426 t2 = get_cycles();
427 delta = t2 - tsc;
428 tsc = t2;
429 if ((unsigned long) delta < tscmin)
430 tscmin = (unsigned int) delta;
431 if ((unsigned long) delta > tscmax)
432 tscmax = (unsigned int) delta;
433 pitcnt++;
434 }
435
436 /*
437 * Sanity checks:
438 *
439 * If we were not able to read the PIT more than loopmin
440 * times, then we have been hit by a massive SMI
441 *
442 * If the maximum is 10 times larger than the minimum,
443 * then we got hit by an SMI as well.
444 */
445 if (pitcnt < loopmin || tscmax > 10 * tscmin)
446 return ULONG_MAX;
447
 /* Calculate the TSC frequency in kHz (cycles per millisecond) */
449 delta = t2 - t1;
450 do_div(delta, ms);
451 return delta;
452}
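
/*
 * Rough sanity example (assumed numbers): with the initial CAL_MS of
 * 10ms a 3 GHz TSC advances by about 30,000,000 cycles while the PIT
 * counts down, so delta / ms ~= 30,000,000 / 10 = 3,000,000 kHz.
 */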
453
454/*
455 * This reads the current MSB of the PIT counter, and
456 * checks if we are running on sufficiently fast and
457 * non-virtualized hardware.
458 *
459 * Our expectations are:
460 *
461 * - the PIT is running at roughly 1.19MHz
462 *
 * - each IO is going to take about 1us on real hardware,
 * but we allow it to be much faster (by a factor of 10) or
 * _slightly_ slower (ie we allow up to a 2us read+counter
 * update) - anything else implies an unacceptably slow CPU
 * or PIT for the fast calibration to work.
468 *
469 * - with 256 PIT ticks to read the value, we have 214us to
470 * see the same MSB (and overhead like doing a single TSC
471 * read per MSB value etc).
472 *
473 * - We're doing 2 reads per loop (LSB, MSB), and we expect
474 * them each to take about a microsecond on real hardware.
475 * So we expect a count value of around 100. But we'll be
476 * generous, and accept anything over 50.
477 *
 * - if the PIT is stuck, and we see *many* more reads, we
 * return early (and the next caller of pit_expect_msb()
 * then considers it a failure when it doesn't see the
 * next expected value).
482 *
483 * These expectations mean that we know that we have seen the
484 * transition from one expected value to another with a fairly
485 * high accuracy, and we didn't miss any events. We can thus
486 * use the TSC value at the transitions to calculate a pretty
487 * good value for the TSC frequency.
488 */
489static inline int pit_verify_msb(unsigned char val)
490{
491 /* Ignore LSB */
492 inb(0x42);
493 return inb(0x42) == val;
494}
495
496static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
497{
498 int count;
499 u64 tsc = 0, prev_tsc = 0;
500
501 for (count = 0; count < 50000; count++) {
502 if (!pit_verify_msb(val))
503 break;
504 prev_tsc = tsc;
505 tsc = get_cycles();
506 }
507 *deltap = get_cycles() - prev_tsc;
508 *tscp = tsc;
509
510 /*
511 * We require _some_ success, but the quality control
512 * will be based on the error terms on the TSC values.
513 */
514 return count > 5;
515}
516
517/*
518 * How many MSB values do we want to see? We aim for
519 * a maximum error rate of 500ppm (in practice the
520 * real error is much smaller), but refuse to spend
521 * more than 50ms on it.
522 */
523#define MAX_QUICK_PIT_MS 50
524#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
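
/*
 * For reference: 50 * 1193182 / 1000 / 256 evaluates to roughly 233,
 * i.e. at most ~233 MSB decrements can be observed within 50ms. The
 * "500 ppm" checks below use delta >> 11, and 1/2048 ~= 488 ppm, so
 * the actual bound is slightly tighter than 500 ppm.
 */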
525
526static unsigned long quick_pit_calibrate(void)
527{
528 int i;
529 u64 tsc, delta;
530 unsigned long d1, d2;
531
532 if (!has_legacy_pic())
533 return 0;
534
535 /* Set the Gate high, disable speaker */
536 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
537
538 /*
539 * Counter 2, mode 0 (one-shot), binary count
540 *
541 * NOTE! Mode 2 decrements by two (and then the
542 * output is flipped each time, giving the same
543 * final output frequency as a decrement-by-one),
544 * so mode 0 is much better when looking at the
545 * individual counts.
546 */
547 outb(0xb0, 0x43);
548
549 /* Start at 0xffff */
550 outb(0xff, 0x42);
551 outb(0xff, 0x42);
552
553 /*
554 * The PIT starts counting at the next edge, so we
555 * need to delay for a microsecond. The easiest way
556 * to do that is to just read back the 16-bit counter
557 * once from the PIT.
558 */
559 pit_verify_msb(0);
560
561 if (pit_expect_msb(0xff, &tsc, &d1)) {
562 for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
563 if (!pit_expect_msb(0xff-i, &delta, &d2))
564 break;
565
566 delta -= tsc;
567
568 /*
569 * Extrapolate the error and fail fast if the error will
570 * never be below 500 ppm.
571 */
572 if (i == 1 &&
573 d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
574 return 0;
575
576 /*
577 * Iterate until the error is less than 500 ppm
578 */
579 if (d1+d2 >= delta >> 11)
580 continue;
581
582 /*
583 * Check the PIT one more time to verify that
584 * all TSC reads were stable wrt the PIT.
585 *
586 * This also guarantees serialization of the
587 * last cycle read ('d2') in pit_expect_msb.
588 */
589 if (!pit_verify_msb(0xfe - i))
590 break;
591 goto success;
592 }
593 }
594 pr_info("Fast TSC calibration failed\n");
595 return 0;
596
597success:
598 /*
599 * Ok, if we get here, then we've seen the
600 * MSB of the PIT decrement 'i' times, and the
601 * error has shrunk to less than 500 ppm.
602 *
603 * As a result, we can depend on there not being
604 * any odd delays anywhere, and the TSC reads are
605 * reliable (within the error).
606 *
607 * kHz = ticks / time-in-seconds / 1000;
608 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
609 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
610 */
611 delta *= PIT_TICK_RATE;
612 do_div(delta, i*256*1000);
613 pr_info("Fast TSC calibration using PIT\n");
614 return delta;
615}
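
/*
 * Worked example of the final conversion (illustrative numbers): if
 * i == 20 MSB steps were observed, the elapsed time is
 * 20 * 256 / PIT_TICK_RATE ~= 4.29ms. A 3 GHz TSC advances by about
 * 12,870,000 cycles in that time, and
 * 12,870,000 * 1,193,182 / (20 * 256 * 1000) ~= 3,000,000 kHz.
 */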
616
/**
 * native_calibrate_tsc - determine the TSC frequency via CPUID
 *
 * Return: TSC frequency in kHz, or 0 if it cannot be determined.
 */
621unsigned long native_calibrate_tsc(void)
622{
623 unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
624 unsigned int crystal_khz;
625
626 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
627 return 0;
628
629 if (boot_cpu_data.cpuid_level < 0x15)
630 return 0;
631
632 eax_denominator = ebx_numerator = ecx_hz = edx = 0;
633
634 /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
635 cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
636
637 if (ebx_numerator == 0 || eax_denominator == 0)
638 return 0;
639
640 crystal_khz = ecx_hz / 1000;
641
642 /*
643 * Denverton SoCs don't report crystal clock, and also don't support
644 * CPUID.0x16 for the calculation below, so hardcode the 25MHz crystal
645 * clock.
646 */
647 if (crystal_khz == 0 &&
648 boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_D)
649 crystal_khz = 25000;
650
651 /*
652 * TSC frequency reported directly by CPUID is a "hardware reported"
 * frequency and is the most accurate one we have so far. This
654 * is considered a known frequency.
655 */
656 if (crystal_khz != 0)
657 setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
658
659 /*
660 * Some Intel SoCs like Skylake and Kabylake don't report the crystal
661 * clock, but we can easily calculate it to a high degree of accuracy
662 * by considering the crystal ratio and the CPU speed.
663 */
664 if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= 0x16) {
665 unsigned int eax_base_mhz, ebx, ecx, edx;
666
667 cpuid(0x16, &eax_base_mhz, &ebx, &ecx, &edx);
668 crystal_khz = eax_base_mhz * 1000 *
669 eax_denominator / ebx_numerator;
670 }
671
672 if (crystal_khz == 0)
673 return 0;
674
 /*
 * For Atom SoCs the TSC is the only reliable clocksource.
 * Mark the TSC reliable so that no watchdog runs on it.
 */
679 if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT)
680 setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
681
682#ifdef CONFIG_X86_LOCAL_APIC
683 /*
684 * The local APIC appears to be fed by the core crystal clock
685 * (which sounds entirely sensible). We can set the global
686 * lapic_timer_period here to avoid having to calibrate the APIC
687 * timer later.
688 */
689 lapic_timer_period = crystal_khz * 1000 / HZ;
690#endif
691
692 return crystal_khz * ebx_numerator / eax_denominator;
693}
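
/*
 * Example of the CPUID.15H/16H math (numbers are illustrative, not
 * taken from any specific SKU): with a 24 MHz crystal (crystal_khz =
 * 24000) and a ratio of 288/2, the TSC runs at 24000 * 288 / 2 =
 * 3,456,000 kHz. When the crystal frequency is not enumerated, it is
 * reconstructed from the CPUID.16H base frequency instead, e.g. for a
 * 3456 MHz base: 3456 * 1000 * 2 / 288 = 24000 kHz.
 */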
694
695static unsigned long cpu_khz_from_cpuid(void)
696{
697 unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;
698
699 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
700 return 0;
701
702 if (boot_cpu_data.cpuid_level < 0x16)
703 return 0;
704
705 eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;
706
707 cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);
708
709 return eax_base_mhz * 1000;
710}
711
/*
 * Calibrate the CPU using the PIT, HPET and ACPI PM timer. These are
 * available later in boot, after ACPI has been initialized.
 */
716static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
717{
718 u64 tsc1, tsc2, delta, ref1, ref2;
719 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
720 unsigned long flags, latch, ms;
721 int hpet = is_hpet_enabled(), i, loopmin;
722
723 /*
 * Run 3 calibration loops to get the lowest frequency value
725 * (the best estimate). We use two different calibration modes
726 * here:
727 *
728 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
729 * load a timeout of 50ms. We read the time right after we
730 * started the timer and wait until the PIT count down reaches
731 * zero. In each wait loop iteration we read the TSC and check
732 * the delta to the previous read. We keep track of the min
733 * and max values of that delta. The delta is mostly defined
734 * by the IO time of the PIT access, so we can detect when
735 * any disturbance happened between the two reads. If the
736 * maximum time is significantly larger than the minimum time,
737 * then we discard the result and have another try.
738 *
739 * 2) Reference counter. If available we use the HPET or the
740 * PMTIMER as a reference to check the sanity of that value.
741 * We use separate TSC readouts and check inside of the
 * reference read for any possible disturbance. We discard
743 * disturbed values here as well. We do that around the PIT
744 * calibration delay loop as we have to wait for a certain
745 * amount of time anyway.
746 */
747
748 /* Preset PIT loop values */
749 latch = CAL_LATCH;
750 ms = CAL_MS;
751 loopmin = CAL_PIT_LOOPS;
752
753 for (i = 0; i < 3; i++) {
754 unsigned long tsc_pit_khz;
755
756 /*
757 * Read the start value and the reference count of
758 * hpet/pmtimer when available. Then do the PIT
759 * calibration, which will take at least 50ms, and
760 * read the end value.
761 */
762 local_irq_save(flags);
763 tsc1 = tsc_read_refs(&ref1, hpet);
764 tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
765 tsc2 = tsc_read_refs(&ref2, hpet);
766 local_irq_restore(flags);
767
768 /* Pick the lowest PIT TSC calibration so far */
769 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
770
771 /* hpet or pmtimer available ? */
772 if (ref1 == ref2)
773 continue;
774
 /* Check whether the sampling was disturbed */
776 if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
777 continue;
778
779 tsc2 = (tsc2 - tsc1) * 1000000LL;
780 if (hpet)
781 tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
782 else
783 tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);
784
785 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
786
787 /* Check the reference deviation */
788 delta = ((u64) tsc_pit_min) * 100;
789 do_div(delta, tsc_ref_min);
790
791 /*
792 * If both calibration results are inside a 10% window
793 * then we can be sure, that the calibration
794 * succeeded. We break out of the loop right away. We
795 * use the reference value, as it is more precise.
796 */
797 if (delta >= 90 && delta <= 110) {
798 pr_info("PIT calibration matches %s. %d loops\n",
799 hpet ? "HPET" : "PMTIMER", i + 1);
800 return tsc_ref_min;
801 }
802
803 /*
804 * Check whether PIT failed more than once. This
805 * happens in virtualized environments. We need to
806 * give the virtual PC a slightly longer timeframe for
807 * the HPET/PMTIMER to make the result precise.
808 */
809 if (i == 1 && tsc_pit_min == ULONG_MAX) {
810 latch = CAL2_LATCH;
811 ms = CAL2_MS;
812 loopmin = CAL2_PIT_LOOPS;
813 }
814 }
815
816 /*
817 * Now check the results.
818 */
819 if (tsc_pit_min == ULONG_MAX) {
820 /* PIT gave no useful value */
821 pr_warn("Unable to calibrate against PIT\n");
822
823 /* We don't have an alternative source, disable TSC */
824 if (!hpet && !ref1 && !ref2) {
825 pr_notice("No reference (HPET/PMTIMER) available\n");
826 return 0;
827 }
828
829 /* The alternative source failed as well, disable TSC */
830 if (tsc_ref_min == ULONG_MAX) {
831 pr_warn("HPET/PMTIMER calibration failed\n");
832 return 0;
833 }
834
835 /* Use the alternative source */
836 pr_info("using %s reference calibration\n",
837 hpet ? "HPET" : "PMTIMER");
838
839 return tsc_ref_min;
840 }
841
842 /* We don't have an alternative source, use the PIT calibration value */
843 if (!hpet && !ref1 && !ref2) {
844 pr_info("Using PIT calibration value\n");
845 return tsc_pit_min;
846 }
847
848 /* The alternative source failed, use the PIT calibration value */
849 if (tsc_ref_min == ULONG_MAX) {
850 pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
851 return tsc_pit_min;
852 }
853
 /*
 * The calibration values differ too much. When in doubt, we use
 * the PIT value, as there are known PMTIMERs around that run at
 * double speed. At least we let the user know:
 */
859 pr_warn("PIT calibration deviates from %s: %lu %lu\n",
860 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
861 pr_info("Using PIT calibration value\n");
862 return tsc_pit_min;
863}
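
/*
 * Example of the 10% cross-check above (assumed values): a PIT result
 * of 2,970,000 kHz against an HPET result of 3,000,000 kHz gives
 * delta = 2,970,000 * 100 / 3,000,000 = 99, which is within the
 * 90..110 window, so the (more precise) reference value is returned.
 */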
864
/**
 * native_calibrate_cpu_early - calibrate the CPU using methods available
 * early in boot (CPUID, MSR or a fast PIT calibration)
 */
868unsigned long native_calibrate_cpu_early(void)
869{
870 unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();
871
872 if (!fast_calibrate)
873 fast_calibrate = cpu_khz_from_msr();
874 if (!fast_calibrate) {
875 local_irq_save(flags);
876 fast_calibrate = quick_pit_calibrate();
877 local_irq_restore(flags);
878 }
879 return fast_calibrate;
880}
881
882
883/**
884 * native_calibrate_cpu - calibrate the cpu
885 */
886static unsigned long native_calibrate_cpu(void)
887{
888 unsigned long tsc_freq = native_calibrate_cpu_early();
889
890 if (!tsc_freq)
891 tsc_freq = pit_hpet_ptimer_calibrate_cpu();
892
893 return tsc_freq;
894}
895
896void recalibrate_cpu_khz(void)
897{
898#ifndef CONFIG_SMP
899 unsigned long cpu_khz_old = cpu_khz;
900
901 if (!boot_cpu_has(X86_FEATURE_TSC))
902 return;
903
904 cpu_khz = x86_platform.calibrate_cpu();
905 tsc_khz = x86_platform.calibrate_tsc();
906 if (tsc_khz == 0)
907 tsc_khz = cpu_khz;
908 else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
909 cpu_khz = tsc_khz;
910 cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
911 cpu_khz_old, cpu_khz);
912#endif
913}
914
915EXPORT_SYMBOL(recalibrate_cpu_khz);
916
917
918static unsigned long long cyc2ns_suspend;
919
920void tsc_save_sched_clock_state(void)
921{
922 if (!sched_clock_stable())
923 return;
924
925 cyc2ns_suspend = sched_clock();
926}
927
/*
 * Even on processors with an invariant TSC, the TSC gets reset in some of
 * the ACPI system sleep states. And on some systems the BIOS seems to
 * reinit the TSC to an arbitrary value (still synced across CPUs) during
 * resume from such sleep states. To cope with this, recompute the
 * cyc2ns_offset for each CPU so that sched_clock() continues from the
 * point where it left off before suspend.
 */
936void tsc_restore_sched_clock_state(void)
937{
938 unsigned long long offset;
939 unsigned long flags;
940 int cpu;
941
942 if (!sched_clock_stable())
943 return;
944
945 local_irq_save(flags);
946
947 /*
948 * We're coming out of suspend, there's no concurrency yet; don't
949 * bother being nice about the RCU stuff, just write to both
950 * data fields.
951 */
952
953 this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
954 this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);
955
956 offset = cyc2ns_suspend - sched_clock();
957
958 for_each_possible_cpu(cpu) {
959 per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
960 per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
961 }
962
963 local_irq_restore(flags);
964}
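
/*
 * Note on the offset math above: with cyc2ns_offset temporarily zeroed,
 * sched_clock() returns the raw post-resume clock. Setting
 * offset = cyc2ns_suspend - sched_clock() therefore makes the very next
 * sched_clock() read come out at (roughly) cyc2ns_suspend again, so time
 * appears to continue from where it stopped at suspend.
 */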
965
966#ifdef CONFIG_CPU_FREQ
967/*
 * Frequency scaling support. Adjust the TSC-based timer when the CPU
 * frequency changes.
970 *
971 * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
972 * as unstable and give up in those cases.
973 *
974 * Should fix up last_tsc too. Currently gettimeofday in the
975 * first tick after the change will be slightly wrong.
976 */
977
978static unsigned int ref_freq;
979static unsigned long loops_per_jiffy_ref;
980static unsigned long tsc_khz_ref;
981
982static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
983 void *data)
984{
985 struct cpufreq_freqs *freq = data;
986
987 if (num_online_cpus() > 1) {
988 mark_tsc_unstable("cpufreq changes on SMP");
989 return 0;
990 }
991
992 if (!ref_freq) {
993 ref_freq = freq->old;
994 loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
995 tsc_khz_ref = tsc_khz;
996 }
997
998 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
999 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
1000 boot_cpu_data.loops_per_jiffy =
1001 cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
1002
1003 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
1004 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
1005 mark_tsc_unstable("cpufreq changes");
1006
1007 set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
1008 }
1009
1010 return 0;
1011}
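
/*
 * Illustrative scaling example (made-up frequencies): if the CPU was
 * first seen at ref_freq = 2,000,000 kHz with tsc_khz_ref = 2,000,000
 * and cpufreq switches it to 1,000,000 kHz, cpufreq_scale() yields
 * tsc_khz = 2,000,000 * 1,000,000 / 2,000,000 = 1,000,000 and the
 * cyc2ns scale is recomputed accordingly. This only applies to CPUs
 * without X86_FEATURE_CONSTANT_TSC; see cpufreq_register_tsc_scaling().
 */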
1012
1013static struct notifier_block time_cpufreq_notifier_block = {
1014 .notifier_call = time_cpufreq_notifier
1015};
1016
1017static int __init cpufreq_register_tsc_scaling(void)
1018{
1019 if (!boot_cpu_has(X86_FEATURE_TSC))
1020 return 0;
1021 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1022 return 0;
1023 cpufreq_register_notifier(&time_cpufreq_notifier_block,
1024 CPUFREQ_TRANSITION_NOTIFIER);
1025 return 0;
1026}
1027
1028core_initcall(cpufreq_register_tsc_scaling);
1029
1030#endif /* CONFIG_CPU_FREQ */
1031
1032#define ART_CPUID_LEAF (0x15)
1033#define ART_MIN_DENOMINATOR (1)
1034
1035
1036/*
1037 * If ART is present detect the numerator:denominator to convert to TSC
1038 */
1039static void __init detect_art(void)
1040{
1041 unsigned int unused[2];
1042
1043 if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
1044 return;
1045
 /*
 * Don't enable ART in a VM. Non-stop TSC and TSC_ADJUST are required,
 * and TSC counter resets must not occur asynchronously.
 */
1050 if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
1051 !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
1052 !boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
1053 tsc_async_resets)
1054 return;
1055
1056 cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
1057 &art_to_tsc_numerator, unused, unused+1);
1058
1059 if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
1060 return;
1061
1062 rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);
1063
1064 /* Make this sticky over multiple CPU init calls */
1065 setup_force_cpu_cap(X86_FEATURE_ART);
1066}
1067
1068
1069/* clocksource code */
1070
1071static void tsc_resume(struct clocksource *cs)
1072{
1073 tsc_verify_tsc_adjust(true);
1074}
1075
1076/*
1077 * We used to compare the TSC to the cycle_last value in the clocksource
1078 * structure to avoid a nasty time-warp. This can be observed in a
1079 * very small window right after one CPU updated cycle_last under
1080 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
1081 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
1083 * that case it results in a forward time jump in the range of hours
1084 * due to the unsigned delta calculation of the time keeping core
1085 * code, which is necessary to support wrapping clocksources like pm
1086 * timer.
1087 *
 * This sanity check is now done in the core timekeeping code by
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
1091 */
1092static u64 read_tsc(struct clocksource *cs)
1093{
1094 return (u64)rdtsc_ordered();
1095}
1096
1097static void tsc_cs_mark_unstable(struct clocksource *cs)
1098{
1099 if (tsc_unstable)
1100 return;
1101
1102 tsc_unstable = 1;
1103 if (using_native_sched_clock())
1104 clear_sched_clock_stable();
1105 disable_sched_clock_irqtime();
1106 pr_info("Marking TSC unstable due to clocksource watchdog\n");
1107}
1108
1109static void tsc_cs_tick_stable(struct clocksource *cs)
1110{
1111 if (tsc_unstable)
1112 return;
1113
1114 if (using_native_sched_clock())
1115 sched_clock_tick_stable();
1116}
1117
1118static int tsc_cs_enable(struct clocksource *cs)
1119{
1120 vclocks_set_used(VDSO_CLOCKMODE_TSC);
1121 return 0;
1122}
1123
1124/*
1125 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
1126 */
1127static struct clocksource clocksource_tsc_early = {
1128 .name = "tsc-early",
1129 .rating = 299,
1130 .read = read_tsc,
1131 .mask = CLOCKSOURCE_MASK(64),
1132 .flags = CLOCK_SOURCE_IS_CONTINUOUS |
1133 CLOCK_SOURCE_MUST_VERIFY,
1134 .vdso_clock_mode = VDSO_CLOCKMODE_TSC,
1135 .enable = tsc_cs_enable,
1136 .resume = tsc_resume,
1137 .mark_unstable = tsc_cs_mark_unstable,
1138 .tick_stable = tsc_cs_tick_stable,
1139 .list = LIST_HEAD_INIT(clocksource_tsc_early.list),
1140};
1141
/*
 * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
 * this one will immediately take over. We only register it if the TSC
 * has been found good.
 */
1147static struct clocksource clocksource_tsc = {
1148 .name = "tsc",
1149 .rating = 300,
1150 .read = read_tsc,
1151 .mask = CLOCKSOURCE_MASK(64),
1152 .flags = CLOCK_SOURCE_IS_CONTINUOUS |
1153 CLOCK_SOURCE_VALID_FOR_HRES |
1154 CLOCK_SOURCE_MUST_VERIFY,
1155 .vdso_clock_mode = VDSO_CLOCKMODE_TSC,
1156 .enable = tsc_cs_enable,
1157 .resume = tsc_resume,
1158 .mark_unstable = tsc_cs_mark_unstable,
1159 .tick_stable = tsc_cs_tick_stable,
1160 .list = LIST_HEAD_INIT(clocksource_tsc.list),
1161};
1162
1163void mark_tsc_unstable(char *reason)
1164{
1165 if (tsc_unstable)
1166 return;
1167
1168 tsc_unstable = 1;
1169 if (using_native_sched_clock())
1170 clear_sched_clock_stable();
1171 disable_sched_clock_irqtime();
1172 pr_info("Marking TSC unstable due to %s\n", reason);
1173
1174 clocksource_mark_unstable(&clocksource_tsc_early);
1175 clocksource_mark_unstable(&clocksource_tsc);
1176}
1177
1178EXPORT_SYMBOL_GPL(mark_tsc_unstable);
1179
1180static void __init check_system_tsc_reliable(void)
1181{
1182#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
1183 if (is_geode_lx()) {
1184 /* RTSC counts during suspend */
1185#define RTSC_SUSP 0x100
1186 unsigned long res_low, res_high;
1187
1188 rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
1189 /* Geode_LX - the OLPC CPU has a very reliable TSC */
1190 if (res_low & RTSC_SUSP)
1191 tsc_clocksource_reliable = 1;
1192 }
1193#endif
1194 if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
1195 tsc_clocksource_reliable = 1;
1196}
1197
/*
 * Make an educated guess whether the TSC is trustworthy and synchronized
 * across all CPUs.
 */
1202int unsynchronized_tsc(void)
1203{
1204 if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
1205 return 1;
1206
1207#ifdef CONFIG_SMP
1208 if (apic_is_clustered_box())
1209 return 1;
1210#endif
1211
1212 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1213 return 0;
1214
1215 if (tsc_clocksource_reliable)
1216 return 0;
1217 /*
1218 * Intel systems are normally all synchronized.
1219 * Exceptions must mark TSC as unstable:
1220 */
1221 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1222 /* assume multi socket systems are not synchronized: */
1223 if (num_possible_cpus() > 1)
1224 return 1;
1225 }
1226
1227 return 0;
1228}
1229
1230/*
1231 * Convert ART to TSC given numerator/denominator found in detect_art()
1232 */
1233struct system_counterval_t convert_art_to_tsc(u64 art)
1234{
1235 u64 tmp, res, rem;
1236
1237 rem = do_div(art, art_to_tsc_denominator);
1238
1239 res = art * art_to_tsc_numerator;
1240 tmp = rem * art_to_tsc_numerator;
1241
1242 do_div(tmp, art_to_tsc_denominator);
1243 res += tmp + art_to_tsc_offset;
1244
1245 return (struct system_counterval_t) {.cs = art_related_clocksource,
1246 .cycles = res};
1247}
1248EXPORT_SYMBOL(convert_art_to_tsc);
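
/*
 * The split multiply/divide above is simply (art * numerator) /
 * denominator computed without overflowing 64 bits. For example, with
 * an assumed ratio of 400/3 and offset 0, an ART value of 1,000,000,003
 * is split into 333,333,334 * 400 + (1 * 400) / 3, giving
 * 133,333,333,733 TSC cycles, instead of risking overflow in the
 * intermediate art * 400 product for very large ART values.
 */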
1249
1250/**
1251 * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC.
 * @art_ns: ART (Always Running Timer) value in units of nanoseconds
1253 *
1254 * PTM requires all timestamps to be in units of nanoseconds. When user
1255 * software requests a cross-timestamp, this function converts system timestamp
1256 * to TSC.
1257 *
1258 * This is valid when CPU feature flag X86_FEATURE_TSC_KNOWN_FREQ is set
1259 * indicating the tsc_khz is derived from CPUID[15H]. Drivers should check
1260 * that this flag is set before conversion to TSC is attempted.
1261 *
1262 * Return:
1263 * struct system_counterval_t - system counter value with the pointer to the
1264 * corresponding clocksource
1265 * @cycles: System counter value
1266 * @cs: Clocksource corresponding to system counter value. Used
 * by timekeeping code to verify comparability of two cycle
1268 * values.
1269 */
1270
1271struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns)
1272{
1273 u64 tmp, res, rem;
1274
1275 rem = do_div(art_ns, USEC_PER_SEC);
1276
1277 res = art_ns * tsc_khz;
1278 tmp = rem * tsc_khz;
1279
1280 do_div(tmp, USEC_PER_SEC);
1281 res += tmp;
1282
1283 return (struct system_counterval_t) { .cs = art_related_clocksource,
1284 .cycles = res};
1285}
1286EXPORT_SYMBOL(convert_art_ns_to_tsc);
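
/*
 * Worked example (illustrative): tsc_khz is cycles per millisecond and
 * art_ns / USEC_PER_SEC is the elapsed time in milliseconds, so for
 * art_ns = 2,500,000,000 (2.5s) and tsc_khz = 3,000,000 the result is
 * 2500 * 3,000,000 = 7,500,000,000 TSC cycles; the remainder term
 * handles the sub-millisecond part the same way.
 */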
1287
1288
1289static void tsc_refine_calibration_work(struct work_struct *work);
1290static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
1291/**
1292 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work: ignored.
 *
 * This function uses delayed work over a period of a
 * second to further refine the TSC freq value. Since this is
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration deviates by more than 1% from the fast
 * early calibration, we throw out the new calibration and use the
 * early calibration.
1304 */
1305static void tsc_refine_calibration_work(struct work_struct *work)
1306{
1307 static u64 tsc_start = ULLONG_MAX, ref_start;
1308 static int hpet;
1309 u64 tsc_stop, ref_stop, delta;
1310 unsigned long freq;
1311 int cpu;
1312
1313 /* Don't bother refining TSC on unstable systems */
1314 if (tsc_unstable)
1315 goto unreg;
1316
1317 /*
1318 * Since the work is started early in boot, we may be
1319 * delayed the first time we expire. So set the workqueue
1320 * again once we know timers are working.
1321 */
1322 if (tsc_start == ULLONG_MAX) {
1323restart:
1324 /*
1325 * Only set hpet once, to avoid mixing hardware
1326 * if the hpet becomes enabled later.
1327 */
1328 hpet = is_hpet_enabled();
1329 tsc_start = tsc_read_refs(&ref_start, hpet);
1330 schedule_delayed_work(&tsc_irqwork, HZ);
1331 return;
1332 }
1333
1334 tsc_stop = tsc_read_refs(&ref_stop, hpet);
1335
1336 /* hpet or pmtimer available ? */
1337 if (ref_start == ref_stop)
1338 goto out;
1339
 /* Check whether the sampling was disturbed */
1341 if (tsc_stop == ULLONG_MAX)
1342 goto restart;
1343
1344 delta = tsc_stop - tsc_start;
1345 delta *= 1000000LL;
1346 if (hpet)
1347 freq = calc_hpet_ref(delta, ref_start, ref_stop);
1348 else
1349 freq = calc_pmtimer_ref(delta, ref_start, ref_stop);
1350
1351 /* Make sure we're within 1% */
1352 if (abs(tsc_khz - freq) > tsc_khz/100)
1353 goto out;
1354
1355 tsc_khz = freq;
1356 pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
1357 (unsigned long)tsc_khz / 1000,
1358 (unsigned long)tsc_khz % 1000);
1359
1360 /* Inform the TSC deadline clockevent devices about the recalibration */
1361 lapic_update_tsc_freq();
1362
1363 /* Update the sched_clock() rate to match the clocksource one */
1364 for_each_possible_cpu(cpu)
1365 set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
1366
1367out:
1368 if (tsc_unstable)
1369 goto unreg;
1370
1371 if (boot_cpu_has(X86_FEATURE_ART))
1372 art_related_clocksource = &clocksource_tsc;
1373 clocksource_register_khz(&clocksource_tsc, tsc_khz);
1374unreg:
1375 clocksource_unregister(&clocksource_tsc_early);
1376}
1377
1378
1379static int __init init_tsc_clocksource(void)
1380{
1381 if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
1382 return 0;
1383
1384 if (tsc_unstable)
1385 goto unreg;
1386
1387 if (tsc_clocksource_reliable || no_tsc_watchdog)
1388 clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1389
1390 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
1391 clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
1392
1393 /*
1394 * When TSC frequency is known (retrieved via MSR or CPUID), we skip
1395 * the refined calibration and directly register it as a clocksource.
1396 */
1397 if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
1398 if (boot_cpu_has(X86_FEATURE_ART))
1399 art_related_clocksource = &clocksource_tsc;
1400 clocksource_register_khz(&clocksource_tsc, tsc_khz);
1401unreg:
1402 clocksource_unregister(&clocksource_tsc_early);
1403 return 0;
1404 }
1405
1406 schedule_delayed_work(&tsc_irqwork, 0);
1407 return 0;
1408}
/*
 * We use device_initcall here to ensure we run after the HPET
 * is fully initialized, which may occur at fs_initcall time.
 */
1413device_initcall(init_tsc_clocksource);
1414
1415static bool __init determine_cpu_tsc_frequencies(bool early)
1416{
1417 /* Make sure that cpu and tsc are not already calibrated */
1418 WARN_ON(cpu_khz || tsc_khz);
1419
1420 if (early) {
1421 cpu_khz = x86_platform.calibrate_cpu();
1422 if (tsc_early_khz)
1423 tsc_khz = tsc_early_khz;
1424 else
1425 tsc_khz = x86_platform.calibrate_tsc();
1426 } else {
1427 /* We should not be here with non-native cpu calibration */
1428 WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
1429 cpu_khz = pit_hpet_ptimer_calibrate_cpu();
1430 }
1431
 /*
 * Trust a non-zero tsc_khz as authoritative, and use it to sanity
 * check cpu_khz, which will be off if the system timer is off.
 */
1437 if (tsc_khz == 0)
1438 tsc_khz = cpu_khz;
1439 else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
1440 cpu_khz = tsc_khz;
1441
1442 if (tsc_khz == 0)
1443 return false;
1444
1445 pr_info("Detected %lu.%03lu MHz processor\n",
1446 (unsigned long)cpu_khz / KHZ,
1447 (unsigned long)cpu_khz % KHZ);
1448
1449 if (cpu_khz != tsc_khz) {
  pr_info("Detected %lu.%03lu MHz TSC\n",
1451 (unsigned long)tsc_khz / KHZ,
1452 (unsigned long)tsc_khz % KHZ);
1453 }
1454 return true;
1455}
1456
1457static unsigned long __init get_loops_per_jiffy(void)
1458{
1459 u64 lpj = (u64)tsc_khz * KHZ;
1460
1461 do_div(lpj, HZ);
1462 return lpj;
1463}
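
/*
 * Example (illustrative): with tsc_khz = 3,000,000 and HZ = 250 this
 * gives lpj = 3,000,000 * 1000 / 250 = 12,000,000, i.e. the number of
 * TSC cycles per timer tick, which matches what the TSC-based delay
 * loop uses loops_per_jiffy for.
 */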
1464
1465static void __init tsc_enable_sched_clock(void)
1466{
1467 /* Sanitize TSC ADJUST before cyc2ns gets initialized */
1468 tsc_store_and_check_tsc_adjust(true);
1469 cyc2ns_init_boot_cpu();
1470 static_branch_enable(&__use_tsc);
1471}
1472
1473void __init tsc_early_init(void)
1474{
1475 if (!boot_cpu_has(X86_FEATURE_TSC))
1476 return;
1477 /* Don't change UV TSC multi-chassis synchronization */
1478 if (is_early_uv_system())
1479 return;
1480 if (!determine_cpu_tsc_frequencies(true))
1481 return;
1482 loops_per_jiffy = get_loops_per_jiffy();
1483
1484 tsc_enable_sched_clock();
1485}
1486
1487void __init tsc_init(void)
1488{
1489 /*
1490 * native_calibrate_cpu_early can only calibrate using methods that are
1491 * available early in boot.
1492 */
1493 if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
1494 x86_platform.calibrate_cpu = native_calibrate_cpu;
1495
1496 if (!boot_cpu_has(X86_FEATURE_TSC)) {
1497 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1498 return;
1499 }
1500
1501 if (!tsc_khz) {
1502 /* We failed to determine frequencies earlier, try again */
1503 if (!determine_cpu_tsc_frequencies(false)) {
1504 mark_tsc_unstable("could not calculate TSC khz");
1505 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1506 return;
1507 }
1508 tsc_enable_sched_clock();
1509 }
1510
1511 cyc2ns_init_secondary_cpus();
1512
1513 if (!no_sched_irq_time)
1514 enable_sched_clock_irqtime();
1515
1516 lpj_fine = get_loops_per_jiffy();
1517 use_tsc_delay();
1518
1519 check_system_tsc_reliable();
1520
1521 if (unsynchronized_tsc()) {
1522 mark_tsc_unstable("TSCs unsynchronized");
1523 return;
1524 }
1525
1526 if (tsc_clocksource_reliable || no_tsc_watchdog)
1527 clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1528
1529 clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
1530 detect_art();
1531}
1532
1533#ifdef CONFIG_SMP
1534/*
1535 * If we have a constant TSC and are using the TSC for the delay loop,
1536 * we can skip clock calibration if another cpu in the same socket has already
1537 * been calibrated. This assumes that CONSTANT_TSC applies to all
1538 * cpus in the socket - this should be a safe assumption.
1539 */
1540unsigned long calibrate_delay_is_known(void)
1541{
1542 int sibling, cpu = smp_processor_id();
1543 int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
1544 const struct cpumask *mask = topology_core_cpumask(cpu);
1545
1546 if (!constant_tsc || !mask)
1547 return 0;
1548
1549 sibling = cpumask_any_but(mask, cpu);
1550 if (sibling < nr_cpu_ids)
1551 return cpu_data(sibling).loops_per_jiffy;
1552 return 0;
1553}
1554#endif