v3.1
  1#include <linux/kernel.h>
  2#include <linux/sched.h>
  3#include <linux/init.h>
  4#include <linux/module.h>
  5#include <linux/timer.h>
  6#include <linux/acpi_pmtmr.h>
  7#include <linux/cpufreq.h>
  8#include <linux/delay.h>
  9#include <linux/clocksource.h>
 10#include <linux/percpu.h>
 11#include <linux/timex.h>
 12
 13#include <asm/hpet.h>
 14#include <asm/timer.h>
 15#include <asm/vgtod.h>
 16#include <asm/time.h>
 17#include <asm/delay.h>
 18#include <asm/hypervisor.h>
 19#include <asm/nmi.h>
 20#include <asm/x86_init.h>
 21
 22unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 23EXPORT_SYMBOL(cpu_khz);
 24
 25unsigned int __read_mostly tsc_khz;
 26EXPORT_SYMBOL(tsc_khz);
 27
 28/*
 29 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 30 */
 31static int __read_mostly tsc_unstable;
 32
 33/* native_sched_clock() is called before tsc_init(), so
 34   we must start with the TSC soft disabled to prevent
 35   erroneous rdtsc usage on !cpu_has_tsc processors */
 36static int __read_mostly tsc_disabled = -1;
 37
 38static int tsc_clocksource_reliable;
 39/*
 40 * Scheduler clock - returns current time in nanosec units.
 41 */
 42u64 native_sched_clock(void)
 43{
 44	u64 this_offset;
 45
 46	/*
 47	 * Fall back to jiffies if there's no TSC available:
 48	 * ( But note that we still use it if the TSC is marked
 49	 *   unstable. We do this because unlike Time Of Day,
 50	 *   the scheduler clock tolerates small errors and it's
 51	 *   very important for it to be as fast as the platform
 52	 *   can achieve it. )
 53	 */
 54	if (unlikely(tsc_disabled)) {
 55		/* No locking but a rare wrong value is not a big deal: */
 56		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 57	}
 58
 59	/* read the Time Stamp Counter: */
 60	rdtscll(this_offset);
 61
 62	/* return the value in ns */
 63	return __cycles_2_ns(this_offset);
 64}
 65
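A short aside on the jiffies fallback above: a minimal stand-alone sketch
(ordinary user-space C with an assumed HZ of 1000; not kernel code) showing
how the expression scales elapsed jiffies to nanoseconds.

    #include <stdio.h>

    #define HZ 1000   /* assumed config value, for illustration only */

    int main(void)
    {
            /* 250 elapsed jiffies at HZ=1000 -> 250,000,000 ns (250 ms) */
            unsigned long long jiffies_delta = 250;
            unsigned long long ns = jiffies_delta * (1000000000ULL / HZ);

            printf("%llu jiffies -> %llu ns\n", jiffies_delta, ns);
            return 0;
    }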
 66/* We need to define a real function for sched_clock, to override the
 67   weak default version */
 68#ifdef CONFIG_PARAVIRT
 69unsigned long long sched_clock(void)
 70{
 71	return paravirt_sched_clock();
 72}
 73#else
 74unsigned long long
 75sched_clock(void) __attribute__((alias("native_sched_clock")));
 76#endif
 77
 78int check_tsc_unstable(void)
 79{
 80	return tsc_unstable;
 81}
 82EXPORT_SYMBOL_GPL(check_tsc_unstable);
 83
 84#ifdef CONFIG_X86_TSC
 85int __init notsc_setup(char *str)
 86{
 87	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
 88			"cannot disable TSC completely.\n");
 89	tsc_disabled = 1;
 90	return 1;
 91}
 92#else
 93/*
 94 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 95 * in cpu/common.c
 96 */
 97int __init notsc_setup(char *str)
 98{
 99	setup_clear_cpu_cap(X86_FEATURE_TSC);
100	return 1;
101}
102#endif
103
104__setup("notsc", notsc_setup);
105
106static int no_sched_irq_time;
107
108static int __init tsc_setup(char *str)
109{
110	if (!strcmp(str, "reliable"))
111		tsc_clocksource_reliable = 1;
112	if (!strncmp(str, "noirqtime", 9))
113		no_sched_irq_time = 1;
114	return 1;
115}
116
117__setup("tsc=", tsc_setup);
118
119#define MAX_RETRIES     5
120#define SMI_THRESHOLD   50000
121
122/*
123 * Read TSC and the reference counters. Take care of SMI disturbance
124 */
125static u64 tsc_read_refs(u64 *p, int hpet)
126{
127	u64 t1, t2;
128	int i;
129
130	for (i = 0; i < MAX_RETRIES; i++) {
131		t1 = get_cycles();
132		if (hpet)
133			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
134		else
135			*p = acpi_pm_read_early();
136		t2 = get_cycles();
137		if ((t2 - t1) < SMI_THRESHOLD)
138			return t2;
139	}
140	return ULLONG_MAX;
141}
142
143/*
144 * Calculate the TSC frequency from HPET reference
145 */
146static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
147{
148	u64 tmp;
149
150	if (hpet2 < hpet1)
151		hpet2 += 0x100000000ULL;
152	hpet2 -= hpet1;
153	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
154	do_div(tmp, 1000000);
155	do_div(deltatsc, tmp);
156
157	return (unsigned long) deltatsc;
158}
159
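To make the units in calc_hpet_ref() concrete: HPET_PERIOD is the tick
period in femtoseconds, so ticks * period / 10^6 is the elapsed time in
nanoseconds, and because the caller pre-multiplies the TSC delta by 10^6,
the final division yields kHz. A hedged stand-alone sketch with invented
but plausible numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long hpet_ticks = 715909;    /* ~50 ms at 14.31818 MHz */
            unsigned long long period_fs  = 69841279;  /* fs per HPET tick */
            unsigned long long tsc_cycles = 100000000; /* a 2 GHz TSC over 50 ms */

            /* elapsed nanoseconds: ticks * fs-per-tick / 10^6 */
            unsigned long long ns = hpet_ticks * period_fs / 1000000;

            /* the TSC delta is pre-scaled by 10^6, so the quotient is kHz */
            unsigned long long khz = tsc_cycles * 1000000 / ns;

            printf("elapsed ~%llu ns, TSC ~%llu kHz\n", ns, khz);
            return 0;
    }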
160/*
161 * Calculate the TSC frequency from PMTimer reference
162 */
163static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
164{
165	u64 tmp;
166
167	if (!pm1 && !pm2)
168		return ULONG_MAX;
169
170	if (pm2 < pm1)
171		pm2 += (u64)ACPI_PM_OVRRUN;
172	pm2 -= pm1;
173	tmp = pm2 * 1000000000LL;
174	do_div(tmp, PMTMR_TICKS_PER_SEC);
175	do_div(deltatsc, tmp);
176
177	return (unsigned long) deltatsc;
178}
179
180#define CAL_MS		10
181#define CAL_LATCH	(CLOCK_TICK_RATE / (1000 / CAL_MS))
182#define CAL_PIT_LOOPS	1000
183
184#define CAL2_MS		50
185#define CAL2_LATCH	(CLOCK_TICK_RATE / (1000 / CAL2_MS))
186#define CAL2_PIT_LOOPS	5000
187
188
189/*
190 * Try to calibrate the TSC against the Programmable
191 * Interrupt Timer and return the frequency of the TSC
192 * in kHz.
193 *
194 * Return ULONG_MAX on failure to calibrate.
195 */
196static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
197{
198	u64 tsc, t1, t2, delta;
199	unsigned long tscmin, tscmax;
200	int pitcnt;
201
202	/* Set the Gate high, disable speaker */
203	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
204
205	/*
206	 * Setup CTC channel 2 for mode 0 (interrupt on terminal
207	 * count mode), binary count. Set the latch register to 50ms
208	 * (LSB then MSB) to begin countdown.
209	 */
210	outb(0xb0, 0x43);
211	outb(latch & 0xff, 0x42);
212	outb(latch >> 8, 0x42);
213
214	tsc = t1 = t2 = get_cycles();
215
216	pitcnt = 0;
217	tscmax = 0;
218	tscmin = ULONG_MAX;
219	while ((inb(0x61) & 0x20) == 0) {
220		t2 = get_cycles();
221		delta = t2 - tsc;
222		tsc = t2;
223		if ((unsigned long) delta < tscmin)
224			tscmin = (unsigned int) delta;
225		if ((unsigned long) delta > tscmax)
226			tscmax = (unsigned int) delta;
227		pitcnt++;
228	}
229
230	/*
231	 * Sanity checks:
232	 *
233	 * If we were not able to read the PIT more than loopmin
234	 * times, then we have been hit by a massive SMI
235	 *
236	 * If the maximum is 10 times larger than the minimum,
237	 * then we got hit by an SMI as well.
238	 */
239	if (pitcnt < loopmin || tscmax > 10 * tscmin)
240		return ULONG_MAX;
241
242	/* Calculate the PIT value */
243	delta = t2 - t1;
244	do_div(delta, ms);
245	return delta;
246}
247
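The closing do_div(delta, ms) works because cycles per millisecond is
numerically kHz. A tiny stand-alone sketch with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long delta = 100000000ULL;  /* TSC cycles elapsed */
            unsigned long ms = 50;                    /* over the 50 ms countdown */

            /* 100,000,000 cycles / 50 ms = 2,000,000 kHz, i.e. a 2 GHz TSC */
            printf("~%llu kHz\n", delta / ms);
            return 0;
    }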
248/*
249 * This reads the current MSB of the PIT counter, and
250 * checks if we are running on sufficiently fast and
251 * non-virtualized hardware.
252 *
253 * Our expectations are:
254 *
255 *  - the PIT is running at roughly 1.19MHz
256 *
257 *  - each IO is going to take about 1us on real hardware,
258 *    but we allow it to be much faster (by a factor of 10) or
259 *    _slightly_ slower (ie we allow up to a 2us read+counter
260 *    update - anything else implies an unacceptably slow CPU
261 *    or PIT for the fast calibration to work).
262 *
263 *  - with 256 PIT ticks to read the value, we have 214us to
264 *    see the same MSB (and overhead like doing a single TSC
265 *    read per MSB value etc).
266 *
267 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
268 *    them each to take about a microsecond on real hardware.
269 *    So we expect a count value of around 100. But we'll be
270 *    generous, and accept anything over 50.
271 *
272 *  - if the PIT is stuck, and we see *many* more reads, we
273 *    return early (and the next caller of pit_expect_msb()
274 *    will then consider it a failure when they don't see the
275 *    next expected value).
276 *
277 * These expectations mean that we know that we have seen the
278 * transition from one expected value to another with a fairly
279 * high accuracy, and we didn't miss any events. We can thus
280 * use the TSC value at the transitions to calculate a pretty
281 * good value for the TSC frequency.
282 */
283static inline int pit_verify_msb(unsigned char val)
284{
285	/* Ignore LSB */
286	inb(0x42);
287	return inb(0x42) == val;
288}
289
290static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
291{
292	int count;
293	u64 tsc = 0;
294
295	for (count = 0; count < 50000; count++) {
296		if (!pit_verify_msb(val))
297			break;
298		tsc = get_cycles();
299	}
300	*deltap = get_cycles() - tsc;
301	*tscp = tsc;
302
303	/*
304	 * We require _some_ success, but the quality control
305	 * will be based on the error terms on the TSC values.
306	 */
307	return count > 5;
308}
309
310/*
311 * How many MSB values do we want to see? We aim for
312 * a maximum error rate of 500ppm (in practice the
313 * real error is much smaller), but refuse to spend
314 * more than 25ms on it.
315 */
316#define MAX_QUICK_PIT_MS 25
317#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
318
319static unsigned long quick_pit_calibrate(void)
320{
321	int i;
322	u64 tsc, delta;
323	unsigned long d1, d2;
324
325	/* Set the Gate high, disable speaker */
326	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
327
328	/*
329	 * Counter 2, mode 0 (one-shot), binary count
330	 *
331	 * NOTE! Mode 2 decrements by two (and then the
332	 * output is flipped each time, giving the same
333	 * final output frequency as a decrement-by-one),
334	 * so mode 0 is much better when looking at the
335	 * individual counts.
336	 */
337	outb(0xb0, 0x43);
338
339	/* Start at 0xffff */
340	outb(0xff, 0x42);
341	outb(0xff, 0x42);
342
343	/*
344	 * The PIT starts counting at the next edge, so we
345	 * need to delay for a microsecond. The easiest way
346	 * to do that is to just read back the 16-bit counter
347	 * once from the PIT.
348	 */
349	pit_verify_msb(0);
350
351	if (pit_expect_msb(0xff, &tsc, &d1)) {
352		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
353			if (!pit_expect_msb(0xff-i, &delta, &d2))
354				break;
355
356			/*
357			 * Iterate until the error is less than 500 ppm
358			 */
359			delta -= tsc;
360			if (d1+d2 >= delta >> 11)
361				continue;
362
363			/*
364			 * Check the PIT one more time to verify that
365			 * all TSC reads were stable wrt the PIT.
366			 *
367			 * This also guarantees serialization of the
368			 * last cycle read ('d2') in pit_expect_msb.
369			 */
370			if (!pit_verify_msb(0xfe - i))
371				break;
372			goto success;
373		}
374	}
375	printk("Fast TSC calibration failed\n");
376	return 0;
377
378success:
379	/*
380	 * Ok, if we get here, then we've seen the
381	 * MSB of the PIT decrement 'i' times, and the
382	 * error has shrunk to less than 500 ppm.
383	 *
384	 * As a result, we can depend on there not being
385	 * any odd delays anywhere, and the TSC reads are
386	 * reliable (within the error). We also adjust the
387	 * delta to the middle of the error bars, just
388	 * because it looks nicer.
389	 *
390	 * kHz = ticks / time-in-seconds / 1000;
391	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
392	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
393	 */
394	delta += (long)(d2 - d1)/2;
395	delta *= PIT_TICK_RATE;
396	do_div(delta, i*256*1000);
397	printk("Fast TSC calibration using PIT\n");
398	return delta;
399}
400
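Worked numbers for the success-path formula above (a hedged sketch; the
values are invented for illustration): each MSB step is 256 PIT ticks, so
after i steps the elapsed time is i * 256 / PIT_TICK_RATE seconds.

    #include <stdio.h>

    #define PIT_TICK_RATE 1193182UL   /* PIT input clock in Hz */

    int main(void)
    {
            unsigned long long delta = 50205000; /* TSC cycles over i MSB steps */
            unsigned long i = 117;               /* MSB decrements observed */

            /* kHz = (tsc_delta * PIT_TICK_RATE) / (i * 256 * 1000) */
            unsigned long long khz = delta * PIT_TICK_RATE / (i * 256 * 1000);

            printf("~%llu kHz\n", khz);          /* ~2,000,000 kHz, a 2 GHz TSC */
            return 0;
    }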
401/**
402 * native_calibrate_tsc - calibrate the tsc on boot
403 */
404unsigned long native_calibrate_tsc(void)
405{
406	u64 tsc1, tsc2, delta, ref1, ref2;
407	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
408	unsigned long flags, latch, ms, fast_calibrate;
409	int hpet = is_hpet_enabled(), i, loopmin;
410
411	local_irq_save(flags);
412	fast_calibrate = quick_pit_calibrate();
413	local_irq_restore(flags);
414	if (fast_calibrate)
415		return fast_calibrate;
416
417	/*
418	 * Run 5 calibration loops to get the lowest frequency value
419	 * (the best estimate). We use two different calibration modes
420	 * here:
421	 *
422	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
423	 * load a timeout of 50ms. We read the time right after we
424	 * started the timer and wait until the PIT count down reaches
425	 * zero. In each wait loop iteration we read the TSC and check
426	 * the delta to the previous read. We keep track of the min
427	 * and max values of that delta. The delta is mostly defined
428	 * by the IO time of the PIT access, so we can detect when an
429	 * SMI/SMM disturbance happened between the two reads. If the
430	 * maximum time is significantly larger than the minimum time,
431	 * then we discard the result and have another try.
432	 *
433	 * 2) Reference counter. If available we use the HPET or the
434	 * PMTIMER as a reference to check the sanity of that value.
435	 * We use separate TSC readouts and check inside of the
436	 * reference read for an SMI/SMM disturbance. We discard
437	 * disturbed values here as well. We do that around the PIT
438	 * calibration delay loop as we have to wait for a certain
439	 * amount of time anyway.
440	 */
441
442	/* Preset PIT loop values */
443	latch = CAL_LATCH;
444	ms = CAL_MS;
445	loopmin = CAL_PIT_LOOPS;
446
447	for (i = 0; i < 3; i++) {
448		unsigned long tsc_pit_khz;
449
450		/*
451		 * Read the start value and the reference count of
452		 * hpet/pmtimer when available. Then do the PIT
453		 * calibration, which will take at least 50ms, and
454		 * read the end value.
455		 */
456		local_irq_save(flags);
457		tsc1 = tsc_read_refs(&ref1, hpet);
458		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
459		tsc2 = tsc_read_refs(&ref2, hpet);
460		local_irq_restore(flags);
461
462		/* Pick the lowest PIT TSC calibration so far */
463		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
464
465		/* hpet or pmtimer available ? */
466		if (ref1 == ref2)
467			continue;
468
469		/* Check, whether the sampling was disturbed by an SMI */
470		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
471			continue;
472
473		tsc2 = (tsc2 - tsc1) * 1000000LL;
474		if (hpet)
475			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
476		else
477			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);
478
479		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
480
481		/* Check the reference deviation */
482		delta = ((u64) tsc_pit_min) * 100;
483		do_div(delta, tsc_ref_min);
484
485		/*
486		 * If both calibration results are inside a 10% window
487		 * then we can be sure, that the calibration
488		 * succeeded. We break out of the loop right away. We
489		 * use the reference value, as it is more precise.
490		 */
491		if (delta >= 90 && delta <= 110) {
492			printk(KERN_INFO
493			       "TSC: PIT calibration matches %s. %d loops\n",
494			       hpet ? "HPET" : "PMTIMER", i + 1);
495			return tsc_ref_min;
496		}
497
498		/*
499		 * Check whether PIT failed more than once. This
500		 * happens in virtualized environments. We need to
501		 * give the virtual PC a slightly longer timeframe for
502		 * the HPET/PMTIMER to make the result precise.
503		 */
504		if (i == 1 && tsc_pit_min == ULONG_MAX) {
505			latch = CAL2_LATCH;
506			ms = CAL2_MS;
507			loopmin = CAL2_PIT_LOOPS;
508		}
509	}
510
511	/*
512	 * Now check the results.
513	 */
514	if (tsc_pit_min == ULONG_MAX) {
515		/* PIT gave no useful value */
516		printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n");
517
518		/* We don't have an alternative source, disable TSC */
519		if (!hpet && !ref1 && !ref2) {
520			printk("TSC: No reference (HPET/PMTIMER) available\n");
521			return 0;
522		}
523
524		/* The alternative source failed as well, disable TSC */
525		if (tsc_ref_min == ULONG_MAX) {
526			printk(KERN_WARNING "TSC: HPET/PMTIMER calibration "
527			       "failed.\n");
528			return 0;
529		}
530
531		/* Use the alternative source */
532		printk(KERN_INFO "TSC: using %s reference calibration\n",
533		       hpet ? "HPET" : "PMTIMER");
534
535		return tsc_ref_min;
536	}
537
538	/* We don't have an alternative source, use the PIT calibration value */
539	if (!hpet && !ref1 && !ref2) {
540		printk(KERN_INFO "TSC: Using PIT calibration value\n");
541		return tsc_pit_min;
542	}
543
544	/* The alternative source failed, use the PIT calibration value */
545	if (tsc_ref_min == ULONG_MAX) {
546		printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed. "
547		       "Using PIT calibration\n");
548		return tsc_pit_min;
549	}
550
551	/*
552	 * The calibration values differ too much. In doubt, we use
553	 * the PIT value as we know that there are PMTIMERs around
554	 * running at double speed. At least we let the user know:
555	 */
556	printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
557	       hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
558	printk(KERN_INFO "TSC: Using PIT calibration value\n");
559	return tsc_pit_min;
560}
561
562int recalibrate_cpu_khz(void)
563{
564#ifndef CONFIG_SMP
565	unsigned long cpu_khz_old = cpu_khz;
566
567	if (cpu_has_tsc) {
568		tsc_khz = x86_platform.calibrate_tsc();
569		cpu_khz = tsc_khz;
570		cpu_data(0).loops_per_jiffy =
571			cpufreq_scale(cpu_data(0).loops_per_jiffy,
572					cpu_khz_old, cpu_khz);
573		return 0;
574	} else
575		return -ENODEV;
576#else
577	return -ENODEV;
578#endif
579}
580
581EXPORT_SYMBOL(recalibrate_cpu_khz);
582
583
584/* Accelerators for sched_clock()
585 * convert from cycles(64bits) => nanoseconds (64bits)
586 *  basic equation:
587 *              ns = cycles / (freq / ns_per_sec)
588 *              ns = cycles * (ns_per_sec / freq)
589 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
590 *              ns = cycles * (10^6 / cpu_khz)
591 *
592 *      Then we use scaling math (suggested by george@mvista.com) to get:
593 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
594 *              ns = cycles * cyc2ns_scale / SC
595 *
596 *      And since SC is a constant power of two, we can convert the div
597 *  into a shift.
598 *
599 *  We can use khz divisor instead of mhz to keep a better precision, since
600 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
601 *  (mathieu.desnoyers@polymtl.ca)
602 *
603 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
604 */
605
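To see the scaled math in action, here is a stand-alone sketch;
CYC2NS_SCALE_FACTOR is assumed to be 10 here (the real constant lives in
asm/timer.h), so the divide becomes a shift by 10.

    #include <stdio.h>

    #define CYC2NS_SCALE_FACTOR 10    /* assumption for illustration */
    #define NSEC_PER_MSEC 1000000UL

    int main(void)
    {
            unsigned long cpu_khz = 2000000;  /* a 2 GHz CPU */
            unsigned long scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;
            unsigned long long cycles = 1000000000ULL;
            unsigned long long ns = (cycles * scale) >> CYC2NS_SCALE_FACTOR;

            /* scale = 512, so 10^9 cycles -> 5 * 10^8 ns: 0.5 ns per cycle */
            printf("scale=%lu ns=%llu\n", scale, ns);
            return 0;
    }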
606DEFINE_PER_CPU(unsigned long, cyc2ns);
607DEFINE_PER_CPU(unsigned long long, cyc2ns_offset);
608
609static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
610{
611	unsigned long long tsc_now, ns_now, *offset;
612	unsigned long flags, *scale;
613
614	local_irq_save(flags);
615	sched_clock_idle_sleep_event();
616
617	scale = &per_cpu(cyc2ns, cpu);
618	offset = &per_cpu(cyc2ns_offset, cpu);
619
620	rdtscll(tsc_now);
621	ns_now = __cycles_2_ns(tsc_now);
622
623	if (cpu_khz) {
624		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
625		*offset = ns_now - (tsc_now * *scale >> CYC2NS_SCALE_FACTOR);
626	}
627
628	sched_clock_idle_wakeup_event(0);
629	local_irq_restore(flags);
630}
631
632static unsigned long long cyc2ns_suspend;
633
634void save_sched_clock_state(void)
635{
636	if (!sched_clock_stable)
637		return;
638
639	cyc2ns_suspend = sched_clock();
640}
641
642/*
643 * Even on processors with invariant TSC, the TSC gets reset in some of the
644 * ACPI system sleep states. And on some systems the BIOS seems to reinit the
645 * TSC to an arbitrary value (still sync'd across cpu's) during resume from such
646 * sleep states. To cope with this, recompute the cyc2ns_offset for each cpu so
647 * that sched_clock() continues from the point where it was left off during
648 * suspend.
649 */
650void restore_sched_clock_state(void)
651{
652	unsigned long long offset;
653	unsigned long flags;
654	int cpu;
655
656	if (!sched_clock_stable)
657		return;
658
659	local_irq_save(flags);
660
661	__this_cpu_write(cyc2ns_offset, 0);
662	offset = cyc2ns_suspend - sched_clock();
663
664	for_each_possible_cpu(cpu)
665		per_cpu(cyc2ns_offset, cpu) = offset;
666
667	local_irq_restore(flags);
668}
669
670#ifdef CONFIG_CPU_FREQ
671
672/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
673 * changes.
674 *
675 * RED-PEN: On SMP we assume all CPUs run with the same frequency.  It's
676 * not that important because current Opteron setups do not support
677 * scaling on SMP anyway.
678 *
679 * Should fix up last_tsc too. Currently gettimeofday in the
680 * first tick after the change will be slightly wrong.
681 */
682
683static unsigned int  ref_freq;
684static unsigned long loops_per_jiffy_ref;
685static unsigned long tsc_khz_ref;
686
687static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
688				void *data)
689{
690	struct cpufreq_freqs *freq = data;
691	unsigned long *lpj;
692
693	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
694		return 0;
695
696	lpj = &boot_cpu_data.loops_per_jiffy;
697#ifdef CONFIG_SMP
698	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
699		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
700#endif
701
702	if (!ref_freq) {
703		ref_freq = freq->old;
704		loops_per_jiffy_ref = *lpj;
705		tsc_khz_ref = tsc_khz;
706	}
707	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
708			(val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
709			(val == CPUFREQ_RESUMECHANGE)) {
710		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
711
712		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
713		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
714			mark_tsc_unstable("cpufreq changes");
715	}
716
717	set_cyc2ns_scale(tsc_khz, freq->cpu);
718
719	return 0;
720}
721
722static struct notifier_block time_cpufreq_notifier_block = {
723	.notifier_call  = time_cpufreq_notifier
724};
725
726static int __init cpufreq_tsc(void)
727{
728	if (!cpu_has_tsc)
729		return 0;
730	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
731		return 0;
732	cpufreq_register_notifier(&time_cpufreq_notifier_block,
733				CPUFREQ_TRANSITION_NOTIFIER);
734	return 0;
735}
736
737core_initcall(cpufreq_tsc);
738
739#endif /* CONFIG_CPU_FREQ */
740
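The notifier above rescales loops_per_jiffy and tsc_khz linearly with
frequency via cpufreq_scale(). A hedged stand-alone sketch of that
arithmetic (invented numbers):

    #include <stdio.h>

    /* cpufreq_scale(old, ref, new) is essentially old * new / ref */
    static unsigned long scale(unsigned long old, unsigned int ref,
                               unsigned int new)
    {
            return (unsigned long)((unsigned long long)old * new / ref);
    }

    int main(void)
    {
            /* calibrated at 2000000 kHz, CPU now running at 1000000 kHz */
            printf("lpj: %lu\n", scale(4000000, 2000000, 1000000));     /* halved */
            printf("tsc_khz: %lu\n", scale(2000000, 2000000, 1000000)); /* halved */
            return 0;
    }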
741/* clocksource code */
742
743static struct clocksource clocksource_tsc;
744
745/*
746 * We compare the TSC to the cycle_last value in the clocksource
747 * structure to avoid a nasty time-warp. This can be observed in a
748 * very small window right after one CPU updated cycle_last under
749 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
750 * is smaller than the cycle_last reference value due to a TSC which
751 * is slightly behind. This delta is nowhere else observable, but in
752 * that case it results in a forward time jump in the range of hours
753 * due to the unsigned delta calculation of the time keeping core
754 * code, which is necessary to support wrapping clocksources like pm
755 * timer.
756 */
757static cycle_t read_tsc(struct clocksource *cs)
758{
759	cycle_t ret = (cycle_t)get_cycles();
760
761	return ret >= clocksource_tsc.cycle_last ?
762		ret : clocksource_tsc.cycle_last;
763}
764
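The clamp in read_tsc() matters because the timekeeping core computes an
unsigned delta. A stand-alone sketch of the failure mode the comment above
describes, with invented values:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long cycle_last = 1000;
            unsigned long long tsc = 990;  /* this CPU's TSC is slightly behind */

            /* unsigned subtraction wraps to a near-2^64 forward jump */
            printf("raw delta: %llu\n", tsc - cycle_last);

            /* read_tsc() clamps to cycle_last instead, so the delta is 0 */
            unsigned long long clamped = tsc >= cycle_last ? tsc : cycle_last;
            printf("clamped delta: %llu\n", clamped - cycle_last);
            return 0;
    }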
765static void resume_tsc(struct clocksource *cs)
766{
767	clocksource_tsc.cycle_last = 0;
768}
769
770static struct clocksource clocksource_tsc = {
771	.name                   = "tsc",
772	.rating                 = 300,
773	.read                   = read_tsc,
774	.resume			= resume_tsc,
775	.mask                   = CLOCKSOURCE_MASK(64),
776	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
777				  CLOCK_SOURCE_MUST_VERIFY,
778#ifdef CONFIG_X86_64
779	.archdata               = { .vclock_mode = VCLOCK_TSC },
780#endif
781};
782
783void mark_tsc_unstable(char *reason)
784{
785	if (!tsc_unstable) {
786		tsc_unstable = 1;
787		sched_clock_stable = 0;
788		disable_sched_clock_irqtime();
789		printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
790		/* Change only the rating when not registered */
791		if (clocksource_tsc.mult)
792			clocksource_mark_unstable(&clocksource_tsc);
793		else {
794			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
795			clocksource_tsc.rating = 0;
796		}
797	}
798}
799
800EXPORT_SYMBOL_GPL(mark_tsc_unstable);
801
802static void __init check_system_tsc_reliable(void)
803{
804#ifdef CONFIG_MGEODE_LX
805	/* RTSC counts during suspend */
806#define RTSC_SUSP 0x100
807	unsigned long res_low, res_high;
808
809	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
810	/* Geode_LX - the OLPC CPU has a very reliable TSC */
811	if (res_low & RTSC_SUSP)
812		tsc_clocksource_reliable = 1;
813#endif
814	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
815		tsc_clocksource_reliable = 1;
816}
817
818/*
819 * Make an educated guess if the TSC is trustworthy and synchronized
820 * over all CPUs.
821 */
822__cpuinit int unsynchronized_tsc(void)
823{
824	if (!cpu_has_tsc || tsc_unstable)
825		return 1;
826
827#ifdef CONFIG_SMP
828	if (apic_is_clustered_box())
829		return 1;
830#endif
831
832	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
833		return 0;
834
835	if (tsc_clocksource_reliable)
836		return 0;
837	/*
838	 * Intel systems are normally all synchronized.
839	 * Exceptions must mark TSC as unstable:
840	 */
841	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
842		/* assume multi socket systems are not synchronized: */
843		if (num_possible_cpus() > 1)
844			return 1;
845	}
846
847	return 0;
848}
849
850
851static void tsc_refine_calibration_work(struct work_struct *work);
852static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
853/**
854 * tsc_refine_calibration_work - Further refine tsc freq calibration
855 * @work: ignored.
856 *
857 * This function uses delayed work over a period of a
858 * second to further refine the TSC freq value. Since this is
859 * timer based, instead of loop based, we don't block the boot
860 * process while this longer calibration is done.
861 *
862 * If there are any calibration anomalies (too many SMIs, etc),
863 * or the refined calibration is off by more than 1% from the fast early
864 * calibration, we throw out the new calibration and use the
865 * early calibration.
866 */
867static void tsc_refine_calibration_work(struct work_struct *work)
868{
869	static u64 tsc_start = -1, ref_start;
870	static int hpet;
871	u64 tsc_stop, ref_stop, delta;
872	unsigned long freq;
873
874	/* Don't bother refining TSC on unstable systems */
875	if (check_tsc_unstable())
876		goto out;
877
878	/*
879	 * Since the work is started early in boot, we may be
880	 * delayed the first time we expire. So set the workqueue
881	 * again once we know timers are working.
882	 */
883	if (tsc_start == -1) {
884		/*
885		 * Only set hpet once, to avoid mixing hardware
886		 * if the hpet becomes enabled later.
887		 */
888		hpet = is_hpet_enabled();
889		schedule_delayed_work(&tsc_irqwork, HZ);
890		tsc_start = tsc_read_refs(&ref_start, hpet);
891		return;
892	}
893
894	tsc_stop = tsc_read_refs(&ref_stop, hpet);
895
896	/* hpet or pmtimer available ? */
897	if (ref_start == ref_stop)
898		goto out;
899
900	/* Check, whether the sampling was disturbed by an SMI */
901	if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
902		goto out;
903
904	delta = tsc_stop - tsc_start;
905	delta *= 1000000LL;
906	if (hpet)
907		freq = calc_hpet_ref(delta, ref_start, ref_stop);
908	else
909		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);
910
911	/* Make sure we're within 1% */
912	if (abs(tsc_khz - freq) > tsc_khz/100)
913		goto out;
914
915	tsc_khz = freq;
916	printk(KERN_INFO "Refined TSC clocksource calibration: "
917		"%lu.%03lu MHz.\n", (unsigned long)tsc_khz / 1000,
918					(unsigned long)tsc_khz % 1000);
919
920out:
921	clocksource_register_khz(&clocksource_tsc, tsc_khz);
922}
923
924
925static int __init init_tsc_clocksource(void)
926{
927	if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
928		return 0;
929
930	if (tsc_clocksource_reliable)
931		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
932	/* lower the rating if we already know it's unstable: */
933	if (check_tsc_unstable()) {
934		clocksource_tsc.rating = 0;
935		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
936	}
937	schedule_delayed_work(&tsc_irqwork, 0);
938	return 0;
939}
940/*
941 * We use device_initcall here, to ensure we run after the hpet
942 * is fully initialized, which may occur at fs_initcall time.
943 */
944device_initcall(init_tsc_clocksource);
945
946void __init tsc_init(void)
947{
948	u64 lpj;
949	int cpu;
950
951	x86_init.timers.tsc_pre_init();
952
953	if (!cpu_has_tsc)
954		return;
955
956	tsc_khz = x86_platform.calibrate_tsc();
957	cpu_khz = tsc_khz;
958
959	if (!tsc_khz) {
960		mark_tsc_unstable("could not calculate TSC khz");
961		return;
962	}
963
964	printk("Detected %lu.%03lu MHz processor.\n",
965			(unsigned long)cpu_khz / 1000,
966			(unsigned long)cpu_khz % 1000);
967
968	/*
969	 * Secondary CPUs do not run through tsc_init(), so set up
970	 * all the scale factors for all CPUs, assuming the same
971	 * speed as the bootup CPU. (cpufreq notifiers will fix this
972	 * up if their speed diverges)
973	 */
974	for_each_possible_cpu(cpu)
975		set_cyc2ns_scale(cpu_khz, cpu);
976
977	if (tsc_disabled > 0)
978		return;
979
980	/* now allow native_sched_clock() to use rdtsc */
981	tsc_disabled = 0;
982
983	if (!no_sched_irq_time)
984		enable_sched_clock_irqtime();
985
986	lpj = ((u64)tsc_khz * 1000);
987	do_div(lpj, HZ);
988	lpj_fine = lpj;
989
990	use_tsc_delay();
991
992	if (unsynchronized_tsc())
993		mark_tsc_unstable("TSCs unsynchronized");
994
995	check_system_tsc_reliable();
996}
997
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   3
   4#include <linux/kernel.h>
   5#include <linux/sched.h>
   6#include <linux/sched/clock.h>
   7#include <linux/init.h>
   8#include <linux/export.h>
   9#include <linux/timer.h>
  10#include <linux/acpi_pmtmr.h>
  11#include <linux/cpufreq.h>
  12#include <linux/delay.h>
  13#include <linux/clocksource.h>
  14#include <linux/percpu.h>
  15#include <linux/timex.h>
  16#include <linux/static_key.h>
  17#include <linux/static_call.h>
  18
  19#include <asm/hpet.h>
  20#include <asm/timer.h>
  21#include <asm/vgtod.h>
  22#include <asm/time.h>
  23#include <asm/delay.h>
  24#include <asm/hypervisor.h>
  25#include <asm/nmi.h>
  26#include <asm/x86_init.h>
  27#include <asm/geode.h>
  28#include <asm/apic.h>
  29#include <asm/intel-family.h>
  30#include <asm/i8259.h>
  31#include <asm/uv/uv.h>
  32
  33unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
  34EXPORT_SYMBOL(cpu_khz);
  35
  36unsigned int __read_mostly tsc_khz;
  37EXPORT_SYMBOL(tsc_khz);
  38
  39#define KHZ	1000
  40
  41/*
  42 * TSC can be unstable due to cpufreq or due to unsynced TSCs
  43 */
  44static int __read_mostly tsc_unstable;
  45static unsigned int __initdata tsc_early_khz;
  46
  47static DEFINE_STATIC_KEY_FALSE(__use_tsc);
  48
  49int tsc_clocksource_reliable;
  50
  51static int __read_mostly tsc_force_recalibrate;
  52
  53static u32 art_to_tsc_numerator;
  54static u32 art_to_tsc_denominator;
  55static u64 art_to_tsc_offset;
  56static bool have_art;
  57
  58struct cyc2ns {
  59	struct cyc2ns_data data[2];	/*  0 + 2*16 = 32 */
  60	seqcount_latch_t   seq;		/* 32 + 4    = 36 */
  61
  62}; /* fits one cacheline */
  63
  64static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
  65
  66static int __init tsc_early_khz_setup(char *buf)
  67{
  68	return kstrtouint(buf, 0, &tsc_early_khz);
  69}
  70early_param("tsc_early_khz", tsc_early_khz_setup);
  71
  72__always_inline void __cyc2ns_read(struct cyc2ns_data *data)
  73{
  74	int seq, idx;
  75
  76	do {
  77		seq = this_cpu_read(cyc2ns.seq.seqcount.sequence);
  78		idx = seq & 1;
  79
  80		data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
  81		data->cyc2ns_mul    = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
  82		data->cyc2ns_shift  = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
  83
  84	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.seqcount.sequence)));
  85}
  86
  87__always_inline void cyc2ns_read_begin(struct cyc2ns_data *data)
  88{
  89	preempt_disable_notrace();
  90	__cyc2ns_read(data);
  91}
  92
  93__always_inline void cyc2ns_read_end(void)
  94{
  95	preempt_enable_notrace();
  96}
  97
  98/*
  99 * Accelerators for sched_clock()
 100 * convert from cycles(64bits) => nanoseconds (64bits)
 101 *  basic equation:
 102 *              ns = cycles / (freq / ns_per_sec)
 103 *              ns = cycles * (ns_per_sec / freq)
 104 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 105 *              ns = cycles * (10^6 / cpu_khz)
 106 *
 107 *      Then we use scaling math (suggested by george@mvista.com) to get:
 108 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 109 *              ns = cycles * cyc2ns_scale / SC
 110 *
 111 *      And since SC is a constant power of two, we can convert the div
 112 *  into a shift. The larger SC is, the more accurate the conversion, but
 113 *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 114 *  (64-bit result) can be used.
 115 *
 116 *  We can use khz divisor instead of mhz to keep a better precision.
 117 *  (mathieu.desnoyers@polymtl.ca)
 118 *
 119 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 120 */
 121
 122static __always_inline unsigned long long __cycles_2_ns(unsigned long long cyc)
 123{
 124	struct cyc2ns_data data;
 125	unsigned long long ns;
 126
 127	__cyc2ns_read(&data);
 128
 129	ns = data.cyc2ns_offset;
 130	ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
 131
 132	return ns;
 133}
 134
 135static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
 136{
 137	unsigned long long ns;
 138	preempt_disable_notrace();
 139	ns = __cycles_2_ns(cyc);
 140	preempt_enable_notrace();
 141	return ns;
 142}
 143
 144static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
 145{
 146	unsigned long long ns_now;
 147	struct cyc2ns_data data;
 148	struct cyc2ns *c2n;
 149
 150	ns_now = cycles_2_ns(tsc_now);
 151
 152	/*
 153	 * Compute a new multiplier as per the above comment and ensure our
 154	 * time function is continuous; see the comment near struct
 155	 * cyc2ns_data.
 156	 */
 157	clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
 158			       NSEC_PER_MSEC, 0);
 159
 160	/*
 161	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
 162	 * not expected to be greater than 31 due to the original published
 163	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
 164	 * value) - refer perf_event_mmap_page documentation in perf_event.h.
 165	 */
 166	if (data.cyc2ns_shift == 32) {
 167		data.cyc2ns_shift = 31;
 168		data.cyc2ns_mul >>= 1;
 169	}
 170
 171	data.cyc2ns_offset = ns_now -
 172		mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);
 173
 174	c2n = per_cpu_ptr(&cyc2ns, cpu);
 175
 176	raw_write_seqcount_latch(&c2n->seq);
 177	c2n->data[0] = data;
 178	raw_write_seqcount_latch(&c2n->seq);
 179	c2n->data[1] = data;
 180}
 181
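The paired raw_write_seqcount_latch() calls above implement the latch
pattern that __cyc2ns_read() depends on: an odd sequence steers readers to
the copy that is not being rewritten. A single-threaded sketch of that
invariant, without the memory barriers the real seqcount_latch_t provides:

    #include <stdio.h>

    struct latch {
            unsigned int seq;
            long data[2];
    };

    /* writer: odd seq -> readers use data[1]; even seq -> data[0] */
    static void latch_write(struct latch *l, long val)
    {
            l->seq++;
            l->data[0] = val;
            l->seq++;
            l->data[1] = val;
    }

    /* reader: retry if the sequence moved underneath us */
    static long latch_read(struct latch *l)
    {
            unsigned int seq;
            long v;

            do {
                    seq = l->seq;
                    v = l->data[seq & 1];
            } while (seq != l->seq);
            return v;
    }

    int main(void)
    {
            struct latch l = { 0, { 0, 0 } };

            latch_write(&l, 42);
            printf("%ld\n", latch_read(&l));
            return 0;
    }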
 182static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
 183{
 184	unsigned long flags;
 185
 186	local_irq_save(flags);
 187	sched_clock_idle_sleep_event();
 188
 189	if (khz)
 190		__set_cyc2ns_scale(khz, cpu, tsc_now);
 191
 192	sched_clock_idle_wakeup_event();
 193	local_irq_restore(flags);
 194}
 195
 196/*
 197 * Initialize cyc2ns for boot cpu
 198 */
 199static void __init cyc2ns_init_boot_cpu(void)
 200{
 201	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
 202
 203	seqcount_latch_init(&c2n->seq);
 204	__set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
 205}
 206
 207/*
 208 * Secondary CPUs do not run through tsc_init(), so set up
 209 * all the scale factors for all CPUs, assuming the same
 210 * speed as the bootup CPU.
 211 */
 212static void __init cyc2ns_init_secondary_cpus(void)
 213{
 214	unsigned int cpu, this_cpu = smp_processor_id();
 215	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
 216	struct cyc2ns_data *data = c2n->data;
 217
 218	for_each_possible_cpu(cpu) {
 219		if (cpu != this_cpu) {
 220			seqcount_latch_init(&c2n->seq);
 221			c2n = per_cpu_ptr(&cyc2ns, cpu);
 222			c2n->data[0] = data[0];
 223			c2n->data[1] = data[1];
 224		}
 225	}
 226}
 227
 228/*
 229 * Scheduler clock - returns current time in nanosec units.
 230 */
 231noinstr u64 native_sched_clock(void)
 232{
 233	if (static_branch_likely(&__use_tsc)) {
 234		u64 tsc_now = rdtsc();
 235
 236		/* return the value in ns */
 237		return __cycles_2_ns(tsc_now);
 238	}
 239
 240	/*
 241	 * Fall back to jiffies if there's no TSC available:
 242	 * ( But note that we still use it if the TSC is marked
 243	 *   unstable. We do this because unlike Time Of Day,
 244	 *   the scheduler clock tolerates small errors and it's
 245	 *   very important for it to be as fast as the platform
 246	 *   can achieve it. )
 247	 */
 248
 249	/* No locking but a rare wrong value is not a big deal: */
 250	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 251}
 252
 253/*
 254 * Generate a sched_clock if you already have a TSC value.
 255 */
 256u64 native_sched_clock_from_tsc(u64 tsc)
 257{
 258	return cycles_2_ns(tsc);
 259}
 260
 261/* We need to define a real function for sched_clock, to override the
 262   weak default version */
 263#ifdef CONFIG_PARAVIRT
 264noinstr u64 sched_clock_noinstr(void)
 265{
 266	return paravirt_sched_clock();
 267}
 268
 269bool using_native_sched_clock(void)
 270{
 271	return static_call_query(pv_sched_clock) == native_sched_clock;
 272}
 273#else
 274u64 sched_clock_noinstr(void) __attribute__((alias("native_sched_clock")));
 275
 276bool using_native_sched_clock(void) { return true; }
 277#endif
 278
 279notrace u64 sched_clock(void)
 280{
 281	u64 now;
 282	preempt_disable_notrace();
 283	now = sched_clock_noinstr();
 284	preempt_enable_notrace();
 285	return now;
 286}
 287
 288int check_tsc_unstable(void)
 289{
 290	return tsc_unstable;
 291}
 292EXPORT_SYMBOL_GPL(check_tsc_unstable);
 293
 294#ifdef CONFIG_X86_TSC
 295int __init notsc_setup(char *str)
 296{
 297	mark_tsc_unstable("boot parameter notsc");
 298	return 1;
 299}
 300#else
 301/*
 302 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 303 * in cpu/common.c
 304 */
 305int __init notsc_setup(char *str)
 306{
 307	setup_clear_cpu_cap(X86_FEATURE_TSC);
 308	return 1;
 309}
 310#endif
 311
 312__setup("notsc", notsc_setup);
 313
 314static int no_sched_irq_time;
 315static int no_tsc_watchdog;
 316static int tsc_as_watchdog;
 317
 318static int __init tsc_setup(char *str)
 319{
 320	if (!strcmp(str, "reliable"))
 321		tsc_clocksource_reliable = 1;
 322	if (!strncmp(str, "noirqtime", 9))
 323		no_sched_irq_time = 1;
 324	if (!strcmp(str, "unstable"))
 325		mark_tsc_unstable("boot parameter");
 326	if (!strcmp(str, "nowatchdog")) {
 327		no_tsc_watchdog = 1;
 328		if (tsc_as_watchdog)
 329			pr_alert("%s: Overriding earlier tsc=watchdog with tsc=nowatchdog\n",
 330				 __func__);
 331		tsc_as_watchdog = 0;
 332	}
 333	if (!strcmp(str, "recalibrate"))
 334		tsc_force_recalibrate = 1;
 335	if (!strcmp(str, "watchdog")) {
 336		if (no_tsc_watchdog)
 337			pr_alert("%s: tsc=watchdog overridden by earlier tsc=nowatchdog\n",
 338				 __func__);
 339		else
 340			tsc_as_watchdog = 1;
 341	}
 342	return 1;
 343}
 344
 345__setup("tsc=", tsc_setup);
 346
 347#define MAX_RETRIES		5
 348#define TSC_DEFAULT_THRESHOLD	0x20000
 349
 350/*
 351 * Read TSC and the reference counters. Take care of any disturbances
 352 */
 353static u64 tsc_read_refs(u64 *p, int hpet)
 354{
 355	u64 t1, t2;
 356	u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
 357	int i;
 358
 359	for (i = 0; i < MAX_RETRIES; i++) {
 360		t1 = get_cycles();
 361		if (hpet)
 362			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
 363		else
 364			*p = acpi_pm_read_early();
 365		t2 = get_cycles();
 366		if ((t2 - t1) < thresh)
 367			return t2;
 368	}
 369	return ULLONG_MAX;
 370}
 371
 372/*
 373 * Calculate the TSC frequency from HPET reference
 374 */
 375static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
 376{
 377	u64 tmp;
 378
 379	if (hpet2 < hpet1)
 380		hpet2 += 0x100000000ULL;
 381	hpet2 -= hpet1;
 382	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
 383	do_div(tmp, 1000000);
 384	deltatsc = div64_u64(deltatsc, tmp);
 385
 386	return (unsigned long) deltatsc;
 387}
 388
 389/*
 390 * Calculate the TSC frequency from PMTimer reference
 391 */
 392static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
 393{
 394	u64 tmp;
 395
 396	if (!pm1 && !pm2)
 397		return ULONG_MAX;
 398
 399	if (pm2 < pm1)
 400		pm2 += (u64)ACPI_PM_OVRRUN;
 401	pm2 -= pm1;
 402	tmp = pm2 * 1000000000LL;
 403	do_div(tmp, PMTMR_TICKS_PER_SEC);
 404	do_div(deltatsc, tmp);
 405
 406	return (unsigned long) deltatsc;
 407}
 408
 409#define CAL_MS		10
 410#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
 411#define CAL_PIT_LOOPS	1000
 412
 413#define CAL2_MS		50
 414#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
 415#define CAL2_PIT_LOOPS	5000
 416
 417
 418/*
 419 * Try to calibrate the TSC against the Programmable
 420 * Interrupt Timer and return the frequency of the TSC
 421 * in kHz.
 422 *
 423 * Return ULONG_MAX on failure to calibrate.
 424 */
 425static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
 426{
 427	u64 tsc, t1, t2, delta;
 428	unsigned long tscmin, tscmax;
 429	int pitcnt;
 430
 431	if (!has_legacy_pic()) {
 432		/*
 433		 * Relies on tsc_early_delay_calibrate() to have given us a semi-
 434		 * usable udelay(); wait for the same 50ms we would have with
 435		 * the PIT loop below.
 436		 */
 437		udelay(10 * USEC_PER_MSEC);
 438		udelay(10 * USEC_PER_MSEC);
 439		udelay(10 * USEC_PER_MSEC);
 440		udelay(10 * USEC_PER_MSEC);
 441		udelay(10 * USEC_PER_MSEC);
 442		return ULONG_MAX;
 443	}
 444
 445	/* Set the Gate high, disable speaker */
 446	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
 447
 448	/*
 449	 * Setup CTC channel 2 for mode 0 (interrupt on terminal
 450	 * count mode), binary count. Set the latch register to 50ms
 451	 * (LSB then MSB) to begin countdown.
 452	 */
 453	outb(0xb0, 0x43);
 454	outb(latch & 0xff, 0x42);
 455	outb(latch >> 8, 0x42);
 456
 457	tsc = t1 = t2 = get_cycles();
 458
 459	pitcnt = 0;
 460	tscmax = 0;
 461	tscmin = ULONG_MAX;
 462	while ((inb(0x61) & 0x20) == 0) {
 463		t2 = get_cycles();
 464		delta = t2 - tsc;
 465		tsc = t2;
 466		if ((unsigned long) delta < tscmin)
 467			tscmin = (unsigned int) delta;
 468		if ((unsigned long) delta > tscmax)
 469			tscmax = (unsigned int) delta;
 470		pitcnt++;
 471	}
 472
 473	/*
 474	 * Sanity checks:
 475	 *
 476	 * If we were not able to read the PIT more than loopmin
 477	 * times, then we have been hit by a massive SMI
 478	 *
 479	 * If the maximum is 10 times larger than the minimum,
 480	 * then we got hit by an SMI as well.
 481	 */
 482	if (pitcnt < loopmin || tscmax > 10 * tscmin)
 483		return ULONG_MAX;
 484
 485	/* Calculate the PIT value */
 486	delta = t2 - t1;
 487	do_div(delta, ms);
 488	return delta;
 489}
 490
 491/*
 492 * This reads the current MSB of the PIT counter, and
 493 * checks if we are running on sufficiently fast and
 494 * non-virtualized hardware.
 495 *
 496 * Our expectations are:
 497 *
 498 *  - the PIT is running at roughly 1.19MHz
 499 *
 500 *  - each IO is going to take about 1us on real hardware,
 501 *    but we allow it to be much faster (by a factor of 10) or
 502 *    _slightly_ slower (ie we allow up to a 2us read+counter
 503 *    update - anything else implies an unacceptably slow CPU
 504 *    or PIT for the fast calibration to work).
 505 *
 506 *  - with 256 PIT ticks to read the value, we have 214us to
 507 *    see the same MSB (and overhead like doing a single TSC
 508 *    read per MSB value etc).
 509 *
 510 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 511 *    them each to take about a microsecond on real hardware.
 512 *    So we expect a count value of around 100. But we'll be
 513 *    generous, and accept anything over 50.
 514 *
 515 *  - if the PIT is stuck, and we see *many* more reads, we
 516 *    return early (and the next caller of pit_expect_msb()
 517 *    will then consider it a failure when they don't see the
 518 *    next expected value).
 519 *
 520 * These expectations mean that we know that we have seen the
 521 * transition from one expected value to another with a fairly
 522 * high accuracy, and we didn't miss any events. We can thus
 523 * use the TSC value at the transitions to calculate a pretty
 524 * good value for the TSC frequency.
 525 */
 526static inline int pit_verify_msb(unsigned char val)
 527{
 528	/* Ignore LSB */
 529	inb(0x42);
 530	return inb(0x42) == val;
 531}
 532
 533static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
 534{
 535	int count;
 536	u64 tsc = 0, prev_tsc = 0;
 537
 538	for (count = 0; count < 50000; count++) {
 539		if (!pit_verify_msb(val))
 540			break;
 541		prev_tsc = tsc;
 542		tsc = get_cycles();
 543	}
 544	*deltap = get_cycles() - prev_tsc;
 545	*tscp = tsc;
 546
 547	/*
 548	 * We require _some_ success, but the quality control
 549	 * will be based on the error terms on the TSC values.
 550	 */
 551	return count > 5;
 552}
 553
 554/*
 555 * How many MSB values do we want to see? We aim for
 556 * a maximum error rate of 500ppm (in practice the
 557 * real error is much smaller), but refuse to spend
 558 * more than 50ms on it.
 559 */
 560#define MAX_QUICK_PIT_MS 50
 561#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
 562
 563static unsigned long quick_pit_calibrate(void)
 564{
 565	int i;
 566	u64 tsc, delta;
 567	unsigned long d1, d2;
 568
 569	if (!has_legacy_pic())
 570		return 0;
 571
 572	/* Set the Gate high, disable speaker */
 573	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
 574
 575	/*
 576	 * Counter 2, mode 0 (one-shot), binary count
 577	 *
 578	 * NOTE! Mode 2 decrements by two (and then the
 579	 * output is flipped each time, giving the same
 580	 * final output frequency as a decrement-by-one),
 581	 * so mode 0 is much better when looking at the
 582	 * individual counts.
 583	 */
 584	outb(0xb0, 0x43);
 585
 586	/* Start at 0xffff */
 587	outb(0xff, 0x42);
 588	outb(0xff, 0x42);
 589
 590	/*
 591	 * The PIT starts counting at the next edge, so we
 592	 * need to delay for a microsecond. The easiest way
 593	 * to do that is to just read back the 16-bit counter
 594	 * once from the PIT.
 595	 */
 596	pit_verify_msb(0);
 597
 598	if (pit_expect_msb(0xff, &tsc, &d1)) {
 599		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
 600			if (!pit_expect_msb(0xff-i, &delta, &d2))
 601				break;
 602
 603			delta -= tsc;
 604
 605			/*
 606			 * Extrapolate the error and fail fast if the error will
 607			 * never be below 500 ppm.
 608			 */
 609			if (i == 1 &&
 610			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
 611				return 0;
 612
 613			/*
 614			 * Iterate until the error is less than 500 ppm
 615			 */
 616			if (d1+d2 >= delta >> 11)
 617				continue;
 618
 619			/*
 620			 * Check the PIT one more time to verify that
 621			 * all TSC reads were stable wrt the PIT.
 622			 *
 623			 * This also guarantees serialization of the
 624			 * last cycle read ('d2') in pit_expect_msb.
 625			 */
 626			if (!pit_verify_msb(0xfe - i))
 627				break;
 628			goto success;
 629		}
 630	}
 631	pr_info("Fast TSC calibration failed\n");
 632	return 0;
 633
 634success:
 635	/*
 636	 * Ok, if we get here, then we've seen the
 637	 * MSB of the PIT decrement 'i' times, and the
 638	 * error has shrunk to less than 500 ppm.
 639	 *
 640	 * As a result, we can depend on there not being
 641	 * any odd delays anywhere, and the TSC reads are
 642	 * reliable (within the error).
 643	 *
 644	 * kHz = ticks / time-in-seconds / 1000;
 645	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
 646	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
 647	 */
 648	delta *= PIT_TICK_RATE;
 649	do_div(delta, i*256*1000);
 650	pr_info("Fast TSC calibration using PIT\n");
 651	return delta;
 652}
 653
 654/**
 655 * native_calibrate_tsc - determine TSC frequency
 656 * Determine TSC frequency via CPUID, else return 0.
 657 */
 658unsigned long native_calibrate_tsc(void)
 659{
 660	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
 661	unsigned int crystal_khz;
 662
 663	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 664		return 0;
 665
 666	if (boot_cpu_data.cpuid_level < 0x15)
 667		return 0;
 668
 669	eax_denominator = ebx_numerator = ecx_hz = edx = 0;
 670
 671	/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
 672	cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
 673
 674	if (ebx_numerator == 0 || eax_denominator == 0)
 675		return 0;
 676
 677	crystal_khz = ecx_hz / 1000;
 678
 679	/*
 680	 * Denverton SoCs don't report crystal clock, and also don't support
 681	 * CPUID.0x16 for the calculation below, so hardcode the 25MHz crystal
 682	 * clock.
 683	 */
 684	if (crystal_khz == 0 &&
 685			boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_D)
 686		crystal_khz = 25000;
 687
 688	/*
 689	 * TSC frequency reported directly by CPUID is a "hardware reported"
 690	 * frequency and is the most accurate one so far we have. This
 691	 * is considered a known frequency.
 692	 */
 693	if (crystal_khz != 0)
 694		setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
 695
 696	/*
 697	 * Some Intel SoCs like Skylake and Kabylake don't report the crystal
 698	 * clock, but we can easily calculate it to a high degree of accuracy
 699	 * by considering the crystal ratio and the CPU speed.
 700	 */
 701	if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= 0x16) {
 702		unsigned int eax_base_mhz, ebx, ecx, edx;
 703
 704		cpuid(0x16, &eax_base_mhz, &ebx, &ecx, &edx);
 705		crystal_khz = eax_base_mhz * 1000 *
 706			eax_denominator / ebx_numerator;
 707	}
 708
 709	if (crystal_khz == 0)
 710		return 0;
 711
 712	/*
 713	 * For Atom SoCs TSC is the only reliable clocksource.
 714	 * Mark TSC reliable so no watchdog on it.
 715	 */
 716	if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT)
 717		setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
 718
 719#ifdef CONFIG_X86_LOCAL_APIC
 720	/*
 721	 * The local APIC appears to be fed by the core crystal clock
 722	 * (which sounds entirely sensible). We can set the global
 723	 * lapic_timer_period here to avoid having to calibrate the APIC
 724	 * timer later.
 725	 */
 726	lapic_timer_period = crystal_khz * 1000 / HZ;
 727#endif
 728
 729	return crystal_khz * ebx_numerator / eax_denominator;
 730}
 731
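Worked numbers for the CPUID.15H conversion above (a hedged sketch; the
leaf values are invented for illustration): TSC kHz = crystal kHz * EBX / EAX.

    #include <stdio.h>

    int main(void)
    {
            unsigned int eax_denominator = 2;    /* illustrative leaf values */
            unsigned int ebx_numerator = 292;
            unsigned int crystal_khz = 24000;    /* 24 MHz crystal */

            unsigned long tsc_khz = (unsigned long)crystal_khz *
                                    ebx_numerator / eax_denominator;

            printf("TSC ~ %lu kHz\n", tsc_khz);  /* 3504000 kHz, 3.504 GHz */
            return 0;
    }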
 732static unsigned long cpu_khz_from_cpuid(void)
 733{
 734	unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;
 735
 736	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 737		return 0;
 738
 739	if (boot_cpu_data.cpuid_level < 0x16)
 740		return 0;
 741
 742	eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;
 743
 744	cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);
 745
 746	return eax_base_mhz * 1000;
 747}
 748
 749/*
 750 * Calibrate the CPU using the PIT, HPET, and ACPI PM timer methods. They are
 751 * available later in boot, after ACPI is initialized.
 752 */
 753static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
 754{
 755	u64 tsc1, tsc2, delta, ref1, ref2;
 756	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
 757	unsigned long flags, latch, ms;
 758	int hpet = is_hpet_enabled(), i, loopmin;
 759
 760	/*
 761	 * Run 5 calibration loops to get the lowest frequency value
 762	 * (the best estimate). We use two different calibration modes
 763	 * here:
 764	 *
 765	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
 766	 * load a timeout of 50ms. We read the time right after we
 767	 * started the timer and wait until the PIT count down reaches
 768	 * zero. In each wait loop iteration we read the TSC and check
 769	 * the delta to the previous read. We keep track of the min
 770	 * and max values of that delta. The delta is mostly defined
 771	 * by the IO time of the PIT access, so we can detect when
 772	 * any disturbance happened between the two reads. If the
 773	 * maximum time is significantly larger than the minimum time,
 774	 * then we discard the result and have another try.
 775	 *
 776	 * 2) Reference counter. If available we use the HPET or the
 777	 * PMTIMER as a reference to check the sanity of that value.
 778	 * We use separate TSC readouts and check inside of the
 779	 * reference read for any possible disturbance. We discard
 780	 * disturbed values here as well. We do that around the PIT
 781	 * calibration delay loop as we have to wait for a certain
 782	 * amount of time anyway.
 783	 */
 784
 785	/* Preset PIT loop values */
 786	latch = CAL_LATCH;
 787	ms = CAL_MS;
 788	loopmin = CAL_PIT_LOOPS;
 789
 790	for (i = 0; i < 3; i++) {
 791		unsigned long tsc_pit_khz;
 792
 793		/*
 794		 * Read the start value and the reference count of
 795		 * hpet/pmtimer when available. Then do the PIT
 796		 * calibration, which will take at least 50ms, and
 797		 * read the end value.
 798		 */
 799		local_irq_save(flags);
 800		tsc1 = tsc_read_refs(&ref1, hpet);
 801		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
 802		tsc2 = tsc_read_refs(&ref2, hpet);
 803		local_irq_restore(flags);
 804
 805		/* Pick the lowest PIT TSC calibration so far */
 806		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
 807
 808		/* hpet or pmtimer available ? */
 809		if (ref1 == ref2)
 810			continue;
 811
 812		/* Check, whether the sampling was disturbed */
 813		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
 814			continue;
 815
 816		tsc2 = (tsc2 - tsc1) * 1000000LL;
 817		if (hpet)
 818			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
 819		else
 820			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);
 821
 822		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
 823
 824		/* Check the reference deviation */
 825		delta = ((u64) tsc_pit_min) * 100;
 826		do_div(delta, tsc_ref_min);
 827
 828		/*
 829		 * If both calibration results are inside a 10% window
 830		 * then we can be sure, that the calibration
 831		 * succeeded. We break out of the loop right away. We
 832		 * use the reference value, as it is more precise.
 833		 */
 834		if (delta >= 90 && delta <= 110) {
 835			pr_info("PIT calibration matches %s. %d loops\n",
 836				hpet ? "HPET" : "PMTIMER", i + 1);
 837			return tsc_ref_min;
 838		}
 839
 840		/*
 841		 * Check whether PIT failed more than once. This
 842		 * happens in virtualized environments. We need to
 843		 * give the virtual PC a slightly longer timeframe for
 844		 * the HPET/PMTIMER to make the result precise.
 845		 */
 846		if (i == 1 && tsc_pit_min == ULONG_MAX) {
 847			latch = CAL2_LATCH;
 848			ms = CAL2_MS;
 849			loopmin = CAL2_PIT_LOOPS;
 850		}
 851	}
 852
 853	/*
 854	 * Now check the results.
 855	 */
 856	if (tsc_pit_min == ULONG_MAX) {
 857		/* PIT gave no useful value */
 858		pr_warn("Unable to calibrate against PIT\n");
 859
 860		/* We don't have an alternative source, disable TSC */
 861		if (!hpet && !ref1 && !ref2) {
 862			pr_notice("No reference (HPET/PMTIMER) available\n");
 863			return 0;
 864		}
 865
 866		/* The alternative source failed as well, disable TSC */
 867		if (tsc_ref_min == ULONG_MAX) {
 868			pr_warn("HPET/PMTIMER calibration failed\n");
 869			return 0;
 870		}
 871
 872		/* Use the alternative source */
 873		pr_info("using %s reference calibration\n",
 874			hpet ? "HPET" : "PMTIMER");
 875
 876		return tsc_ref_min;
 877	}
 878
 879	/* We don't have an alternative source, use the PIT calibration value */
 880	if (!hpet && !ref1 && !ref2) {
 881		pr_info("Using PIT calibration value\n");
 882		return tsc_pit_min;
 883	}
 884
 885	/* The alternative source failed, use the PIT calibration value */
 886	if (tsc_ref_min == ULONG_MAX) {
 887		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
 888		return tsc_pit_min;
 889	}
 890
 891	/*
 892	 * The calibration values differ too much. In doubt, we use
 893	 * the PIT value as we know that there are PMTIMERs around
 894	 * running at double speed. At least we let the user know:
 895	 */
 896	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
 897		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
 898	pr_info("Using PIT calibration value\n");
 899	return tsc_pit_min;
 900}
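
/*
 * Editor's sketch (illustrative helper, not part of the original file):
 * the 10% agreement window above, in isolation. delta is the PIT result
 * expressed as a percentage of the reference result; e.g. tsc_pit_min =
 * 2992000 kHz against tsc_ref_min = 3000000 kHz gives delta = 99, which
 * is inside [90, 110], so the two calibrations are considered to match.
 */
static inline bool tsc_calibrations_agree(unsigned long pit_khz,
					  unsigned long ref_khz)
{
	u64 delta = (u64)pit_khz * 100;

	do_div(delta, ref_khz);		/* delta = pit/ref in percent */
	return delta >= 90 && delta <= 110;
}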
 901
 902/**
903 * native_calibrate_cpu_early - calibrate the CPU using methods available early in boot
 904 */
 905unsigned long native_calibrate_cpu_early(void)
 906{
 907	unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();
 908
 909	if (!fast_calibrate)
 910		fast_calibrate = cpu_khz_from_msr();
 911	if (!fast_calibrate) {
 912		local_irq_save(flags);
 913		fast_calibrate = quick_pit_calibrate();
 914		local_irq_restore(flags);
 915	}
 916	return fast_calibrate;
 917}
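
/*
 * Editor's note: the fallback order above is deliberate. The CPUID
 * (cpu_khz_from_cpuid) and MSR (cpu_khz_from_msr) based calibrations
 * need no working timer hardware, so they are safe this early in boot;
 * the quick PIT calibration is only attempted when neither reports a
 * frequency.
 */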
 918
 919
 920/**
 921 * native_calibrate_cpu - calibrate the cpu
 922 */
 923static unsigned long native_calibrate_cpu(void)
 924{
 925	unsigned long tsc_freq = native_calibrate_cpu_early();
 926
 927	if (!tsc_freq)
 928		tsc_freq = pit_hpet_ptimer_calibrate_cpu();
 929
 930	return tsc_freq;
 931}
 932
 933void recalibrate_cpu_khz(void)
 934{
 935#ifndef CONFIG_SMP
 936	unsigned long cpu_khz_old = cpu_khz;
 937
 938	if (!boot_cpu_has(X86_FEATURE_TSC))
 939		return;
 940
 941	cpu_khz = x86_platform.calibrate_cpu();
 942	tsc_khz = x86_platform.calibrate_tsc();
 943	if (tsc_khz == 0)
 944		tsc_khz = cpu_khz;
 945	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
 946		cpu_khz = tsc_khz;
 947	cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
 948						    cpu_khz_old, cpu_khz);
 949#endif
 950}
 951EXPORT_SYMBOL_GPL(recalibrate_cpu_khz);
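
/*
 * Editor's sketch (hypothetical helper) of the cross-check above:
 * tsc_khz is trusted when the two calibrations differ by more than
 * 10%. E.g. cpu_khz = 2600000 and tsc_khz = 3000000 differ by 400000;
 * 400000 * 10 > 3000000, so cpu_khz is overwritten with tsc_khz.
 */
static inline unsigned int sanity_checked_cpu_khz(unsigned int cpu,
						  unsigned int tsc)
{
	if (!tsc)
		return cpu;	/* no TSC calibration, keep cpu_khz */
	return (abs((int)(cpu - tsc)) * 10 > (int)tsc) ? tsc : cpu;
}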
 952
 953
 954static unsigned long long cyc2ns_suspend;
 955
 956void tsc_save_sched_clock_state(void)
 957{
 958	if (!sched_clock_stable())
 959		return;
 960
 961	cyc2ns_suspend = sched_clock();
 962}
 963
 964/*
965 * Even on processors with invariant TSC, the TSC gets reset in some of the
966 * ACPI system sleep states. And on some systems the BIOS seems to reinit the
967 * TSC to an arbitrary value (still synchronized across CPUs) during resume
968 * from such sleep states. To cope with this, recompute the cyc2ns_offset for
969 * each CPU so that sched_clock() continues from the point where it left off
970 * during suspend.
 971 */
 972void tsc_restore_sched_clock_state(void)
 973{
 974	unsigned long long offset;
 975	unsigned long flags;
 976	int cpu;
 977
 978	if (!sched_clock_stable())
 979		return;
 980
 981	local_irq_save(flags);
 982
 983	/*
 984	 * We're coming out of suspend, there's no concurrency yet; don't
 985	 * bother being nice about the RCU stuff, just write to both
 986	 * data fields.
 987	 */
 988
 989	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
 990	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);
 991
 992	offset = cyc2ns_suspend - sched_clock();
 993
 994	for_each_possible_cpu(cpu) {
 995		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
 996		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
 997	}
 998
 999	local_irq_restore(flags);
1000}
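
/*
 * Editor's sketch (hypothetical helper) of the offset arithmetic above:
 * with the offsets cleared, sched_clock() returns "raw" nanoseconds. If
 * the clock read 100s at suspend time (cyc2ns_suspend) and the raw
 * clock reads 2s now, the new per-cpu offset is 98s, so later readings
 * continue seamlessly from 100s.
 */
static inline unsigned long long resume_clock_offset(unsigned long long saved_ns,
						     unsigned long long raw_ns)
{
	return saved_ns - raw_ns;	/* added to every later reading */
}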
1001
1002#ifdef CONFIG_CPU_FREQ
1003/*
1004 * Frequency scaling support. Adjust the TSC based timer when the CPU frequency
1005 * changes.
1006 *
1007 * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
1008 * as unstable and give up in those cases.
1009 *
1010 * Should fix up last_tsc too. Currently gettimeofday in the
1011 * first tick after the change will be slightly wrong.
1012 */
1013
1014static unsigned int  ref_freq;
1015static unsigned long loops_per_jiffy_ref;
1016static unsigned long tsc_khz_ref;
1017
1018static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
1019				void *data)
1020{
1021	struct cpufreq_freqs *freq = data;
1022
1023	if (num_online_cpus() > 1) {
1024		mark_tsc_unstable("cpufreq changes on SMP");
1025		return 0;
1026	}
1027
1028	if (!ref_freq) {
1029		ref_freq = freq->old;
1030		loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
1031		tsc_khz_ref = tsc_khz;
1032	}
1033
1034	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
1035	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
1036		boot_cpu_data.loops_per_jiffy =
1037			cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
1038
1039		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
1040		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
1041			mark_tsc_unstable("cpufreq changes");
1042
1043		set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
1044	}
1045
1046	return 0;
1047}
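
/*
 * Editor's sketch, assuming the proportional rescaling that
 * cpufreq_scale() performs here: tsc_khz is scaled by
 * new_freq / ref_freq. E.g. tsc_khz_ref = 2400000 calibrated at
 * ref_freq = 2400000 kHz rescales to 1200000 when the governor drops
 * the CPU to 1200000 kHz.
 */
static inline unsigned long rescale_tsc_khz(unsigned long ref_khz,
					    unsigned int ref_freq,
					    unsigned int new_freq)
{
	u64 v = (u64)ref_khz * new_freq;

	do_div(v, ref_freq);
	return (unsigned long)v;
}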
1048
1049static struct notifier_block time_cpufreq_notifier_block = {
1050	.notifier_call  = time_cpufreq_notifier
1051};
1052
1053static int __init cpufreq_register_tsc_scaling(void)
1054{
1055	if (!boot_cpu_has(X86_FEATURE_TSC))
1056		return 0;
1057	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1058		return 0;
1059	cpufreq_register_notifier(&time_cpufreq_notifier_block,
1060				CPUFREQ_TRANSITION_NOTIFIER);
1061	return 0;
1062}
1063
1064core_initcall(cpufreq_register_tsc_scaling);
1065
1066#endif /* CONFIG_CPU_FREQ */
1067
1068#define ART_CPUID_LEAF (0x15)
1069#define ART_MIN_DENOMINATOR (1)
1070
1071
1072/*
1073 * If ART is present, detect the numerator:denominator used to convert ART to TSC
1074 */
1075static void __init detect_art(void)
1076{
1077	unsigned int unused[2];
1078
1079	if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
1080		return;
1081
1082	/*
1083	 * Don't enable ART in a VM; non-stop TSC and TSC_ADJUST are required,
1084	 * and TSC counter resets must not occur asynchronously.
1085	 */
1086	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
1087	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
1088	    !boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
1089	    tsc_async_resets)
1090		return;
1091
1092	cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
1093	      &art_to_tsc_numerator, unused, unused+1);
1094
1095	if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
1096		return;
1097
1098	rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);
1099
1100	/* Make this sticky over multiple CPU init calls */
1101	setup_force_cpu_cap(X86_FEATURE_ART);
1102}
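
/*
 * Editor's note: CPUID leaf 0x15 reports the TSC/ART frequency ratio as
 * EAX = denominator and EBX = numerator, so the two counters relate as
 *   TSC = ART * (numerator / denominator) + offset
 * where the offset read from MSR_IA32_TSC_ADJUST above is consumed by
 * convert_art_to_tsc() below.
 */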
1103
1104
1105/* clocksource code */
1106
1107static void tsc_resume(struct clocksource *cs)
1108{
1109	tsc_verify_tsc_adjust(true);
1110}
1111
1112/*
1113 * We used to compare the TSC to the cycle_last value in the clocksource
1114 * structure to avoid a nasty time-warp. This can be observed in a
1115 * very small window right after one CPU updated cycle_last under
1116 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
1117 * is smaller than the cycle_last reference value due to a TSC which
1118 * is slightly behind. This delta is nowhere else observable, but in
1119 * that case it results in a forward time jump in the range of hours
1120 * due to the unsigned delta calculation of the time keeping core
1121 * code, which is necessary to support wrapping clocksources like pm
1122 * timer.
1123 *
1124 * This sanity check is now done in the core timekeeping code,
1125 * checking the result of read_tsc() - cycle_last for being negative.
1126 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
1127 */
1128static u64 read_tsc(struct clocksource *cs)
1129{
1130	return (u64)rdtsc_ordered();
1131}
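
/*
 * Editor's illustration of the time-warp described above: with
 * cycle_last = 1000 and a slightly-behind CPU reading a TSC of 998,
 * the unsigned delta 998 - 1000 wraps to 0xfffffffffffffffe, which the
 * timekeeping core would turn into hours of forward time. Because
 * .mask is CLOCKSOURCE_MASK(64), no bits are discarded and the core
 * can instead detect this case as (s64)(now - cycle_last) < 0.
 */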
1132
1133static void tsc_cs_mark_unstable(struct clocksource *cs)
1134{
1135	if (tsc_unstable)
1136		return;
1137
1138	tsc_unstable = 1;
1139	if (using_native_sched_clock())
1140		clear_sched_clock_stable();
1141	disable_sched_clock_irqtime();
1142	pr_info("Marking TSC unstable due to clocksource watchdog\n");
1143}
1144
1145static void tsc_cs_tick_stable(struct clocksource *cs)
1146{
1147	if (tsc_unstable)
1148		return;
1149
1150	if (using_native_sched_clock())
1151		sched_clock_tick_stable();
1152}
1153
1154static int tsc_cs_enable(struct clocksource *cs)
1155{
1156	vclocks_set_used(VDSO_CLOCKMODE_TSC);
1157	return 0;
1158}
1159
1160/*
1161 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
1162 */
1163static struct clocksource clocksource_tsc_early = {
1164	.name			= "tsc-early",
1165	.rating			= 299,
1166	.uncertainty_margin	= 32 * NSEC_PER_MSEC,
1167	.read			= read_tsc,
1168	.mask			= CLOCKSOURCE_MASK(64),
1169	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
1170				  CLOCK_SOURCE_MUST_VERIFY,
1171	.id			= CSID_X86_TSC_EARLY,
1172	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
1173	.enable			= tsc_cs_enable,
1174	.resume			= tsc_resume,
1175	.mark_unstable		= tsc_cs_mark_unstable,
1176	.tick_stable		= tsc_cs_tick_stable,
1177	.list			= LIST_HEAD_INIT(clocksource_tsc_early.list),
1178};
1179
1180/*
1181 * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
1182 * this one will immediately take over. We will only register if TSC has
1183 * been found good.
1184 */
1185static struct clocksource clocksource_tsc = {
1186	.name			= "tsc",
1187	.rating			= 300,
1188	.read			= read_tsc,
1189	.mask			= CLOCKSOURCE_MASK(64),
1190	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
1191				  CLOCK_SOURCE_VALID_FOR_HRES |
1192				  CLOCK_SOURCE_MUST_VERIFY |
1193				  CLOCK_SOURCE_VERIFY_PERCPU,
1194	.id			= CSID_X86_TSC,
1195	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
1196	.enable			= tsc_cs_enable,
1197	.resume			= tsc_resume,
1198	.mark_unstable		= tsc_cs_mark_unstable,
1199	.tick_stable		= tsc_cs_tick_stable,
1200	.list			= LIST_HEAD_INIT(clocksource_tsc.list),
1201};
1202
1203void mark_tsc_unstable(char *reason)
1204{
1205	if (tsc_unstable)
1206		return;
1207
1208	tsc_unstable = 1;
1209	if (using_native_sched_clock())
1210		clear_sched_clock_stable();
1211	disable_sched_clock_irqtime();
1212	pr_info("Marking TSC unstable due to %s\n", reason);
1213
1214	clocksource_mark_unstable(&clocksource_tsc_early);
1215	clocksource_mark_unstable(&clocksource_tsc);
1216}
1217
1218EXPORT_SYMBOL_GPL(mark_tsc_unstable);
1219
1220static void __init tsc_disable_clocksource_watchdog(void)
1221{
1222	clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1223	clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1224}
1225
1226bool tsc_clocksource_watchdog_disabled(void)
1227{
1228	return !(clocksource_tsc.flags & CLOCK_SOURCE_MUST_VERIFY) &&
1229	       tsc_as_watchdog && !no_tsc_watchdog;
1230}
1231
1232static void __init check_system_tsc_reliable(void)
1233{
1234#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
1235	if (is_geode_lx()) {
1236		/* RTSC counts during suspend */
1237#define RTSC_SUSP 0x100
1238		unsigned long res_low, res_high;
1239
1240		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
1241		/* Geode_LX - the OLPC CPU has a very reliable TSC */
1242		if (res_low & RTSC_SUSP)
1243			tsc_clocksource_reliable = 1;
1244	}
1245#endif
1246	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
1247		tsc_clocksource_reliable = 1;
1248
1249	/*
1250	 * Disable the clocksource watchdog when the system has:
1251	 *  - TSC running at constant frequency
1252	 *  - TSC which does not stop in C-States
1253	 *  - the TSC_ADJUST register, which allows detection of even minimal
1254	 *    modifications
1255	 *  - not more than four sockets. As the number of sockets cannot be
1256	 *    evaluated at the early boot stage where this has to be
1257	 *    invoked, check the number of online memory nodes as a
1258	 *    fallback solution, which is a reasonable estimate.
1259	 */
1260	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
1261	    boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
1262	    boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
1263	    nr_online_nodes <= 4)
1264		tsc_disable_clocksource_watchdog();
1265}
1266
1267/*
1268 * Make an educated guess whether the TSC is trustworthy and synchronized
1269 * across all CPUs.
1270 */
1271int unsynchronized_tsc(void)
1272{
1273	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
1274		return 1;
1275
1276#ifdef CONFIG_SMP
1277	if (apic_is_clustered_box())
1278		return 1;
1279#endif
1280
1281	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1282		return 0;
1283
1284	if (tsc_clocksource_reliable)
1285		return 0;
1286	/*
1287	 * Intel systems are normally all synchronized.
1288	 * Exceptions must mark TSC as unstable:
1289	 */
1290	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1291		/* assume multi socket systems are not synchronized: */
1292		if (num_possible_cpus() > 1)
1293			return 1;
1294	}
1295
1296	return 0;
1297}
1298
1299/*
1300 * Convert ART to TSC given numerator/denominator found in detect_art()
1301 */
1302struct system_counterval_t convert_art_to_tsc(u64 art)
1303{
1304	u64 tmp, res, rem;
1305
1306	rem = do_div(art, art_to_tsc_denominator);
1307
1308	res = art * art_to_tsc_numerator;
1309	tmp = rem * art_to_tsc_numerator;
1310
1311	do_div(tmp, art_to_tsc_denominator);
1312	res += tmp + art_to_tsc_offset;
1313
1314	return (struct system_counterval_t) {
1315		.cs_id	= have_art ? CSID_X86_TSC : CSID_GENERIC,
1316		.cycles	= res,
1317	};
1318}
1319EXPORT_SYMBOL(convert_art_to_tsc);
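
/*
 * Editor's sketch of why convert_art_to_tsc() splits the multiply:
 * art * numerator can overflow 64 bits for large ART values, so the
 * quotient and remainder of art / denominator are scaled separately:
 *   tsc = (art / den) * num + ((art % den) * num) / den + offset
 * A self-contained version under those assumptions (hypothetical
 * helper, not used anywhere):
 */
static inline u64 art_to_tsc_sketch(u64 art, u32 num, u32 den, u64 offset)
{
	u64 rem, res, tmp;

	rem = do_div(art, den);		/* art now holds the quotient */
	res = art * num;
	tmp = rem * num;
	do_div(tmp, den);
	return res + tmp + offset;
}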
1320
1321/**
1322 * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC.
1323 * @art_ns: ART (Always Running Timer) in unit of nanoseconds
1324 *
1325 * PTM requires all timestamps to be in units of nanoseconds. When user
1326 * software requests a cross-timestamp, this function converts the system timestamp
1327 * to TSC.
1328 *
1329 * This is valid when the CPU feature flag X86_FEATURE_TSC_KNOWN_FREQ is set,
1330 * indicating that tsc_khz is derived from CPUID[15H]. Drivers should check
1331 * that this flag is set before conversion to TSC is attempted.
1332 *
1333 * Return:
1334 * struct system_counterval_t - system counter value with the ID of the
1335 *	corresponding clocksource:
1336 *	cycles:		System counter value
1337 *	cs_id:		The clocksource ID for validating comparability
1338 */
1340struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns)
1341{
1342	u64 tmp, res, rem;
1343
1344	rem = do_div(art_ns, USEC_PER_SEC);
1345
1346	res = art_ns * tsc_khz;
1347	tmp = rem * tsc_khz;
1348
1349	do_div(tmp, USEC_PER_SEC);
1350	res += tmp;
1351
1352	return (struct system_counterval_t) {
1353		.cs_id	= have_art ? CSID_X86_TSC : CSID_GENERIC,
1354		.cycles	= res,
1355	};
1356}
1357EXPORT_SYMBOL(convert_art_ns_to_tsc);
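
/*
 * Worked example (editor's addition): cycles = art_ns * tsc_khz /
 * USEC_PER_SEC. For tsc_khz = 3000000 (a 3 GHz TSC) and art_ns = 10^9
 * (one second), do_div() leaves quotient 1000 and remainder 0, so
 * res = 1000 * 3000000 = 3 * 10^9 cycles -- exactly one second of TSC.
 * The quotient/remainder split mirrors convert_art_to_tsc() above and
 * avoids overflowing the 64-bit intermediate product.
 */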
1358
1359
1360static void tsc_refine_calibration_work(struct work_struct *work);
1361static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
1362/**
1363 * tsc_refine_calibration_work - Further refine tsc freq calibration
1364 * @work: ignored.
1365 *
1366 * This function uses delayed work over a period of one
1367 * second to further refine the TSC freq value. Since this is
1368 * timer based, instead of loop based, we don't block the boot
1369 * process while this longer calibration is done.
1370 *
1371 * If there are any calibration anomalies (too many SMIs, etc),
1372 * or the refined calibration is off by more than 1% from the fast
1373 * early calibration, we throw out the new calibration and use the
1374 * early calibration.
1375 */
1376static void tsc_refine_calibration_work(struct work_struct *work)
1377{
1378	static u64 tsc_start = ULLONG_MAX, ref_start;
1379	static int hpet;
1380	u64 tsc_stop, ref_stop, delta;
1381	unsigned long freq;
1382	int cpu;
1383
1384	/* Don't bother refining TSC on unstable systems */
1385	if (tsc_unstable)
1386		goto unreg;
1387
1388	/*
1389	 * Since the work is started early in boot, we may be
1390	 * delayed the first time we expire. So reschedule the
1391	 * work once we know timers are working.
1392	 */
1393	if (tsc_start == ULLONG_MAX) {
1394restart:
1395		/*
1396		 * Only set hpet once, to avoid mixing hardware
1397		 * if the hpet becomes enabled later.
1398		 */
1399		hpet = is_hpet_enabled();
1400		tsc_start = tsc_read_refs(&ref_start, hpet);
1401		schedule_delayed_work(&tsc_irqwork, HZ);
1402		return;
1403	}
1404
1405	tsc_stop = tsc_read_refs(&ref_stop, hpet);
1406
1407	/* hpet or pmtimer available? */
1408	if (ref_start == ref_stop)
1409		goto out;
1410
1411	/* Check whether the sampling was disturbed */
1412	if (tsc_stop == ULLONG_MAX)
1413		goto restart;
1414
1415	delta = tsc_stop - tsc_start;
1416	delta *= 1000000LL;
1417	if (hpet)
1418		freq = calc_hpet_ref(delta, ref_start, ref_stop);
1419	else
1420		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);
1421
1422	/* Will hit this only if tsc_force_recalibrate has been set */
1423	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
1424
1425		/* Warn if the deviation exceeds ~500 ppm (tsc_khz >> 11 is 1/2048) */
1426		if (abs(tsc_khz - freq) > (tsc_khz >> 11)) {
1427			pr_warn("Warning: TSC freq calibrated by CPUID/MSR differs from what is calibrated by HW timer, please check with vendor!!\n");
1428			pr_info("Previous calibrated TSC freq:\t %lu.%03lu MHz\n",
1429				(unsigned long)tsc_khz / 1000,
1430				(unsigned long)tsc_khz % 1000);
1431		}
1432
1433		pr_info("TSC freq recalibrated by [%s]:\t %lu.%03lu MHz\n",
1434			hpet ? "HPET" : "PM_TIMER",
1435			(unsigned long)freq / 1000,
1436			(unsigned long)freq % 1000);
1437
1438		return;
1439	}
1440
1441	/* Make sure we're within 1% */
1442	if (abs(tsc_khz - freq) > tsc_khz/100)
1443		goto out;
1444
1445	tsc_khz = freq;
1446	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
1447		(unsigned long)tsc_khz / 1000,
1448		(unsigned long)tsc_khz % 1000);
1449
1450	/* Inform the TSC deadline clockevent devices about the recalibration */
1451	lapic_update_tsc_freq();
1452
1453	/* Update the sched_clock() rate to match the clocksource one */
1454	for_each_possible_cpu(cpu)
1455		set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
1456
1457out:
1458	if (tsc_unstable)
1459		goto unreg;
1460
1461	if (boot_cpu_has(X86_FEATURE_ART))
1462		have_art = true;
1463	clocksource_register_khz(&clocksource_tsc, tsc_khz);
1464unreg:
1465	clocksource_unregister(&clocksource_tsc_early);
1466}
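
/*
 * Editor's note on the thresholds above: tsc_khz >> 11 divides by
 * 2048, i.e. roughly 500 ppm (488 ppm exactly); at tsc_khz = 3000000
 * the recalibration warning fires for a deviation above 1464 kHz. The
 * 1% acceptance test (tsc_khz / 100, i.e. 30000 kHz at 3 GHz) discards
 * the refined value when it deviates more than that from the early
 * calibration.
 */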
1467
1468
1469static int __init init_tsc_clocksource(void)
1470{
1471	if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
1472		return 0;
1473
1474	if (tsc_unstable) {
1475		clocksource_unregister(&clocksource_tsc_early);
1476		return 0;
1477	}
1478
1479	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
1480		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
1481
1482	/*
1483	 * When TSC frequency is known (retrieved via MSR or CPUID), we skip
1484	 * the refined calibration and directly register it as a clocksource.
1485	 */
1486	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
1487		if (boot_cpu_has(X86_FEATURE_ART))
1488			have_art = true;
1489		clocksource_register_khz(&clocksource_tsc, tsc_khz);
1490		clocksource_unregister(&clocksource_tsc_early);
1491
1492		if (!tsc_force_recalibrate)
1493			return 0;
1494	}
1495
1496	schedule_delayed_work(&tsc_irqwork, 0);
1497	return 0;
1498}
1499/*
1500 * We use device_initcall here to ensure we run after the hpet
1501 * is fully initialized, which may occur at fs_initcall time.
1502 */
1503device_initcall(init_tsc_clocksource);
1504
1505static bool __init determine_cpu_tsc_frequencies(bool early)
1506{
1507	/* Make sure that cpu and tsc are not already calibrated */
1508	WARN_ON(cpu_khz || tsc_khz);
1509
1510	if (early) {
1511		cpu_khz = x86_platform.calibrate_cpu();
1512		if (tsc_early_khz)
1513			tsc_khz = tsc_early_khz;
1514		else
1515			tsc_khz = x86_platform.calibrate_tsc();
1516	} else {
1517		/* We should not be here with non-native cpu calibration */
1518		WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
1519		cpu_khz = pit_hpet_ptimer_calibrate_cpu();
1520	}
1521
1522	/*
1523	 * Trust a non-zero tsc_khz as authoritative,
1524	 * and use it to sanity-check cpu_khz,
1525	 * which will be off if the system timer is off.
1526	 */
1527	if (tsc_khz == 0)
1528		tsc_khz = cpu_khz;
1529	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
1530		cpu_khz = tsc_khz;
1531
1532	if (tsc_khz == 0)
1533		return false;
1534
1535	pr_info("Detected %lu.%03lu MHz processor\n",
1536		(unsigned long)cpu_khz / KHZ,
1537		(unsigned long)cpu_khz % KHZ);
1538
1539	if (cpu_khz != tsc_khz) {
1540		pr_info("Detected %lu.%03lu MHz TSC\n",
1541			(unsigned long)tsc_khz / KHZ,
1542			(unsigned long)tsc_khz % KHZ);
1543	}
1544	return true;
1545}
1546
1547static unsigned long __init get_loops_per_jiffy(void)
1548{
1549	u64 lpj = (u64)tsc_khz * KHZ;
1550
1551	do_div(lpj, HZ);
1552	return lpj;
1553}
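
/*
 * Worked example (editor's addition): lpj = tsc_khz * KHZ / HZ. With
 * tsc_khz = 3000000 and HZ = 250 this yields 3 * 10^9 / 250 =
 * 12000000 TSC cycles per jiffy, which tsc_enable_sched_clock() and
 * tsc_init() below feed to the delay-loop code via loops_per_jiffy
 * and lpj_fine.
 */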
1554
1555static void __init tsc_enable_sched_clock(void)
1556{
1557	loops_per_jiffy = get_loops_per_jiffy();
1558	use_tsc_delay();
1559
1560	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
1561	tsc_store_and_check_tsc_adjust(true);
1562	cyc2ns_init_boot_cpu();
1563	static_branch_enable(&__use_tsc);
1564}
1565
1566void __init tsc_early_init(void)
1567{
1568	if (!boot_cpu_has(X86_FEATURE_TSC))
1569		return;
1570	/* Don't change UV TSC multi-chassis synchronization */
1571	if (is_early_uv_system())
1572		return;
1573	if (!determine_cpu_tsc_frequencies(true))
1574		return;
1575	tsc_enable_sched_clock();
1576}
1577
1578void __init tsc_init(void)
1579{
1580	if (!cpu_feature_enabled(X86_FEATURE_TSC)) {
1581		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1582		return;
1583	}
1584
1585	/*
1586	 * native_calibrate_cpu_early can only calibrate using methods that are
1587	 * available early in boot.
1588	 */
1589	if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
1590		x86_platform.calibrate_cpu = native_calibrate_cpu;
1591
1592	if (!tsc_khz) {
1593		/* We failed to determine frequencies earlier, try again */
1594		if (!determine_cpu_tsc_frequencies(false)) {
1595			mark_tsc_unstable("could not calculate TSC khz");
1596			setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1597			return;
1598		}
1599		tsc_enable_sched_clock();
1600	}
1601
1602	cyc2ns_init_secondary_cpus();
1603
1604	if (!no_sched_irq_time)
1605		enable_sched_clock_irqtime();
1606
1607	lpj_fine = get_loops_per_jiffy();
1608
1609	check_system_tsc_reliable();
1610
1611	if (unsynchronized_tsc()) {
1612		mark_tsc_unstable("TSCs unsynchronized");
1613		return;
1614	}
1615
1616	if (tsc_clocksource_reliable || no_tsc_watchdog)
1617		tsc_disable_clocksource_watchdog();
1618
1619	clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
1620	detect_art();
1621}
1622
1623#ifdef CONFIG_SMP
1624/*
1625 * Check whether existing calibration data can be reused.
1626 */
1627unsigned long calibrate_delay_is_known(void)
1628{
1629	int sibling, cpu = smp_processor_id();
1630	int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
1631	const struct cpumask *mask = topology_core_cpumask(cpu);
1632
1633	/*
1634	 * If TSC has constant frequency and TSC is synchronized across
1635	 * sockets then reuse CPU0 calibration.
1636	 */
1637	if (constant_tsc && !tsc_unstable)
1638		return cpu_data(0).loops_per_jiffy;
1639
1640	/*
1641	 * If TSC has constant frequency and TSC is not synchronized across
1642	 * sockets and this is not the first CPU in the socket, then reuse
1643	 * the calibration value of an already online CPU on that socket.
1644	 *
1645	 * This assumes that CONSTANT_TSC is consistent for all CPUs in a
1646	 * socket.
1647	 */
1648	if (!constant_tsc || !mask)
1649		return 0;
1650
1651	sibling = cpumask_any_but(mask, cpu);
1652	if (sibling < nr_cpu_ids)
1653		return cpu_data(sibling).loops_per_jiffy;
1654	return 0;
1655}
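
/*
 * Usage note (editor's addition): with a constant, stable TSC every
 * secondary CPU skips the delay-loop benchmark entirely and inherits
 * CPU0's loops_per_jiffy; without cross-socket synchronization, only
 * the first CPU brought up on each socket pays for a full calibration,
 * and its siblings reuse that result.
 */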
1656#endif