v4.6
 
  1/*
  2 * sched_clock for unstable cpu clocks
  3 *
  4 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
  5 *
  6 *  Updates and enhancements:
  7 *    Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
  8 *
  9 * Based on code by:
 10 *   Ingo Molnar <mingo@redhat.com>
 11 *   Guillaume Chazarain <guichaz@gmail.com>
 12 *
 13 *
 14 * What:
 15 *
 16 * cpu_clock(i) provides a fast (execution time) high resolution
 17 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 18 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 19 *
 20 * ######################### BIG FAT WARNING ##########################
 21 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 22 * # go backwards !!                                                  #
 23 * ####################################################################
 24 *
 25 * There is no strict promise about the base, although it tends to start
 26 * at 0 on boot (but people really shouldn't rely on that).
 27 *
 28 * cpu_clock(i)       -- can be used from any context, including NMI.
 29 * local_clock()      -- is cpu_clock() on the current cpu.
 30 *
 31 * sched_clock_cpu(i)
 32 *
 33 * How:
 34 *
 35 * The implementation either uses sched_clock() when
 36 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the
 37 * sched_clock() is assumed to provide these properties (mostly it means
 38 * the architecture provides a globally synchronized highres time source).
 39 *
 40 * Otherwise it tries to create a semi stable clock from a mixture of other
 41 * clocks, including:
 42 *
 43 *  - GTOD (clock monotonic)
 44 *  - sched_clock()
 45 *  - explicit idle events
 46 *
 47 * We use GTOD as the base and use sched_clock() deltas to improve resolution. The
 48 * deltas are filtered to provide monotonicity and to keep the clock within an
 49 * expected window.
 50 *
 51 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 52 * that is otherwise invisible (TSC gets stopped).
 53 *
 54 */
 55#include <linux/spinlock.h>
 56#include <linux/hardirq.h>
 57#include <linux/export.h>
 58#include <linux/percpu.h>
 59#include <linux/ktime.h>
 60#include <linux/sched.h>
 61#include <linux/static_key.h>
 62#include <linux/workqueue.h>
 63#include <linux/compiler.h>
 64#include <linux/tick.h>
 65
 66/*
 67 * Scheduler clock - returns current time in nanosec units.
 68 * This is the default implementation.
 69 * Architectures and sub-architectures can override this.
 70 */
 71unsigned long long __weak sched_clock(void)
 72{
 73	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
 74					* (NSEC_PER_SEC / HZ);
 75}
 76EXPORT_SYMBOL_GPL(sched_clock);
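/*
 * Editor's illustrative sketch, not part of the original file: one way an
 * architecture might override the weak, jiffies-based sched_clock() above,
 * assuming a hypothetical read_cycle_counter() that returns a free-running
 * counter plus boot-time cyc2ns_mult/cyc2ns_shift scaling factors.
 */
#if 0	/* illustrative only */
static u32 cyc2ns_mult;			/* assumed set up from the counter frequency */
static u32 cyc2ns_shift;

unsigned long long sched_clock(void)
{
	u64 cycles = read_cycle_counter();	/* hypothetical counter read */

	/* the usual fixed-point conversion: ns = cycles * mult >> shift */
	return (cycles * cyc2ns_mult) >> cyc2ns_shift;
}
#endif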
 77
 78__read_mostly int sched_clock_running;
 79
 80#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 81static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
 82static int __sched_clock_stable_early;
 83
 84int sched_clock_stable(void)
 85{
 86	return static_key_false(&__sched_clock_stable);
 87}
 88
 89static void __set_sched_clock_stable(void)
 90{
 91	if (!sched_clock_stable())
 92		static_key_slow_inc(&__sched_clock_stable);
 93
 94	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
 95}
 96
 97void set_sched_clock_stable(void)
 98{
 99	__sched_clock_stable_early = 1;
100
101	smp_mb(); /* matches sched_clock_init() */
102
103	if (!sched_clock_running)
104		return;
105
106	__set_sched_clock_stable();
107}
108
109static void __clear_sched_clock_stable(struct work_struct *work)
110{
111	/* XXX worry about clock continuity */
112	if (sched_clock_stable())
113		static_key_slow_dec(&__sched_clock_stable);
114
115	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
116}
117
118static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
119
120void clear_sched_clock_stable(void)
121{
122	__sched_clock_stable_early = 0;
123
124	smp_mb(); /* matches sched_clock_init() */
125
126	if (!sched_clock_running)
127		return;
128
129	schedule_work(&sched_clock_work);
130}
131
132struct sched_clock_data {
133	u64			tick_raw;
134	u64			tick_gtod;
135	u64			clock;
136};
137
138static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
139
140static inline struct sched_clock_data *this_scd(void)
141{
142	return this_cpu_ptr(&sched_clock_data);
143}
144
145static inline struct sched_clock_data *cpu_sdc(int cpu)
146{
147	return &per_cpu(sched_clock_data, cpu);
148}
149
150void sched_clock_init(void)
151{
152	u64 ktime_now = ktime_to_ns(ktime_get());
153	int cpu;
154
155	for_each_possible_cpu(cpu) {
156		struct sched_clock_data *scd = cpu_sdc(cpu);
157
158		scd->tick_raw = 0;
159		scd->tick_gtod = ktime_now;
160		scd->clock = ktime_now;
161	}
162
163	sched_clock_running = 1;
164
165	/*
166	 * Ensure that it is impossible to not do a static_key update.
167	 *
168	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
169	 * and do the update, or we must see their __sched_clock_stable_early
170	 * and do the update, or both.
171	 */
172	smp_mb(); /* matches {set,clear}_sched_clock_stable() */
173
174	if (__sched_clock_stable_early)
175		__set_sched_clock_stable();
176	else
177		__clear_sched_clock_stable(NULL);
178}
179
180/*
181 * min, max except they take wrapping into account
182 */
183
184static inline u64 wrap_min(u64 x, u64 y)
185{
186	return (s64)(x - y) < 0 ? x : y;
187}
188
189static inline u64 wrap_max(u64 x, u64 y)
190{
191	return (s64)(x - y) > 0 ? x : y;
192}
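/*
 * Editor's worked example, not part of the original file: the signed
 * subtraction makes these helpers safe across a u64 wrap.  With one value
 * just below the wrap point and one just past it:
 */
#if 0	/* illustrative only */
static void example_wrap_compare(void)
{
	u64 y = -5ULL;			/* 5 ns before the counter wraps */
	u64 x = 10;			/* 10 ns after the wrap */

	WARN_ON(wrap_max(x, y) != x);	/* (s64)(x - y) == 15 > 0: x is "later" */
	WARN_ON(wrap_min(x, y) != y);	/* y is the "earlier" of the two */
}
#endif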
193
194/*
195 * update the percpu scd from the raw @now value
196 *
197 *  - filter out backward motion
198 *  - use the GTOD tick value to create a window to filter crazy TSC values
199 */
200static u64 sched_clock_local(struct sched_clock_data *scd)
201{
202	u64 now, clock, old_clock, min_clock, max_clock;
203	s64 delta;
204
205again:
206	now = sched_clock();
207	delta = now - scd->tick_raw;
208	if (unlikely(delta < 0))
209		delta = 0;
210
211	old_clock = scd->clock;
212
213	/*
214	 * scd->clock = clamp(scd->tick_gtod + delta,
215	 *		      max(scd->tick_gtod, scd->clock),
216	 *		      scd->tick_gtod + TICK_NSEC);
217	 */
218
219	clock = scd->tick_gtod + delta;
220	min_clock = wrap_max(scd->tick_gtod, old_clock);
221	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);
222
223	clock = wrap_max(clock, min_clock);
224	clock = wrap_min(clock, max_clock);
225
226	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
227		goto again;
228
229	return clock;
230}
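/*
 * Editor's worked example, not part of the original file: suppose at the last
 * tick scd->tick_gtod was 1,000,000 ns and scd->clock was 1,000,100 ns, and a
 * misbehaving TSC now makes sched_clock() report a delta of 50 ms.  Then
 * clock = 1,000,000 + 50,000,000 exceeds max_clock = 1,000,000 + TICK_NSEC,
 * so the value stored by the cmpxchg is clamped to one tick past the GTOD
 * stamp rather than jumping forward by 50 ms; a backward delta is likewise
 * raised to min_clock, which keeps the per-CPU clock monotonic.
 */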
231
232static u64 sched_clock_remote(struct sched_clock_data *scd)
233{
234	struct sched_clock_data *my_scd = this_scd();
235	u64 this_clock, remote_clock;
236	u64 *ptr, old_val, val;
237
238#if BITS_PER_LONG != 64
239again:
240	/*
241	 * Careful here: The local and the remote clock values need to
242	 * be read out atomically as we need to compare the values and
243	 * then update either the local or the remote side. So the
244	 * cmpxchg64 below only protects one readout.
245	 *
246	 * We must reread via sched_clock_local() in the retry case on
247	 * 32-bit kernels as an NMI could use sched_clock_local() via the
248	 * tracer and hit between the readout of
249	 * the low 32-bit and the high 32-bit portion.
250	 */
251	this_clock = sched_clock_local(my_scd);
252	/*
253	 * We must enforce atomic readout on 32-bit, otherwise the
254	 * update on the remote CPU can hit in between the readout of
255	 * the low 32-bit and the high 32-bit portion.
256	 */
257	remote_clock = cmpxchg64(&scd->clock, 0, 0);
258#else
259	/*
260	 * On 64-bit the read of [my]scd->clock is atomic versus the
261	 * update, so we can avoid the above 32-bit dance.
262	 */
263	sched_clock_local(my_scd);
264again:
265	this_clock = my_scd->clock;
266	remote_clock = scd->clock;
267#endif
268
269	/*
270	 * Use the opportunity that we have both locks
271	 * taken to couple the two clocks: we take the
272	 * larger time as the latest time for both
273	 * runqueues. (this creates monotonic movement)
274	 */
275	if (likely((s64)(remote_clock - this_clock) < 0)) {
276		ptr = &scd->clock;
277		old_val = remote_clock;
278		val = this_clock;
279	} else {
280		/*
281		 * Should be rare, but possible:
282		 */
283		ptr = &my_scd->clock;
284		old_val = this_clock;
285		val = remote_clock;
286	}
287
288	if (cmpxchg64(ptr, old_val, val) != old_val)
289		goto again;
290
291	return val;
292}
293
294/*
295 * Similar to cpu_clock(), but requires local IRQs to be disabled.
296 *
297 * See cpu_clock().
298 */
299u64 sched_clock_cpu(int cpu)
300{
301	struct sched_clock_data *scd;
302	u64 clock;
303
304	if (sched_clock_stable())
305		return sched_clock();
306
307	if (unlikely(!sched_clock_running))
308		return 0ull;
309
310	preempt_disable_notrace();
311	scd = cpu_sdc(cpu);
312
313	if (cpu != smp_processor_id())
314		clock = sched_clock_remote(scd);
315	else
316		clock = sched_clock_local(scd);
317	preempt_enable_notrace();
318
319	return clock;
320}
321
322void sched_clock_tick(void)
323{
324	struct sched_clock_data *scd;
325	u64 now, now_gtod;
326
327	if (sched_clock_stable())
328		return;
329
330	if (unlikely(!sched_clock_running))
331		return;
332
333	WARN_ON_ONCE(!irqs_disabled());
334
335	scd = this_scd();
336	now_gtod = ktime_to_ns(ktime_get());
337	now = sched_clock();
338
339	scd->tick_raw = now;
340	scd->tick_gtod = now_gtod;
341	sched_clock_local(scd);
342}
343
344/*
345 * We are going deep-idle (irqs are disabled):
346 */
347void sched_clock_idle_sleep_event(void)
348{
349	sched_clock_cpu(smp_processor_id());
350}
351EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
352
353/*
354 * We just idled delta nanoseconds (called with irqs disabled):
355 */
356void sched_clock_idle_wakeup_event(u64 delta_ns)
357{
358	if (timekeeping_suspended)
359		return;
360
361	sched_clock_tick();
362	touch_softlockup_watchdog_sched();
363}
364EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
365
366/*
367 * As outlined at the top, provides a fast, high resolution, nanosecond
368 * time source that is monotonic per cpu argument and has bounded drift
369 * between cpus.
370 *
371 * ######################### BIG FAT WARNING ##########################
372 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
373 * # go backwards !!                                                  #
374 * ####################################################################
375 */
376u64 cpu_clock(int cpu)
377{
378	if (!sched_clock_stable())
379		return sched_clock_cpu(cpu);
380
381	return sched_clock();
382}
383
384/*
385 * Similar to cpu_clock() for the current cpu. Time will only be observed
386 * to be monotonic if care is taken to only compare timestamps taken on the
387 * same CPU.
388 *
389 * See cpu_clock().
390 */
391u64 local_clock(void)
392{
393	if (!sched_clock_stable())
394		return sched_clock_cpu(raw_smp_processor_id());
395
396	return sched_clock();
397}
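/*
 * Editor's illustrative usage sketch, not part of the original file, assuming
 * a hypothetical do_work() being timed.  Both reads must happen on the same
 * CPU for the difference to be meaningful (see the warning above).
 */
#if 0	/* illustrative only */
static void example_time_work(void)
{
	u64 t0, t1;

	t0 = local_clock();
	do_work();			/* hypothetical workload */
	t1 = local_clock();
	pr_info("work took %llu ns\n", t1 - t0);
}
#endif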
398
399#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
400
401void sched_clock_init(void)
402{
403	sched_clock_running = 1;
404}
405
406u64 sched_clock_cpu(int cpu)
407{
408	if (unlikely(!sched_clock_running))
409		return 0;
410
411	return sched_clock();
412}
413
414u64 cpu_clock(int cpu)
415{
416	return sched_clock();
417}
418
419u64 local_clock(void)
420{
421	return sched_clock();
422}
423
424#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
425
426EXPORT_SYMBOL_GPL(cpu_clock);
427EXPORT_SYMBOL_GPL(local_clock);
428
429/*
430 * Running clock - returns the time that has elapsed while a guest has been
431 * running.
432 * On a guest this value should be local_clock minus the time the guest was
433 * suspended by the hypervisor (for any reason).
434 * On bare metal this function should return the same as local_clock.
435 * Architectures and sub-architectures can override this.
436 */
437u64 __weak running_clock(void)
438{
439	return local_clock();
440}
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * sched_clock() for unstable CPU clocks
  4 *
  5 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
  6 *
  7 *  Updates and enhancements:
  8 *    Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
  9 *
 10 * Based on code by:
 11 *   Ingo Molnar <mingo@redhat.com>
 12 *   Guillaume Chazarain <guichaz@gmail.com>
 13 *
 14 *
 15 * What this file implements:
 16 *
 17 * cpu_clock(i) provides a fast (execution time) high resolution
 18 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 19 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 20 *
 21 * ######################### BIG FAT WARNING ##########################
 22 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 23 * # go backwards !!                                                  #
 24 * ####################################################################
 25 *
 26 * There is no strict promise about the base, although it tends to start
 27 * at 0 on boot (but people really shouldn't rely on that).
 28 *
 29 * cpu_clock(i)       -- can be used from any context, including NMI.
 30 * local_clock()      -- is cpu_clock() on the current CPU.
 31 *
 32 * sched_clock_cpu(i)
 33 *
 34 * How it is implemented:
 35 *
 36 * The implementation either uses sched_clock() when
 37 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the
 38 * sched_clock() is assumed to provide these properties (mostly it means
 39 * the architecture provides a globally synchronized highres time source).
 40 *
 41 * Otherwise it tries to create a semi stable clock from a mixture of other
 42 * clocks, including:
 43 *
 44 *  - GTOD (clock monotonic)
 45 *  - sched_clock()
 46 *  - explicit idle events
 47 *
 48 * We use GTOD as the base and use sched_clock() deltas to improve resolution. The
 49 * deltas are filtered to provide monotonicity and to keep the clock within an
 50 * expected window.
 51 *
 52 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 53 * that is otherwise invisible (TSC gets stopped).
 54 *
 55 */
 56#include "sched.h"
 57#include <linux/sched_clock.h>
 58
 59/*
 60 * Scheduler clock - returns current time in nanosec units.
 61 * This is the default implementation.
 62 * Architectures and sub-architectures can override this.
 63 */
 64unsigned long long __weak sched_clock(void)
 65{
 66	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
 67					* (NSEC_PER_SEC / HZ);
 68}
 69EXPORT_SYMBOL_GPL(sched_clock);
 70
 71static DEFINE_STATIC_KEY_FALSE(sched_clock_running);
 72
 73#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 74/*
 75 * We must start with !__sched_clock_stable because the unstable -> stable
 76 * transition is accurate, while the stable -> unstable transition is not.
 77 *
 78 * Similarly we start with __sched_clock_stable_early, thereby assuming we
 79 * will become stable, such that there's only a single 1 -> 0 transition.
 80 */
 81static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
 82static int __sched_clock_stable_early = 1;
 83
 84/*
 85 * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
 86 */
 87__read_mostly u64 __sched_clock_offset;
 88static __read_mostly u64 __gtod_offset;
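/*
 * Editor's note, not part of the original file: rearranging the invariant
 * above gives
 *
 *   __sched_clock_offset = (ktime_get_ns() + __gtod_offset) - sched_clock()
 *   __gtod_offset        = (sched_clock() + __sched_clock_offset) - ktime_get_ns()
 *
 * which is what __set_sched_clock_stable() and __sched_clock_gtod_offset()
 * below compute from the most recent scd->tick_gtod / scd->tick_raw stamps.
 */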
 89
 90struct sched_clock_data {
 91	u64			tick_raw;
 92	u64			tick_gtod;
 93	u64			clock;
 94};
 95
 96static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
 97
 98static inline struct sched_clock_data *this_scd(void)
 99{
100	return this_cpu_ptr(&sched_clock_data);
101}
102
103static inline struct sched_clock_data *cpu_sdc(int cpu)
104{
105	return &per_cpu(sched_clock_data, cpu);
106}
107
108int sched_clock_stable(void)
109{
110	return static_branch_likely(&__sched_clock_stable);
111}
112
113static void __scd_stamp(struct sched_clock_data *scd)
114{
115	scd->tick_gtod = ktime_get_ns();
116	scd->tick_raw = sched_clock();
117}
118
119static void __set_sched_clock_stable(void)
120{
121	struct sched_clock_data *scd;
122
123	/*
124	 * Since we're still unstable and the tick is already running, we have
125	 * to disable IRQs in order to get a consistent scd->tick* reading.
126	 */
127	local_irq_disable();
128	scd = this_scd();
129	/*
130	 * Attempt to make the (initial) unstable->stable transition continuous.
131	 */
132	__sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
133	local_irq_enable();
134
135	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
136			scd->tick_gtod, __gtod_offset,
137			scd->tick_raw,  __sched_clock_offset);
138
139	static_branch_enable(&__sched_clock_stable);
140	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
141}
142
143/*
144 * If we ever get here, we're screwed, because we found out -- typically after
145 * the fact -- that TSC wasn't good. This means all our clocksources (including
146 * ktime) could have reported wrong values.
147 *
148 * What we do here is an attempt to fix up and continue sort of where we left
149 * off in a coherent manner.
150 *
151 * The only way to fully avoid random clock jumps is to boot with:
152 * "tsc=unstable".
153 */
154static void __sched_clock_work(struct work_struct *work)
155{
156	struct sched_clock_data *scd;
157	int cpu;
158
159	/* take a current timestamp and set 'now' */
160	preempt_disable();
161	scd = this_scd();
162	__scd_stamp(scd);
163	scd->clock = scd->tick_gtod + __gtod_offset;
164	preempt_enable();
165
166	/* clone to all CPUs */
167	for_each_possible_cpu(cpu)
168		per_cpu(sched_clock_data, cpu) = *scd;
169
170	printk(KERN_WARNING "TSC found unstable after boot, most likely due to broken BIOS. Use 'tsc=unstable'.\n");
171	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
172			scd->tick_gtod, __gtod_offset,
173			scd->tick_raw,  __sched_clock_offset);
174
175	static_branch_disable(&__sched_clock_stable);
176}
177
178static DECLARE_WORK(sched_clock_work, __sched_clock_work);
179
180static void __clear_sched_clock_stable(void)
181{
182	if (!sched_clock_stable())
183		return;
184
185	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
186	schedule_work(&sched_clock_work);
187}
188
189void clear_sched_clock_stable(void)
190{
191	__sched_clock_stable_early = 0;
192
193	smp_mb(); /* matches sched_clock_init_late() */
194
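	/*
	 * Editor's note, not part of the original file: sched_clock_running is
	 * incremented once in sched_clock_init() and again in
	 * sched_clock_init_late(), so a key count of 2 means late init has
	 * already run and the static branch must be flipped here instead of
	 * being deferred to sched_clock_init_late().
	 */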
195	if (static_key_count(&sched_clock_running.key) == 2)
196		__clear_sched_clock_stable();
197}
198
199static void __sched_clock_gtod_offset(void)
200{
201	struct sched_clock_data *scd = this_scd();
202
203	__scd_stamp(scd);
204	__gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod;
205}
206
207void __init sched_clock_init(void)
208{
209	/*
210	 * Set __gtod_offset such that once we mark sched_clock_running,
211	 * sched_clock_tick() continues where sched_clock() left off.
212	 *
213	 * Even if TSC is buggered, we're still UP at this point so it
214	 * can't really be out of sync.
215	 */
216	local_irq_disable();
217	__sched_clock_gtod_offset();
218	local_irq_enable();
219
220	static_branch_inc(&sched_clock_running);
221}
222/*
223 * We run this as late_initcall() such that it runs after all built-in drivers,
224 * notably: acpi_processor and intel_idle, which can mark the TSC as unstable.
225 */
226static int __init sched_clock_init_late(void)
227{
228	static_branch_inc(&sched_clock_running);
229	/*
230	 * Ensure that it is impossible to not do a static_key update.
231	 *
232	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
233	 * and do the update, or we must see their __sched_clock_stable_early
234	 * and do the update, or both.
235	 */
236	smp_mb(); /* matches {set,clear}_sched_clock_stable() */
237
238	if (__sched_clock_stable_early)
239		__set_sched_clock_stable();
240
241	return 0;
242}
243late_initcall(sched_clock_init_late);
244
245/*
246 * min, max except they take wrapping into account
247 */
248
249static inline u64 wrap_min(u64 x, u64 y)
250{
251	return (s64)(x - y) < 0 ? x : y;
252}
253
254static inline u64 wrap_max(u64 x, u64 y)
255{
256	return (s64)(x - y) > 0 ? x : y;
257}
258
259/*
260 * update the percpu scd from the raw @now value
261 *
262 *  - filter out backward motion
263 *  - use the GTOD tick value to create a window to filter crazy TSC values
264 */
265static u64 sched_clock_local(struct sched_clock_data *scd)
266{
267	u64 now, clock, old_clock, min_clock, max_clock, gtod;
268	s64 delta;
269
270again:
271	now = sched_clock();
272	delta = now - scd->tick_raw;
273	if (unlikely(delta < 0))
274		delta = 0;
275
276	old_clock = scd->clock;
277
278	/*
279	 * scd->clock = clamp(scd->tick_gtod + delta,
280	 *		      max(scd->tick_gtod, scd->clock),
281	 *		      scd->tick_gtod + TICK_NSEC);
282	 */
283
284	gtod = scd->tick_gtod + __gtod_offset;
285	clock = gtod + delta;
286	min_clock = wrap_max(gtod, old_clock);
287	max_clock = wrap_max(old_clock, gtod + TICK_NSEC);
288
289	clock = wrap_max(clock, min_clock);
290	clock = wrap_min(clock, max_clock);
291
292	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
293		goto again;
294
295	return clock;
296}
297
298static u64 sched_clock_remote(struct sched_clock_data *scd)
299{
300	struct sched_clock_data *my_scd = this_scd();
301	u64 this_clock, remote_clock;
302	u64 *ptr, old_val, val;
303
304#if BITS_PER_LONG != 64
305again:
306	/*
307	 * Careful here: The local and the remote clock values need to
308	 * be read out atomically as we need to compare the values and
309	 * then update either the local or the remote side. So the
310	 * cmpxchg64 below only protects one readout.
311	 *
312	 * We must reread via sched_clock_local() in the retry case on
313	 * 32-bit kernels as an NMI could use sched_clock_local() via the
314	 * tracer and hit between the readout of
315	 * the low 32-bit and the high 32-bit portion.
316	 */
317	this_clock = sched_clock_local(my_scd);
318	/*
319	 * We must enforce atomic readout on 32-bit, otherwise the
320	 * update on the remote CPU can hit in between the readout of
321	 * the low 32-bit and the high 32-bit portion.
322	 */
323	remote_clock = cmpxchg64(&scd->clock, 0, 0);
324#else
325	/*
326	 * On 64-bit kernels the read of [my]scd->clock is atomic versus the
327	 * update, so we can avoid the above 32-bit dance.
328	 */
329	sched_clock_local(my_scd);
330again:
331	this_clock = my_scd->clock;
332	remote_clock = scd->clock;
333#endif
334
335	/*
336	 * Use the opportunity that we have both locks
337	 * taken to couple the two clocks: we take the
338	 * larger time as the latest time for both
339	 * runqueues. (this creates monotonic movement)
340	 */
341	if (likely((s64)(remote_clock - this_clock) < 0)) {
342		ptr = &scd->clock;
343		old_val = remote_clock;
344		val = this_clock;
345	} else {
346		/*
347		 * Should be rare, but possible:
348		 */
349		ptr = &my_scd->clock;
350		old_val = this_clock;
351		val = remote_clock;
352	}
353
354	if (cmpxchg64(ptr, old_val, val) != old_val)
355		goto again;
356
357	return val;
358}
359
360/*
361 * Similar to cpu_clock(), but requires local IRQs to be disabled.
362 *
363 * See cpu_clock().
364 */
365u64 sched_clock_cpu(int cpu)
366{
367	struct sched_clock_data *scd;
368	u64 clock;
369
370	if (sched_clock_stable())
371		return sched_clock() + __sched_clock_offset;
372
373	if (!static_branch_likely(&sched_clock_running))
374		return sched_clock();
375
376	preempt_disable_notrace();
377	scd = cpu_sdc(cpu);
378
379	if (cpu != smp_processor_id())
380		clock = sched_clock_remote(scd);
381	else
382		clock = sched_clock_local(scd);
383	preempt_enable_notrace();
384
385	return clock;
386}
387EXPORT_SYMBOL_GPL(sched_clock_cpu);
388
389void sched_clock_tick(void)
390{
391	struct sched_clock_data *scd;
392
393	if (sched_clock_stable())
394		return;
395
396	if (!static_branch_likely(&sched_clock_running))
397		return;
398
399	lockdep_assert_irqs_disabled();
400
401	scd = this_scd();
402	__scd_stamp(scd);
403	sched_clock_local(scd);
404}
405
406void sched_clock_tick_stable(void)
407{
408	if (!sched_clock_stable())
409		return;
410
411	/*
412	 * Called under watchdog_lock.
413	 *
414	 * The watchdog just found this TSC to (still) be stable, so now is a
415	 * good moment to update our __gtod_offset. Because once we find the
416	 * TSC to be unstable, any computation will be computing crap.
417	 */
418	local_irq_disable();
419	__sched_clock_gtod_offset();
420	local_irq_enable();
421}
422
423/*
424 * We are going deep-idle (irqs are disabled):
425 */
426void sched_clock_idle_sleep_event(void)
427{
428	sched_clock_cpu(smp_processor_id());
429}
430EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
431
432/*
433 * We just idled; resync with ktime.
434 */
435void sched_clock_idle_wakeup_event(void)
436{
437	unsigned long flags;
438
439	if (sched_clock_stable())
440		return;
441
442	if (unlikely(timekeeping_suspended))
443		return;
444
445	local_irq_save(flags);
446	sched_clock_tick();
447	local_irq_restore(flags);
448}
449EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
450
451#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
452
453void __init sched_clock_init(void)
454{
455	static_branch_inc(&sched_clock_running);
456	local_irq_disable();
457	generic_sched_clock_init();
458	local_irq_enable();
459}
460
461u64 sched_clock_cpu(int cpu)
462{
463	if (!static_branch_likely(&sched_clock_running))
464		return 0;
465
466	return sched_clock();
467}
468
469#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
470
471/*
472 * Running clock - returns the time that has elapsed while a guest has been
473 * running.
474 * On a guest this value should be local_clock minus the time the guest was
475 * suspended by the hypervisor (for any reason).
476 * On bare metal this function should return the same as local_clock.
477 * Architectures and sub-architectures can override this.
478 */
479u64 __weak running_clock(void)
480{
481	return local_clock();
482}