1/*
2 * sched_clock for unstable cpu clocks
3 *
4 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
5 *
6 * Updates and enhancements:
7 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
8 *
9 * Based on code by:
10 * Ingo Molnar <mingo@redhat.com>
11 * Guillaume Chazarain <guichaz@gmail.com>
12 *
13 *
14 * What:
15 *
16 * cpu_clock(i) provides a fast (execution time) high resolution
17 * clock with bounded drift between CPUs. The value of cpu_clock(i)
18 * is monotonic for constant i. The timestamp returned is in nanoseconds.
19 *
20 * ######################### BIG FAT WARNING ##########################
21 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
22 * # go backwards !! #
23 * ####################################################################
24 *
25 * There is no strict promise about the base, although it tends to start
26 * at 0 on boot (but people really shouldn't rely on that).
27 *
28 * cpu_clock(i) -- can be used from any context, including NMI.
29 * local_clock() -- is cpu_clock() on the current cpu.
30 *
31 * sched_clock_cpu(i) -- the underlying per-CPU clock the two interfaces above are built on.
32 *
33 * How:
34 *
35 * When !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, the implementation uses
36 * sched_clock() directly; in that case sched_clock() is assumed to
37 * provide these properties (mostly it means the architecture provides
38 * a globally synchronized highres time source).
39 *
40 * Otherwise it tries to create a semi stable clock from a mixture of other
41 * clocks, including:
42 *
43 * - GTOD (clock monotonic)
44 * - sched_clock()
45 * - explicit idle events
46 *
47 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
48 * deltas are filtered to provide monotonicity and to keep the clock within an
49 * expected window.
50 *
51 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
52 * that is otherwise invisible (TSC gets stopped).
53 *
54 */
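/*
 * Illustrative usage (sketch): measuring a short interval on one CPU is
 * safe, because the value is monotonic per CPU:
 *
 *	u64 t0 = local_clock();
 *	...do some work...
 *	u64 t1 = local_clock();
 *
 * t1 - t0 is then a valid duration in nanoseconds.  Comparing values taken
 * on different CPUs is not: per the warning above, such a difference can
 * be negative.
 */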
55#include <linux/spinlock.h>
56#include <linux/hardirq.h>
57#include <linux/export.h>
58#include <linux/percpu.h>
59#include <linux/ktime.h>
60#include <linux/sched.h>
61#include <linux/static_key.h>
62#include <linux/workqueue.h>
63#include <linux/compiler.h>
64
65/*
66 * Scheduler clock - returns current time in nanosec units.
67 * This is the default implementation.
68 * Architectures and sub-architectures can override this.
69 */
70unsigned long long __weak sched_clock(void)
71{
72 return (unsigned long long)(jiffies - INITIAL_JIFFIES)
73 * (NSEC_PER_SEC / HZ);
74}
75EXPORT_SYMBOL_GPL(sched_clock);
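/*
 * For illustration: with HZ == 1000, NSEC_PER_SEC / HZ == 1000000, so this
 * fallback only advances in 1 ms steps -- jiffy granularity, not true
 * nanosecond resolution.  Architectures wanting better resolution override
 * sched_clock() with a highres time source.
 */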
76
77__read_mostly int sched_clock_running;
78
79#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
80static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
81static int __sched_clock_stable_early;
82
83int sched_clock_stable(void)
84{
85 return static_key_false(&__sched_clock_stable);
86}
87
88static void __set_sched_clock_stable(void)
89{
90 if (!sched_clock_stable())
91 static_key_slow_inc(&__sched_clock_stable);
92}
93
94void set_sched_clock_stable(void)
95{
96 __sched_clock_stable_early = 1;
97
98 smp_mb(); /* matches sched_clock_init() */
99
100 if (!sched_clock_running)
101 return;
102
103 __set_sched_clock_stable();
104}
105
106static void __clear_sched_clock_stable(struct work_struct *work)
107{
108 /* XXX worry about clock continuity */
109 if (sched_clock_stable())
110 static_key_slow_dec(&__sched_clock_stable);
111}
112
113static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
114
115void clear_sched_clock_stable(void)
116{
117 __sched_clock_stable_early = 0;
118
119 smp_mb(); /* matches sched_clock_init() */
120
121 if (!sched_clock_running)
122 return;
123
124 schedule_work(&sched_clock_work);
125}
126
127struct sched_clock_data {
128 u64 tick_raw;
129 u64 tick_gtod;
130 u64 clock;
131};
132
133static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
134
135static inline struct sched_clock_data *this_scd(void)
136{
137 return &__get_cpu_var(sched_clock_data);
138}
139
140static inline struct sched_clock_data *cpu_sdc(int cpu)
141{
142 return &per_cpu(sched_clock_data, cpu);
143}
144
145void sched_clock_init(void)
146{
147 u64 ktime_now = ktime_to_ns(ktime_get());
148 int cpu;
149
150 for_each_possible_cpu(cpu) {
151 struct sched_clock_data *scd = cpu_sdc(cpu);
152
153 scd->tick_raw = 0;
154 scd->tick_gtod = ktime_now;
155 scd->clock = ktime_now;
156 }
157
158 sched_clock_running = 1;
159
160 /*
161 * Ensure that it is impossible to not do a static_key update.
162 *
163 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
164 * and do the update, or we must see their __sched_clock_stable_early
165 * and do the update, or both.
166 */
167 smp_mb(); /* matches {set,clear}_sched_clock_stable() */
168
169 if (__sched_clock_stable_early)
170 __set_sched_clock_stable();
171 else
172 __clear_sched_clock_stable(NULL);
173}
174
175/*
176 * min, max except they take wrapping into account
177 */
178
179static inline u64 wrap_min(u64 x, u64 y)
180{
181 return (s64)(x - y) < 0 ? x : y;
182}
183
184static inline u64 wrap_max(u64 x, u64 y)
185{
186 return (s64)(x - y) > 0 ? x : y;
187}
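/*
 * Worked example: wrap_min()/wrap_max() compare via a signed difference so
 * they stay correct across u64 wrap-around.  With y == U64_MAX - 5 and
 * x == 10 (i.e. x is 16 ticks "after" y, past the wrap),
 * (s64)(x - y) == 16 > 0, so wrap_max() picks x and wrap_min() picks y,
 * whereas a plain unsigned compare would get both wrong.
 */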
188
189/*
190 * update the percpu scd from the raw @now value
191 *
192 * - filter out backward motion
193 * - use the GTOD tick value to create a window to filter crazy TSC values
194 */
195static u64 sched_clock_local(struct sched_clock_data *scd)
196{
197 u64 now, clock, old_clock, min_clock, max_clock;
198 s64 delta;
199
200again:
201 now = sched_clock();
202 delta = now - scd->tick_raw;
203 if (unlikely(delta < 0))
204 delta = 0;
205
206 old_clock = scd->clock;
207
208 /*
209 * scd->clock = clamp(scd->tick_gtod + delta,
210 * max(scd->tick_gtod, scd->clock),
211 * scd->tick_gtod + TICK_NSEC);
212 */
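	/*
	 * Worked example (assuming HZ == 1000, so TICK_NSEC == 1000000):
	 * with tick_gtod == 1000000, old_clock == 1200000 and a runaway
	 * delta of 2500000, clock starts at 3500000, min_clock == 1200000
	 * and max_clock == 2000000, so the result is clamped to 2000000:
	 * it never goes backwards and, in this case, never runs more than
	 * one tick ahead of tick_gtod.
	 */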
213
214 clock = scd->tick_gtod + delta;
215 min_clock = wrap_max(scd->tick_gtod, old_clock);
216 max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);
217
218 clock = wrap_max(clock, min_clock);
219 clock = wrap_min(clock, max_clock);
220
221 if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
222 goto again;
223
224 return clock;
225}
226
227static u64 sched_clock_remote(struct sched_clock_data *scd)
228{
229 struct sched_clock_data *my_scd = this_scd();
230 u64 this_clock, remote_clock;
231 u64 *ptr, old_val, val;
232
233#if BITS_PER_LONG != 64
234again:
235 /*
236 * Careful here: The local and the remote clock values need to
237 * be read out atomically as we need to compare the values and
238 * then update either the local or the remote side. So the
239 * cmpxchg64 below only protects one readout.
240 *
241 * We must reread via sched_clock_local() in the retry case on
242 * 32-bit as an NMI could use sched_clock_local() via the
243 * tracer and hit between the readout of
244 * the low 32-bit and the high 32-bit portion.
245 */
246 this_clock = sched_clock_local(my_scd);
247 /*
248 * We must enforce atomic readout on 32-bit, otherwise the
249 * update on the remote cpu can hit in between the readout of
250 * the low 32-bit and the high 32-bit portion.
251 */
252 remote_clock = cmpxchg64(&scd->clock, 0, 0);
253#else
254 /*
255 * On 64-bit the read of [my]scd->clock is atomic versus the
256 * update, so we can avoid the above 32-bit dance.
257 */
258 sched_clock_local(my_scd);
259again:
260 this_clock = my_scd->clock;
261 remote_clock = scd->clock;
262#endif
263
264 /*
265 * Use the opportunity that we have both locks
266 * taken to couple the two clocks: we take the
267 * larger time as the latest time for both
268 * runqueues. (this creates monotonic movement)
269 */
270 if (likely((s64)(remote_clock - this_clock) < 0)) {
271 ptr = &scd->clock;
272 old_val = remote_clock;
273 val = this_clock;
274 } else {
275 /*
276 * Should be rare, but possible:
277 */
278 ptr = &my_scd->clock;
279 old_val = this_clock;
280 val = remote_clock;
281 }
282
283 if (cmpxchg64(ptr, old_val, val) != old_val)
284 goto again;
285
286 return val;
287}
288
289/*
290 * Similar to cpu_clock(), but requires local IRQs to be disabled.
291 *
292 * See cpu_clock().
293 */
294u64 sched_clock_cpu(int cpu)
295{
296 struct sched_clock_data *scd;
297 u64 clock;
298
299 if (sched_clock_stable())
300 return sched_clock();
301
302 if (unlikely(!sched_clock_running))
303 return 0ull;
304
305 preempt_disable_notrace();
306 scd = cpu_sdc(cpu);
307
308 if (cpu != smp_processor_id())
309 clock = sched_clock_remote(scd);
310 else
311 clock = sched_clock_local(scd);
312 preempt_enable_notrace();
313
314 return clock;
315}
316
317void sched_clock_tick(void)
318{
319 struct sched_clock_data *scd;
320 u64 now, now_gtod;
321
322 if (sched_clock_stable())
323 return;
324
325 if (unlikely(!sched_clock_running))
326 return;
327
328 WARN_ON_ONCE(!irqs_disabled());
329
330 scd = this_scd();
331 now_gtod = ktime_to_ns(ktime_get());
332 now = sched_clock();
333
334 scd->tick_raw = now;
335 scd->tick_gtod = now_gtod;
336 sched_clock_local(scd);
337}
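/*
 * Note: the per-tick refresh of tick_raw/tick_gtod above is what re-anchors
 * the clamp window in sched_clock_local(); between two ticks the filtered
 * clock can advance at most about one tick past the last GTOD stamp, which
 * is what bounds its drift versus GTOD.
 */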
338
339/*
340 * We are going deep-idle (irqs are disabled):
341 */
342void sched_clock_idle_sleep_event(void)
343{
344 sched_clock_cpu(smp_processor_id());
345}
346EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
347
348/*
349 * We just idled delta nanoseconds (called with irqs disabled):
350 */
351void sched_clock_idle_wakeup_event(u64 delta_ns)
352{
353 if (timekeeping_suspended)
354 return;
355
356 sched_clock_tick();
357 touch_softlockup_watchdog();
358}
359EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
360
361/*
362 * As outlined at the top, provides a fast, high resolution, nanosecond
363 * time source that is monotonic per cpu argument and has bounded drift
364 * between cpus.
365 *
366 * ######################### BIG FAT WARNING ##########################
367 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
368 * # go backwards !! #
369 * ####################################################################
370 */
371u64 cpu_clock(int cpu)
372{
373 if (!sched_clock_stable())
374 return sched_clock_cpu(cpu);
375
376 return sched_clock();
377}
378
379/*
380 * Similar to cpu_clock() for the current cpu. Time will only be observed
381 * to be monotonic if care is taken to only compare timestamps taken on the
382 * same CPU.
383 *
384 * See cpu_clock().
385 */
386u64 local_clock(void)
387{
388 if (!sched_clock_stable())
389 return sched_clock_cpu(raw_smp_processor_id());
390
391 return sched_clock();
392}
393
394#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
395
396void sched_clock_init(void)
397{
398 sched_clock_running = 1;
399}
400
401u64 sched_clock_cpu(int cpu)
402{
403 if (unlikely(!sched_clock_running))
404 return 0;
405
406 return sched_clock();
407}
408
409u64 cpu_clock(int cpu)
410{
411 return sched_clock();
412}
413
414u64 local_clock(void)
415{
416 return sched_clock();
417}
418
419#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
420
421EXPORT_SYMBOL_GPL(cpu_clock);
422EXPORT_SYMBOL_GPL(local_clock);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * sched_clock() for unstable CPU clocks
4 *
5 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
6 *
7 * Updates and enhancements:
8 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
9 *
10 * Based on code by:
11 * Ingo Molnar <mingo@redhat.com>
12 * Guillaume Chazarain <guichaz@gmail.com>
13 *
14 *
15 * What this file implements:
16 *
17 * cpu_clock(i) provides a fast (execution time) high resolution
18 * clock with bounded drift between CPUs. The value of cpu_clock(i)
19 * is monotonic for constant i. The timestamp returned is in nanoseconds.
20 *
21 * ######################### BIG FAT WARNING ##########################
22 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
23 * # go backwards !! #
24 * ####################################################################
25 *
26 * There is no strict promise about the base, although it tends to start
27 * at 0 on boot (but people really shouldn't rely on that).
28 *
29 * cpu_clock(i) -- can be used from any context, including NMI.
30 * local_clock() -- is cpu_clock() on the current CPU.
31 *
32 * sched_clock_cpu(i) -- the underlying per-CPU clock the two interfaces above are built on.
33 *
34 * How it is implemented:
35 *
36 * When !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, the implementation uses
37 * sched_clock() directly; in that case sched_clock() is assumed to
38 * provide these properties (mostly it means the architecture provides
39 * a globally synchronized highres time source).
40 *
41 * Otherwise it tries to create a semi stable clock from a mixture of other
42 * clocks, including:
43 *
44 * - GTOD (clock monotonic)
45 * - sched_clock()
46 * - explicit idle events
47 *
48 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
49 * deltas are filtered to provide monotonicity and to keep the clock within an
50 * expected window.
51 *
52 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
53 * that is otherwise invisible (TSC gets stopped).
54 *
55 */
56#include "sched.h"
57#include <linux/sched_clock.h>
58
59/*
60 * Scheduler clock - returns current time in nanosec units.
61 * This is the default implementation.
62 * Architectures and sub-architectures can override this.
63 */
64unsigned long long __weak sched_clock(void)
65{
66 return (unsigned long long)(jiffies - INITIAL_JIFFIES)
67 * (NSEC_PER_SEC / HZ);
68}
69EXPORT_SYMBOL_GPL(sched_clock);
70
71static DEFINE_STATIC_KEY_FALSE(sched_clock_running);
72
73#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
74/*
75 * We must start with !__sched_clock_stable because the unstable -> stable
76 * transition is accurate, while the stable -> unstable transition is not.
77 *
78 * Similarly we start with __sched_clock_stable_early, thereby assuming we
79 * will become stable, such that there's only a single 1 -> 0 transition.
80 */
81static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
82static int __sched_clock_stable_early = 1;
83
84/*
85 * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
86 */
87__read_mostly u64 __sched_clock_offset;
88static __read_mostly u64 __gtod_offset;
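/*
 * Note: rearranging the identity above gives the two assignments used
 * later in this file:
 *
 *	__sched_clock_offset = (scd->tick_gtod + __gtod_offset) - scd->tick_raw;
 *	__gtod_offset        = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod;
 *
 * The first is applied in __set_sched_clock_stable() and the second in
 * __sched_clock_gtod_offset(), each making its side of the equation hold at
 * the moment the respective offset is (re)computed.
 */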
89
90struct sched_clock_data {
91 u64 tick_raw;
92 u64 tick_gtod;
93 u64 clock;
94};
95
96static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
97
98static inline struct sched_clock_data *this_scd(void)
99{
100 return this_cpu_ptr(&sched_clock_data);
101}
102
103static inline struct sched_clock_data *cpu_sdc(int cpu)
104{
105 return &per_cpu(sched_clock_data, cpu);
106}
107
108int sched_clock_stable(void)
109{
110 return static_branch_likely(&__sched_clock_stable);
111}
112
113static void __scd_stamp(struct sched_clock_data *scd)
114{
115 scd->tick_gtod = ktime_get_ns();
116 scd->tick_raw = sched_clock();
117}
118
119static void __set_sched_clock_stable(void)
120{
121 struct sched_clock_data *scd;
122
123 /*
124 * Since we're still unstable and the tick is already running, we have
125 * to disable IRQs in order to get a consistent scd->tick* reading.
126 */
127 local_irq_disable();
128 scd = this_scd();
129 /*
130 * Attempt to make the (initial) unstable->stable transition continuous.
131 */
132 __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
133 local_irq_enable();
134
135 printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
136 scd->tick_gtod, __gtod_offset,
137 scd->tick_raw, __sched_clock_offset);
138
139 static_branch_enable(&__sched_clock_stable);
140 tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
141}
142
143/*
144 * If we ever get here, we're screwed, because we found out -- typically after
145 * the fact -- that TSC wasn't good. This means all our clocksources (including
146 * ktime) could have reported wrong values.
147 *
148 * What we do here is an attempt to fix up and continue sort of where we left
149 * off in a coherent manner.
150 *
151 * The only way to fully avoid random clock jumps is to boot with:
152 * "tsc=unstable".
153 */
154static void __sched_clock_work(struct work_struct *work)
155{
156 struct sched_clock_data *scd;
157 int cpu;
158
159 /* take a current timestamp and set 'now' */
160 preempt_disable();
161 scd = this_scd();
162 __scd_stamp(scd);
163 scd->clock = scd->tick_gtod + __gtod_offset;
164 preempt_enable();
165
166 /* clone to all CPUs */
167 for_each_possible_cpu(cpu)
168 per_cpu(sched_clock_data, cpu) = *scd;
169
170 printk(KERN_WARNING "TSC found unstable after boot, most likely due to broken BIOS. Use 'tsc=unstable'.\n");
171 printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
172 scd->tick_gtod, __gtod_offset,
173 scd->tick_raw, __sched_clock_offset);
174
175 static_branch_disable(&__sched_clock_stable);
176}
177
178static DECLARE_WORK(sched_clock_work, __sched_clock_work);
179
180static void __clear_sched_clock_stable(void)
181{
182 if (!sched_clock_stable())
183 return;
184
185 tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
186 schedule_work(&sched_clock_work);
187}
188
189void clear_sched_clock_stable(void)
190{
191 __sched_clock_stable_early = 0;
192
193 smp_mb(); /* matches sched_clock_init_late() */
194
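	/*
	 * Note: sched_clock_running is incremented once in sched_clock_init()
	 * and once more in sched_clock_init_late(), so a count of 2 means
	 * sched_clock_init_late() has already run.  Before that point,
	 * setting __sched_clock_stable_early = 0 above is enough, because
	 * sched_clock_init_late() will observe it and never enable the key.
	 */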
195 if (static_key_count(&sched_clock_running.key) == 2)
196 __clear_sched_clock_stable();
197}
198
199static void __sched_clock_gtod_offset(void)
200{
201 struct sched_clock_data *scd = this_scd();
202
203 __scd_stamp(scd);
204 __gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod;
205}
206
207void __init sched_clock_init(void)
208{
209 /*
210 * Set __gtod_offset such that once we mark sched_clock_running,
211 * sched_clock_tick() continues where sched_clock() left off.
212 *
213 * Even if TSC is buggered, we're still UP at this point so it
214 * can't really be out of sync.
215 */
216 local_irq_disable();
217 __sched_clock_gtod_offset();
218 local_irq_enable();
219
220 static_branch_inc(&sched_clock_running);
221}
222/*
223 * We run this as late_initcall() such that it runs after all built-in drivers,
224 * notably: acpi_processor and intel_idle, which can mark the TSC as unstable.
225 */
226static int __init sched_clock_init_late(void)
227{
228 static_branch_inc(&sched_clock_running);
229 /*
230 * Ensure that it is impossible to not do a static_key update.
231 *
232 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
233 * and do the update, or we must see their __sched_clock_stable_early
234 * and do the update, or both.
235 */
236 smp_mb(); /* matches {set,clear}_sched_clock_stable() */
237
238 if (__sched_clock_stable_early)
239 __set_sched_clock_stable();
240
241 return 0;
242}
243late_initcall(sched_clock_init_late);
244
245/*
246 * min, max except they take wrapping into account
247 */
248
249static inline u64 wrap_min(u64 x, u64 y)
250{
251 return (s64)(x - y) < 0 ? x : y;
252}
253
254static inline u64 wrap_max(u64 x, u64 y)
255{
256 return (s64)(x - y) > 0 ? x : y;
257}
258
259/*
260 * update the percpu scd from the raw @now value
261 *
262 * - filter out backward motion
263 * - use the GTOD tick value to create a window to filter crazy TSC values
264 */
265static u64 sched_clock_local(struct sched_clock_data *scd)
266{
267 u64 now, clock, old_clock, min_clock, max_clock, gtod;
268 s64 delta;
269
270again:
271 now = sched_clock();
272 delta = now - scd->tick_raw;
273 if (unlikely(delta < 0))
274 delta = 0;
275
276 old_clock = scd->clock;
277
278 /*
279 * scd->clock = clamp(scd->tick_gtod + delta,
280 * max(scd->tick_gtod, scd->clock),
281 * scd->tick_gtod + TICK_NSEC);
282 */
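	/*
	 * Note: the window below is anchored at scd->tick_gtod + __gtod_offset
	 * rather than at scd->tick_gtod alone, which keeps the filtered clock
	 * on the same scale as sched_clock() + __sched_clock_offset (see the
	 * identity near the top of this file).
	 */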
283
284 gtod = scd->tick_gtod + __gtod_offset;
285 clock = gtod + delta;
286 min_clock = wrap_max(gtod, old_clock);
287 max_clock = wrap_max(old_clock, gtod + TICK_NSEC);
288
289 clock = wrap_max(clock, min_clock);
290 clock = wrap_min(clock, max_clock);
291
292 if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
293 goto again;
294
295 return clock;
296}
297
298static u64 sched_clock_remote(struct sched_clock_data *scd)
299{
300 struct sched_clock_data *my_scd = this_scd();
301 u64 this_clock, remote_clock;
302 u64 *ptr, old_val, val;
303
304#if BITS_PER_LONG != 64
305again:
306 /*
307 * Careful here: The local and the remote clock values need to
308 * be read out atomically as we need to compare the values and
309 * then update either the local or the remote side. So the
310 * cmpxchg64 below only protects one readout.
311 *
312 * We must reread via sched_clock_local() in the retry case on
313 * 32-bit kernels as an NMI could use sched_clock_local() via the
314 * tracer and hit between the readout of
315 * the low 32-bit and the high 32-bit portion.
316 */
317 this_clock = sched_clock_local(my_scd);
318 /*
319 * We must enforce atomic readout on 32-bit, otherwise the
320 * update on the remote CPU can hit in between the readout of
321 * the low 32-bit and the high 32-bit portion.
322 */
323 remote_clock = cmpxchg64(&scd->clock, 0, 0);
324#else
325 /*
326 * On 64-bit kernels the read of [my]scd->clock is atomic versus the
327 * update, so we can avoid the above 32-bit dance.
328 */
329 sched_clock_local(my_scd);
330again:
331 this_clock = my_scd->clock;
332 remote_clock = scd->clock;
333#endif
334
335 /*
336 * Use the opportunity that we have both locks
337 * taken to couple the two clocks: we take the
338 * larger time as the latest time for both
339 * runqueues. (this creates monotonic movement)
340 */
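	/*
	 * Concrete instance: if this_clock == 1000100 and
	 * remote_clock == 1000000, the likely branch below writes 1000100
	 * into the remote scd, so a subsequent reader of that CPU's clock
	 * can never observe a value older than what we are about to return.
	 */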
341 if (likely((s64)(remote_clock - this_clock) < 0)) {
342 ptr = &scd->clock;
343 old_val = remote_clock;
344 val = this_clock;
345 } else {
346 /*
347 * Should be rare, but possible:
348 */
349 ptr = &my_scd->clock;
350 old_val = this_clock;
351 val = remote_clock;
352 }
353
354 if (cmpxchg64(ptr, old_val, val) != old_val)
355 goto again;
356
357 return val;
358}
359
360/*
361 * Similar to cpu_clock(), but requires local IRQs to be disabled.
362 *
363 * See cpu_clock().
364 */
365u64 sched_clock_cpu(int cpu)
366{
367 struct sched_clock_data *scd;
368 u64 clock;
369
370 if (sched_clock_stable())
371 return sched_clock() + __sched_clock_offset;
372
373 if (!static_branch_likely(&sched_clock_running))
374 return sched_clock();
375
376 preempt_disable_notrace();
377 scd = cpu_sdc(cpu);
378
379 if (cpu != smp_processor_id())
380 clock = sched_clock_remote(scd);
381 else
382 clock = sched_clock_local(scd);
383 preempt_enable_notrace();
384
385 return clock;
386}
387EXPORT_SYMBOL_GPL(sched_clock_cpu);
388
389void sched_clock_tick(void)
390{
391 struct sched_clock_data *scd;
392
393 if (sched_clock_stable())
394 return;
395
396 if (!static_branch_likely(&sched_clock_running))
397 return;
398
399 lockdep_assert_irqs_disabled();
400
401 scd = this_scd();
402 __scd_stamp(scd);
403 sched_clock_local(scd);
404}
405
406void sched_clock_tick_stable(void)
407{
408 if (!sched_clock_stable())
409 return;
410
411 /*
412 * Called under watchdog_lock.
413 *
414 * The watchdog just found this TSC to (still) be stable, so now is a
415 * good moment to update our __gtod_offset, because once we find the
416 * TSC to be unstable, any computation will be computing crap.
417 */
418 local_irq_disable();
419 __sched_clock_gtod_offset();
420 local_irq_enable();
421}
422
423/*
424 * We are going deep-idle (irqs are disabled):
425 */
426void sched_clock_idle_sleep_event(void)
427{
428 sched_clock_cpu(smp_processor_id());
429}
430EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
431
432/*
433 * We just idled; resync with ktime.
434 */
435void sched_clock_idle_wakeup_event(void)
436{
437 unsigned long flags;
438
439 if (sched_clock_stable())
440 return;
441
442 if (unlikely(timekeeping_suspended))
443 return;
444
445 local_irq_save(flags);
446 sched_clock_tick();
447 local_irq_restore(flags);
448}
449EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
450
451#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
452
453void __init sched_clock_init(void)
454{
455 static_branch_inc(&sched_clock_running);
456 local_irq_disable();
457 generic_sched_clock_init();
458 local_irq_enable();
459}
460
461u64 sched_clock_cpu(int cpu)
462{
463 if (!static_branch_likely(&sched_clock_running))
464 return 0;
465
466 return sched_clock();
467}
468
469#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
470
471/*
472 * Running clock - returns the time that has elapsed while a guest has been
473 * running.
474 * On a guest this value should be local_clock minus the time the guest was
475 * suspended by the hypervisor (for any reason).
476 * On bare metal this function should return the same as local_clock.
477 * Architectures and sub-architectures can override this.
478 */
479u64 __weak running_clock(void)
480{
481 return local_clock();
482}
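
/*
 * Illustrative override (sketch; accumulated_guest_suspend_ns() is a
 * placeholder, not a real interface): a hypervisor-aware platform
 * overriding the weak running_clock() above would typically return
 * local_clock() minus the time the guest was not running, e.g.:
 *
 *	u64 running_clock(void)
 *	{
 *		return local_clock() - accumulated_guest_suspend_ns();
 *	}
 *
 * where accumulated_guest_suspend_ns() stands in for whatever mechanism
 * the platform uses to learn how long the hypervisor kept the guest
 * suspended.
 */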