/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
 *
 *  Updates and enhancements:
 *    Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * local_clock()      -- is cpu_clock() on the current cpu.
 * sched_clock_cpu(i) -- the underlying per-cpu clock both of the above use.
 *
 * How:
 *
 * When !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, the implementation simply uses
 * sched_clock(), which is then assumed to provide these properties by
 * itself (mostly meaning the architecture provides a globally synchronized
 * highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep the result within
 * an expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
#include <linux/compiler.h>
#include <linux/tick.h>
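
/*
 * Illustrative sketch: per the BIG FAT WARNING above, timestamps taken via
 * cpu_clock(i) and cpu_clock(j) for i != j may compare backwards by a
 * bounded amount.  A consumer that spans CPUs can clamp negative deltas to
 * zero, as below.  The helper name is hypothetical; cpu_clock() itself is
 * declared via <linux/sched.h>.
 */
static inline u64 __maybe_unused example_cross_cpu_delta_ns(u64 start, u64 end)
{
	s64 delta = (s64)(end - start);

	/* Bounded drift between CPUs can make this negative; clamp it. */
	return delta > 0 ? (u64)delta : 0;
}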

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __weak sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);
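
/*
 * Illustrative sketch (hypothetical helper): the jiffies-based fallback
 * above only advances once per tick, i.e. in steps of NSEC_PER_SEC / HZ
 * nanoseconds -- 4,000,000 ns (4 ms) with HZ == 250.  This is why
 * architectures override sched_clock() with a real high-resolution counter.
 */
static inline u64 __maybe_unused example_default_granularity_ns(void)
{
	/* One jiffy in nanoseconds; e.g. 4 ms when HZ == 250. */
	return NSEC_PER_SEC / HZ;
}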

__read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
static int __sched_clock_stable_early;

int sched_clock_stable(void)
{
	return static_key_false(&__sched_clock_stable);
}

static void __set_sched_clock_stable(void)
{
	if (!sched_clock_stable())
		static_key_slow_inc(&__sched_clock_stable);

	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

void set_sched_clock_stable(void)
{
	__sched_clock_stable_early = 1;

	smp_mb(); /* matches sched_clock_init() */

	if (!sched_clock_running)
		return;

	__set_sched_clock_stable();
}

static void __clear_sched_clock_stable(struct work_struct *work)
{
	/* XXX worry about clock continuity */
	if (sched_clock_stable())
		static_key_slow_dec(&__sched_clock_stable);

	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);

void clear_sched_clock_stable(void)
{
	__sched_clock_stable_early = 0;

	smp_mb(); /* matches sched_clock_init() */

	if (!sched_clock_running)
		return;

	schedule_work(&sched_clock_work);
}

struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return this_cpu_ptr(&sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;

	/*
	 * Ensure that it is impossible to not do a static_key update.
	 *
	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
	 * and do the update, or we must see their __sched_clock_stable_early
	 * and do the update, or both.
	 */
	smp_mb(); /* matches {set,clear}_sched_clock_stable() */

	if (__sched_clock_stable_early)
		__set_sched_clock_stable();
	else
		__clear_sched_clock_stable(NULL);
}
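
/*
 * Illustrative sketch of the barrier pairing above (a store-buffering
 * pattern).  The flag names and functions are hypothetical stand-ins for
 * {set,clear}_sched_clock_stable() and sched_clock_init().  With both
 * smp_mb()s in place it is impossible for *both* loads below to miss the
 * other side's store, so at least one path performs the static_key update.
 */
static int example_flag_early, example_flag_running;

static void __maybe_unused example_sb_setter(void)
{
	WRITE_ONCE(example_flag_early, 1);	/* ~ __sched_clock_stable_early = 1 */
	smp_mb();				/* pairs with example_sb_init()     */
	if (!READ_ONCE(example_flag_running))	/* ~ !sched_clock_running           */
		return;				/* init will see our flag instead   */
	/* ... do the static_key update ... */
}

static void __maybe_unused example_sb_init(void)
{
	WRITE_ONCE(example_flag_running, 1);	/* ~ sched_clock_running = 1        */
	smp_mb();				/* pairs with example_sb_setter()   */
	if (READ_ONCE(example_flag_early)) {
		/* ... do the static_key update ... */
	}
}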

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
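
/*
 * Illustrative sketch (hypothetical helper): wrap_min()/wrap_max() compare
 * positions on the u64 circle rather than magnitudes.  With x just below
 * the wrap point and y just past it, (s64)(x - y) is negative, so
 * wrap_max(x, y) returns y: the wrapped value counts as the later time.
 * ULLONG_MAX comes from <linux/kernel.h>.
 */
static inline u64 __maybe_unused example_wrap_max_across_wrap(void)
{
	u64 before_wrap = ULLONG_MAX - 10;	/* shortly before the counter wraps */
	u64 after_wrap = 5;			/* 16 ticks later, past the wrap    */

	/* Returns 5: the wrap-aware maximum is the chronologically later value. */
	return wrap_max(before_wrap, after_wrap);
}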

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}
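
/*
 * Illustrative sketch of the clamp above, factored out with hypothetical
 * values: assume tick_gtod = 1,000,000 ns, old_clock = 1,002,000 ns and
 * TICK_NSEC = 1,000,000 ns.  A crazy sched_clock() delta of 5,000,000 ns
 * would suggest clock = 6,000,000, but the window caps it at
 * tick_gtod + TICK_NSEC = 2,000,000; likewise a backward value is raised
 * to max(tick_gtod, old_clock).
 */
static inline u64 __maybe_unused example_clamp_window(u64 tick_gtod,
						      u64 old_clock, u64 delta)
{
	u64 clock = tick_gtod + delta;

	/* floor: never before the last tick's GTOD value or the old clock */
	clock = wrap_max(clock, wrap_max(tick_gtod, old_clock));
	/* ceiling: never more than one tick beyond the last GTOD value */
	clock = wrap_min(clock, wrap_max(old_clock, tick_gtod + TICK_NSEC));

	return clock;
}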

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
	/*
	 * Careful here: The local and the remote clock values need to
	 * be read out atomically as we need to compare the values and
	 * then update either the local or the remote side. So the
	 * cmpxchg64 below only protects one readout.
	 *
	 * We must reread via sched_clock_local() in the retry case on
	 * 32bit as an NMI could use sched_clock_local() via the
	 * tracer and hit between the readout of the low 32bit and the
	 * high 32bit portion.
	 */
	this_clock = sched_clock_local(my_scd);
	/*
	 * We must enforce atomic readout on 32bit, otherwise the
	 * update on the remote cpu can hit in between the readout of
	 * the low 32bit and the high 32bit portion.
	 */
	remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
	/*
	 * On 64bit the read of [my]scd->clock is atomic versus the
	 * update, so we can avoid the above 32bit dance.
	 */
	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;
#endif

	/*
	 * Use the opportunity that we have both locks
	 * taken to couple the two clocks: we take the
	 * larger time as the latest time for both
	 * runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}
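
/*
 * Illustrative sketch (hypothetical helper): on 32bit, the
 * cmpxchg64(&scd->clock, 0, 0) above serves purely as an atomic 64bit
 * *read*.  It compares the value with 0 and only stores 0 if it already
 * was 0, so the value never changes; either way the old value is returned
 * in one atomic step and can never be observed torn between its low and
 * high 32bit halves.
 */
static inline u64 __maybe_unused example_atomic_read64(u64 *val)
{
	/* Leaves *val untouched (0 is only written if *val was already 0). */
	return cmpxchg64(val, 0, 0);
}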

/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	if (sched_clock_stable())
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

	preempt_disable_notrace();
	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(sched_clock_cpu);
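
/*
 * Illustrative usage sketch (hypothetical function): timestamps from
 * sched_clock_cpu() are only monotonic for a constant cpu argument, so an
 * elapsed-time measurement should take both samples against the same
 * CPU's clock.  local_clock(), declared via <linux/sched.h>, does this
 * for the current CPU.
 */
static u64 __maybe_unused example_elapsed_ns(int cpu, void (*work)(void))
{
	u64 t0 = sched_clock_cpu(cpu);

	work();

	/* Same @cpu as above, so the delta cannot go backwards. */
	return sched_clock_cpu(cpu) - t0;
}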

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable())
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled for delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog_sched();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}
#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

/*
 * Running clock - returns the time that has elapsed while a guest has been
 * running.  On a guest this value should be local_clock() minus the time the
 * guest was suspended by the hypervisor (for any reason).  On bare metal this
 * function should return the same as local_clock().
 * Architectures and sub-architectures can override this.
 */
u64 __weak running_clock(void)
{
	return local_clock();
}
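
/*
 * Illustrative sketch of a guest-side override, guarded out since
 * my_hv_read_suspended_ns() is a hypothetical stand-in for whatever
 * suspend/steal time a particular hypervisor exposes; no real hypervisor
 * API is implied.
 */
#if 0	/* illustration only */
u64 running_clock(void)
{
	/* Elapsed run time: total time minus time spent suspended by the HV. */
	return local_clock() - my_hv_read_suspended_ns();
}
#endif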