v4.6
/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -   local: CPU-local trace clock
 *  -  medium: scalable global clock with some jitter
 *  -  global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);
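
Because trace_clock_local() is exported GPL-only, a module can sample it directly. A minimal sketch, not part of trace_clock.c (time_critical_section is an invented name): the two samples are only comparable when taken on the same CPU, hence the preemption-disabled region.

#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/trace_clock.h>

static void time_critical_section(void)
{
	u64 t0, t1;

	preempt_disable();		/* keep both samples on one CPU */
	t0 = trace_clock_local();
	/* ... work being measured ... */
	t1 = trace_clock_local();
	preempt_enable();

	/* sched_clock() counts nanoseconds */
	pr_info("section took %llu ns\n", t1 - t0);
}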

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);
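
local_clock() is the current-CPU flavor of cpu_clock(), which is why the comment describes its cross-CPU jitter bound. A hypothetical way to observe that bound from a module (sample_remote and compare_cpus are invented names; smp_call_function_single() runs the callback on the chosen CPU and waits for it):

#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/trace_clock.h>

static void sample_remote(void *info)
{
	*(u64 *)info = trace_clock();	/* executes on the target CPU */
}

static void compare_cpus(int other_cpu)
{
	u64 here, there = 0;

	smp_call_function_single(other_cpu, sample_remote, &there, 1);
	here = trace_clock();

	/* the two samples should agree to within roughly one jiffy */
	pr_info("local=%llu remote=%llu\n", here, there);
}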

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the effect if
 * we are affected is that we will have an obviously bogus
 * timestamp on a trace event - i.e. not life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);
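
The "not completely safe on 32-bit" caveat refers to torn reads: jiffies_64 is 64 bits wide, and a plain load of it is not atomic on a 32-bit machine. The locked alternative being traded away is get_jiffies_64() from kernel/time/jiffies.c, which takes the jiffies seqlock; a sketch of the safe-but-slower variant:

#include <linux/jiffies.h>

static u64 safe_uptime_ticks(void)
{
	/* seqlock-protected 64-bit read: safe on 32-bit, slower */
	return jiffies_64_to_clock_t(get_jiffies_64() - INITIAL_JIFFIES);
}

An occasional bogus timestamp was judged an acceptable price for skipping the seqlock in a tracing hot path.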

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = sched_clock_cpu(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);
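
The heart of the function is the clamp taken under the arch spinlock: if the per-CPU clock read went backwards relative to the last globally published timestamp, it is bumped to prev_time + 1, so callers always observe a strictly increasing sequence. The same technique in ordinary userspace C with pthreads, purely as an illustration (monotonic_read and clk_lock are invented names):

#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t clk_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t prev_time;

uint64_t monotonic_read(uint64_t raw)
{
	uint64_t now = raw;	/* e.g. a per-CPU counter sample */

	pthread_mutex_lock(&clk_lock);
	/* signed view of the difference, as in the kernel code */
	if ((int64_t)(now - prev_time) < 0)
		now = prev_time + 1;
	prev_time = now;
	pthread_mutex_unlock(&clk_lock);

	return now;
}

Note the signed cast: (s64)(now - prev_time) < 0 stays correct across counter wraparound, where a plain now < prev_time comparison would not.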

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
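
atomic64_add_return() increments and returns the new value, so concurrent callers each receive a unique, totally ordered "timestamp" of 1, 2, 3, ... with no relation to wall time. A userspace analogue in C11 (next_event_id is an invented name):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t seq;

uint64_t next_event_id(void)
{
	/* fetch-add returns the old value; the +1 mirrors atomic64_add_return() */
	return atomic_fetch_add(&seq, 1) + 1;
}
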
v4.17
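
The v4.17 file differs from the v4.6 one in just two hunks, visible by comparing the listings:

 #include <linux/sched.h>
+#include <linux/sched/clock.h>
 #include <linux/ktime.h>

-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 ...
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);

The new include follows the v4.11 scheduler-header split, which moved the sched_clock() declarations into <linux/sched/clock.h>. The raw_ IRQ-flag accessors in trace_clock_global() skip the irq-flags tracing hooks, which avoids recursing into the tracer when this clock is read from the tracing machinery itself.
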
/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -   local: CPU-local trace clock
 *  -  medium: scalable global clock with some jitter
 *  -  global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the effect if
 * we are affected is that we will have an obviously bogus
 * timestamp on a trace event - i.e. not life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = sched_clock_cpu(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	raw_local_irq_restore(flags);

	return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
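
For context on how these clocks are consumed (not part of this file): kernel/trace/trace.c of the same era registers them in a table that backs the tracefs trace_clock control file, so writing e.g. "global" to /sys/kernel/debug/tracing/trace_clock switches the ring-buffer timestamps to trace_clock_global(). An abridged sketch of that table, shown for orientation; exact entries vary by version:

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* are the timestamps in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	0 },
	{ trace_clock,		"perf",		1 },
	/* ... plus the ktime_get_*_fast_ns based "mono" clocks ... */
};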