/*
 * tracing clocks
 *
 * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 * - local: CPU-local trace clock
 * - medium: scalable global clock with some jitter
 * - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
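
/*
 * Usage note (editorial addition, not in the original file): a tracer's
 * clock is selected at runtime through the tracefs trace_clock file;
 * names such as "local", "global" and "counter" map onto the functions
 * implemented below, e.g.:
 *
 *	echo global > /sys/kernel/tracing/trace_clock
 */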
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture-implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);

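/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * timing a section with trace_clock_local(). The delta is only
 * meaningful if both reads happen on the same CPU and the CPU does
 * not idle in between, per the limitations documented above.
 */
static u64 __maybe_unused time_section_ns(void (*fn)(void))
{
	u64 t0 = trace_clock_local();

	fn();
	return trace_clock_local() - t0;
}
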
/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on local_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}

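/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * since trace_clock() values taken on different CPUs can disagree by
 * up to ~1 jiffy, a consumer merging per-CPU buffers should treat
 * timestamp inversions within that bound as expected jitter.
 */
static bool __maybe_unused within_cross_cpu_jitter(u64 a, u64 b)
{
	u64 slop = (u64)jiffies_to_usecs(1) * 1000ULL;	/* ~1 jiffy in ns */

	return (a > b ? a - b : b - a) <= slop;
}
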
/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 */
u64 notrace trace_clock_jiffies(void)
{
	u64 jiffy = jiffies - INITIAL_JIFFIES;

	/* Return nsecs */
	return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
}

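/*
 * Worked example: with HZ=1000, one jiffy is 1000us, so the expression
 * above yields 1,000,000 ns per tick; with HZ=250 it yields
 * 4,000,000 ns. Resolution is therefore the tick period, which is why
 * this clock suits only coarse-grained tracing.
 */
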
/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD-derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = sched_clock_cpu(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * raw sched_clock_cpu() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
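	/*
	 * The signed cast makes this a wrap-safe "went backwards"
	 * test: if this CPU's clock is behind the last timestamp
	 * handed out, clamp it to just past prev_time.
	 */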
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}

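/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * outside of NMI context, two ordered trace_clock_global() reads can
 * never appear to go backwards, even across CPUs, because every
 * reader is serialized against prev_time above.
 */
static void __maybe_unused check_global_monotonic(void)
{
	u64 a = trace_clock_global();
	u64 b = trace_clock_global();

	WARN_ON_ONCE(b < a);
}
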
static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use this "counter" clock for cases where you do not care about
 * timings, but need strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
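
/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * the counter clock trades any notion of real time for a strict
 * total order; concurrent callers always observe distinct values.
 */
static void __maybe_unused stamp_ordered_pair(u64 *first, u64 *second)
{
	*first = trace_clock_counter();
	*second = trace_clock_counter();	/* strictly greater than *first */
}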