// SPDX-License-Identifier: GPL-2.0
/*
 * tracing clocks
 *
 * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  - local: CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

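/*
 * For reference (not part of the original file): with ftrace, the active
 * trace clock can usually be inspected and selected at run time through
 * the tracefs "trace_clock" file, along the lines of:
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter ...
 *	# echo global > /sys/kernel/tracing/trace_clock
 *
 * The exact set of clocks offered depends on the kernel configuration
 * and version.
 */
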
/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);

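/*
 * Illustrative sketch (hypothetical, not code that exists elsewhere in
 * the tree): since the symbol is exported GPL-only, a GPL module could
 * use it to cheaply timestamp a short CPU-local section, e.g.:
 *
 *	#include <linux/module.h>
 *	#include <linux/trace_clock.h>
 *
 *	static int __init clock_demo_init(void)
 *	{
 *		u64 t0 = trace_clock_local();
 *		u64 t1 = trace_clock_local();
 *
 *		pr_info("back-to-back delta: %llu ns\n",
 *			(unsigned long long)(t1 - t0));
 *		return 0;
 *	}
 *	module_init(clock_demo_init);
 *	MODULE_LICENSE("GPL");
 *
 * Because the clock is only CPU-local, such deltas are meaningful only
 * when both readings happen on the same CPU without an idle event in
 * between.
 */
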
/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on local_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the worst that can
 * happen is an obviously bogus timestamp on a trace event - i.e.
 * not life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);
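/*
 * Note: jiffies_64_to_clock_t() converts jiffies into USER_HZ units
 * (typically 100 per second), so this clock has a granularity of
 * roughly 10ms and is only useful for coarse ordering, not for
 * measuring short durations.
 */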
/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD-derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = sched_clock_cpu(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * sched_clock_cpu() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
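	/*
	 * The s64 cast below makes the comparison a wrap-safe ordering
	 * test: if @now is behind the last value handed out, the
	 * difference is negative even if the u64 counters have wrapped,
	 * and we clamp to prev_time + 1 so the global clock never goes
	 * backwards.
	 */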
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	raw_local_irq_restore(flags);

	return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
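
/*
 * Illustrative sketch (hypothetical, not code that exists elsewhere in
 * the tree): the counter clock fits cases where only the order of
 * events matters, e.g. tagging entries of some module-private event
 * structure:
 *
 *	#include <linux/trace_clock.h>
 *
 *	struct demo_event {
 *		u64 seq;
 *	};
 *
 *	static void demo_tag_event(struct demo_event *ev)
 *	{
 *		ev->seq = trace_clock_counter();  // strictly ordered, not a time
 *	}
 *
 * atomic64_add_return() is fully ordered, so two events tagged this
 * way can always be sequenced, at the cost of a globally contended
 * atomic.
 */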