// SPDX-License-Identifier: GPL-2.0
/*
 * Generic sched_clock() support, to extend low level hardware time
 * counters to full 64-bit ns values.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

#include "timekeeping.h"

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:                     Sequence counter for protecting updates. The lowest
 *                           bit is the index for @read_data.
 * @read_data:               Data required to read from sched_clock.
 * @wrap_kt:                 Duration for which clock can run before wrapping.
 * @rate:                    Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
        seqcount_latch_t        seq;
        struct clock_read_data  read_data[2];
        ktime_t                 wrap_kt;
        unsigned long           rate;

        u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
        /*
         * We don't need to use get_jiffies_64 on 32-bit arches here
         * because we register with BITS_PER_LONG
         */
        return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
        .read_data[0] = { .mult = NSEC_PER_SEC / HZ,
                          .read_sched_clock = jiffy_sched_clock_read, },
        .actual_read_sched_clock = jiffy_sched_clock_read,
};

static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
        return (cyc * mult) >> shift;
}
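
/*
 * Worked example (illustrative only): the mult/shift pair is produced by
 * clocks_calc_mult_shift() in sched_clock_register(), so the actual values
 * are implementation dependent. For a hypothetical 1 MHz counter, one tick
 * is 1000 ns, and one possible pair is shift = 20, mult = 1000 << 20:
 *
 *      cyc_to_ns(cyc, 1000 << 20, 20) == (cyc * (1000 << 20)) >> 20
 *                                     == cyc * 1000
 */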

notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
{
        *seq = raw_read_seqcount_latch(&cd.seq);
        return cd.read_data + (*seq & 1);
}

notrace int sched_clock_read_retry(unsigned int seq)
{
        return raw_read_seqcount_latch_retry(&cd.seq, seq);
}

unsigned long long noinstr sched_clock_noinstr(void)
{
        struct clock_read_data *rd;
        unsigned int seq;
        u64 cyc, res;

        do {
                seq = raw_read_seqcount_latch(&cd.seq);
                rd = cd.read_data + (seq & 1);

                cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
                      rd->sched_clock_mask;
                res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
        } while (raw_read_seqcount_latch_retry(&cd.seq, seq));

        return res;
}

unsigned long long notrace sched_clock(void)
{
        unsigned long long ns;
        preempt_disable_notrace();
        ns = sched_clock_noinstr();
        preempt_enable_notrace();
        return ns;
}

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
        /* update the backup (odd) copy with the new data */
        cd.read_data[1] = *rd;

        /* steer readers towards the odd copy */
        raw_write_seqcount_latch(&cd.seq);

        /* now it's safe for us to update the normal (even) copy */
        cd.read_data[0] = *rd;

        /* switch readers back to the even copy */
        raw_write_seqcount_latch(&cd.seq);
}
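
/*
 * Note on correctness: the writer only ever modifies the copy that readers
 * are currently steered away from. A reader (including one in NMI context)
 * that races with an update sees the sequence count change in
 * raw_read_seqcount_latch_retry() and simply retries, so it never returns a
 * value computed from a half-updated clock_read_data.
 */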

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
        u64 cyc;
        u64 ns;
        struct clock_read_data rd;

        rd = cd.read_data[0];

        cyc = cd.actual_read_sched_clock();
        ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

        rd.epoch_ns = ns;
        rd.epoch_cyc = cyc;

        update_clock_read_data(&rd);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
        update_sched_clock();
        hrtimer_forward_now(hrt, cd.wrap_kt);

        return HRTIMER_RESTART;
}
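
/*
 * The poll period (cd.wrap_kt) comes from clocks_calc_max_nsecs(), so the
 * epoch is refreshed before the (cyc - epoch_cyc) delta can exceed the
 * counter mask or overflow the 64-bit multiplication in cyc_to_ns(). As an
 * illustrative figure only: a hypothetical 32-bit counter at 24 MHz wraps
 * after roughly 2^32 / 24000000 ~= 179 seconds, so the timer fires well
 * within that window.
 */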

void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
        u64 res, wrap, new_mask, new_epoch, cyc, ns;
        u32 new_mult, new_shift;
        unsigned long r, flags;
        char r_unit;
        struct clock_read_data rd;

        if (cd.rate > rate)
                return;

        /* Cannot register a sched_clock with interrupts on */
        local_irq_save(flags);

        /* Calculate the mult/shift to convert counter ticks to ns. */
        clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

        new_mask = CLOCKSOURCE_MASK(bits);
        cd.rate = rate;

        /* Calculate how many nanosecs until we risk wrapping */
        wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
        cd.wrap_kt = ns_to_ktime(wrap);

        rd = cd.read_data[0];

        /* Update epoch for new counter and update 'epoch_ns' from old counter */
        new_epoch = read();
        cyc = cd.actual_read_sched_clock();
        ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
        cd.actual_read_sched_clock = read;

        rd.read_sched_clock = read;
        rd.sched_clock_mask = new_mask;
        rd.mult = new_mult;
        rd.shift = new_shift;
        rd.epoch_cyc = new_epoch;
        rd.epoch_ns = ns;

        update_clock_read_data(&rd);

        if (sched_clock_timer.function != NULL) {
                /* update timeout for clock wrap */
                hrtimer_start(&sched_clock_timer, cd.wrap_kt,
                              HRTIMER_MODE_REL_HARD);
        }

        r = rate;
        if (r >= 4000000) {
                r = DIV_ROUND_CLOSEST(r, 1000000);
                r_unit = 'M';
        } else if (r >= 4000) {
                r = DIV_ROUND_CLOSEST(r, 1000);
                r_unit = 'k';
        } else {
                r_unit = ' ';
        }

        /* Calculate the ns resolution of this counter */
        res = cyc_to_ns(1ULL, new_mult, new_shift);

        pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
                bits, r, r_unit, res, wrap);

        /* Enable IRQ time accounting if we have a fast enough sched_clock() */
        if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
                enable_sched_clock_irqtime();

        local_irq_restore(flags);

        pr_debug("Registered %pS as sched_clock source\n", read);
}
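
/*
 * Illustrative usage sketch (hypothetical driver; names and register offset
 * are made up): a timer driver typically registers its free-running counter
 * from an early init path, for example one declared with TIMER_OF_DECLARE():
 *
 *      static void __iomem *my_timer_base;
 *
 *      static u64 notrace my_timer_read(void)
 *      {
 *              return readl_relaxed(my_timer_base + 0x08);
 *      }
 *
 *      // registering a 32-bit counter running at 24 MHz:
 *      sched_clock_register(my_timer_read, 32, 24000000);
 */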

void __init generic_sched_clock_init(void)
{
        /*
         * If no sched_clock() function has been provided at this point,
         * make it the final one.
         */
        if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
                sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

        update_sched_clock();

        /*
         * Start the timer to keep sched_clock() properly updated and
         * set the initial epoch.
         */
        hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        sched_clock_timer.function = sched_clock_poll;
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
        unsigned int seq = raw_read_seqcount_latch(&cd.seq);

        return cd.read_data[seq & 1].epoch_cyc;
}

int sched_clock_suspend(void)
{
        struct clock_read_data *rd = &cd.read_data[0];

        update_sched_clock();
        hrtimer_cancel(&sched_clock_timer);
        rd->read_sched_clock = suspended_sched_clock_read;

        return 0;
}

void sched_clock_resume(void)
{
        struct clock_read_data *rd = &cd.read_data[0];

        rd->epoch_cyc = cd.actual_read_sched_clock();
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
        rd->read_sched_clock = cd.actual_read_sched_clock;
}

static struct syscore_ops sched_clock_ops = {
        .suspend = sched_clock_suspend,
        .resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
        register_syscore_ops(&sched_clock_ops);

        return 0;
}
device_initcall(sched_clock_syscore_init);