// SPDX-License-Identifier: GPL-2.0
/*
 * Generic sched_clock() support, to extend low level hardware time
 * counters to full 64-bit ns values.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

#include "timekeeping.h"

/**
 * struct clock_read_data - data required to read from sched_clock()
 *
 * @epoch_ns: sched_clock() value at last update
 * @epoch_cyc: Clock cycle value at last update.
 * @sched_clock_mask: Bitmask for two's complement subtraction of non-64-bit
 *                    clocks.
 * @read_sched_clock: Current clock source (or dummy source when suspended).
 * @mult: Multiplier for scaled math conversion.
 * @shift: Shift value for scaled math conversion.
 *
 * Care must be taken when updating this structure; it is read by
 * some very hot code paths. It occupies <=40 bytes and, when combined
 * with the seqcount used to synchronize access, comfortably fits into
 * a 64 byte cache line.
 */
struct clock_read_data {
        u64 epoch_ns;
        u64 epoch_cyc;
        u64 sched_clock_mask;
        u64 (*read_sched_clock)(void);
        u32 mult;
        u32 shift;
};
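
/*
 * Size arithmetic (illustrative): on a 64-bit build the three u64 fields
 * (24 bytes), the function pointer (8 bytes) and the two u32 fields
 * (8 bytes) add up to the 40 bytes mentioned above, leaving room in a
 * 64-byte cache line for the seqcount that sits alongside this structure
 * in struct clock_data.
 */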

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq: Sequence counter for protecting updates. The lowest
 *       bit is the index for @read_data.
 * @read_data: Data required to read from sched_clock.
 * @wrap_kt: Duration for which clock can run before wrapping.
 * @rate: Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
        seqcount_t seq;
        struct clock_read_data read_data[2];
        ktime_t wrap_kt;
        unsigned long rate;

        u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
        /*
         * We don't need to use get_jiffies_64 on 32-bit arches here
         * because we register with BITS_PER_LONG
         */
        return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
        .read_data[0] = { .mult = NSEC_PER_SEC / HZ,
                          .read_sched_clock = jiffy_sched_clock_read, },
        .actual_read_sched_clock = jiffy_sched_clock_read,
};

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
        return (cyc * mult) >> shift;
}
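
/*
 * Worked example (illustrative values only; the real pair comes from
 * clocks_calc_mult_shift()): for a counter ticking at 1 MHz, one valid
 * scaling is mult = 1000, shift = 0, because each cycle is exactly
 * 1000 ns, so cyc_to_ns(5, 1000, 0) == 5000. In practice a larger shift
 * is chosen so that non-integral ns-per-cycle rates keep good precision.
 */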

unsigned long long notrace sched_clock(void)
{
        u64 cyc, res;
        unsigned int seq;
        struct clock_read_data *rd;

        do {
                seq = raw_read_seqcount(&cd.seq);
                rd = cd.read_data + (seq & 1);

                cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
                      rd->sched_clock_mask;
                res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
        } while (read_seqcount_retry(&cd.seq, seq));

        return res;
}

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
        /* update the backup (odd) copy with the new data */
        cd.read_data[1] = *rd;

        /* steer readers towards the odd copy */
        raw_write_seqcount_latch(&cd.seq);

        /* now it's safe for us to update the normal (even) copy */
        cd.read_data[0] = *rd;

        /* switch readers back to the even copy */
        raw_write_seqcount_latch(&cd.seq);
}

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
        u64 cyc;
        u64 ns;
        struct clock_read_data rd;

        rd = cd.read_data[0];

        cyc = cd.actual_read_sched_clock();
        ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

        rd.epoch_ns = ns;
        rd.epoch_cyc = cyc;

        update_clock_read_data(&rd);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
        update_sched_clock();
        hrtimer_forward_now(hrt, cd.wrap_kt);

        return HRTIMER_RESTART;
}

void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
        u64 res, wrap, new_mask, new_epoch, cyc, ns;
        u32 new_mult, new_shift;
        unsigned long r;
        char r_unit;
        struct clock_read_data rd;

        if (cd.rate > rate)
                return;

        WARN_ON(!irqs_disabled());

        /* Calculate the mult/shift to convert counter ticks to ns. */
        clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

        new_mask = CLOCKSOURCE_MASK(bits);
        cd.rate = rate;

        /* Calculate how many nanosecs until we risk wrapping */
        wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
        cd.wrap_kt = ns_to_ktime(wrap);

        rd = cd.read_data[0];

        /* Update epoch for new counter and update 'epoch_ns' from old counter */
        new_epoch = read();
        cyc = cd.actual_read_sched_clock();
        ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
        cd.actual_read_sched_clock = read;

        rd.read_sched_clock = read;
        rd.sched_clock_mask = new_mask;
        rd.mult = new_mult;
        rd.shift = new_shift;
        rd.epoch_cyc = new_epoch;
        rd.epoch_ns = ns;

        update_clock_read_data(&rd);

        if (sched_clock_timer.function != NULL) {
                /* update timeout for clock wrap */
                hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
        }

        r = rate;
        if (r >= 4000000) {
                r /= 1000000;
                r_unit = 'M';
        } else if (r >= 1000) {
                r /= 1000;
                r_unit = 'k';
        } else {
                r_unit = ' ';
        }

        /* Calculate the ns resolution of this counter */
        res = cyc_to_ns(1ULL, new_mult, new_shift);

        pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
                bits, r, r_unit, res, wrap);

        /* Enable IRQ time accounting if we have a fast enough sched_clock() */
        if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
                enable_sched_clock_irqtime();

        pr_debug("Registered %pS as sched_clock source\n", read);
}
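
/*
 * Example usage (illustrative only; the names, base address, register
 * offset and rate below are hypothetical): a timer driver with a
 * free-running 32-bit counter at 24 MHz would provide a notrace read
 * function and register it early during boot:
 *
 *      static void __iomem *my_timer_base;
 *
 *      static u64 notrace my_timer_read(void)
 *      {
 *              return readl_relaxed(my_timer_base + 0x08);
 *      }
 *
 *      sched_clock_register(my_timer_read, 32, 24000000);
 *
 * The fixed parts are the u64 (*read)(void) signature, the counter width
 * in bits and the tick rate in Hz expected by sched_clock_register().
 */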

void __init generic_sched_clock_init(void)
{
        /*
         * If no sched_clock() function has been provided at this point,
         * make it the final one.
         */
        if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
                sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

        update_sched_clock();

        /*
         * Start the timer to keep sched_clock() properly updated and
         * set the initial epoch.
         */
        hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sched_clock_timer.function = sched_clock_poll;
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
        unsigned int seq = raw_read_seqcount(&cd.seq);

        return cd.read_data[seq & 1].epoch_cyc;
}
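
/*
 * Note: while this read function is installed, the subtraction in
 * sched_clock() evaluates to epoch_cyc - epoch_cyc = 0 cycles, so
 * sched_clock() keeps returning the frozen epoch_ns value until
 * sched_clock_resume() restores the real read function and epoch.
 */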

int sched_clock_suspend(void)
{
        struct clock_read_data *rd = &cd.read_data[0];

        update_sched_clock();
        hrtimer_cancel(&sched_clock_timer);
        rd->read_sched_clock = suspended_sched_clock_read;

        return 0;
}

void sched_clock_resume(void)
{
        struct clock_read_data *rd = &cd.read_data[0];

        rd->epoch_cyc = cd.actual_read_sched_clock();
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
        rd->read_sched_clock = cd.actual_read_sched_clock;
}

static struct syscore_ops sched_clock_ops = {
        .suspend = sched_clock_suspend,
        .resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
        register_syscore_ops(&sched_clock_ops);

        return 0;
}
device_initcall(sched_clock_syscore_init);
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic sched_clock() support, to extend low level hardware time
 * counters to full 64-bit ns values.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

#include "timekeeping.h"

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq: Sequence counter for protecting updates. The lowest
 *       bit is the index for @read_data.
 * @read_data: Data required to read from sched_clock.
 * @wrap_kt: Duration for which clock can run before wrapping.
 * @rate: Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
        seqcount_latch_t seq;
        struct clock_read_data read_data[2];
        ktime_t wrap_kt;
        unsigned long rate;

        u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
        /*
         * We don't need to use get_jiffies_64 on 32-bit arches here
         * because we register with BITS_PER_LONG
         */
        return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
        .read_data[0] = { .mult = NSEC_PER_SEC / HZ,
                          .read_sched_clock = jiffy_sched_clock_read, },
        .actual_read_sched_clock = jiffy_sched_clock_read,
};

static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
        return (cyc * mult) >> shift;
}

notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
{
        *seq = raw_read_seqcount_latch(&cd.seq);
        return cd.read_data + (*seq & 1);
}

notrace int sched_clock_read_retry(unsigned int seq)
{
        return raw_read_seqcount_latch_retry(&cd.seq, seq);
}
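
/*
 * Example usage (illustrative; it mirrors what sched_clock_noinstr()
 * below does with the raw seqcount helpers): callers loop until the
 * latch sequence is stable:
 *
 *      struct clock_read_data *rd;
 *      unsigned int seq;
 *      u64 cyc, ns;
 *
 *      do {
 *              rd = sched_clock_read_begin(&seq);
 *              cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
 *                    rd->sched_clock_mask;
 *              ns = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
 *      } while (sched_clock_read_retry(seq));
 */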

unsigned long long noinstr sched_clock_noinstr(void)
{
        struct clock_read_data *rd;
        unsigned int seq;
        u64 cyc, res;

        do {
                seq = raw_read_seqcount_latch(&cd.seq);
                rd = cd.read_data + (seq & 1);

                cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
                      rd->sched_clock_mask;
                res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
        } while (raw_read_seqcount_latch_retry(&cd.seq, seq));

        return res;
}

unsigned long long notrace sched_clock(void)
{
        unsigned long long ns;
        preempt_disable_notrace();
        ns = sched_clock_noinstr();
        preempt_enable_notrace();
        return ns;
}

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
        /* update the backup (odd) copy with the new data */
        cd.read_data[1] = *rd;

        /* steer readers towards the odd copy */
        raw_write_seqcount_latch(&cd.seq);

        /* now it's safe for us to update the normal (even) copy */
        cd.read_data[0] = *rd;

        /* switch readers back to the even copy */
        raw_write_seqcount_latch(&cd.seq);
}
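
/*
 * Ordering overview: each raw_write_seqcount_latch() above bumps the
 * sequence count, so a reader that samples the count while an update is
 * in flight is steered to read_data[1] (odd count), while readers before
 * or after the update use read_data[0] (even count). A reader that
 * straddles either increment sees the count change and retries, which is
 * why sched_clock() never observes a half-written copy.
 */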

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
        u64 cyc;
        u64 ns;
        struct clock_read_data rd;

        rd = cd.read_data[0];

        cyc = cd.actual_read_sched_clock();
        ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

        rd.epoch_ns = ns;
        rd.epoch_cyc = cyc;

        update_clock_read_data(&rd);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
        update_sched_clock();
        hrtimer_forward_now(hrt, cd.wrap_kt);

        return HRTIMER_RESTART;
}

void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
        u64 res, wrap, new_mask, new_epoch, cyc, ns;
        u32 new_mult, new_shift;
        unsigned long r, flags;
        char r_unit;
        struct clock_read_data rd;

        if (cd.rate > rate)
                return;

        /* Cannot register a sched_clock with interrupts on */
        local_irq_save(flags);

        /* Calculate the mult/shift to convert counter ticks to ns. */
        clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

        new_mask = CLOCKSOURCE_MASK(bits);
        cd.rate = rate;

        /* Calculate how many nanosecs until we risk wrapping */
        wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
        cd.wrap_kt = ns_to_ktime(wrap);
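
        /*
         * Rough example (illustrative numbers): a 32-bit counter at 24 MHz
         * takes about 2^32 / 24000000 ~= 179 seconds to wrap;
         * clocks_calc_max_nsecs() reduces that further by a safety margin,
         * and the hrtimer below is (re)armed with this interval so an
         * epoch update always happens before the counter can wrap.
         */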

        rd = cd.read_data[0];

        /* Update epoch for new counter and update 'epoch_ns' from old counter */
        new_epoch = read();
        cyc = cd.actual_read_sched_clock();
        ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
        cd.actual_read_sched_clock = read;

        rd.read_sched_clock = read;
        rd.sched_clock_mask = new_mask;
        rd.mult = new_mult;
        rd.shift = new_shift;
        rd.epoch_cyc = new_epoch;
        rd.epoch_ns = ns;

        update_clock_read_data(&rd);

        if (sched_clock_timer.function != NULL) {
                /* update timeout for clock wrap */
                hrtimer_start(&sched_clock_timer, cd.wrap_kt,
                              HRTIMER_MODE_REL_HARD);
        }

        r = rate;
        if (r >= 4000000) {
                r = DIV_ROUND_CLOSEST(r, 1000000);
                r_unit = 'M';
        } else if (r >= 4000) {
                r = DIV_ROUND_CLOSEST(r, 1000);
                r_unit = 'k';
        } else {
                r_unit = ' ';
        }

        /* Calculate the ns resolution of this counter */
        res = cyc_to_ns(1ULL, new_mult, new_shift);

        pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
                bits, r, r_unit, res, wrap);

        /* Enable IRQ time accounting if we have a fast enough sched_clock() */
        if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
                enable_sched_clock_irqtime();

        local_irq_restore(flags);

        pr_debug("Registered %pS as sched_clock source\n", read);
}

void __init generic_sched_clock_init(void)
{
        /*
         * If no sched_clock() function has been provided at this point,
         * make it the final one.
         */
        if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
                sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

        update_sched_clock();

        /*
         * Start the timer to keep sched_clock() properly updated and
         * set the initial epoch.
         */
        hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        sched_clock_timer.function = sched_clock_poll;
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
        unsigned int seq = raw_read_seqcount_latch(&cd.seq);

        return cd.read_data[seq & 1].epoch_cyc;
}

int sched_clock_suspend(void)
{
        struct clock_read_data *rd = &cd.read_data[0];

        update_sched_clock();
        hrtimer_cancel(&sched_clock_timer);
        rd->read_sched_clock = suspended_sched_clock_read;

        return 0;
}

void sched_clock_resume(void)
{
        struct clock_read_data *rd = &cd.read_data[0];

        rd->epoch_cyc = cd.actual_read_sched_clock();
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
        rd->read_sched_clock = cd.actual_read_sched_clock;
}

static struct syscore_ops sched_clock_ops = {
        .suspend = sched_clock_suspend,
        .resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
        register_syscore_ops(&sched_clock_ops);

        return 0;
}
device_initcall(sched_clock_syscore_init);