// SPDX-License-Identifier: GPL-2.0
/*
 * Generic sched_clock() support, to extend low level hardware time
 * counters to full 64-bit ns values.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

#include "timekeeping.h"

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:		Sequence counter for protecting updates. The lowest
 *			bit is the index for @read_data.
 * @read_data:		Data required to read from sched_clock.
 * @wrap_kt:		Duration for which clock can run before wrapping.
 * @rate:		Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
	seqcount_latch_t	seq;
	struct clock_read_data	read_data[2];
	ktime_t			wrap_kt;
	unsigned long		rate;

	u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

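/*
 * Boot-time default: extend jiffies until a real counter is registered.
 * Each jiffy advances the clock by NSEC_PER_SEC / HZ nanoseconds (with
 * shift left at 0 from static initialization), so resolution is no
 * better than the timer tick until then.
 */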
static struct clock_data cd ____cacheline_aligned = {
	.read_data[0] = { .mult = NSEC_PER_SEC / HZ,
			  .read_sched_clock = jiffy_sched_clock_read, },
	.actual_read_sched_clock = jiffy_sched_clock_read,
};

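/*
 * Scale a counter delta to nanoseconds: ns = (cyc * mult) >> shift.
 * Callers mask 'cyc' to the counter width first; the mult/shift pair
 * chosen at registration keeps the multiplication from overflowing
 * 64 bits within one wrap period.
 */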
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

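/*
 * Read-side pairing for external users (e.g. NMI-safe callers): take a
 * snapshot with sched_clock_read_begin(), use the returned copy, and
 * loop while sched_clock_read_retry() reports that an update raced with
 * the read. sched_clock() below is the canonical example of this pattern.
 */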
notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
{
	*seq = raw_read_seqcount_latch(&cd.seq);
	return cd.read_data + (*seq & 1);
}

notrace int sched_clock_read_retry(unsigned int seq)
{
	return read_seqcount_latch_retry(&cd.seq, seq);
}

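/*
 * Lockless clock read: pick the latch-selected copy, extend the masked
 * counter delta to nanoseconds and add it to that copy's epoch.
 */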
unsigned long long notrace sched_clock(void)
{
	u64 cyc, res;
	unsigned int seq;
	struct clock_read_data *rd;

	do {
		rd = sched_clock_read_begin(&seq);

		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
	} while (sched_clock_read_retry(seq));

	return res;
}

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
	/* update the backup (odd) copy with the new data */
	cd.read_data[1] = *rd;

	/* steer readers towards the odd copy */
	raw_write_seqcount_latch(&cd.seq);

	/* now it's safe for us to update the normal (even) copy */
	cd.read_data[0] = *rd;

	/* switch readers back to the even copy */
	raw_write_seqcount_latch(&cd.seq);
}

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
	u64 cyc;
	u64 ns;
	struct clock_read_data rd;

	rd = cd.read_data[0];

	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

	rd.epoch_ns = ns;
	rd.epoch_cyc = cyc;

	update_clock_read_data(&rd);
}

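/*
 * Periodic update: the hrtimer fires once per wrap period so that the
 * epoch is refreshed before the masked counter delta becomes ambiguous.
 */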
static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);

	return HRTIMER_RESTART;
}

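/*
 * Example (sketch, with a hypothetical driver function): a 56-bit
 * counter running at 24 MHz would be registered from its driver as
 *
 *	sched_clock_register(my_counter_read, 56, 24000000);
 *
 * where my_counter_read() returns the raw counter value. Registrations
 * with a lower rate than the current clock are ignored.
 */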
void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	unsigned long r, flags;
	char r_unit;
	struct clock_read_data rd;

	if (cd.rate > rate)
		return;

	/* Cannot register a sched_clock with interrupts on */
	local_irq_save(flags);

	/*
	 * Calculate the mult/shift to convert counter ticks to ns. The
	 * last argument guarantees a conversion range of at least 3600
	 * seconds (one hour).
	 */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* Calculate how many nanosecs until we risk wrapping */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
	cd.wrap_kt = ns_to_ktime(wrap);

	rd = cd.read_data[0];

	/* Update epoch for new counter and update 'epoch_ns' from old counter */
	new_epoch = read();
	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
	cd.actual_read_sched_clock = read;

	rd.read_sched_clock	= read;
	rd.sched_clock_mask	= new_mask;
	rd.mult			= new_mult;
	rd.shift		= new_shift;
	rd.epoch_cyc		= new_epoch;
	rd.epoch_ns		= ns;

	update_clock_read_data(&rd);

	if (sched_clock_timer.function != NULL) {
		/* update timeout for clock wrap */
		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
			      HRTIMER_MODE_REL_HARD);
	}

	r = rate;
	if (r >= 4000000) {
		r = DIV_ROUND_CLOSEST(r, 1000000);
		r_unit = 'M';
	} else if (r >= 4000) {
		r = DIV_ROUND_CLOSEST(r, 1000);
		r_unit = 'k';
	} else {
		r_unit = ' ';
	}

	/* Calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock() */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	local_irq_restore(flags);

	pr_debug("Registered %pS as sched_clock source\n", read);
}

void __init generic_sched_clock_init(void)
{
	/*
	 * If no sched_clock() function has been provided at this point,
	 * make the jiffy-based clock the final one.
	 */
	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
	unsigned int seq = raw_read_seqcount_latch(&cd.seq);

	return cd.read_data[seq & 1].epoch_cyc;
}

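/*
 * On suspend, freeze the clock: update the epoch one last time, stop
 * the wrap timer and steer readers to a read function that keeps
 * returning the final pre-suspend counter value.
 */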
int sched_clock_suspend(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	rd->read_sched_clock = suspended_sched_clock_read;

	return 0;
}

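/*
 * On resume, re-sample the hardware counter as the new epoch so the
 * suspended interval is not reported, then restore the real read
 * function and restart the wrap timer.
 */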
void sched_clock_resume(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	rd->epoch_cyc = cd.actual_read_sched_clock();
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
	rd->read_sched_clock = cd.actual_read_sched_clock;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);

	return 0;
}
device_initcall(sched_clock_syscore_init);