v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Generic sched_clock() support, to extend low level hardware time
  4 * counters to full 64-bit ns values.
  5 */
  6#include <linux/clocksource.h>
  7#include <linux/init.h>
  8#include <linux/jiffies.h>
  9#include <linux/ktime.h>
 10#include <linux/kernel.h>
 11#include <linux/math.h>
 12#include <linux/moduleparam.h>
 13#include <linux/sched.h>
 14#include <linux/sched/clock.h>
 15#include <linux/syscore_ops.h>
 16#include <linux/hrtimer.h>
 17#include <linux/sched_clock.h>
 18#include <linux/seqlock.h>
 19#include <linux/bitops.h>
 20
 21#include "timekeeping.h"
 22
 23/**
 24 * struct clock_data - all data needed for sched_clock() (including
 25 *                     registration of a new clock source)
 26 *
 27 * @seq:		Sequence counter for protecting updates. The lowest
 28 *			bit is the index for @read_data.
 29 * @read_data:		Data required to read from sched_clock.
 30 * @wrap_kt:		Duration for which clock can run before wrapping.
 31 * @rate:		Tick rate of the registered clock.
 32 * @actual_read_sched_clock: Registered hardware level clock read function.
 33 *
 34 * The ordering of this structure has been chosen to optimize cache
 35 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 36 * into a single 64-byte cache line.
 37 */
 38struct clock_data {
 39	seqcount_latch_t	seq;
 40	struct clock_read_data	read_data[2];
 41	ktime_t			wrap_kt;
 42	unsigned long		rate;
 43
 44	u64 (*actual_read_sched_clock)(void);
 45};
 46
 47static struct hrtimer sched_clock_timer;
 48static int irqtime = -1;
 49
 50core_param(irqtime, irqtime, int, 0400);
 51
 52static u64 notrace jiffy_sched_clock_read(void)
 53{
 54	/*
 55	 * We don't need to use get_jiffies_64 on 32-bit arches here
 56	 * because we register with BITS_PER_LONG
 57	 */
 58	return (u64)(jiffies - INITIAL_JIFFIES);
 59}
 60
 61static struct clock_data cd ____cacheline_aligned = {
 62	.read_data[0] = { .mult = NSEC_PER_SEC / HZ,
 63			  .read_sched_clock = jiffy_sched_clock_read, },
 64	.actual_read_sched_clock = jiffy_sched_clock_read,
 65};
 66
 67static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 68{
 69	return (cyc * mult) >> shift;
 70}
 71
 72notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
 73{
 74	*seq = raw_read_seqcount_latch(&cd.seq);
 75	return cd.read_data + (*seq & 1);
 76}
 77
 78notrace int sched_clock_read_retry(unsigned int seq)
 79{
 80	return read_seqcount_latch_retry(&cd.seq, seq);
 81}
 82
 83unsigned long long notrace sched_clock(void)
 84{
 85	u64 cyc, res;
 86	unsigned int seq;
 87	struct clock_read_data *rd;
 88
 89	do {
 90		rd = sched_clock_read_begin(&seq);
 91
 92		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
 93		      rd->sched_clock_mask;
 94		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
 95	} while (sched_clock_read_retry(seq));
 96
 97	return res;
 98}
 99
100/*
101 * Updating the data required to read the clock.
102 *
103 * sched_clock() will never observe mis-matched data even if called from
104 * an NMI. We do this by maintaining an odd/even copy of the data and
105 * steering sched_clock() to one or the other using a sequence counter.
106 * In order to preserve the data cache profile of sched_clock() as much
107 * as possible the system reverts back to the even copy when the update
108 * completes; the odd copy is used *only* during an update.
109 */
110static void update_clock_read_data(struct clock_read_data *rd)
111{
112	/* update the backup (odd) copy with the new data */
113	cd.read_data[1] = *rd;
114
115	/* steer readers towards the odd copy */
116	raw_write_seqcount_latch(&cd.seq);
117
118	/* now it's safe for us to update the normal (even) copy */
119	cd.read_data[0] = *rd;
120
121	/* switch readers back to the even copy */
122	raw_write_seqcount_latch(&cd.seq);
123}
124
125/*
126 * Atomically update the sched_clock() epoch.
127 */
128static void update_sched_clock(void)
129{
130	u64 cyc;
131	u64 ns;
132	struct clock_read_data rd;
133
134	rd = cd.read_data[0];
135
136	cyc = cd.actual_read_sched_clock();
137	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
138
139	rd.epoch_ns = ns;
140	rd.epoch_cyc = cyc;
141
142	update_clock_read_data(&rd);
143}
144
145static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
146{
147	update_sched_clock();
148	hrtimer_forward_now(hrt, cd.wrap_kt);
149
150	return HRTIMER_RESTART;
151}
152
153void __init
154sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
155{
156	u64 res, wrap, new_mask, new_epoch, cyc, ns;
157	u32 new_mult, new_shift;
158	unsigned long r, flags;
159	char r_unit;
160	struct clock_read_data rd;
161
162	if (cd.rate > rate)
163		return;
164
165	/* Cannot register a sched_clock with interrupts on */
166	local_irq_save(flags);
167
168	/* Calculate the mult/shift to convert counter ticks to ns. */
169	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
170
171	new_mask = CLOCKSOURCE_MASK(bits);
172	cd.rate = rate;
173
174	/* Calculate how many nanosecs until we risk wrapping */
175	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
176	cd.wrap_kt = ns_to_ktime(wrap);
177
178	rd = cd.read_data[0];
179
180	/* Update epoch for new counter and update 'epoch_ns' from old counter*/
181	new_epoch = read();
182	cyc = cd.actual_read_sched_clock();
183	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
184	cd.actual_read_sched_clock = read;
185
186	rd.read_sched_clock	= read;
187	rd.sched_clock_mask	= new_mask;
188	rd.mult			= new_mult;
189	rd.shift		= new_shift;
190	rd.epoch_cyc		= new_epoch;
191	rd.epoch_ns		= ns;
192
193	update_clock_read_data(&rd);
194
195	if (sched_clock_timer.function != NULL) {
196		/* update timeout for clock wrap */
197		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
198			      HRTIMER_MODE_REL_HARD);
199	}
200
201	r = rate;
202	if (r >= 4000000) {
203		r = DIV_ROUND_CLOSEST(r, 1000000);
204		r_unit = 'M';
205	} else if (r >= 4000) {
206		r = DIV_ROUND_CLOSEST(r, 1000);
207		r_unit = 'k';
208	} else {
209		r_unit = ' ';
210	}
211
212	/* Calculate the ns resolution of this counter */
213	res = cyc_to_ns(1ULL, new_mult, new_shift);
214
215	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
216		bits, r, r_unit, res, wrap);
217
218	/* Enable IRQ time accounting if we have a fast enough sched_clock() */
219	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
220		enable_sched_clock_irqtime();
221
222	local_irq_restore(flags);
223
224	pr_debug("Registered %pS as sched_clock source\n", read);
225}
226
227void __init generic_sched_clock_init(void)
228{
229	/*
230	 * If no sched_clock() function has been provided at that point,
231	 * make it the final one.
232	 */
233	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
234		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);
235
236	update_sched_clock();
237
238	/*
239	 * Start the timer to keep sched_clock() properly updated and
240	 * sets the initial epoch.
241	 */
242	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
243	sched_clock_timer.function = sched_clock_poll;
244	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
245}
246
247/*
248 * Clock read function for use when the clock is suspended.
249 *
250 * This function makes it appear to sched_clock() as if the clock
251 * stopped counting at its last update.
252 *
253 * This function must only be called from the critical
254 * section in sched_clock(). It relies on the read_seqcount_retry()
255 * at the end of the critical section to be sure we observe the
256 * correct copy of 'epoch_cyc'.
257 */
258static u64 notrace suspended_sched_clock_read(void)
259{
260	unsigned int seq = raw_read_seqcount_latch(&cd.seq);
261
262	return cd.read_data[seq & 1].epoch_cyc;
263}
264
265int sched_clock_suspend(void)
266{
267	struct clock_read_data *rd = &cd.read_data[0];
268
269	update_sched_clock();
270	hrtimer_cancel(&sched_clock_timer);
271	rd->read_sched_clock = suspended_sched_clock_read;
272
273	return 0;
274}
275
276void sched_clock_resume(void)
277{
278	struct clock_read_data *rd = &cd.read_data[0];
279
280	rd->epoch_cyc = cd.actual_read_sched_clock();
281	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
282	rd->read_sched_clock = cd.actual_read_sched_clock;
283}
284
285static struct syscore_ops sched_clock_ops = {
286	.suspend	= sched_clock_suspend,
287	.resume		= sched_clock_resume,
288};
289
290static int __init sched_clock_syscore_init(void)
291{
292	register_syscore_ops(&sched_clock_ops);
293
294	return 0;
295}
296device_initcall(sched_clock_syscore_init);
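
The update_clock_read_data()/sched_clock() pair above is an instance of the seqcount-latch pattern: the writer keeps an even and an odd copy of the read data and bumps a sequence counter around each copy update, so a reader (even one running in NMI context) always finds at least one consistent copy and simply retries if the sequence changed underneath it. The user-space sketch below illustrates the idea with C11 <stdatomic.h>; the type and function names are invented for the example, and it glosses over the exact barrier and access-tearing guarantees that the kernel's raw_write_seqcount_latch()/read_seqcount_latch_retry() helpers provide.

#include <stdatomic.h>
#include <stdint.h>

struct snapshot {
	uint64_t epoch_cyc;
	uint64_t epoch_ns;
};

static _Atomic unsigned int latch_seq;   /* low bit selects which copy readers use */
static struct snapshot latch_data[2];    /* [0] = even copy, [1] = odd copy */

/* Writer side, mirroring update_clock_read_data() in the v6.2 listing. */
static void latch_update(const struct snapshot *new_data)
{
	latch_data[1] = *new_data;                                       /* refresh the backup (odd) copy */
	atomic_fetch_add_explicit(&latch_seq, 1, memory_order_release); /* seq odd: steer readers to copy 1 */
	latch_data[0] = *new_data;                                       /* rewrite the normal (even) copy */
	atomic_fetch_add_explicit(&latch_seq, 1, memory_order_release); /* seq even: readers back on copy 0 */
}

/* Reader side, mirroring the do/while retry loop in sched_clock(). */
static struct snapshot latch_read(void)
{
	struct snapshot snap;
	unsigned int seq;

	do {
		seq = atomic_load_explicit(&latch_seq, memory_order_acquire);
		snap = latch_data[seq & 1];                              /* pick the even or odd copy */
		atomic_thread_fence(memory_order_acquire);
	} while (seq != atomic_load_explicit(&latch_seq, memory_order_relaxed));

	return snap;
}

Because latch_update() never blocks latch_read(), sched_clock() can stay NMI-safe while update_sched_clock() re-bases the epoch from the wrap hrtimer.
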
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Generic sched_clock() support, to extend low level hardware time
  4 * counters to full 64-bit ns values.
  5 */
  6#include <linux/clocksource.h>
  7#include <linux/init.h>
  8#include <linux/jiffies.h>
  9#include <linux/ktime.h>
 10#include <linux/kernel.h>
 11#include <linux/math.h>
 12#include <linux/moduleparam.h>
 13#include <linux/sched.h>
 14#include <linux/sched/clock.h>
 15#include <linux/syscore_ops.h>
 16#include <linux/hrtimer.h>
 17#include <linux/sched_clock.h>
 18#include <linux/seqlock.h>
 19#include <linux/bitops.h>
 20
 21#include "timekeeping.h"
 22
 23/**
 24 * struct clock_data - all data needed for sched_clock() (including
 25 *                     registration of a new clock source)
 26 *
 27 * @seq:		Sequence counter for protecting updates. The lowest
 28 *			bit is the index for @read_data.
 29 * @read_data:		Data required to read from sched_clock.
 30 * @wrap_kt:		Duration for which clock can run before wrapping.
 31 * @rate:		Tick rate of the registered clock.
 32 * @actual_read_sched_clock: Registered hardware level clock read function.
 33 *
 34 * The ordering of this structure has been chosen to optimize cache
 35 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 36 * into a single 64-byte cache line.
 37 */
 38struct clock_data {
 39	seqcount_latch_t	seq;
 40	struct clock_read_data	read_data[2];
 41	ktime_t			wrap_kt;
 42	unsigned long		rate;
 43
 44	u64 (*actual_read_sched_clock)(void);
 45};
 46
 47static struct hrtimer sched_clock_timer;
 48static int irqtime = -1;
 49
 50core_param(irqtime, irqtime, int, 0400);
 51
 52static u64 notrace jiffy_sched_clock_read(void)
 53{
 54	/*
 55	 * We don't need to use get_jiffies_64 on 32-bit arches here
 56	 * because we register with BITS_PER_LONG
 57	 */
 58	return (u64)(jiffies - INITIAL_JIFFIES);
 59}
 60
 61static struct clock_data cd ____cacheline_aligned = {
 62	.read_data[0] = { .mult = NSEC_PER_SEC / HZ,
 63			  .read_sched_clock = jiffy_sched_clock_read, },
 64	.actual_read_sched_clock = jiffy_sched_clock_read,
 65};
 66
 67static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 68{
 69	return (cyc * mult) >> shift;
 70}
 71
 72notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
 73{
 74	*seq = read_seqcount_latch(&cd.seq);
 75	return cd.read_data + (*seq & 1);
 76}
 77
 78notrace int sched_clock_read_retry(unsigned int seq)
 79{
 80	return read_seqcount_latch_retry(&cd.seq, seq);
 81}
 82
 83static __always_inline unsigned long long __sched_clock(void)
 84{
 85	struct clock_read_data *rd;
 86	unsigned int seq;
 87	u64 cyc, res;
 88
 89	do {
 90		seq = raw_read_seqcount_latch(&cd.seq);
 91		rd = cd.read_data + (seq & 1);
 92
 93		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
 94		      rd->sched_clock_mask;
 95		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
 96	} while (raw_read_seqcount_latch_retry(&cd.seq, seq));
 97
 98	return res;
 99}
100
101unsigned long long noinstr sched_clock_noinstr(void)
102{
103	return __sched_clock();
104}
105
106unsigned long long notrace sched_clock(void)
107{
108	unsigned long long ns;
109	preempt_disable_notrace();
110	/*
111	 * All of __sched_clock() is a seqcount_latch reader critical section,
112	 * but relies on the raw helpers which are uninstrumented. For KCSAN,
113	 * mark all accesses in __sched_clock() as atomic.
114	 */
115	kcsan_nestable_atomic_begin();
116	ns = __sched_clock();
117	kcsan_nestable_atomic_end();
118	preempt_enable_notrace();
119	return ns;
120}
121
122/*
123 * Updating the data required to read the clock.
124 *
125 * sched_clock() will never observe mis-matched data even if called from
126 * an NMI. We do this by maintaining an odd/even copy of the data and
127 * steering sched_clock() to one or the other using a sequence counter.
128 * In order to preserve the data cache profile of sched_clock() as much
129 * as possible the system reverts back to the even copy when the update
130 * completes; the odd copy is used *only* during an update.
131 */
132static void update_clock_read_data(struct clock_read_data *rd)
133{
134	/* steer readers towards the odd copy */
135	write_seqcount_latch_begin(&cd.seq);
136
137	/* now it's safe for us to update the normal (even) copy */
138	cd.read_data[0] = *rd;
139
140	/* switch readers back to the even copy */
141	write_seqcount_latch(&cd.seq);
142
143	/* update the backup (odd) copy with the new data */
144	cd.read_data[1] = *rd;
145
146	write_seqcount_latch_end(&cd.seq);
147}
148
149/*
150 * Atomically update the sched_clock() epoch.
151 */
152static void update_sched_clock(void)
153{
154	u64 cyc;
155	u64 ns;
156	struct clock_read_data rd;
157
158	rd = cd.read_data[0];
159
160	cyc = cd.actual_read_sched_clock();
161	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
162
163	rd.epoch_ns = ns;
164	rd.epoch_cyc = cyc;
165
166	update_clock_read_data(&rd);
167}
168
169static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
170{
171	update_sched_clock();
172	hrtimer_forward_now(hrt, cd.wrap_kt);
173
174	return HRTIMER_RESTART;
175}
176
177void __init
178sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
179{
180	u64 res, wrap, new_mask, new_epoch, cyc, ns;
181	u32 new_mult, new_shift;
182	unsigned long r, flags;
183	char r_unit;
184	struct clock_read_data rd;
185
186	if (cd.rate > rate)
187		return;
188
189	/* Cannot register a sched_clock with interrupts on */
190	local_irq_save(flags);
191
192	/* Calculate the mult/shift to convert counter ticks to ns. */
193	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
194
195	new_mask = CLOCKSOURCE_MASK(bits);
196	cd.rate = rate;
197
198	/* Calculate how many nanosecs until we risk wrapping */
199	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
200	cd.wrap_kt = ns_to_ktime(wrap);
201
202	rd = cd.read_data[0];
203
204	/* Update epoch for new counter and update 'epoch_ns' from old counter*/
205	new_epoch = read();
206	cyc = cd.actual_read_sched_clock();
207	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
208	cd.actual_read_sched_clock = read;
209
210	rd.read_sched_clock	= read;
211	rd.sched_clock_mask	= new_mask;
212	rd.mult			= new_mult;
213	rd.shift		= new_shift;
214	rd.epoch_cyc		= new_epoch;
215	rd.epoch_ns		= ns;
216
217	update_clock_read_data(&rd);
218
219	if (sched_clock_timer.function != NULL) {
220		/* update timeout for clock wrap */
221		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
222			      HRTIMER_MODE_REL_HARD);
223	}
224
225	r = rate;
226	if (r >= 4000000) {
227		r = DIV_ROUND_CLOSEST(r, 1000000);
228		r_unit = 'M';
229	} else if (r >= 4000) {
230		r = DIV_ROUND_CLOSEST(r, 1000);
231		r_unit = 'k';
232	} else {
233		r_unit = ' ';
234	}
235
236	/* Calculate the ns resolution of this counter */
237	res = cyc_to_ns(1ULL, new_mult, new_shift);
238
239	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
240		bits, r, r_unit, res, wrap);
241
242	/* Enable IRQ time accounting if we have a fast enough sched_clock() */
243	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
244		enable_sched_clock_irqtime();
245
246	local_irq_restore(flags);
247
248	pr_debug("Registered %pS as sched_clock source\n", read);
249}
250
251void __init generic_sched_clock_init(void)
252{
253	/*
254	 * If no sched_clock() function has been provided at that point,
255	 * make it the final one.
256	 */
257	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
258		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);
259
260	update_sched_clock();
261
262	/*
263	 * Start the timer to keep sched_clock() properly updated and
264	 * sets the initial epoch.
265	 */
266	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
267	sched_clock_timer.function = sched_clock_poll;
268	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
269}
270
271/*
272 * Clock read function for use when the clock is suspended.
273 *
274 * This function makes it appear to sched_clock() as if the clock
275 * stopped counting at its last update.
276 *
277 * This function must only be called from the critical
278 * section in sched_clock(). It relies on the read_seqcount_retry()
279 * at the end of the critical section to be sure we observe the
280 * correct copy of 'epoch_cyc'.
281 */
282static u64 notrace suspended_sched_clock_read(void)
283{
284	unsigned int seq = read_seqcount_latch(&cd.seq);
285
286	return cd.read_data[seq & 1].epoch_cyc;
287}
288
289int sched_clock_suspend(void)
290{
291	struct clock_read_data *rd = &cd.read_data[0];
292
293	update_sched_clock();
294	hrtimer_cancel(&sched_clock_timer);
295	rd->read_sched_clock = suspended_sched_clock_read;
296
297	return 0;
298}
299
300void sched_clock_resume(void)
301{
302	struct clock_read_data *rd = &cd.read_data[0];
303
304	rd->epoch_cyc = cd.actual_read_sched_clock();
305	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
306	rd->read_sched_clock = cd.actual_read_sched_clock;
307}
308
309static struct syscore_ops sched_clock_ops = {
310	.suspend	= sched_clock_suspend,
311	.resume		= sched_clock_resume,
312};
313
314static int __init sched_clock_syscore_init(void)
315{
316	register_syscore_ops(&sched_clock_ops);
317
318	return 0;
319}
320device_initcall(sched_clock_syscore_init);
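
Both versions convert raw counter ticks to nanoseconds with the same fixed-point scheme: clocks_calc_mult_shift() picks mult and shift so that cyc_to_ns(cyc, mult, shift) = (cyc * mult) >> shift approximates cyc * NSEC_PER_SEC / rate, and clocks_calc_max_nsecs() plus the cd.wrap_kt hrtimer bound how far past the epoch the multiplication may run before update_sched_clock() re-bases it. The standalone sketch below walks through that arithmetic with a made-up 24 MHz counter and a made-up shift value; the real kernel derives both from clocks_calc_mult_shift(), so the numbers here are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t rate  = 24000000;  /* hypothetical 24 MHz counter */
	uint32_t shift = 26;        /* hypothetical shift chosen for this example */

	/* mult is picked so that (cyc * mult) >> shift ~= cyc * NSEC_PER_SEC / rate */
	uint32_t mult = (uint32_t)(((NSEC_PER_SEC << shift) + rate / 2) / rate);

	uint64_t cyc = rate;        /* one second's worth of ticks */
	uint64_t ns  = (cyc * (uint64_t)mult) >> shift;   /* same math as cyc_to_ns() */

	/*
	 * The 64-bit product bounds how many cycles may elapse between epoch
	 * updates; this is what cd.wrap_kt and the poll hrtimer guard against.
	 */
	uint64_t max_cyc = UINT64_MAX / mult;

	printf("mult=%u shift=%u: %llu cycles -> %llu ns, ~%llu s safe between updates\n",
	       mult, shift,
	       (unsigned long long)cyc, (unsigned long long)ns,
	       (unsigned long long)(max_cyc / rate));
	return 0;
}

The "wraps every" figure that sched_clock_register() prints reflects the same limit, tightened further by the counter width passed in via the bits argument.
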