v3.15
 
/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

struct clock_data {
        ktime_t wrap_kt;
        u64 epoch_ns;
        u64 epoch_cyc;
        seqcount_t seq;
        unsigned long rate;
        u32 mult;
        u32 shift;
        bool suspended;
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static struct clock_data cd = {
        .mult   = NSEC_PER_SEC / HZ,
};

static u64 __read_mostly sched_clock_mask;

static u64 notrace jiffy_sched_clock_read(void)
{
        /*
         * We don't need to use get_jiffies_64 on 32-bit arches here
         * because we register with BITS_PER_LONG
         */
        return (u64)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock_32)(void);

static u64 notrace read_sched_clock_32_wrapper(void)
{
        return read_sched_clock_32();
}

static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
        return (cyc * mult) >> shift;
}

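The mult/shift pair used by cyc_to_ns() is a fixed-point encoding of nanoseconds per cycle: mult is roughly (NSEC_PER_SEC << shift) / rate, so the hot path needs only a multiply and a shift. Below is a minimal stand-alone, user-space sketch of the same arithmetic, assuming a 24 MHz counter and an illustrative pair mult = 87381333, shift = 21; the exact pair the kernel picks comes from clocks_calc_mult_shift() and may differ.

#include <stdint.h>
#include <stdio.h>

/* same scaled math as cyc_to_ns() above, reproduced in user space */
static inline uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
        return (cyc * mult) >> shift;
}

int main(void)
{
        /* assumed 24 MHz counter: (10^9 << 21) / 24000000 ~= 87381333 */
        uint32_t mult = 87381333, shift = 21;

        /* one cycle is ~41 ns; 24,000,000 cycles come out to ~10^9 ns */
        printf("1 cycle         -> %llu ns\n",
               (unsigned long long)cyc_to_ns(1, mult, shift));
        printf("24000000 cycles -> %llu ns\n",
               (unsigned long long)cyc_to_ns(24000000, mult, shift));
        return 0;
}
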
unsigned long long notrace sched_clock(void)
{
        u64 epoch_ns;
        u64 epoch_cyc;
        u64 cyc;
        unsigned long seq;

        if (cd.suspended)
                return cd.epoch_ns;

        do {
                seq = raw_read_seqcount_begin(&cd.seq);
                epoch_cyc = cd.epoch_cyc;
                epoch_ns = cd.epoch_ns;
        } while (read_seqcount_retry(&cd.seq, seq));

        cyc = read_sched_clock();
        cyc = (cyc - epoch_cyc) & sched_clock_mask;
        return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}

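For callers, sched_clock() is simply a cheap, non-traced 64-bit nanosecond timestamp; the scheduler and tracing use it for deltas rather than wall-clock time. A hedged sketch of the usual pattern in driver code follows (do_work() and the pr_debug text are placeholders, not part of this file):

#include <linux/printk.h>
#include <linux/sched.h>        /* declares sched_clock() in this kernel era */

static void example_time_work(void (*do_work)(void))
{
        u64 t0 = sched_clock();

        do_work();                      /* hypothetical work being timed */
        pr_debug("work took %llu ns\n", sched_clock() - t0);
}
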
/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
        unsigned long flags;
        u64 cyc;
        u64 ns;

        cyc = read_sched_clock();
        ns = cd.epoch_ns +
                cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
                          cd.mult, cd.shift);

        raw_local_irq_save(flags);
        raw_write_seqcount_begin(&cd.seq);
        cd.epoch_ns = ns;
        cd.epoch_cyc = cyc;
        raw_write_seqcount_end(&cd.seq);
        raw_local_irq_restore(flags);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
        update_sched_clock();
        hrtimer_forward_now(hrt, cd.wrap_kt);
        return HRTIMER_RESTART;
}

void __init sched_clock_register(u64 (*read)(void), int bits,
                                 unsigned long rate)
{
        u64 res, wrap, new_mask, new_epoch, cyc, ns;
        u32 new_mult, new_shift;
        ktime_t new_wrap_kt;
        unsigned long r;
        char r_unit;

        if (cd.rate > rate)
                return;

        WARN_ON(!irqs_disabled());

        /* calculate the mult/shift to convert counter ticks to ns. */
        clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

        new_mask = CLOCKSOURCE_MASK(bits);

        /* calculate how many ns until we wrap */
        wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
        new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3));

        /* update epoch for new counter and update epoch_ns from old counter */
        new_epoch = read();
        cyc = read_sched_clock();
        ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
                          cd.mult, cd.shift);

        raw_write_seqcount_begin(&cd.seq);
        read_sched_clock = read;
        sched_clock_mask = new_mask;
        cd.rate = rate;
        cd.wrap_kt = new_wrap_kt;
        cd.mult = new_mult;
        cd.shift = new_shift;
        cd.epoch_cyc = new_epoch;
        cd.epoch_ns = ns;
        raw_write_seqcount_end(&cd.seq);

        r = rate;
        if (r >= 4000000) {
                r /= 1000000;
                r_unit = 'M';
        } else if (r >= 1000) {
                r /= 1000;
                r_unit = 'k';
        } else
                r_unit = ' ';

        /* calculate the ns resolution of this counter */
        res = cyc_to_ns(1ULL, new_mult, new_shift);

        pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
                bits, r, r_unit, res, wrap);

        /* Enable IRQ time accounting if we have a fast enough sched_clock */
        if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
                enable_sched_clock_irqtime();

        pr_debug("Registered %pF as sched_clock source\n", read);
}

void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
        read_sched_clock_32 = read;
        sched_clock_register(read_sched_clock_32_wrapper, bits, rate);
}

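setup_sched_clock() above is the legacy entry point for hardware that only exposes a 32-bit counter: it stashes the u32 read function and registers the u64 wrapper. A hedged sketch of how a platform timer driver might use it follows (the register offset, the timer_base mapping and the 24 MHz rate are assumptions for the example). Note that the poll timer armed from cd.wrap_kt fires at 7/8 of the wrap period (wrap - (wrap >> 3)), so the epoch is refreshed comfortably before the counter wraps.

#include <linux/io.h>
#include <linux/sched_clock.h>

#define EXAMPLE_TIMER_COUNT     0x08            /* hypothetical register offset */

static void __iomem *example_timer_base;        /* mapped by the driver (assumption) */

static u32 notrace example_timer_read(void)
{
        return readl_relaxed(example_timer_base + EXAMPLE_TIMER_COUNT);
}

static void __init example_timer_init(void)
{
        /* 32 usable counter bits, assumed to tick at 24 MHz */
        setup_sched_clock(example_timer_read, 32, 24000000);
}
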
void __init sched_clock_postinit(void)
{
        /*
         * If no sched_clock function has been provided by this point,
         * make it the final one.
         */
        if (read_sched_clock == jiffy_sched_clock_read)
                sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

        update_sched_clock();

        /*
         * Start the timer to keep sched_clock() properly updated and
         * to set the initial epoch.
         */
        hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sched_clock_timer.function = sched_clock_poll;
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

static int sched_clock_suspend(void)
{
        sched_clock_poll(&sched_clock_timer);
        cd.suspended = true;
        return 0;
}

static void sched_clock_resume(void)
{
        cd.epoch_cyc = read_sched_clock();
        cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
        .suspend = sched_clock_suspend,
        .resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
        register_syscore_ops(&sched_clock_ops);
        return 0;
}
device_initcall(sched_clock_syscore_init);
v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic sched_clock() support, to extend low level hardware time
 * counters to full 64-bit ns values.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

#include "timekeeping.h"

/**
 * struct clock_read_data - data required to read from sched_clock()
 *
 * @epoch_ns:           sched_clock() value at last update
 * @epoch_cyc:          Clock cycle value at last update.
 * @sched_clock_mask:   Bitmask for two's complement subtraction of non-64-bit
 *                      clocks.
 * @read_sched_clock:   Current clock source (or dummy source when suspended).
 * @mult:               Multiplier for scaled math conversion.
 * @shift:              Shift value for scaled math conversion.
 *
 * Care must be taken when updating this structure; it is read by
 * some very hot code paths. It occupies <=40 bytes and, when combined
 * with the seqcount used to synchronize access, comfortably fits into
 * a 64 byte cache line.
 */
struct clock_read_data {
        u64 epoch_ns;
        u64 epoch_cyc;
        u64 sched_clock_mask;
        u64 (*read_sched_clock)(void);
        u32 mult;
        u32 shift;
};

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:                Sequence counter for protecting updates. The lowest
 *                      bit is the index for @read_data.
 * @read_data:          Data required to read from sched_clock.
 * @wrap_kt:            Duration for which clock can run before wrapping.
 * @rate:               Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
        seqcount_t              seq;
        struct clock_read_data  read_data[2];
        ktime_t                 wrap_kt;
        unsigned long           rate;

        u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
        /*
         * We don't need to use get_jiffies_64 on 32-bit arches here
         * because we register with BITS_PER_LONG
         */
        return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
        .read_data[0] = { .mult = NSEC_PER_SEC / HZ,
                          .read_sched_clock = jiffy_sched_clock_read, },
        .actual_read_sched_clock = jiffy_sched_clock_read,
};

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
        return (cyc * mult) >> shift;
}

unsigned long long notrace sched_clock(void)
{
        u64 cyc, res;
        unsigned int seq;
        struct clock_read_data *rd;

        do {
                seq = raw_read_seqcount(&cd.seq);
                rd = cd.read_data + (seq & 1);

                cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
                      rd->sched_clock_mask;
                res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
        } while (read_seqcount_retry(&cd.seq, seq));

        return res;
}

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
        /* update the backup (odd) copy with the new data */
        cd.read_data[1] = *rd;

        /* steer readers towards the odd copy */
        raw_write_seqcount_latch(&cd.seq);

        /* now it's safe for us to update the normal (even) copy */
        cd.read_data[0] = *rd;

        /* switch readers back to the even copy */
        raw_write_seqcount_latch(&cd.seq);
}
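The same odd/even idea can be sketched outside the kernel. The fragment below is a minimal user-space illustration of the pattern implemented by update_clock_read_data() and the reader loop in sched_clock() above, using C11 atomics as a stand-in for the kernel's seqcount/latch primitives; the struct name and fields are made up, and the kernel's real ordering guarantees come from raw_write_seqcount_latch()/read_seqcount_retry(), not from this exact code.

#include <stdatomic.h>
#include <stdint.h>

struct snapshot { uint64_t epoch_ns, epoch_cyc; };      /* illustrative payload */

static struct snapshot data[2];         /* [0] = hot/even copy, [1] = odd copy */
static _Atomic unsigned int seq;        /* lowest bit selects the copy to read */

static void latch_update(const struct snapshot *new_snap)
{
        data[1] = *new_snap;            /* fill the odd copy first */
        atomic_fetch_add_explicit(&seq, 1, memory_order_release);  /* readers -> odd */
        data[0] = *new_snap;            /* now update the even copy */
        atomic_fetch_add_explicit(&seq, 1, memory_order_release);  /* readers -> even */
}

static struct snapshot latch_read(void)
{
        struct snapshot snap;
        unsigned int s;

        do {
                s = atomic_load_explicit(&seq, memory_order_acquire);
                snap = data[s & 1];     /* copy not being written right now */
                atomic_thread_fence(memory_order_acquire);
        } while (atomic_load_explicit(&seq, memory_order_relaxed) != s);

        return snap;
}
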

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
        u64 cyc;
        u64 ns;
        struct clock_read_data rd;

        rd = cd.read_data[0];

        cyc = cd.actual_read_sched_clock();
        ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

        rd.epoch_ns = ns;
        rd.epoch_cyc = cyc;

        update_clock_read_data(&rd);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
        update_sched_clock();
        hrtimer_forward_now(hrt, cd.wrap_kt);

        return HRTIMER_RESTART;
}

void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
        u64 res, wrap, new_mask, new_epoch, cyc, ns;
        u32 new_mult, new_shift;
        unsigned long r;
        char r_unit;
        struct clock_read_data rd;

        if (cd.rate > rate)
                return;

        WARN_ON(!irqs_disabled());

        /* Calculate the mult/shift to convert counter ticks to ns. */
        clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

        new_mask = CLOCKSOURCE_MASK(bits);
        cd.rate = rate;

        /* Calculate how many nanosecs until we risk wrapping */
        wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
        cd.wrap_kt = ns_to_ktime(wrap);

        rd = cd.read_data[0];

        /* Update epoch for new counter and update 'epoch_ns' from old counter */
        new_epoch = read();
        cyc = cd.actual_read_sched_clock();
        ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
        cd.actual_read_sched_clock = read;

        rd.read_sched_clock     = read;
        rd.sched_clock_mask     = new_mask;
        rd.mult                 = new_mult;
        rd.shift                = new_shift;
        rd.epoch_cyc            = new_epoch;
        rd.epoch_ns             = ns;

        update_clock_read_data(&rd);

        if (sched_clock_timer.function != NULL) {
                /* update timeout for clock wrap */
                hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
        }

        r = rate;
        if (r >= 4000000) {
                r /= 1000000;
                r_unit = 'M';
        } else {
                if (r >= 1000) {
                        r /= 1000;
                        r_unit = 'k';
                } else {
                        r_unit = ' ';
                }
        }

        /* Calculate the ns resolution of this counter */
        res = cyc_to_ns(1ULL, new_mult, new_shift);

        pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
                bits, r, r_unit, res, wrap);

        /* Enable IRQ time accounting if we have a fast enough sched_clock() */
        if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
                enable_sched_clock_irqtime();

        pr_debug("Registered %pS as sched_clock source\n", read);
}

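A hedged sketch of a caller of the function above: a clocksource driver registering a free-running 56-bit counter. The split hi/lo registers, their offsets, the base mapping and the 24 MHz rate are assumptions for the example; the interesting part is that sched_clock_register() takes only the usable bit width and rate, and derives the mask, mult/shift and wrap timeout shown above. At 24 MHz a 56-bit counter wraps only after roughly 2^56 / 24e6 seconds, about 95 years, so the wrap timer is nearly idle.

#include <linux/io.h>
#include <linux/sched_clock.h>

static void __iomem *example_cnt_base;  /* mapped by the driver (assumption) */

static u64 notrace example_cnt_read(void)
{
        u32 hi, lo;

        /* hypothetical split counter: re-read 'hi' until the pair is consistent */
        do {
                hi = readl_relaxed(example_cnt_base + 0x04);
                lo = readl_relaxed(example_cnt_base + 0x00);
        } while (hi != readl_relaxed(example_cnt_base + 0x04));

        return ((u64)hi << 32) | lo;
}

static int __init example_cnt_init(void)
{
        /* 56 usable counter bits, assumed to tick at 24 MHz */
        sched_clock_register(example_cnt_read, 56, 24000000);
        return 0;
}
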
void __init generic_sched_clock_init(void)
{
        /*
         * If no sched_clock() function has been provided by this point,
         * make it the final one.
         */
        if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
                sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

        update_sched_clock();

        /*
         * Start the timer to keep sched_clock() properly updated and
         * to set the initial epoch.
         */
        hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sched_clock_timer.function = sched_clock_poll;
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
        unsigned int seq = raw_read_seqcount(&cd.seq);

        return cd.read_data[seq & 1].epoch_cyc;
}

int sched_clock_suspend(void)
{
        struct clock_read_data *rd = &cd.read_data[0];

        update_sched_clock();
        hrtimer_cancel(&sched_clock_timer);
        rd->read_sched_clock = suspended_sched_clock_read;

        return 0;
}

void sched_clock_resume(void)
{
        struct clock_read_data *rd = &cd.read_data[0];

        rd->epoch_cyc = cd.actual_read_sched_clock();
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
        rd->read_sched_clock = cd.actual_read_sched_clock;
}

static struct syscore_ops sched_clock_ops = {
        .suspend        = sched_clock_suspend,
        .resume         = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
        register_syscore_ops(&sched_clock_ops);

        return 0;
}
device_initcall(sched_clock_syscore_init);