// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This implementation is based on old RISC-V perf and ARM perf event code
 * which are in turn based on sparc64 and x86 code.
 */

#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/printk.h>
#include <linux/smp.h>

#include <asm/sbi.h>

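/*
 * csr_read() expands to inline assembly in which the CSR number is encoded
 * as an instruction immediate, so it can only read a CSR known at compile
 * time. The switchcase_csr_read_*() macros below therefore build a switch
 * statement with one case per counter CSR, letting csr_read_num() dispatch
 * on a CSR number held in a variable. Each _32 invocation expands to 32
 * cases: CSR_CYCLE..CSR_CYCLE + 31 covers cycle, time, instret and
 * hpmcounter3..hpmcounter31, and CSR_CYCLEH..CSR_CYCLEH + 31 covers their
 * upper-half counterparts.
 */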
static unsigned long csr_read_num(int csr_num)
{
#define switchcase_csr_read(__csr_num, __val)		{\
	case __csr_num:					\
		__val = csr_read(__csr_num);		\
		break; }
#define switchcase_csr_read_2(__csr_num, __val)		{\
	switchcase_csr_read(__csr_num + 0, __val)	 \
	switchcase_csr_read(__csr_num + 1, __val)}
#define switchcase_csr_read_4(__csr_num, __val)		{\
	switchcase_csr_read_2(__csr_num + 0, __val)	 \
	switchcase_csr_read_2(__csr_num + 2, __val)}
#define switchcase_csr_read_8(__csr_num, __val)		{\
	switchcase_csr_read_4(__csr_num + 0, __val)	 \
	switchcase_csr_read_4(__csr_num + 4, __val)}
#define switchcase_csr_read_16(__csr_num, __val)	{\
	switchcase_csr_read_8(__csr_num + 0, __val)	 \
	switchcase_csr_read_8(__csr_num + 8, __val)}
#define switchcase_csr_read_32(__csr_num, __val)	{\
	switchcase_csr_read_16(__csr_num + 0, __val)	 \
	switchcase_csr_read_16(__csr_num + 16, __val)}

	unsigned long ret = 0;

	switch (csr_num) {
	switchcase_csr_read_32(CSR_CYCLE, ret)
	switchcase_csr_read_32(CSR_CYCLEH, ret)
	default:
		break;
	}

	return ret;
#undef switchcase_csr_read_32
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}

/*
 * Read the CSR of the corresponding counter.
 */
unsigned long riscv_pmu_ctr_read_csr(unsigned long csr)
{
	if (csr < CSR_CYCLE || csr > CSR_HPMCOUNTER31H ||
	   (csr > CSR_HPMCOUNTER31 && csr < CSR_CYCLEH)) {
		pr_err("Invalid performance counter csr %lx\n", csr);
		return -EINVAL;
	}

	return csr_read_num(csr);
}
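
/*
 * The accepted range spans the unprivileged counter CSRs: CSR_CYCLE through
 * CSR_HPMCOUNTER31 and CSR_CYCLEH through CSR_HPMCOUNTER31H; values in the
 * gap between the two ranges are rejected. Note that the return type is
 * unsigned long, so the -EINVAL error comes back as a large unsigned value
 * rather than a negative one.
 */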

u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
{
	int cwidth;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (!rvpmu->ctr_get_width)
		/*
		 * If the pmu driver doesn't support counter width, set it to
		 * the default maximum allowed by the specification.
		 */
		cwidth = 63;
	else {
		if (hwc->idx == -1)
			/* Handle init case where idx is not initialized yet */
			cwidth = rvpmu->ctr_get_width(0);
		else
			cwidth = rvpmu->ctr_get_width(hwc->idx);
	}

	return GENMASK_ULL(cwidth, 0);
}
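
/*
 * For example, a driver whose ctr_get_width() callback reports 47 gets a
 * 48-bit mask, GENMASK_ULL(47, 0) == 0x0000ffffffffffff; without a callback
 * the default of 63 yields a full 64-bit mask.
 */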

u64 riscv_pmu_event_update(struct perf_event *event)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	unsigned long cmask;
	u64 oldval, delta;

	if (!rvpmu->ctr_read)
		return 0;

	cmask = riscv_pmu_ctr_get_width_mask(event);

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = rvpmu->ctr_read(event);
		oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					 new_raw_count);
	} while (oldval != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & cmask;
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return delta;
}
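
/*
 * Because the subtraction is masked to the counter width, the delta stays
 * correct even when the hardware counter wraps between two reads. For
 * example, with a 48-bit counter, prev = 0xfffffffffffe and new = 0x1 give
 * (0x1 - 0xfffffffffffe) & 0xffffffffffff == 0x3.
 */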

void riscv_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);

	if (!(hwc->state & PERF_HES_STOPPED)) {
		if (rvpmu->ctr_stop) {
			rvpmu->ctr_stop(event, 0);
			hwc->state |= PERF_HES_STOPPED;
		}
		riscv_pmu_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

int riscv_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;
	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);

	local64_set(&hwc->prev_count, (u64)-left);

	return overflow;
}
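
/*
 * Setting prev_count to (u64)-left arranges for the counter to wrap after
 * 'left' more increments once the driver programs the hardware with it. For
 * example, with sample_period = 1000 and a 64-bit counter, prev_count is set
 * to -1000; riscv_pmu_start() below masks this to the counter width and
 * passes it to the driver's ctr_start() callback as the initial value.
 */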

void riscv_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
	u64 init_val;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	hwc->state = 0;
	riscv_pmu_event_set_period(event);
	init_val = local64_read(&hwc->prev_count) & max_period;
	rvpmu->ctr_start(event, init_val);
	perf_event_update_userpage(event);
}

static int riscv_pmu_add(struct perf_event *event, int flags)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	idx = rvpmu->ctr_get_idx(event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	cpuc->events[idx] = event;
	cpuc->n_events++;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		riscv_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}
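
/*
 * The perf core pairs these callbacks: ->add() is called with PERF_EF_START
 * when the event should also be started (PERF_EF_RELOAD asks ->start() to
 * reload the counter, PERF_EF_UPDATE asks ->stop() to update the count), and
 * ->del() below undoes ->add() by stopping the event and releasing the
 * counter index obtained from ctr_get_idx().
 */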

static void riscv_pmu_del(struct perf_event *event, int flags)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;

	riscv_pmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[hwc->idx] = NULL;
	/* The firmware needs to reset the counter mapping */
	if (rvpmu->ctr_stop)
		rvpmu->ctr_stop(event, RISCV_PMU_STOP_FLAG_RESET);
	cpuc->n_events--;
	if (rvpmu->ctr_clear_idx)
		rvpmu->ctr_clear_idx(event);
	perf_event_update_userpage(event);
	hwc->idx = -1;
}

static void riscv_pmu_read(struct perf_event *event)
{
	riscv_pmu_event_update(event);
}

static int riscv_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	int mapped_event;
	u64 event_config = 0;
	uint64_t cmask;

	hwc->flags = 0;
	mapped_event = rvpmu->event_map(event, &event_config);
	if (mapped_event < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapped_event;
	}

	/*
	 * idx is set to -1 because the index of a general event should not be
	 * decided until binding to some counter in pmu->add().
	 * config will contain the counter CSR information and idx will
	 * contain the counter index.
	 */
	hwc->config = event_config;
	hwc->idx = -1;
	hwc->event_base = mapped_event;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		cmask = riscv_pmu_ctr_get_width_mask(event);
		hwc->sample_period  =  cmask >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}
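
/*
 * For example, with the default 64-bit width mask a non-sampling event gets
 * sample_period = GENMASK_ULL(63, 0) >> 1 == 0x7fffffffffffffff, i.e. half
 * of the counter range, matching the comment above.
 */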

struct riscv_pmu *riscv_pmu_alloc(void)
{
	struct riscv_pmu *pmu;
	int cpuid, i;
	struct cpu_hw_events *cpuc;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		goto out;

	pmu->hw_events = alloc_percpu_gfp(struct cpu_hw_events, GFP_KERNEL);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	for_each_possible_cpu(cpuid) {
		cpuc = per_cpu_ptr(pmu->hw_events, cpuid);
		cpuc->n_events = 0;
		for (i = 0; i < RISCV_MAX_COUNTERS; i++)
			cpuc->events[i] = NULL;
	}
	pmu->pmu = (struct pmu) {
		.event_init	= riscv_pmu_event_init,
		.add		= riscv_pmu_add,
		.del		= riscv_pmu_del,
		.start		= riscv_pmu_start,
		.stop		= riscv_pmu_stop,
		.read		= riscv_pmu_read,
	};

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}
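
/*
 * Usage sketch (illustrative only, the "my_*" names are placeholders): a
 * platform PMU driver is expected to allocate the PMU, fill in the counter
 * callbacks used above and register the embedded struct pmu with the perf
 * core, roughly as follows:
 *
 *	struct riscv_pmu *pmu = riscv_pmu_alloc();
 *
 *	if (!pmu)
 *		return -ENOMEM;
 *	pmu->ctr_get_idx   = my_ctr_get_idx;
 *	pmu->ctr_get_width = my_ctr_get_width;
 *	pmu->ctr_clear_idx = my_ctr_clear_idx;
 *	pmu->ctr_read      = my_ctr_read;
 *	pmu->ctr_start     = my_ctr_start;
 *	pmu->ctr_stop      = my_ctr_stop;
 *	pmu->event_map     = my_event_map;
 *	return perf_pmu_register(&pmu->pmu, "my_riscv_pmu", -1);
 *
 * The in-tree RISC-V PMU drivers provide the real callback implementations.
 */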