v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * RISC-V performance counter support.
  4 *
  5 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
  6 *
  7 * This implementation is based on old RISC-V perf and ARM perf event code
  8 * which are in turn based on sparc64 and x86 code.
  9 */
 10
 11#include <linux/cpumask.h>
 12#include <linux/irq.h>
 13#include <linux/irqdesc.h>
 14#include <linux/perf/riscv_pmu.h>
 15#include <linux/printk.h>
 16#include <linux/smp.h>
 17
 18#include <asm/sbi.h>
 19
 20static unsigned long csr_read_num(int csr_num)
 21{
 22#define switchcase_csr_read(__csr_num, __val)		{\
 23	case __csr_num:					\
 24		__val = csr_read(__csr_num);		\
 25		break; }
 26#define switchcase_csr_read_2(__csr_num, __val)		{\
 27	switchcase_csr_read(__csr_num + 0, __val)	 \
 28	switchcase_csr_read(__csr_num + 1, __val)}
 29#define switchcase_csr_read_4(__csr_num, __val)		{\
 30	switchcase_csr_read_2(__csr_num + 0, __val)	 \
 31	switchcase_csr_read_2(__csr_num + 2, __val)}
 32#define switchcase_csr_read_8(__csr_num, __val)		{\
 33	switchcase_csr_read_4(__csr_num + 0, __val)	 \
 34	switchcase_csr_read_4(__csr_num + 4, __val)}
 35#define switchcase_csr_read_16(__csr_num, __val)	{\
 36	switchcase_csr_read_8(__csr_num + 0, __val)	 \
 37	switchcase_csr_read_8(__csr_num + 8, __val)}
 38#define switchcase_csr_read_32(__csr_num, __val)	{\
 39	switchcase_csr_read_16(__csr_num + 0, __val)	 \
 40	switchcase_csr_read_16(__csr_num + 16, __val)}
 41
 42	unsigned long ret = 0;
 43
 44	switch (csr_num) {
 45	switchcase_csr_read_32(CSR_CYCLE, ret)
 46	switchcase_csr_read_32(CSR_CYCLEH, ret)
 47	default :
 48		break;
 49	}
 50
 51	return ret;
 52#undef switchcase_csr_read_32
 53#undef switchcase_csr_read_16
 54#undef switchcase_csr_read_8
 55#undef switchcase_csr_read_4
 56#undef switchcase_csr_read_2
 57#undef switchcase_csr_read
 58}
 59
 60/*
 61 * Read the CSR of a corresponding counter.
 62 */
 63unsigned long riscv_pmu_ctr_read_csr(unsigned long csr)
 64{
 65	if (csr < CSR_CYCLE || csr > CSR_HPMCOUNTER31H ||
 66	   (csr > CSR_HPMCOUNTER31 && csr < CSR_CYCLEH)) {
 67		pr_err("Invalid performance counter csr %lx\n", csr);
 68		return -EINVAL;
 69	}
 70
 71	return csr_read_num(csr);
 72}
 73
 74u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
 75{
 76	int cwidth;
 77	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
 78	struct hw_perf_event *hwc = &event->hw;
 79
 80	if (!rvpmu->ctr_get_width)
 81	/**
 82	 * If the pmu driver doesn't support counter width, set it to default
 83	 * maximum allowed by the specification.
 84	 */
 85		cwidth = 63;
 86	else {
 87		if (hwc->idx == -1)
 88			/* Handle init case where idx is not initialized yet */
 89			cwidth = rvpmu->ctr_get_width(0);
 90		else
 91			cwidth = rvpmu->ctr_get_width(hwc->idx);
 92	}
 93
 94	return GENMASK_ULL(cwidth, 0);
 95}
 96
 97u64 riscv_pmu_event_update(struct perf_event *event)
 98{
 99	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
100	struct hw_perf_event *hwc = &event->hw;
101	u64 prev_raw_count, new_raw_count;
102	unsigned long cmask;
103	u64 oldval, delta;
104
105	if (!rvpmu->ctr_read)
106		return 0;
107
108	cmask = riscv_pmu_ctr_get_width_mask(event);
109
110	do {
111		prev_raw_count = local64_read(&hwc->prev_count);
112		new_raw_count = rvpmu->ctr_read(event);
113		oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
114					 new_raw_count);
115	} while (oldval != prev_raw_count);
116
117	delta = (new_raw_count - prev_raw_count) & cmask;
118	local64_add(delta, &event->count);
119	local64_sub(delta, &hwc->period_left);
120
121	return delta;
122}
123
124void riscv_pmu_stop(struct perf_event *event, int flags)
125{
126	struct hw_perf_event *hwc = &event->hw;
127	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
128
129	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
130
131	if (!(hwc->state & PERF_HES_STOPPED)) {
132		if (rvpmu->ctr_stop) {
133			rvpmu->ctr_stop(event, 0);
134			hwc->state |= PERF_HES_STOPPED;
135		}
136		riscv_pmu_event_update(event);
137		hwc->state |= PERF_HES_UPTODATE;
138	}
139}
140
141int riscv_pmu_event_set_period(struct perf_event *event)
142{
143	struct hw_perf_event *hwc = &event->hw;
144	s64 left = local64_read(&hwc->period_left);
145	s64 period = hwc->sample_period;
146	int overflow = 0;
147	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
148
149	if (unlikely(left <= -period)) {
150		left = period;
151		local64_set(&hwc->period_left, left);
152		hwc->last_period = period;
153		overflow = 1;
154	}
155
156	if (unlikely(left <= 0)) {
157		left += period;
158		local64_set(&hwc->period_left, left);
159		hwc->last_period = period;
160		overflow = 1;
161	}
162
163	/*
164	 * Limit the maximum period to prevent the counter value
165	 * from overtaking the one we are about to program. In
166	 * effect we are reducing max_period to account for
167	 * interrupt latency (and we are being very conservative).
168	 */
169	if (left > (max_period >> 1))
170		left = (max_period >> 1);
171
172	local64_set(&hwc->prev_count, (u64)-left);
173
174	return overflow;
175}
176
177void riscv_pmu_start(struct perf_event *event, int flags)
178{
179	struct hw_perf_event *hwc = &event->hw;
180	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
181	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
182	u64 init_val;
183
184	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
185		return;
186
187	if (flags & PERF_EF_RELOAD)
188		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
189
190	hwc->state = 0;
191	riscv_pmu_event_set_period(event);
192	init_val = local64_read(&hwc->prev_count) & max_period;
193	rvpmu->ctr_start(event, init_val);
194	perf_event_update_userpage(event);
195}
196
197static int riscv_pmu_add(struct perf_event *event, int flags)
198{
199	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
200	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
201	struct hw_perf_event *hwc = &event->hw;
202	int idx;
203
204	idx = rvpmu->ctr_get_idx(event);
205	if (idx < 0)
206		return idx;
207
208	hwc->idx = idx;
209	cpuc->events[idx] = event;
210	cpuc->n_events++;
211	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
212	if (flags & PERF_EF_START)
213		riscv_pmu_start(event, PERF_EF_RELOAD);
214
215	/* Propagate our changes to the userspace mapping. */
216	perf_event_update_userpage(event);
217
218	return 0;
219}
220
221static void riscv_pmu_del(struct perf_event *event, int flags)
222{
223	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
224	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
225	struct hw_perf_event *hwc = &event->hw;
226
227	riscv_pmu_stop(event, PERF_EF_UPDATE);
228	cpuc->events[hwc->idx] = NULL;
229	/* The firmware need to reset the counter mapping */
230	if (rvpmu->ctr_stop)
231		rvpmu->ctr_stop(event, RISCV_PMU_STOP_FLAG_RESET);
232	cpuc->n_events--;
233	if (rvpmu->ctr_clear_idx)
234		rvpmu->ctr_clear_idx(event);
235	perf_event_update_userpage(event);
236	hwc->idx = -1;
237}
238
239static void riscv_pmu_read(struct perf_event *event)
240{
241	riscv_pmu_event_update(event);
242}
243
244static int riscv_pmu_event_init(struct perf_event *event)
245{
246	struct hw_perf_event *hwc = &event->hw;
247	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
248	int mapped_event;
249	u64 event_config = 0;
250	uint64_t cmask;
251
252	hwc->flags = 0;
253	mapped_event = rvpmu->event_map(event, &event_config);
254	if (mapped_event < 0) {
255		pr_debug("event %x:%llx not supported\n", event->attr.type,
256			 event->attr.config);
257		return mapped_event;
258	}
259
260	/*
261	 * idx is set to -1 because the index of a general event should not be
262	 * decided until binding to some counter in pmu->add().
263	 * config will contain the information about counter CSR
264	 * the idx will contain the counter index
265	 */
266	hwc->config = event_config;
267	hwc->idx = -1;
268	hwc->event_base = mapped_event;
269
270	if (!is_sampling_event(event)) {
271		/*
272		 * For non-sampling runs, limit the sample_period to half
273		 * of the counter width. That way, the new counter value
274		 * is far less likely to overtake the previous one unless
275		 * you have some serious IRQ latency issues.
276		 */
277		cmask = riscv_pmu_ctr_get_width_mask(event);
278		hwc->sample_period  =  cmask >> 1;
279		hwc->last_period    = hwc->sample_period;
280		local64_set(&hwc->period_left, hwc->sample_period);
281	}
282
283	return 0;
284}
285
286struct riscv_pmu *riscv_pmu_alloc(void)
287{
288	struct riscv_pmu *pmu;
289	int cpuid, i;
290	struct cpu_hw_events *cpuc;
291
292	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
293	if (!pmu)
294		goto out;
295
296	pmu->hw_events = alloc_percpu_gfp(struct cpu_hw_events, GFP_KERNEL);
297	if (!pmu->hw_events) {
298		pr_info("failed to allocate per-cpu PMU data.\n");
299		goto out_free_pmu;
300	}
301
302	for_each_possible_cpu(cpuid) {
303		cpuc = per_cpu_ptr(pmu->hw_events, cpuid);
304		cpuc->n_events = 0;
305		for (i = 0; i < RISCV_MAX_COUNTERS; i++)
306			cpuc->events[i] = NULL;
307	}
308	pmu->pmu = (struct pmu) {
309		.event_init	= riscv_pmu_event_init,
310		.add		= riscv_pmu_add,
311		.del		= riscv_pmu_del,
312		.start		= riscv_pmu_start,
313		.stop		= riscv_pmu_stop,
314		.read		= riscv_pmu_read,
315	};
316
317	return pmu;
318
319out_free_pmu:
320	kfree(pmu);
321out:
322	return NULL;
323}
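
In riscv_pmu_event_set_period() above, the counter is armed with (u64)-left, which riscv_pmu_start() then masks to the counter width; thanks to two's-complement wrap-around, the hardware counter reaches its overflow point after exactly `left` further increments. A minimal standalone sketch of that arithmetic, assuming a made-up 48-bit counter width and sample distance (not taken from the kernel sources):

/*
 * Standalone demonstration of the (u64)-left trick used by
 * riscv_pmu_event_set_period()/riscv_pmu_start().  The 48-bit width and
 * the value of 'left' are illustrative numbers only.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_period = (1ULL << 48) - 1;	/* GENMASK_ULL(47, 0) */
	int64_t left = 1000;			/* events until the next sample */

	/* What riscv_pmu_start() would program into the counter. */
	uint64_t init_val = (uint64_t)-left & max_period;

	/* After 'left' more increments the counter wraps to 0, i.e. it overflows. */
	printf("init_val        = %#llx\n", (unsigned long long)init_val);
	printf("init_val + left = %#llx\n",
	       (unsigned long long)((init_val + (uint64_t)left) & max_period));
	return 0;
}

The cap of `left` at max_period >> 1 in the code above is the conservative allowance for interrupt latency described in its comment: the programmed value is kept at least half a counter range away from the wrap point.
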
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * RISC-V performance counter support.
  4 *
  5 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
  6 *
  7 * This implementation is based on old RISC-V perf and ARM perf event code
  8 * which are in turn based on sparc64 and x86 code.
  9 */
 10
 11#include <linux/cpumask.h>
 12#include <linux/irq.h>
 13#include <linux/irqdesc.h>
 14#include <linux/perf/riscv_pmu.h>
 15#include <linux/printk.h>
 16#include <linux/smp.h>
 17#include <linux/sched_clock.h>
 18
 19#include <asm/sbi.h>
 20
 21static bool riscv_perf_user_access(struct perf_event *event)
 22{
 23	return ((event->attr.type == PERF_TYPE_HARDWARE) ||
 24		(event->attr.type == PERF_TYPE_HW_CACHE) ||
 25		(event->attr.type == PERF_TYPE_RAW)) &&
 26		!!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) &&
 27		(event->hw.idx != -1);
 28}
 29
 30void arch_perf_update_userpage(struct perf_event *event,
 31			       struct perf_event_mmap_page *userpg, u64 now)
 32{
 33	struct clock_read_data *rd;
 34	unsigned int seq;
 35	u64 ns;
 36
 37	userpg->cap_user_time = 0;
 38	userpg->cap_user_time_zero = 0;
 39	userpg->cap_user_time_short = 0;
 40	userpg->cap_user_rdpmc = riscv_perf_user_access(event);
 41
 42#ifdef CONFIG_RISCV_PMU
 43	/*
 44	 * The counters are 64-bit but the priv spec doesn't mandate all the
 45	 * bits to be implemented: that's why, counter width can vary based on
 46	 * the cpu vendor.
 47	 */
 48	if (userpg->cap_user_rdpmc)
 49		userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1;
 50#endif
 51
 52	do {
 53		rd = sched_clock_read_begin(&seq);
 54
 55		userpg->time_mult = rd->mult;
 56		userpg->time_shift = rd->shift;
 57		userpg->time_zero = rd->epoch_ns;
 58		userpg->time_cycles = rd->epoch_cyc;
 59		userpg->time_mask = rd->sched_clock_mask;
 60
 61		/*
 62		 * Subtract the cycle base, such that software that
 63		 * doesn't know about cap_user_time_short still 'works'
 64		 * assuming no wraps.
 65		 */
 66		ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
 67		userpg->time_zero -= ns;
 68
 69	} while (sched_clock_read_retry(seq));
 70
 71	userpg->time_offset = userpg->time_zero - now;
 72
 73	/*
 74	 * time_shift is not expected to be greater than 31 due to
 75	 * the original published conversion algorithm shifting a
 76	 * 32-bit value (now specifies a 64-bit value) - refer
 77	 * perf_event_mmap_page documentation in perf_event.h.
 78	 */
 79	if (userpg->time_shift == 32) {
 80		userpg->time_shift = 31;
 81		userpg->time_mult >>= 1;
 82	}
 83
 84	/*
 85	 * Internal timekeeping for enabled/running/stopped times
 86	 * is always computed with the sched_clock.
 87	 */
 88	userpg->cap_user_time = 1;
 89	userpg->cap_user_time_zero = 1;
 90	userpg->cap_user_time_short = 1;
 91}
 92
 93static unsigned long csr_read_num(int csr_num)
 94{
 95#define switchcase_csr_read(__csr_num, __val)		{\
 96	case __csr_num:					\
 97		__val = csr_read(__csr_num);		\
 98		break; }
 99#define switchcase_csr_read_2(__csr_num, __val)		{\
100	switchcase_csr_read(__csr_num + 0, __val)	 \
101	switchcase_csr_read(__csr_num + 1, __val)}
102#define switchcase_csr_read_4(__csr_num, __val)		{\
103	switchcase_csr_read_2(__csr_num + 0, __val)	 \
104	switchcase_csr_read_2(__csr_num + 2, __val)}
105#define switchcase_csr_read_8(__csr_num, __val)		{\
106	switchcase_csr_read_4(__csr_num + 0, __val)	 \
107	switchcase_csr_read_4(__csr_num + 4, __val)}
108#define switchcase_csr_read_16(__csr_num, __val)	{\
109	switchcase_csr_read_8(__csr_num + 0, __val)	 \
110	switchcase_csr_read_8(__csr_num + 8, __val)}
111#define switchcase_csr_read_32(__csr_num, __val)	{\
112	switchcase_csr_read_16(__csr_num + 0, __val)	 \
113	switchcase_csr_read_16(__csr_num + 16, __val)}
114
115	unsigned long ret = 0;
116
117	switch (csr_num) {
118	switchcase_csr_read_32(CSR_CYCLE, ret)
119	switchcase_csr_read_32(CSR_CYCLEH, ret)
120	default :
121		break;
122	}
123
124	return ret;
125#undef switchcase_csr_read_32
126#undef switchcase_csr_read_16
127#undef switchcase_csr_read_8
128#undef switchcase_csr_read_4
129#undef switchcase_csr_read_2
130#undef switchcase_csr_read
131}
132
133/*
134 * Read the CSR of a corresponding counter.
135 */
136unsigned long riscv_pmu_ctr_read_csr(unsigned long csr)
137{
138	if (csr < CSR_CYCLE || csr > CSR_HPMCOUNTER31H ||
139	   (csr > CSR_HPMCOUNTER31 && csr < CSR_CYCLEH)) {
140		pr_err("Invalid performance counter csr %lx\n", csr);
141		return -EINVAL;
142	}
143
144	return csr_read_num(csr);
145}
146
147u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
148{
149	int cwidth;
150	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
151	struct hw_perf_event *hwc = &event->hw;
152
153	if (hwc->idx == -1)
154		/* Handle init case where idx is not initialized yet */
155		cwidth = rvpmu->ctr_get_width(0);
156	else
157		cwidth = rvpmu->ctr_get_width(hwc->idx);
158
159	return GENMASK_ULL(cwidth, 0);
160}
161
162u64 riscv_pmu_event_update(struct perf_event *event)
163{
164	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
165	struct hw_perf_event *hwc = &event->hw;
166	u64 prev_raw_count, new_raw_count;
167	unsigned long cmask;
168	u64 oldval, delta;
169
170	if (!rvpmu->ctr_read)
171		return 0;
172
173	cmask = riscv_pmu_ctr_get_width_mask(event);
174
175	do {
176		prev_raw_count = local64_read(&hwc->prev_count);
177		new_raw_count = rvpmu->ctr_read(event);
178		oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
179					 new_raw_count);
180	} while (oldval != prev_raw_count);
181
182	delta = (new_raw_count - prev_raw_count) & cmask;
183	local64_add(delta, &event->count);
184	local64_sub(delta, &hwc->period_left);
185
186	return delta;
187}
188
189void riscv_pmu_stop(struct perf_event *event, int flags)
190{
191	struct hw_perf_event *hwc = &event->hw;
192	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
193
194	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
195
196	if (!(hwc->state & PERF_HES_STOPPED)) {
197		if (rvpmu->ctr_stop) {
198			rvpmu->ctr_stop(event, 0);
199			hwc->state |= PERF_HES_STOPPED;
200		}
201		riscv_pmu_event_update(event);
202		hwc->state |= PERF_HES_UPTODATE;
203	}
204}
205
206int riscv_pmu_event_set_period(struct perf_event *event)
207{
208	struct hw_perf_event *hwc = &event->hw;
209	s64 left = local64_read(&hwc->period_left);
210	s64 period = hwc->sample_period;
211	int overflow = 0;
212	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
213
214	if (unlikely(left <= -period)) {
215		left = period;
216		local64_set(&hwc->period_left, left);
217		hwc->last_period = period;
218		overflow = 1;
219	}
220
221	if (unlikely(left <= 0)) {
222		left += period;
223		local64_set(&hwc->period_left, left);
224		hwc->last_period = period;
225		overflow = 1;
226	}
227
228	/*
229	 * Limit the maximum period to prevent the counter value
230	 * from overtaking the one we are about to program. In
231	 * effect we are reducing max_period to account for
232	 * interrupt latency (and we are being very conservative).
233	 */
234	if (left > (max_period >> 1))
235		left = (max_period >> 1);
236
237	local64_set(&hwc->prev_count, (u64)-left);
238
239	perf_event_update_userpage(event);
240
241	return overflow;
242}
243
244void riscv_pmu_start(struct perf_event *event, int flags)
245{
246	struct hw_perf_event *hwc = &event->hw;
247	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
248	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
249	u64 init_val;
250
251	if (flags & PERF_EF_RELOAD)
252		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
253
254	hwc->state = 0;
255	riscv_pmu_event_set_period(event);
256	init_val = local64_read(&hwc->prev_count) & max_period;
257	rvpmu->ctr_start(event, init_val);
258	perf_event_update_userpage(event);
259}
260
261static int riscv_pmu_add(struct perf_event *event, int flags)
262{
263	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
264	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
265	struct hw_perf_event *hwc = &event->hw;
266	int idx;
267
268	idx = rvpmu->ctr_get_idx(event);
269	if (idx < 0)
270		return idx;
271
272	hwc->idx = idx;
273	cpuc->events[idx] = event;
274	cpuc->n_events++;
275	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
276	if (flags & PERF_EF_START)
277		riscv_pmu_start(event, PERF_EF_RELOAD);
278
279	/* Propagate our changes to the userspace mapping. */
280	perf_event_update_userpage(event);
281
282	return 0;
283}
284
285static void riscv_pmu_del(struct perf_event *event, int flags)
286{
287	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
288	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
289	struct hw_perf_event *hwc = &event->hw;
290
291	riscv_pmu_stop(event, PERF_EF_UPDATE);
292	cpuc->events[hwc->idx] = NULL;
293	/* The firmware need to reset the counter mapping */
294	if (rvpmu->ctr_stop)
295		rvpmu->ctr_stop(event, RISCV_PMU_STOP_FLAG_RESET);
296	cpuc->n_events--;
297	if (rvpmu->ctr_clear_idx)
298		rvpmu->ctr_clear_idx(event);
299	perf_event_update_userpage(event);
300	hwc->idx = -1;
301}
302
303static void riscv_pmu_read(struct perf_event *event)
304{
305	riscv_pmu_event_update(event);
306}
307
308static int riscv_pmu_event_init(struct perf_event *event)
309{
310	struct hw_perf_event *hwc = &event->hw;
311	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
312	int mapped_event;
313	u64 event_config = 0;
314	uint64_t cmask;
315
316	hwc->flags = 0;
317	mapped_event = rvpmu->event_map(event, &event_config);
318	if (mapped_event < 0) {
319		pr_debug("event %x:%llx not supported\n", event->attr.type,
320			 event->attr.config);
321		return mapped_event;
322	}
323
324	/*
325	 * idx is set to -1 because the index of a general event should not be
326	 * decided until binding to some counter in pmu->add().
327	 * config will contain the information about counter CSR
328	 * the idx will contain the counter index
329	 */
330	hwc->config = event_config;
331	hwc->idx = -1;
332	hwc->event_base = mapped_event;
333
334	if (rvpmu->event_init)
335		rvpmu->event_init(event);
336
337	if (!is_sampling_event(event)) {
338		/*
339		 * For non-sampling runs, limit the sample_period to half
340		 * of the counter width. That way, the new counter value
341		 * is far less likely to overtake the previous one unless
342		 * you have some serious IRQ latency issues.
343		 */
344		cmask = riscv_pmu_ctr_get_width_mask(event);
345		hwc->sample_period  =  cmask >> 1;
346		hwc->last_period    = hwc->sample_period;
347		local64_set(&hwc->period_left, hwc->sample_period);
348	}
349
350	return 0;
351}
352
353static int riscv_pmu_event_idx(struct perf_event *event)
354{
355	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
356
357	if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
358		return 0;
359
360	if (rvpmu->csr_index)
361		return rvpmu->csr_index(event) + 1;
362
363	return 0;
364}
365
366static void riscv_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
367{
368	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
369
370	if (rvpmu->event_mapped) {
371		rvpmu->event_mapped(event, mm);
372		perf_event_update_userpage(event);
373	}
374}
375
376static void riscv_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
377{
378	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
379
380	if (rvpmu->event_unmapped) {
381		rvpmu->event_unmapped(event, mm);
382		perf_event_update_userpage(event);
383	}
384}
385
386struct riscv_pmu *riscv_pmu_alloc(void)
387{
388	struct riscv_pmu *pmu;
389	int cpuid, i;
390	struct cpu_hw_events *cpuc;
391
392	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
393	if (!pmu)
394		goto out;
395
396	pmu->hw_events = alloc_percpu_gfp(struct cpu_hw_events, GFP_KERNEL);
397	if (!pmu->hw_events) {
398		pr_info("failed to allocate per-cpu PMU data.\n");
399		goto out_free_pmu;
400	}
401
402	for_each_possible_cpu(cpuid) {
403		cpuc = per_cpu_ptr(pmu->hw_events, cpuid);
404		cpuc->n_events = 0;
405		for (i = 0; i < RISCV_MAX_COUNTERS; i++)
406			cpuc->events[i] = NULL;
407	}
408	pmu->pmu = (struct pmu) {
409		.event_init	= riscv_pmu_event_init,
410		.event_mapped	= riscv_pmu_event_mapped,
411		.event_unmapped	= riscv_pmu_event_unmapped,
412		.event_idx	= riscv_pmu_event_idx,
413		.add		= riscv_pmu_add,
414		.del		= riscv_pmu_del,
415		.start		= riscv_pmu_start,
416		.stop		= riscv_pmu_stop,
417		.read		= riscv_pmu_read,
418	};
419
420	return pmu;
421
422out_free_pmu:
423	kfree(pmu);
424out:
425	return NULL;
426}
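
riscv_pmu_alloc() only wires up the generic perf callbacks; the platform back-ends in drivers/perf/ (the SBI and legacy counter drivers) fill in the counter hooks before registering the PMU. The following is a minimal, hypothetical sketch rather than the in-tree driver: it shows how such a back-end could populate the struct riscv_pmu callbacks that the core code above invokes (ctr_get_idx, ctr_get_width, ctr_read, ctr_start, ctr_stop, event_map) and register the result. Every demo_* identifier is an illustrative assumption.

/*
 * Hypothetical back-end sketch (not the in-tree pmu_sbi/pmu_legacy code):
 * allocate the core riscv_pmu, wire up the callbacks the core calls and
 * register it with the perf core.  All demo_* names are made up.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/perf/riscv_pmu.h>

#include <asm/csr.h>		/* CSR_CYCLE */

static int demo_ctr_get_idx(struct perf_event *event)
{
	return 0;		/* toy: always hand out counter 0 */
}

static int demo_ctr_get_width(int idx)
{
	return 63;		/* toy: pretend counters are the full 64 bits wide */
}

static u64 demo_ctr_read(struct perf_event *event)
{
	/* The counter CSRs are contiguous, so index 0 maps to CSR_CYCLE here. */
	return riscv_pmu_ctr_read_csr(CSR_CYCLE + event->hw.idx);
}

static int demo_event_map(struct perf_event *event, u64 *config)
{
	*config = event->attr.config;	/* pass the raw config through */
	return 0;			/* "mapped" event id for hwc->event_base */
}

static void demo_ctr_start(struct perf_event *event, u64 init_val)
{
	/* A real driver would program init_val and enable the counter here. */
}

static void demo_ctr_stop(struct perf_event *event, unsigned long flag)
{
	/* A real driver would disable (and optionally reset) the counter here. */
}

static int __init demo_riscv_pmu_init(void)
{
	struct riscv_pmu *pmu = riscv_pmu_alloc();

	if (!pmu)
		return -ENOMEM;

	pmu->ctr_get_idx   = demo_ctr_get_idx;
	pmu->ctr_get_width = demo_ctr_get_width;
	pmu->ctr_read      = demo_ctr_read;
	pmu->ctr_start     = demo_ctr_start;
	pmu->ctr_stop      = demo_ctr_stop;
	pmu->event_map     = demo_event_map;

	/* -1: let the perf core pick a dynamic PMU type id. */
	return perf_pmu_register(&pmu->pmu, "demo_riscv_pmu", -1);
}
device_initcall(demo_riscv_pmu_init);
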