// SPDX-License-Identifier: GPL-2.0-only
/*
 * Perf support for the Statistical Profiling Extension, introduced as
 * part of ARMv8.2.
 *
 * Copyright (C) 2016 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define PMUNAME		"arm_spe"
#define DRVNAME		PMUNAME "_pmu"
#define pr_fmt(fmt)	DRVNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/capability.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>

/*
 * Cache whether the event is allowed to trace context information.
 * This allows us to perform the check, i.e. perfmon_capable(), once
 * in the context of the event owner, during event_init().
 */
#define SPE_PMU_HW_FLAGS_CX			0x00001

static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_CX);

static void set_spe_event_has_cx(struct perf_event *event)
{
	if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
		event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
}

static bool get_spe_event_has_cx(struct perf_event *event)
{
	return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX);
}

#define ARM_SPE_BUF_PAD_BYTE			0

struct arm_spe_pmu_buf {
	int					nr_pages;
	bool					snapshot;
	void					*base;
};

struct arm_spe_pmu {
	struct pmu				pmu;
	struct platform_device			*pdev;
	cpumask_t				supported_cpus;
	struct hlist_node			hotplug_node;

	int					irq; /* PPI */
	u16					pmsver;
	u16					min_period;
	u16					counter_sz;

#define SPE_PMU_FEAT_FILT_EVT			(1UL << 0)
#define SPE_PMU_FEAT_FILT_TYP			(1UL << 1)
#define SPE_PMU_FEAT_FILT_LAT			(1UL << 2)
#define SPE_PMU_FEAT_ARCH_INST			(1UL << 3)
#define SPE_PMU_FEAT_LDS			(1UL << 4)
#define SPE_PMU_FEAT_ERND			(1UL << 5)
#define SPE_PMU_FEAT_INV_FILT_EVT		(1UL << 6)
#define SPE_PMU_FEAT_DEV_PROBED			(1UL << 63)
	u64					features;

	u16					max_record_sz;
	u16					align;
	struct perf_output_handle __percpu	*handle;
};

#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))

/* Convert a free-running index from perf into an SPE buffer offset */
#define PERF_IDX2OFF(idx, buf)	((idx) % ((buf)->nr_pages << PAGE_SHIFT))
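/*
 * A worked example, assuming 4KiB pages: an 8-page buffer gives
 * nr_pages << PAGE_SHIFT == 32768, so the free-running index 40960
 * wraps to buffer offset 40960 % 32768 == 8192.
 */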

/* Keep track of our dynamic hotplug state */
static enum cpuhp_state arm_spe_pmu_online;

enum arm_spe_pmu_buf_fault_action {
	SPE_PMU_BUF_FAULT_ACT_SPURIOUS,
	SPE_PMU_BUF_FAULT_ACT_FATAL,
	SPE_PMU_BUF_FAULT_ACT_OK,
};

/* This sysfs gunk was really good fun to write. */
enum arm_spe_pmu_capabilities {
	SPE_PMU_CAP_ARCH_INST = 0,
	SPE_PMU_CAP_ERND,
	SPE_PMU_CAP_FEAT_MAX,
	SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX,
	SPE_PMU_CAP_MIN_IVAL,
};

static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
	[SPE_PMU_CAP_ARCH_INST]	= SPE_PMU_FEAT_ARCH_INST,
	[SPE_PMU_CAP_ERND]	= SPE_PMU_FEAT_ERND,
};

static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
{
	if (cap < SPE_PMU_CAP_FEAT_MAX)
		return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]);

	switch (cap) {
	case SPE_PMU_CAP_CNT_SZ:
		return spe_pmu->counter_sz;
	case SPE_PMU_CAP_MIN_IVAL:
		return spe_pmu->min_period;
	default:
		WARN(1, "unknown cap %d\n", cap);
	}

	return 0;
}

static ssize_t arm_spe_pmu_cap_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	int cap = (long)ea->var;

	return sysfs_emit(buf, "%u\n", arm_spe_pmu_cap_get(spe_pmu, cap));
}

#define SPE_EXT_ATTR_ENTRY(_name, _func, _var)				\
	&((struct dev_ext_attribute[]) {				\
		{ __ATTR(_name, S_IRUGO, _func, NULL), (void *)_var }	\
	})[0].attr.attr

#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var)				\
	SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var)

static struct attribute *arm_spe_pmu_cap_attr[] = {
	SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST),
	SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND),
	SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ),
	SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL),
	NULL,
};

static const struct attribute_group arm_spe_pmu_cap_group = {
	.name	= "caps",
	.attrs	= arm_spe_pmu_cap_attr,
};
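
/*
 * These capabilities appear under sysfs, e.g. (a sketch; "arm_spe_0" and
 * the values shown depend on how the PMU probed on a given system):
 *
 *   $ cat /sys/bus/event_source/devices/arm_spe_0/caps/min_interval
 *   1024
 *   $ cat /sys/bus/event_source/devices/arm_spe_0/caps/count_size
 *   12
 */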

/* User ABI */
#define ATTR_CFG_FLD_ts_enable_CFG		config	/* PMSCR_EL1.TS */
#define ATTR_CFG_FLD_ts_enable_LO		0
#define ATTR_CFG_FLD_ts_enable_HI		0
#define ATTR_CFG_FLD_pa_enable_CFG		config	/* PMSCR_EL1.PA */
#define ATTR_CFG_FLD_pa_enable_LO		1
#define ATTR_CFG_FLD_pa_enable_HI		1
#define ATTR_CFG_FLD_pct_enable_CFG		config	/* PMSCR_EL1.PCT */
#define ATTR_CFG_FLD_pct_enable_LO		2
#define ATTR_CFG_FLD_pct_enable_HI		2
#define ATTR_CFG_FLD_jitter_CFG			config	/* PMSIRR_EL1.RND */
#define ATTR_CFG_FLD_jitter_LO			16
#define ATTR_CFG_FLD_jitter_HI			16
#define ATTR_CFG_FLD_branch_filter_CFG		config	/* PMSFCR_EL1.B */
#define ATTR_CFG_FLD_branch_filter_LO		32
#define ATTR_CFG_FLD_branch_filter_HI		32
#define ATTR_CFG_FLD_load_filter_CFG		config	/* PMSFCR_EL1.LD */
#define ATTR_CFG_FLD_load_filter_LO		33
#define ATTR_CFG_FLD_load_filter_HI		33
#define ATTR_CFG_FLD_store_filter_CFG		config	/* PMSFCR_EL1.ST */
#define ATTR_CFG_FLD_store_filter_LO		34
#define ATTR_CFG_FLD_store_filter_HI		34

#define ATTR_CFG_FLD_event_filter_CFG		config1	/* PMSEVFR_EL1 */
#define ATTR_CFG_FLD_event_filter_LO		0
#define ATTR_CFG_FLD_event_filter_HI		63

#define ATTR_CFG_FLD_min_latency_CFG		config2	/* PMSLATFR_EL1.MINLAT */
#define ATTR_CFG_FLD_min_latency_LO		0
#define ATTR_CFG_FLD_min_latency_HI		11

#define ATTR_CFG_FLD_inv_event_filter_CFG	config3	/* PMSNEVFR_EL1 */
#define ATTR_CFG_FLD_inv_event_filter_LO	0
#define ATTR_CFG_FLD_inv_event_filter_HI	63

GEN_PMU_FORMAT_ATTR(ts_enable);
GEN_PMU_FORMAT_ATTR(pa_enable);
GEN_PMU_FORMAT_ATTR(pct_enable);
GEN_PMU_FORMAT_ATTR(jitter);
GEN_PMU_FORMAT_ATTR(branch_filter);
GEN_PMU_FORMAT_ATTR(load_filter);
GEN_PMU_FORMAT_ATTR(store_filter);
GEN_PMU_FORMAT_ATTR(event_filter);
GEN_PMU_FORMAT_ATTR(inv_event_filter);
GEN_PMU_FORMAT_ATTR(min_latency);

static struct attribute *arm_spe_pmu_formats_attr[] = {
	&format_attr_ts_enable.attr,
	&format_attr_pa_enable.attr,
	&format_attr_pct_enable.attr,
	&format_attr_jitter.attr,
	&format_attr_branch_filter.attr,
	&format_attr_load_filter.attr,
	&format_attr_store_filter.attr,
	&format_attr_event_filter.attr,
	&format_attr_inv_event_filter.attr,
	&format_attr_min_latency.attr,
	NULL,
};

static umode_t arm_spe_pmu_format_attr_is_visible(struct kobject *kobj,
						  struct attribute *attr,
						  int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);

	if (attr == &format_attr_inv_event_filter.attr &&
	    !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
		return 0;

	return attr->mode;
}

static const struct attribute_group arm_spe_pmu_format_group = {
	.name	= "format",
	.is_visible = arm_spe_pmu_format_attr_is_visible,
	.attrs	= arm_spe_pmu_formats_attr,
};
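
/*
 * The format strings generated above let the perf tool program these
 * fields by name. For example (a sketch; assumes this PMU probed as
 * arm_spe_0):
 *
 *   perf record -e arm_spe_0/ts_enable=1,load_filter=1,min_latency=64/ \
 *               -c 1024 -a -- sleep 1
 *
 * An explicit sample period (-c) is required, since event_init() below
 * rejects frequency-based sampling.
 */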

static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus);
}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *arm_spe_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group arm_spe_pmu_group = {
	.attrs	= arm_spe_pmu_attrs,
};

static const struct attribute_group *arm_spe_pmu_attr_groups[] = {
	&arm_spe_pmu_group,
	&arm_spe_pmu_cap_group,
	&arm_spe_pmu_format_group,
	NULL,
};

/* Convert between user ABI and register values */
static u64 arm_spe_event_to_pmscr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u64 reg = 0;

	reg |= FIELD_PREP(PMSCR_EL1_TS, ATTR_CFG_GET_FLD(attr, ts_enable));
	reg |= FIELD_PREP(PMSCR_EL1_PA, ATTR_CFG_GET_FLD(attr, pa_enable));
	reg |= FIELD_PREP(PMSCR_EL1_PCT, ATTR_CFG_GET_FLD(attr, pct_enable));

	if (!attr->exclude_user)
		reg |= PMSCR_EL1_E0SPE;

	if (!attr->exclude_kernel)
		reg |= PMSCR_EL1_E1SPE;

	if (get_spe_event_has_cx(event))
		reg |= PMSCR_EL1_CX;

	return reg;
}

static void arm_spe_event_sanitise_period(struct perf_event *event)
{
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
	u64 period = event->hw.sample_period;
	u64 max_period = PMSIRR_EL1_INTERVAL_MASK;

	if (period < spe_pmu->min_period)
		period = spe_pmu->min_period;
	else if (period > max_period)
		period = max_period;
	else
		period &= max_period;

	event->hw.sample_period = period;
}
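
/*
 * For example, on an implementation whose minimum sampling interval is
 * 256, a requested period of 100 is raised to 256; an in-range period is
 * masked so that only bits representable in PMSIRR_EL1.INTERVAL survive.
 */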

static u64 arm_spe_event_to_pmsirr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u64 reg = 0;

	arm_spe_event_sanitise_period(event);

	reg |= FIELD_PREP(PMSIRR_EL1_RND, ATTR_CFG_GET_FLD(attr, jitter));
	reg |= event->hw.sample_period;

	return reg;
}

static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u64 reg = 0;

	reg |= FIELD_PREP(PMSFCR_EL1_LD, ATTR_CFG_GET_FLD(attr, load_filter));
	reg |= FIELD_PREP(PMSFCR_EL1_ST, ATTR_CFG_GET_FLD(attr, store_filter));
	reg |= FIELD_PREP(PMSFCR_EL1_B, ATTR_CFG_GET_FLD(attr, branch_filter));

	if (reg)
		reg |= PMSFCR_EL1_FT;

	if (ATTR_CFG_GET_FLD(attr, event_filter))
		reg |= PMSFCR_EL1_FE;

	if (ATTR_CFG_GET_FLD(attr, inv_event_filter))
		reg |= PMSFCR_EL1_FnE;

	if (ATTR_CFG_GET_FLD(attr, min_latency))
		reg |= PMSFCR_EL1_FL;

	return reg;
}

static u64 arm_spe_event_to_pmsevfr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	return ATTR_CFG_GET_FLD(attr, event_filter);
}

static u64 arm_spe_event_to_pmsnevfr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	return ATTR_CFG_GET_FLD(attr, inv_event_filter);
}

static u64 arm_spe_event_to_pmslatfr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	return FIELD_PREP(PMSLATFR_EL1_MINLAT, ATTR_CFG_GET_FLD(attr, min_latency));
}

static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len)
{
	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
	u64 head = PERF_IDX2OFF(handle->head, buf);

	memset(buf->base + head, ARM_SPE_BUF_PAD_BYTE, len);
	if (!buf->snapshot)
		perf_aux_output_skip(handle, len);
}

static u64 arm_spe_pmu_next_snapshot_off(struct perf_output_handle *handle)
{
	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
	u64 head = PERF_IDX2OFF(handle->head, buf);
	u64 limit = buf->nr_pages * PAGE_SIZE;

	/*
	 * The trace format isn't parseable in reverse, so clamp
	 * the limit to half of the buffer size in snapshot mode
	 * so that the worst case is half a buffer of records, as
	 * opposed to a single record.
	 */
	if (head < limit >> 1)
		limit >>= 1;

	/*
	 * If we're within max_record_sz of the limit, we must
	 * pad, move the head index and recompute the limit.
	 */
	if (limit - head < spe_pmu->max_record_sz) {
		arm_spe_pmu_pad_buf(handle, limit - head);
		handle->head = PERF_IDX2OFF(limit, buf);
		limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head;
	}

	return limit;
}
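
/*
 * For example, with a 64KiB buffer: a head of 20KiB lies in the first
 * half, so the limit is clamped to 32KiB, whereas a head of 40KiB leaves
 * the limit at the full 64KiB.
 */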

static u64 __arm_spe_pmu_next_off(struct perf_output_handle *handle)
{
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
	const u64 bufsize = buf->nr_pages * PAGE_SIZE;
	u64 limit = bufsize;
	u64 head, tail, wakeup;

	/*
	 * The head can be misaligned for two reasons:
	 *
	 * 1. The hardware left PMBPTR pointing to the first byte after
	 *    a record when generating a buffer management event.
	 *
	 * 2. We used perf_aux_output_skip to consume handle->size bytes
	 *    and CIRC_SPACE was used to compute the size, which always
	 *    leaves one entry free.
	 *
	 * Deal with this by padding to the next alignment boundary and
	 * moving the head index. If we run out of buffer space, we'll
	 * reduce handle->size to zero and end up reporting truncation.
	 */
	head = PERF_IDX2OFF(handle->head, buf);
	if (!IS_ALIGNED(head, spe_pmu->align)) {
		unsigned long delta = roundup(head, spe_pmu->align) - head;

		delta = min(delta, handle->size);
		arm_spe_pmu_pad_buf(handle, delta);
		head = PERF_IDX2OFF(handle->head, buf);
	}

	/* If we've run out of free space, then nothing more to do */
	if (!handle->size)
		goto no_space;

	/* Compute the tail and wakeup indices now that we've aligned head */
	tail = PERF_IDX2OFF(handle->head + handle->size, buf);
	wakeup = PERF_IDX2OFF(handle->wakeup, buf);

	/*
	 * Avoid clobbering unconsumed data. We know we have space, so
	 * if we see head == tail we know that the buffer is empty. If
	 * head > tail, then there's nothing to clobber prior to
	 * wrapping.
	 */
	if (head < tail)
		limit = round_down(tail, PAGE_SIZE);

	/*
	 * Wakeup may be arbitrarily far into the future. If it's not in
	 * the current generation, either we'll wrap before hitting it,
	 * or it's in the past and has been handled already.
	 *
	 * If there's a wakeup before we wrap, arrange to be woken up by
	 * the page boundary following it. Keep the tail boundary if
	 * that's lower.
	 */
	if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
		limit = min(limit, round_up(wakeup, PAGE_SIZE));

	if (limit > head)
		return limit;

	arm_spe_pmu_pad_buf(handle, handle->size);
no_space:
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	perf_aux_output_end(handle, 0);
	return 0;
}

static u64 arm_spe_pmu_next_off(struct perf_output_handle *handle)
{
	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
	u64 limit = __arm_spe_pmu_next_off(handle);
	u64 head = PERF_IDX2OFF(handle->head, buf);

	/*
	 * If the head has come too close to the end of the buffer,
	 * then pad to the end and recompute the limit.
	 */
	if (limit && (limit - head < spe_pmu->max_record_sz)) {
		arm_spe_pmu_pad_buf(handle, limit - head);
		limit = __arm_spe_pmu_next_off(handle);
	}

	return limit;
}

static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
					  struct perf_event *event)
{
	u64 base, limit;
	struct arm_spe_pmu_buf *buf;

	/* Start a new aux session */
	buf = perf_aux_output_begin(handle, event);
	if (!buf) {
		event->hw.state |= PERF_HES_STOPPED;
		/*
		 * We still need to clear the limit pointer, since the
		 * profiler might only be disabled by virtue of a fault.
		 */
		limit = 0;
		goto out_write_limit;
	}

	limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle)
			      : arm_spe_pmu_next_off(handle);
	if (limit)
		limit |= PMBLIMITR_EL1_E;

	limit += (u64)buf->base;
	base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf);
	write_sysreg_s(base, SYS_PMBPTR_EL1);

out_write_limit:
	write_sysreg_s(limit, SYS_PMBLIMITR_EL1);
}

static void arm_spe_perf_aux_output_end(struct perf_output_handle *handle)
{
	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
	u64 offset, size;

	offset = read_sysreg_s(SYS_PMBPTR_EL1) - (u64)buf->base;
	size = offset - PERF_IDX2OFF(handle->head, buf);

	if (buf->snapshot)
		handle->head = offset;

	perf_aux_output_end(handle, size);
}

static void arm_spe_pmu_disable_and_drain_local(void)
{
	/* Disable profiling at EL0 and EL1 */
	write_sysreg_s(0, SYS_PMSCR_EL1);
	isb();

	/* Drain any buffered data */
	psb_csync();
	dsb(nsh);

	/* Disable the profiling buffer */
	write_sysreg_s(0, SYS_PMBLIMITR_EL1);
	isb();
}

/* IRQ handling */
static enum arm_spe_pmu_buf_fault_action
arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle)
{
	const char *err_str;
	u64 pmbsr;
	enum arm_spe_pmu_buf_fault_action ret;

	/*
	 * Ensure new profiling data is visible to the CPU and any external
	 * aborts have been resolved.
	 */
	psb_csync();
	dsb(nsh);

	/* Ensure hardware updates to PMBPTR_EL1 are visible */
	isb();

	/* Service required? */
	pmbsr = read_sysreg_s(SYS_PMBSR_EL1);
	if (!FIELD_GET(PMBSR_EL1_S, pmbsr))
		return SPE_PMU_BUF_FAULT_ACT_SPURIOUS;

	/*
	 * If we've lost data, disable profiling and also set the PARTIAL
	 * flag to indicate that the last record is corrupted.
	 */
	if (FIELD_GET(PMBSR_EL1_DL, pmbsr))
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED |
					     PERF_AUX_FLAG_PARTIAL);

	/* Report collisions to userspace so that it can up the period */
	if (FIELD_GET(PMBSR_EL1_COLL, pmbsr))
		perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);

	/* We only expect buffer management events */
	switch (FIELD_GET(PMBSR_EL1_EC, pmbsr)) {
	case PMBSR_EL1_EC_BUF:
		/* Handled below */
		break;
	case PMBSR_EL1_EC_FAULT_S1:
	case PMBSR_EL1_EC_FAULT_S2:
		err_str = "Unexpected buffer fault";
		goto out_err;
	default:
		err_str = "Unknown error code";
		goto out_err;
	}

	/* Buffer management event */
	switch (FIELD_GET(PMBSR_EL1_BUF_BSC_MASK, pmbsr)) {
	case PMBSR_EL1_BUF_BSC_FULL:
		ret = SPE_PMU_BUF_FAULT_ACT_OK;
		goto out_stop;
	default:
		err_str = "Unknown buffer status code";
	}

out_err:
	pr_err_ratelimited("%s on CPU %d [PMBSR=0x%016llx, PMBPTR=0x%016llx, PMBLIMITR=0x%016llx]\n",
			   err_str, smp_processor_id(), pmbsr,
			   read_sysreg_s(SYS_PMBPTR_EL1),
			   read_sysreg_s(SYS_PMBLIMITR_EL1));
	ret = SPE_PMU_BUF_FAULT_ACT_FATAL;

out_stop:
	arm_spe_perf_aux_output_end(handle);
	return ret;
}

static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
{
	struct perf_output_handle *handle = dev;
	struct perf_event *event = handle->event;
	enum arm_spe_pmu_buf_fault_action act;

	if (!perf_get_aux(handle))
		return IRQ_NONE;

	act = arm_spe_pmu_buf_get_fault_act(handle);
	if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
		return IRQ_NONE;

	/*
	 * Ensure perf callbacks have completed, which may disable the
	 * profiling buffer in response to a TRUNCATION flag.
	 */
	irq_work_run();

	switch (act) {
	case SPE_PMU_BUF_FAULT_ACT_FATAL:
		/*
		 * If a fatal exception occurred then leaving the profiling
		 * buffer enabled is a recipe waiting to happen. Since
		 * fatal faults don't always imply truncation, make sure
		 * that the profiling buffer is disabled explicitly before
		 * clearing the syndrome register.
		 */
		arm_spe_pmu_disable_and_drain_local();
		break;
	case SPE_PMU_BUF_FAULT_ACT_OK:
		/*
		 * We handled the fault (the buffer was full), so resume
		 * profiling as long as we didn't detect truncation.
		 * PMBPTR might be misaligned, but we'll burn that bridge
		 * when we get to it.
		 */
		if (!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)) {
			arm_spe_perf_aux_output_begin(handle, event);
			isb();
		}
		break;
	case SPE_PMU_BUF_FAULT_ACT_SPURIOUS:
		/* We've seen you before, but GCC has the memory of a sieve. */
		break;
	}

	/* The buffer pointers are now sane, so resume profiling. */
	write_sysreg_s(0, SYS_PMBSR_EL1);
	return IRQ_HANDLED;
}

static u64 arm_spe_pmsevfr_res0(u16 pmsver)
{
	switch (pmsver) {
	case ID_AA64DFR0_EL1_PMSVer_IMP:
		return PMSEVFR_EL1_RES0_IMP;
	case ID_AA64DFR0_EL1_PMSVer_V1P1:
		return PMSEVFR_EL1_RES0_V1P1;
	case ID_AA64DFR0_EL1_PMSVer_V1P2:
	/* Return the highest version we support by default */
	default:
		return PMSEVFR_EL1_RES0_V1P2;
	}
}

/* Perf callbacks */
static int arm_spe_pmu_event_init(struct perf_event *event)
{
	u64 reg;
	struct perf_event_attr *attr = &event->attr;
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);

	/* This is, of course, deeply driver-specific */
	if (attr->type != event->pmu->type)
		return -ENOENT;

	if (event->cpu >= 0 &&
	    !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
		return -ENOENT;

	if (arm_spe_event_to_pmsevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
		return -EOPNOTSUPP;

	if (arm_spe_event_to_pmsnevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
		return -EOPNOTSUPP;

	if (attr->exclude_idle)
		return -EOPNOTSUPP;

	/*
	 * Feedback-directed frequency throttling doesn't work when we
	 * have a buffer of samples. We'd need to manually count the
	 * samples in the buffer when it fills up and adjust the event
	 * count to reflect that. Instead, just force the user to specify
	 * a sample period.
	 */
	if (attr->freq)
		return -EINVAL;

	reg = arm_spe_event_to_pmsfcr(event);
	if ((FIELD_GET(PMSFCR_EL1_FE, reg)) &&
	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT))
		return -EOPNOTSUPP;

	if ((FIELD_GET(PMSFCR_EL1_FnE, reg)) &&
	    !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
		return -EOPNOTSUPP;

	if ((FIELD_GET(PMSFCR_EL1_FT, reg)) &&
	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP))
		return -EOPNOTSUPP;

	if ((FIELD_GET(PMSFCR_EL1_FL, reg)) &&
	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
		return -EOPNOTSUPP;

	set_spe_event_has_cx(event);
	reg = arm_spe_event_to_pmscr(event);
	if (!perfmon_capable() &&
	    (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT)))
		return -EACCES;

	return 0;
}

static void arm_spe_pmu_start(struct perf_event *event, int flags)
{
	u64 reg;
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);

	hwc->state = 0;
	arm_spe_perf_aux_output_begin(handle, event);
	if (hwc->state)
		return;

	reg = arm_spe_event_to_pmsfcr(event);
	write_sysreg_s(reg, SYS_PMSFCR_EL1);

	reg = arm_spe_event_to_pmsevfr(event);
	write_sysreg_s(reg, SYS_PMSEVFR_EL1);

	if (spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT) {
		reg = arm_spe_event_to_pmsnevfr(event);
		write_sysreg_s(reg, SYS_PMSNEVFR_EL1);
	}

	reg = arm_spe_event_to_pmslatfr(event);
	write_sysreg_s(reg, SYS_PMSLATFR_EL1);

	if (flags & PERF_EF_RELOAD) {
		reg = arm_spe_event_to_pmsirr(event);
		write_sysreg_s(reg, SYS_PMSIRR_EL1);
		isb();
		reg = local64_read(&hwc->period_left);
		write_sysreg_s(reg, SYS_PMSICR_EL1);
	}

	reg = arm_spe_event_to_pmscr(event);
	isb();
	write_sysreg_s(reg, SYS_PMSCR_EL1);
}

static void arm_spe_pmu_stop(struct perf_event *event, int flags)
{
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);

	/* If we're already stopped, then nothing to do */
	if (hwc->state & PERF_HES_STOPPED)
		return;

	/* Stop all trace generation */
	arm_spe_pmu_disable_and_drain_local();

	if (flags & PERF_EF_UPDATE) {
		/*
		 * If there's a fault pending then ensure we contain it
		 * to this buffer, since we might be on the context-switch
		 * path.
		 */
		if (perf_get_aux(handle)) {
			enum arm_spe_pmu_buf_fault_action act;

			act = arm_spe_pmu_buf_get_fault_act(handle);
			if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
				arm_spe_perf_aux_output_end(handle);
			else
				write_sysreg_s(0, SYS_PMBSR_EL1);
		}

		/*
		 * This may also contain ECOUNT, but nobody else should
		 * be looking at period_left, since we forbid frequency
		 * based sampling.
		 */
		local64_set(&hwc->period_left, read_sysreg_s(SYS_PMSICR_EL1));
		hwc->state |= PERF_HES_UPTODATE;
	}

	hwc->state |= PERF_HES_STOPPED;
}

static int arm_spe_pmu_add(struct perf_event *event, int flags)
{
	int ret = 0;
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu;

	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
		return -ENOENT;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START) {
		arm_spe_pmu_start(event, PERF_EF_RELOAD);
		if (hwc->state & PERF_HES_STOPPED)
			ret = -EINVAL;
	}

	return ret;
}

static void arm_spe_pmu_del(struct perf_event *event, int flags)
{
	arm_spe_pmu_stop(event, PERF_EF_UPDATE);
}

static void arm_spe_pmu_read(struct perf_event *event)
{
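	/*
	 * Nothing to do here: SPE delivers its samples through the AUX
	 * buffer, so there is no counter value to read back.
	 */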
}

static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
				   int nr_pages, bool snapshot)
{
	int i, cpu = event->cpu;
	struct page **pglist;
	struct arm_spe_pmu_buf *buf;

	/* We need at least two pages for this to work. */
	if (nr_pages < 2)
		return NULL;

	/*
	 * We require an even number of pages for snapshot mode, so that
	 * we can effectively treat the buffer as consisting of two equal
	 * parts and give userspace a fighting chance of getting some
	 * useful data out of it.
	 */
	if (snapshot && (nr_pages & 1))
		return NULL;

	if (cpu == -1)
		cpu = raw_smp_processor_id();

	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
	if (!buf)
		return NULL;

	pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
	if (!pglist)
		goto out_free_buf;

	for (i = 0; i < nr_pages; ++i)
		pglist[i] = virt_to_page(pages[i]);

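	/*
	 * Map the discontiguous AUX pages into one virtually-contiguous
	 * range, so that the buffer base and limit programmed into
	 * PMBPTR_EL1/PMBLIMITR_EL1 can be plain kernel virtual addresses.
	 */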
	buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!buf->base)
		goto out_free_pglist;

	buf->nr_pages	= nr_pages;
	buf->snapshot	= snapshot;

	kfree(pglist);
	return buf;

out_free_pglist:
	kfree(pglist);
out_free_buf:
	kfree(buf);
	return NULL;
}

static void arm_spe_pmu_free_aux(void *aux)
{
	struct arm_spe_pmu_buf *buf = aux;

	vunmap(buf->base);
	kfree(buf);
}

/* Initialisation and teardown functions */
static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
{
	static atomic_t pmu_idx = ATOMIC_INIT(-1);

	int idx;
	char *name;
	struct device *dev = &spe_pmu->pdev->dev;

	spe_pmu->pmu = (struct pmu) {
		.module = THIS_MODULE,
		.capabilities	= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
		.attr_groups	= arm_spe_pmu_attr_groups,
		/*
		 * We hitch a ride on the software context here, so that
		 * we can support per-task profiling (which is not possible
		 * with the invalid context as it doesn't get sched callbacks).
		 * This requires that userspace either uses a dummy event for
		 * perf_event_open, since the aux buffer is not setup until
		 * a subsequent mmap, or creates the profiling event in a
		 * disabled state and explicitly PERF_EVENT_IOC_ENABLEs it
		 * once the buffer has been created.
		 */
		.task_ctx_nr	= perf_sw_context,
		.event_init	= arm_spe_pmu_event_init,
		.add		= arm_spe_pmu_add,
		.del		= arm_spe_pmu_del,
		.start		= arm_spe_pmu_start,
		.stop		= arm_spe_pmu_stop,
		.read		= arm_spe_pmu_read,
		.setup_aux	= arm_spe_pmu_setup_aux,
		.free_aux	= arm_spe_pmu_free_aux,
	};

	idx = atomic_inc_return(&pmu_idx);
	name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
	if (!name) {
		dev_err(dev, "failed to allocate name for pmu %d\n", idx);
		return -ENOMEM;
	}

	return perf_pmu_register(&spe_pmu->pmu, name, -1);
}

static void arm_spe_pmu_perf_destroy(struct arm_spe_pmu *spe_pmu)
{
	perf_pmu_unregister(&spe_pmu->pmu);
}

static void __arm_spe_pmu_dev_probe(void *info)
{
	int fld;
	u64 reg;
	struct arm_spe_pmu *spe_pmu = info;
	struct device *dev = &spe_pmu->pdev->dev;

	fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
						   ID_AA64DFR0_EL1_PMSVer_SHIFT);
	if (!fld) {
		dev_err(dev,
			"unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",
			fld, smp_processor_id());
		return;
	}
	spe_pmu->pmsver = (u16)fld;

	/* Read PMBIDR first to determine whether or not we have access */
	reg = read_sysreg_s(SYS_PMBIDR_EL1);
	if (FIELD_GET(PMBIDR_EL1_P, reg)) {
		dev_err(dev,
			"profiling buffer owned by higher exception level\n");
		return;
	}

	/* Minimum alignment. If it's out-of-range, then fail the probe */
	fld = FIELD_GET(PMBIDR_EL1_ALIGN, reg);
	spe_pmu->align = 1 << fld;
	if (spe_pmu->align > SZ_2K) {
		dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n",
			fld, smp_processor_id());
		return;
	}

	/* It's now safe to read PMSIDR and figure out what we've got */
	reg = read_sysreg_s(SYS_PMSIDR_EL1);
	if (FIELD_GET(PMSIDR_EL1_FE, reg))
		spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT;

	if (FIELD_GET(PMSIDR_EL1_FnE, reg))
		spe_pmu->features |= SPE_PMU_FEAT_INV_FILT_EVT;

	if (FIELD_GET(PMSIDR_EL1_FT, reg))
		spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP;

	if (FIELD_GET(PMSIDR_EL1_FL, reg))
		spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT;

	if (FIELD_GET(PMSIDR_EL1_ARCHINST, reg))
		spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST;

	if (FIELD_GET(PMSIDR_EL1_LDS, reg))
		spe_pmu->features |= SPE_PMU_FEAT_LDS;

	if (FIELD_GET(PMSIDR_EL1_ERND, reg))
		spe_pmu->features |= SPE_PMU_FEAT_ERND;

	/* This field has a spaced out encoding, so just use a look-up */
	fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg);
	switch (fld) {
	case PMSIDR_EL1_INTERVAL_256:
		spe_pmu->min_period = 256;
		break;
	case PMSIDR_EL1_INTERVAL_512:
		spe_pmu->min_period = 512;
		break;
	case PMSIDR_EL1_INTERVAL_768:
		spe_pmu->min_period = 768;
		break;
	case PMSIDR_EL1_INTERVAL_1024:
		spe_pmu->min_period = 1024;
		break;
	case PMSIDR_EL1_INTERVAL_1536:
		spe_pmu->min_period = 1536;
		break;
	case PMSIDR_EL1_INTERVAL_2048:
		spe_pmu->min_period = 2048;
		break;
	case PMSIDR_EL1_INTERVAL_3072:
		spe_pmu->min_period = 3072;
		break;
	default:
		dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
			 fld);
		fallthrough;
	case PMSIDR_EL1_INTERVAL_4096:
		spe_pmu->min_period = 4096;
	}

	/* Maximum record size. If it's out-of-range, then fail the probe */
	fld = FIELD_GET(PMSIDR_EL1_MAXSIZE, reg);
	spe_pmu->max_record_sz = 1 << fld;
	if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) {
		dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n",
			fld, smp_processor_id());
		return;
	}

	fld = FIELD_GET(PMSIDR_EL1_COUNTSIZE, reg);
	switch (fld) {
	default:
		dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
			 fld);
		fallthrough;
	case PMSIDR_EL1_COUNTSIZE_12_BIT_SAT:
		spe_pmu->counter_sz = 12;
		break;
	case PMSIDR_EL1_COUNTSIZE_16_BIT_SAT:
		spe_pmu->counter_sz = 16;
	}

	dev_info(dev,
		 "probed SPEv1.%d for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
		 spe_pmu->pmsver - 1, cpumask_pr_args(&spe_pmu->supported_cpus),
		 spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);

	spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
}

static void __arm_spe_pmu_reset_local(void)
{
	/*
	 * This is probably overkill, as we have no idea where we're
	 * draining any buffered data to...
	 */
	arm_spe_pmu_disable_and_drain_local();

	/* Reset the buffer base pointer */
	write_sysreg_s(0, SYS_PMBPTR_EL1);
	isb();

	/* Clear any pending management interrupts */
	write_sysreg_s(0, SYS_PMBSR_EL1);
	isb();
}

static void __arm_spe_pmu_setup_one(void *info)
{
	struct arm_spe_pmu *spe_pmu = info;

	__arm_spe_pmu_reset_local();
	enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE);
}

static void __arm_spe_pmu_stop_one(void *info)
{
	struct arm_spe_pmu *spe_pmu = info;

	disable_percpu_irq(spe_pmu->irq);
	__arm_spe_pmu_reset_local();
}

static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node)
{
	struct arm_spe_pmu *spe_pmu;

	spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
		return 0;

	__arm_spe_pmu_setup_one(spe_pmu);
	return 0;
}

static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
	struct arm_spe_pmu *spe_pmu;

	spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
		return 0;

	__arm_spe_pmu_stop_one(spe_pmu);
	return 0;
}

static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
{
	int ret;
	cpumask_t *mask = &spe_pmu->supported_cpus;

	/* Make sure we probe the hardware on a relevant CPU */
	ret = smp_call_function_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, 1);
	if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED))
		return -ENXIO;

	/* Request our PPIs (note that the IRQ is still disabled) */
	ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME,
				 spe_pmu->handle);
	if (ret)
		return ret;

	/*
	 * Register our hotplug notifier now so we don't miss any events.
	 * This will enable the IRQ for any supported CPUs that are already
	 * up.
	 */
	ret = cpuhp_state_add_instance(arm_spe_pmu_online,
				       &spe_pmu->hotplug_node);
	if (ret)
		free_percpu_irq(spe_pmu->irq, spe_pmu->handle);

	return ret;
}

static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu)
{
	cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node);
	free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
}

/* Driver and device probing */
static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
{
	struct platform_device *pdev = spe_pmu->pdev;
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return -ENXIO;

	if (!irq_is_percpu(irq)) {
		dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq);
		return -EINVAL;
	}

	if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) {
		dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq);
		return -EINVAL;
	}

	spe_pmu->irq = irq;
	return 0;
}

static const struct of_device_id arm_spe_pmu_of_match[] = {
	{ .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 },
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, arm_spe_pmu_of_match);
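
/*
 * Example devicetree node (a sketch; the PPI number and the CPU
 * partition phandle are platform-specific):
 *
 *	spe-pmu {
 *		compatible = "arm,statistical-profiling-extension-v1";
 *		interrupts = <GIC_PPI 05 IRQ_TYPE_LEVEL_HIGH &part1_cpus>;
 *	};
 */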

static const struct platform_device_id arm_spe_match[] = {
	{ ARMV8_SPE_PDEV_NAME, 0},
	{ }
};
MODULE_DEVICE_TABLE(platform, arm_spe_match);

static int arm_spe_pmu_device_probe(struct platform_device *pdev)
{
	int ret;
	struct arm_spe_pmu *spe_pmu;
	struct device *dev = &pdev->dev;

	/*
	 * If kernelspace is unmapped when running at EL0, then the SPE
	 * buffer will fault and prematurely terminate the AUX session.
	 */
	if (arm64_kernel_unmapped_at_el0()) {
		dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n");
		return -EPERM;
	}

	spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
	if (!spe_pmu)
		return -ENOMEM;

	spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle));
	if (!spe_pmu->handle)
		return -ENOMEM;

	spe_pmu->pdev = pdev;
	platform_set_drvdata(pdev, spe_pmu);

	ret = arm_spe_pmu_irq_probe(spe_pmu);
	if (ret)
		goto out_free_handle;

	ret = arm_spe_pmu_dev_init(spe_pmu);
	if (ret)
		goto out_free_handle;

	ret = arm_spe_pmu_perf_init(spe_pmu);
	if (ret)
		goto out_teardown_dev;

	return 0;

out_teardown_dev:
	arm_spe_pmu_dev_teardown(spe_pmu);
out_free_handle:
	free_percpu(spe_pmu->handle);
	return ret;
}

static int arm_spe_pmu_device_remove(struct platform_device *pdev)
{
	struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);

	arm_spe_pmu_perf_destroy(spe_pmu);
	arm_spe_pmu_dev_teardown(spe_pmu);
	free_percpu(spe_pmu->handle);
	return 0;
}

static struct platform_driver arm_spe_pmu_driver = {
	.id_table = arm_spe_match,
	.driver	= {
		.name		= DRVNAME,
		.of_match_table	= of_match_ptr(arm_spe_pmu_of_match),
		.suppress_bind_attrs = true,
	},
	.probe	= arm_spe_pmu_device_probe,
	.remove	= arm_spe_pmu_device_remove,
};

static int __init arm_spe_pmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
				      arm_spe_pmu_cpu_startup,
				      arm_spe_pmu_cpu_teardown);
	if (ret < 0)
		return ret;
	arm_spe_pmu_online = ret;

	ret = platform_driver_register(&arm_spe_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(arm_spe_pmu_online);

	return ret;
}

static void __exit arm_spe_pmu_exit(void)
{
	platform_driver_unregister(&arm_spe_pmu_driver);
	cpuhp_remove_multi_state(arm_spe_pmu_online);
}

module_init(arm_spe_pmu_init);
module_exit(arm_spe_pmu_exit);

MODULE_DESCRIPTION("Perf driver for the ARMv8.2 Statistical Profiling Extension");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Perf support for the Statistical Profiling Extension, introduced as
4 * part of ARMv8.2.
5 *
6 * Copyright (C) 2016 ARM Limited
7 *
8 * Author: Will Deacon <will.deacon@arm.com>
9 */
10
11#define PMUNAME "arm_spe"
12#define DRVNAME PMUNAME "_pmu"
13#define pr_fmt(fmt) DRVNAME ": " fmt
14
15#include <linux/bitops.h>
16#include <linux/bug.h>
17#include <linux/capability.h>
18#include <linux/cpuhotplug.h>
19#include <linux/cpumask.h>
20#include <linux/device.h>
21#include <linux/errno.h>
22#include <linux/interrupt.h>
23#include <linux/irq.h>
24#include <linux/kernel.h>
25#include <linux/list.h>
26#include <linux/module.h>
27#include <linux/of_address.h>
28#include <linux/of_device.h>
29#include <linux/perf_event.h>
30#include <linux/perf/arm_pmu.h>
31#include <linux/platform_device.h>
32#include <linux/printk.h>
33#include <linux/slab.h>
34#include <linux/smp.h>
35#include <linux/vmalloc.h>
36
37#include <asm/barrier.h>
38#include <asm/cpufeature.h>
39#include <asm/mmu.h>
40#include <asm/sysreg.h>
41
42#define ARM_SPE_BUF_PAD_BYTE 0
43
44struct arm_spe_pmu_buf {
45 int nr_pages;
46 bool snapshot;
47 void *base;
48};
49
50struct arm_spe_pmu {
51 struct pmu pmu;
52 struct platform_device *pdev;
53 cpumask_t supported_cpus;
54 struct hlist_node hotplug_node;
55
56 int irq; /* PPI */
57
58 u16 min_period;
59 u16 counter_sz;
60
61#define SPE_PMU_FEAT_FILT_EVT (1UL << 0)
62#define SPE_PMU_FEAT_FILT_TYP (1UL << 1)
63#define SPE_PMU_FEAT_FILT_LAT (1UL << 2)
64#define SPE_PMU_FEAT_ARCH_INST (1UL << 3)
65#define SPE_PMU_FEAT_LDS (1UL << 4)
66#define SPE_PMU_FEAT_ERND (1UL << 5)
67#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63)
68 u64 features;
69
70 u16 max_record_sz;
71 u16 align;
72 struct perf_output_handle __percpu *handle;
73};
74
75#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))
76
77/* Convert a free-running index from perf into an SPE buffer offset */
78#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
79
80/* Keep track of our dynamic hotplug state */
81static enum cpuhp_state arm_spe_pmu_online;
82
83enum arm_spe_pmu_buf_fault_action {
84 SPE_PMU_BUF_FAULT_ACT_SPURIOUS,
85 SPE_PMU_BUF_FAULT_ACT_FATAL,
86 SPE_PMU_BUF_FAULT_ACT_OK,
87};
88
89/* This sysfs gunk was really good fun to write. */
90enum arm_spe_pmu_capabilities {
91 SPE_PMU_CAP_ARCH_INST = 0,
92 SPE_PMU_CAP_ERND,
93 SPE_PMU_CAP_FEAT_MAX,
94 SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX,
95 SPE_PMU_CAP_MIN_IVAL,
96};
97
98static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
99 [SPE_PMU_CAP_ARCH_INST] = SPE_PMU_FEAT_ARCH_INST,
100 [SPE_PMU_CAP_ERND] = SPE_PMU_FEAT_ERND,
101};
102
103static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
104{
105 if (cap < SPE_PMU_CAP_FEAT_MAX)
106 return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]);
107
108 switch (cap) {
109 case SPE_PMU_CAP_CNT_SZ:
110 return spe_pmu->counter_sz;
111 case SPE_PMU_CAP_MIN_IVAL:
112 return spe_pmu->min_period;
113 default:
114 WARN(1, "unknown cap %d\n", cap);
115 }
116
117 return 0;
118}
119
120static ssize_t arm_spe_pmu_cap_show(struct device *dev,
121 struct device_attribute *attr,
122 char *buf)
123{
124 struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
125 struct dev_ext_attribute *ea =
126 container_of(attr, struct dev_ext_attribute, attr);
127 int cap = (long)ea->var;
128
129 return snprintf(buf, PAGE_SIZE, "%u\n",
130 arm_spe_pmu_cap_get(spe_pmu, cap));
131}
132
133#define SPE_EXT_ATTR_ENTRY(_name, _func, _var) \
134 &((struct dev_ext_attribute[]) { \
135 { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_var } \
136 })[0].attr.attr
137
138#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var) \
139 SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var)
140
141static struct attribute *arm_spe_pmu_cap_attr[] = {
142 SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST),
143 SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND),
144 SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ),
145 SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL),
146 NULL,
147};
148
149static struct attribute_group arm_spe_pmu_cap_group = {
150 .name = "caps",
151 .attrs = arm_spe_pmu_cap_attr,
152};
153
154/* User ABI */
155#define ATTR_CFG_FLD_ts_enable_CFG config /* PMSCR_EL1.TS */
156#define ATTR_CFG_FLD_ts_enable_LO 0
157#define ATTR_CFG_FLD_ts_enable_HI 0
158#define ATTR_CFG_FLD_pa_enable_CFG config /* PMSCR_EL1.PA */
159#define ATTR_CFG_FLD_pa_enable_LO 1
160#define ATTR_CFG_FLD_pa_enable_HI 1
161#define ATTR_CFG_FLD_pct_enable_CFG config /* PMSCR_EL1.PCT */
162#define ATTR_CFG_FLD_pct_enable_LO 2
163#define ATTR_CFG_FLD_pct_enable_HI 2
164#define ATTR_CFG_FLD_jitter_CFG config /* PMSIRR_EL1.RND */
165#define ATTR_CFG_FLD_jitter_LO 16
166#define ATTR_CFG_FLD_jitter_HI 16
167#define ATTR_CFG_FLD_branch_filter_CFG config /* PMSFCR_EL1.B */
168#define ATTR_CFG_FLD_branch_filter_LO 32
169#define ATTR_CFG_FLD_branch_filter_HI 32
170#define ATTR_CFG_FLD_load_filter_CFG config /* PMSFCR_EL1.LD */
171#define ATTR_CFG_FLD_load_filter_LO 33
172#define ATTR_CFG_FLD_load_filter_HI 33
173#define ATTR_CFG_FLD_store_filter_CFG config /* PMSFCR_EL1.ST */
174#define ATTR_CFG_FLD_store_filter_LO 34
175#define ATTR_CFG_FLD_store_filter_HI 34
176
177#define ATTR_CFG_FLD_event_filter_CFG config1 /* PMSEVFR_EL1 */
178#define ATTR_CFG_FLD_event_filter_LO 0
179#define ATTR_CFG_FLD_event_filter_HI 63
180
181#define ATTR_CFG_FLD_min_latency_CFG config2 /* PMSLATFR_EL1.MINLAT */
182#define ATTR_CFG_FLD_min_latency_LO 0
183#define ATTR_CFG_FLD_min_latency_HI 11
184
185/* Why does everything I do descend into this? */
186#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
187 (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
188
189#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
190 __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
191
192#define GEN_PMU_FORMAT_ATTR(name) \
193 PMU_FORMAT_ATTR(name, \
194 _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
195 ATTR_CFG_FLD_##name##_LO, \
196 ATTR_CFG_FLD_##name##_HI))
197
198#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
199 ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
200
201#define ATTR_CFG_GET_FLD(attr, name) \
202 _ATTR_CFG_GET_FLD(attr, \
203 ATTR_CFG_FLD_##name##_CFG, \
204 ATTR_CFG_FLD_##name##_LO, \
205 ATTR_CFG_FLD_##name##_HI)
206
207GEN_PMU_FORMAT_ATTR(ts_enable);
208GEN_PMU_FORMAT_ATTR(pa_enable);
209GEN_PMU_FORMAT_ATTR(pct_enable);
210GEN_PMU_FORMAT_ATTR(jitter);
211GEN_PMU_FORMAT_ATTR(branch_filter);
212GEN_PMU_FORMAT_ATTR(load_filter);
213GEN_PMU_FORMAT_ATTR(store_filter);
214GEN_PMU_FORMAT_ATTR(event_filter);
215GEN_PMU_FORMAT_ATTR(min_latency);
216
217static struct attribute *arm_spe_pmu_formats_attr[] = {
218 &format_attr_ts_enable.attr,
219 &format_attr_pa_enable.attr,
220 &format_attr_pct_enable.attr,
221 &format_attr_jitter.attr,
222 &format_attr_branch_filter.attr,
223 &format_attr_load_filter.attr,
224 &format_attr_store_filter.attr,
225 &format_attr_event_filter.attr,
226 &format_attr_min_latency.attr,
227 NULL,
228};
229
230static struct attribute_group arm_spe_pmu_format_group = {
231 .name = "format",
232 .attrs = arm_spe_pmu_formats_attr,
233};
234
235static ssize_t arm_spe_pmu_get_attr_cpumask(struct device *dev,
236 struct device_attribute *attr,
237 char *buf)
238{
239 struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
240
241 return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus);
242}
243static DEVICE_ATTR(cpumask, S_IRUGO, arm_spe_pmu_get_attr_cpumask, NULL);
244
245static struct attribute *arm_spe_pmu_attrs[] = {
246 &dev_attr_cpumask.attr,
247 NULL,
248};
249
250static struct attribute_group arm_spe_pmu_group = {
251 .attrs = arm_spe_pmu_attrs,
252};
253
254static const struct attribute_group *arm_spe_pmu_attr_groups[] = {
255 &arm_spe_pmu_group,
256 &arm_spe_pmu_cap_group,
257 &arm_spe_pmu_format_group,
258 NULL,
259};
260
261/* Convert between user ABI and register values */
262static u64 arm_spe_event_to_pmscr(struct perf_event *event)
263{
264 struct perf_event_attr *attr = &event->attr;
265 u64 reg = 0;
266
267 reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << SYS_PMSCR_EL1_TS_SHIFT;
268 reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << SYS_PMSCR_EL1_PA_SHIFT;
269 reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << SYS_PMSCR_EL1_PCT_SHIFT;
270
271 if (!attr->exclude_user)
272 reg |= BIT(SYS_PMSCR_EL1_E0SPE_SHIFT);
273
274 if (!attr->exclude_kernel)
275 reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);
276
277 if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
278 reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);
279
280 return reg;
281}
282
283static void arm_spe_event_sanitise_period(struct perf_event *event)
284{
285 struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
286 u64 period = event->hw.sample_period;
287 u64 max_period = SYS_PMSIRR_EL1_INTERVAL_MASK
288 << SYS_PMSIRR_EL1_INTERVAL_SHIFT;
289
290 if (period < spe_pmu->min_period)
291 period = spe_pmu->min_period;
292 else if (period > max_period)
293 period = max_period;
294 else
295 period &= max_period;
296
297 event->hw.sample_period = period;
298}
299
300static u64 arm_spe_event_to_pmsirr(struct perf_event *event)
301{
302 struct perf_event_attr *attr = &event->attr;
303 u64 reg = 0;
304
305 arm_spe_event_sanitise_period(event);
306
307 reg |= ATTR_CFG_GET_FLD(attr, jitter) << SYS_PMSIRR_EL1_RND_SHIFT;
308 reg |= event->hw.sample_period;
309
310 return reg;
311}
312
313static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
314{
315 struct perf_event_attr *attr = &event->attr;
316 u64 reg = 0;
317
318 reg |= ATTR_CFG_GET_FLD(attr, load_filter) << SYS_PMSFCR_EL1_LD_SHIFT;
319 reg |= ATTR_CFG_GET_FLD(attr, store_filter) << SYS_PMSFCR_EL1_ST_SHIFT;
320 reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << SYS_PMSFCR_EL1_B_SHIFT;
321
322 if (reg)
323 reg |= BIT(SYS_PMSFCR_EL1_FT_SHIFT);
324
325 if (ATTR_CFG_GET_FLD(attr, event_filter))
326 reg |= BIT(SYS_PMSFCR_EL1_FE_SHIFT);
327
328 if (ATTR_CFG_GET_FLD(attr, min_latency))
329 reg |= BIT(SYS_PMSFCR_EL1_FL_SHIFT);
330
331 return reg;
332}
333
334static u64 arm_spe_event_to_pmsevfr(struct perf_event *event)
335{
336 struct perf_event_attr *attr = &event->attr;
337 return ATTR_CFG_GET_FLD(attr, event_filter);
338}
339
340static u64 arm_spe_event_to_pmslatfr(struct perf_event *event)
341{
342 struct perf_event_attr *attr = &event->attr;
343 return ATTR_CFG_GET_FLD(attr, min_latency)
344 << SYS_PMSLATFR_EL1_MINLAT_SHIFT;
345}
346
347static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len)
348{
349 struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
350 u64 head = PERF_IDX2OFF(handle->head, buf);
351
352 memset(buf->base + head, ARM_SPE_BUF_PAD_BYTE, len);
353 if (!buf->snapshot)
354 perf_aux_output_skip(handle, len);
355}
356
357static u64 arm_spe_pmu_next_snapshot_off(struct perf_output_handle *handle)
358{
359 struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
360 struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
361 u64 head = PERF_IDX2OFF(handle->head, buf);
362 u64 limit = buf->nr_pages * PAGE_SIZE;
363
364 /*
365 * The trace format isn't parseable in reverse, so clamp
366 * the limit to half of the buffer size in snapshot mode
367 * so that the worst case is half a buffer of records, as
368 * opposed to a single record.
369 */
370 if (head < limit >> 1)
371 limit >>= 1;
372
373 /*
374 * If we're within max_record_sz of the limit, we must
375 * pad, move the head index and recompute the limit.
376 */
377 if (limit - head < spe_pmu->max_record_sz) {
378 arm_spe_pmu_pad_buf(handle, limit - head);
379 handle->head = PERF_IDX2OFF(limit, buf);
380 limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head;
381 }
382
383 return limit;
384}
385
386static u64 __arm_spe_pmu_next_off(struct perf_output_handle *handle)
387{
388 struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
389 struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
390 const u64 bufsize = buf->nr_pages * PAGE_SIZE;
391 u64 limit = bufsize;
392 u64 head, tail, wakeup;
393
394 /*
395 * The head can be misaligned for two reasons:
396 *
397 * 1. The hardware left PMBPTR pointing to the first byte after
398 * a record when generating a buffer management event.
399 *
400 * 2. We used perf_aux_output_skip to consume handle->size bytes
401 * and CIRC_SPACE was used to compute the size, which always
402 * leaves one entry free.
403 *
404 * Deal with this by padding to the next alignment boundary and
405 * moving the head index. If we run out of buffer space, we'll
406 * reduce handle->size to zero and end up reporting truncation.
407 */
408 head = PERF_IDX2OFF(handle->head, buf);
409 if (!IS_ALIGNED(head, spe_pmu->align)) {
410 unsigned long delta = roundup(head, spe_pmu->align) - head;
411
412 delta = min(delta, handle->size);
413 arm_spe_pmu_pad_buf(handle, delta);
414 head = PERF_IDX2OFF(handle->head, buf);
415 }
416
417 /* If we've run out of free space, then nothing more to do */
418 if (!handle->size)
419 goto no_space;
420
421 /* Compute the tail and wakeup indices now that we've aligned head */
422 tail = PERF_IDX2OFF(handle->head + handle->size, buf);
423 wakeup = PERF_IDX2OFF(handle->wakeup, buf);
424
425 /*
426 * Avoid clobbering unconsumed data. We know we have space, so
427 * if we see head == tail we know that the buffer is empty. If
428 * head > tail, then there's nothing to clobber prior to
429 * wrapping.
430 */
431 if (head < tail)
432 limit = round_down(tail, PAGE_SIZE);
433
434 /*
435 * Wakeup may be arbitrarily far into the future. If it's not in
436 * the current generation, either we'll wrap before hitting it,
437 * or it's in the past and has been handled already.
438 *
439 * If there's a wakeup before we wrap, arrange to be woken up by
440 * the page boundary following it. Keep the tail boundary if
441 * that's lower.
442 */
443 if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
444 limit = min(limit, round_up(wakeup, PAGE_SIZE));
445
446 if (limit > head)
447 return limit;
448
449 arm_spe_pmu_pad_buf(handle, handle->size);
450no_space:
451 perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
452 perf_aux_output_end(handle, 0);
453 return 0;
454}
455
456static u64 arm_spe_pmu_next_off(struct perf_output_handle *handle)
457{
458 struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
459 struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
460 u64 limit = __arm_spe_pmu_next_off(handle);
461 u64 head = PERF_IDX2OFF(handle->head, buf);
462
463 /*
464 * If the head has come too close to the end of the buffer,
465 * then pad to the end and recompute the limit.
466 */
467 if (limit && (limit - head < spe_pmu->max_record_sz)) {
468 arm_spe_pmu_pad_buf(handle, limit - head);
469 limit = __arm_spe_pmu_next_off(handle);
470 }
471
472 return limit;
473}
474
475static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
476 struct perf_event *event)
477{
478 u64 base, limit;
479 struct arm_spe_pmu_buf *buf;
480
481 /* Start a new aux session */
482 buf = perf_aux_output_begin(handle, event);
483 if (!buf) {
484 event->hw.state |= PERF_HES_STOPPED;
485 /*
486 * We still need to clear the limit pointer, since the
487 * profiler might only be disabled by virtue of a fault.
488 */
489 limit = 0;
490 goto out_write_limit;
491 }
492
493 limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle)
494 : arm_spe_pmu_next_off(handle);
495 if (limit)
496 limit |= BIT(SYS_PMBLIMITR_EL1_E_SHIFT);
497
498 limit += (u64)buf->base;
499 base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf);
500 write_sysreg_s(base, SYS_PMBPTR_EL1);
501
502out_write_limit:
503 write_sysreg_s(limit, SYS_PMBLIMITR_EL1);
504}
505
506static void arm_spe_perf_aux_output_end(struct perf_output_handle *handle)
507{
508 struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
509 u64 offset, size;
510
511 offset = read_sysreg_s(SYS_PMBPTR_EL1) - (u64)buf->base;
512 size = offset - PERF_IDX2OFF(handle->head, buf);
513
514 if (buf->snapshot)
515 handle->head = offset;
516
517 perf_aux_output_end(handle, size);
518}
519
520static void arm_spe_pmu_disable_and_drain_local(void)
521{
522 /* Disable profiling at EL0 and EL1 */
523 write_sysreg_s(0, SYS_PMSCR_EL1);
524 isb();
525
526 /* Drain any buffered data */
527 psb_csync();
528 dsb(nsh);
529
530 /* Disable the profiling buffer */
531 write_sysreg_s(0, SYS_PMBLIMITR_EL1);
532 isb();
533}
534
535/* IRQ handling */
static enum arm_spe_pmu_buf_fault_action
arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle)
{
	const char *err_str;
	u64 pmbsr;
	enum arm_spe_pmu_buf_fault_action ret;

	/*
	 * Ensure new profiling data is visible to the CPU and any external
	 * aborts have been resolved.
	 */
	psb_csync();
	dsb(nsh);

	/* Ensure hardware updates to PMBPTR_EL1 are visible */
	isb();

	/* Service required? */
	pmbsr = read_sysreg_s(SYS_PMBSR_EL1);
	if (!(pmbsr & BIT(SYS_PMBSR_EL1_S_SHIFT)))
		return SPE_PMU_BUF_FAULT_ACT_SPURIOUS;

	/*
	 * If we've lost data, disable profiling and also set the PARTIAL
	 * flag to indicate that the last record is corrupted.
	 */
	if (pmbsr & BIT(SYS_PMBSR_EL1_DL_SHIFT))
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED |
					     PERF_AUX_FLAG_PARTIAL);

	/* Report collisions to userspace so that it can up the period */
	if (pmbsr & BIT(SYS_PMBSR_EL1_COLL_SHIFT))
		perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);

	/* We only expect buffer management events */
	switch (pmbsr & (SYS_PMBSR_EL1_EC_MASK << SYS_PMBSR_EL1_EC_SHIFT)) {
	case SYS_PMBSR_EL1_EC_BUF:
		/* Handled below */
		break;
	case SYS_PMBSR_EL1_EC_FAULT_S1:
	case SYS_PMBSR_EL1_EC_FAULT_S2:
		err_str = "Unexpected buffer fault";
		goto out_err;
	default:
		err_str = "Unknown error code";
		goto out_err;
	}

	/* Buffer management event */
	switch (pmbsr &
		(SYS_PMBSR_EL1_BUF_BSC_MASK << SYS_PMBSR_EL1_BUF_BSC_SHIFT)) {
	case SYS_PMBSR_EL1_BUF_BSC_FULL:
		ret = SPE_PMU_BUF_FAULT_ACT_OK;
		goto out_stop;
	default:
		err_str = "Unknown buffer status code";
	}

out_err:
	pr_err_ratelimited("%s on CPU %d [PMBSR=0x%016llx, PMBPTR=0x%016llx, PMBLIMITR=0x%016llx]\n",
			   err_str, smp_processor_id(), pmbsr,
			   read_sysreg_s(SYS_PMBPTR_EL1),
			   read_sysreg_s(SYS_PMBLIMITR_EL1));
	ret = SPE_PMU_BUF_FAULT_ACT_FATAL;

out_stop:
	arm_spe_perf_aux_output_end(handle);
	return ret;
}

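/*
 * PPI handler: runs on the profiled CPU with profiling stalled on the
 * pending service bit. Depending on the fault action we either restart
 * profiling in a fresh AUX session or leave the profiler disabled;
 * PMBSR_EL1.S is cleared last so that new buffer management events can
 * be raised.
 */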
static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
{
	struct perf_output_handle *handle = dev;
	struct perf_event *event = handle->event;
	enum arm_spe_pmu_buf_fault_action act;

	if (!perf_get_aux(handle))
		return IRQ_NONE;

	act = arm_spe_pmu_buf_get_fault_act(handle);
	if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
		return IRQ_NONE;

	/*
	 * Ensure perf callbacks have completed, which may disable the
	 * profiling buffer in response to a TRUNCATION flag.
	 */
	irq_work_run();

	switch (act) {
	case SPE_PMU_BUF_FAULT_ACT_FATAL:
		/*
		 * If a fatal exception occurred then leaving the profiling
		 * buffer enabled is a recipe waiting to happen. Since
		 * fatal faults don't always imply truncation, make sure
		 * that the profiling buffer is disabled explicitly before
		 * clearing the syndrome register.
		 */
		arm_spe_pmu_disable_and_drain_local();
		break;
	case SPE_PMU_BUF_FAULT_ACT_OK:
		/*
		 * We handled the fault (the buffer was full), so resume
		 * profiling as long as we didn't detect truncation.
		 * PMBPTR might be misaligned, but we'll burn that bridge
		 * when we get to it.
		 */
		if (!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)) {
			arm_spe_perf_aux_output_begin(handle, event);
			isb();
		}
		break;
	case SPE_PMU_BUF_FAULT_ACT_SPURIOUS:
		/* We've seen you before, but GCC has the memory of a sieve. */
		break;
	}

	/* The buffer pointers are now sane, so resume profiling. */
	write_sysreg_s(0, SYS_PMBSR_EL1);
	return IRQ_HANDLED;
}

/* Perf callbacks */
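/*
 * Validate a new event against what the hardware actually supports:
 * reject filters the CPU doesn't implement, as well as idle exclusion
 * and frequency-based sampling, and require perfmon_capable() for
 * physical addressing and physical timestamps.
 */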
static int arm_spe_pmu_event_init(struct perf_event *event)
{
	u64 reg;
	struct perf_event_attr *attr = &event->attr;
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);

	/* This is, of course, deeply driver-specific */
	if (attr->type != event->pmu->type)
		return -ENOENT;

	if (event->cpu >= 0 &&
	    !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
		return -ENOENT;

	if (arm_spe_event_to_pmsevfr(event) & SYS_PMSEVFR_EL1_RES0)
		return -EOPNOTSUPP;

	if (attr->exclude_idle)
		return -EOPNOTSUPP;

	/*
	 * Feedback-directed frequency throttling doesn't work when we
	 * have a buffer of samples. We'd need to manually count the
	 * samples in the buffer when it fills up and adjust the event
	 * count to reflect that. Instead, just force the user to specify
	 * a sample period.
	 */
	if (attr->freq)
		return -EINVAL;

	reg = arm_spe_event_to_pmsfcr(event);
	if ((reg & BIT(SYS_PMSFCR_EL1_FE_SHIFT)) &&
	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT))
		return -EOPNOTSUPP;

	if ((reg & BIT(SYS_PMSFCR_EL1_FT_SHIFT)) &&
	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP))
		return -EOPNOTSUPP;

	if ((reg & BIT(SYS_PMSFCR_EL1_FL_SHIFT)) &&
	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
		return -EOPNOTSUPP;

	/*
	 * Cache whether this event may trace context information now,
	 * in the owner's context; PMSCR_EL1.CX is then driven purely by
	 * the cached flag, so it needs no capability check here.
	 */
	set_spe_event_has_cx(event);
	reg = arm_spe_event_to_pmscr(event);
	if (!perfmon_capable() &&
	    (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
		    BIT(SYS_PMSCR_EL1_PCT_SHIFT))))
		return -EACCES;

	return 0;
}

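/*
 * Program the filtering and control registers and begin a new AUX
 * session. The sampling interval counter (PMSICR_EL1) is only written
 * when PERF_EF_RELOAD is set, restoring whatever period was left over
 * at the last call to arm_spe_pmu_stop().
 */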
static void arm_spe_pmu_start(struct perf_event *event, int flags)
{
	u64 reg;
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);

	hwc->state = 0;
	arm_spe_perf_aux_output_begin(handle, event);
	if (hwc->state)
		return;

	reg = arm_spe_event_to_pmsfcr(event);
	write_sysreg_s(reg, SYS_PMSFCR_EL1);

	reg = arm_spe_event_to_pmsevfr(event);
	write_sysreg_s(reg, SYS_PMSEVFR_EL1);

	reg = arm_spe_event_to_pmslatfr(event);
	write_sysreg_s(reg, SYS_PMSLATFR_EL1);

	if (flags & PERF_EF_RELOAD) {
		reg = arm_spe_event_to_pmsirr(event);
		write_sysreg_s(reg, SYS_PMSIRR_EL1);
		isb();
		reg = local64_read(&hwc->period_left);
		write_sysreg_s(reg, SYS_PMSICR_EL1);
	}

	reg = arm_spe_event_to_pmscr(event);
	isb();
	write_sysreg_s(reg, SYS_PMSCR_EL1);
}

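/*
 * Stop trace generation and, for PERF_EF_UPDATE, contain any pending
 * buffer fault, close the AUX session and preserve the remaining
 * sample period so that a later PERF_EF_RELOAD picks up where we
 * left off.
 */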
static void arm_spe_pmu_stop(struct perf_event *event, int flags)
{
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);

	/* If we're already stopped, then nothing to do */
	if (hwc->state & PERF_HES_STOPPED)
		return;

	/* Stop all trace generation */
	arm_spe_pmu_disable_and_drain_local();

	if (flags & PERF_EF_UPDATE) {
		/*
		 * If there's a fault pending then ensure we contain it
		 * to this buffer, since we might be on the context-switch
		 * path.
		 */
		if (perf_get_aux(handle)) {
			enum arm_spe_pmu_buf_fault_action act;

			act = arm_spe_pmu_buf_get_fault_act(handle);
			if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
				arm_spe_perf_aux_output_end(handle);
			else
				write_sysreg_s(0, SYS_PMBSR_EL1);
		}

		/*
		 * This may also contain ECOUNT, but nobody else should
		 * be looking at period_left, since we forbid frequency
		 * based sampling.
		 */
		local64_set(&hwc->period_left, read_sysreg_s(SYS_PMSICR_EL1));
		hwc->state |= PERF_HES_UPTODATE;
	}

	hwc->state |= PERF_HES_STOPPED;
}

static int arm_spe_pmu_add(struct perf_event *event, int flags)
{
	int ret = 0;
	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu;

	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
		return -ENOENT;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START) {
		arm_spe_pmu_start(event, PERF_EF_RELOAD);
		if (hwc->state & PERF_HES_STOPPED)
			ret = -EINVAL;
	}

	return ret;
}

static void arm_spe_pmu_del(struct perf_event *event, int flags)
{
	arm_spe_pmu_stop(event, PERF_EF_UPDATE);
}

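/*
 * There is no counter to read: all of the profiling data lands in the
 * AUX buffer, so this is deliberately a no-op.
 */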
static void arm_spe_pmu_read(struct perf_event *event)
{
}

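/*
 * Stitch the perf-supplied pages into a single virtually-contiguous
 * mapping: the profiler writes using plain virtual addresses and knows
 * nothing about the page structure of the ring buffer.
 */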
static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
				   int nr_pages, bool snapshot)
{
	int i, cpu = event->cpu;
	struct page **pglist;
	struct arm_spe_pmu_buf *buf;

	/* We need at least two pages for this to work. */
	if (nr_pages < 2)
		return NULL;

	/*
	 * We require an even number of pages for snapshot mode, so that
	 * we can effectively treat the buffer as consisting of two equal
	 * parts and give userspace a fighting chance of getting some
	 * useful data out of it.
	 */
	if (snapshot && (nr_pages & 1))
		return NULL;

	if (cpu == -1)
		cpu = raw_smp_processor_id();

	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
	if (!buf)
		return NULL;

	pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
	if (!pglist)
		goto out_free_buf;

	for (i = 0; i < nr_pages; ++i)
		pglist[i] = virt_to_page(pages[i]);

	buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!buf->base)
		goto out_free_pglist;

	buf->nr_pages = nr_pages;
	buf->snapshot = snapshot;

	kfree(pglist);
	return buf;

out_free_pglist:
	kfree(pglist);
out_free_buf:
	kfree(buf);
	return NULL;
}

static void arm_spe_pmu_free_aux(void *aux)
{
	struct arm_spe_pmu_buf *buf = aux;

	vunmap(buf->base);
	kfree(buf);
}

/* Initialisation and teardown functions */
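/*
 * Each SPE device is registered as its own PMU, named PMUNAME plus an
 * instance suffix ("arm_spe_0" for the first device), so a typical
 * invocation would look something like:
 *
 *	perf record -e arm_spe_0// -- <workload>
 */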
static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
{
	static atomic_t pmu_idx = ATOMIC_INIT(-1);

	int idx;
	char *name;
	struct device *dev = &spe_pmu->pdev->dev;

	spe_pmu->pmu = (struct pmu) {
		.module = THIS_MODULE,
		.capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
		.attr_groups = arm_spe_pmu_attr_groups,
		/*
		 * We hitch a ride on the software context here, so that
		 * we can support per-task profiling (which is not possible
		 * with the invalid context as it doesn't get sched callbacks).
		 * This requires that userspace either uses a dummy event for
		 * perf_event_open, since the aux buffer is not setup until
		 * a subsequent mmap, or creates the profiling event in a
		 * disabled state and explicitly PERF_EVENT_IOC_ENABLEs it
		 * once the buffer has been created.
		 */
		.task_ctx_nr = perf_sw_context,
		.event_init = arm_spe_pmu_event_init,
		.add = arm_spe_pmu_add,
		.del = arm_spe_pmu_del,
		.start = arm_spe_pmu_start,
		.stop = arm_spe_pmu_stop,
		.read = arm_spe_pmu_read,
		.setup_aux = arm_spe_pmu_setup_aux,
		.free_aux = arm_spe_pmu_free_aux,
	};

	idx = atomic_inc_return(&pmu_idx);
	name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
	if (!name) {
		dev_err(dev, "failed to allocate name for pmu %d\n", idx);
		return -ENOMEM;
	}

	return perf_pmu_register(&spe_pmu->pmu, name, -1);
}

static void arm_spe_pmu_perf_destroy(struct arm_spe_pmu *spe_pmu)
{
	perf_pmu_unregister(&spe_pmu->pmu);
}

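/*
 * Probe the SPE hardware on a CPU that actually implements it: check
 * PMSVer, make sure the profiling buffer isn't owned by a higher
 * exception level and then cache the ID register fields we care about.
 * Runs via smp_call_function_any(), hence the void *info argument.
 */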
static void __arm_spe_pmu_dev_probe(void *info)
{
	int fld;
	u64 reg;
	struct arm_spe_pmu *spe_pmu = info;
	struct device *dev = &spe_pmu->pdev->dev;

	fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
						   ID_AA64DFR0_PMSVER_SHIFT);
	if (!fld) {
		dev_err(dev,
			"unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",
			fld, smp_processor_id());
		return;
	}
	spe_pmu->pmsver = (u16)fld;

	/* Read PMBIDR first to determine whether or not we have access */
	reg = read_sysreg_s(SYS_PMBIDR_EL1);
	if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) {
		dev_err(dev,
			"profiling buffer owned by higher exception level\n");
		return;
	}

	/* Minimum alignment. If it's out-of-range, then fail the probe */
	fld = reg >> SYS_PMBIDR_EL1_ALIGN_SHIFT & SYS_PMBIDR_EL1_ALIGN_MASK;
	spe_pmu->align = 1 << fld;
	if (spe_pmu->align > SZ_2K) {
		dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n",
			fld, smp_processor_id());
		return;
	}

	/* It's now safe to read PMSIDR and figure out what we've got */
	reg = read_sysreg_s(SYS_PMSIDR_EL1);
	if (reg & BIT(SYS_PMSIDR_EL1_FE_SHIFT))
		spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT;

	if (reg & BIT(SYS_PMSIDR_EL1_FT_SHIFT))
		spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP;

	if (reg & BIT(SYS_PMSIDR_EL1_FL_SHIFT))
		spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT;

	if (reg & BIT(SYS_PMSIDR_EL1_ARCHINST_SHIFT))
		spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST;

	if (reg & BIT(SYS_PMSIDR_EL1_LDS_SHIFT))
		spe_pmu->features |= SPE_PMU_FEAT_LDS;

	if (reg & BIT(SYS_PMSIDR_EL1_ERND_SHIFT))
		spe_pmu->features |= SPE_PMU_FEAT_ERND;

	/* This field has a spaced out encoding, so just use a look-up */
	fld = reg >> SYS_PMSIDR_EL1_INTERVAL_SHIFT & SYS_PMSIDR_EL1_INTERVAL_MASK;
	switch (fld) {
	case 0:
		spe_pmu->min_period = 256;
		break;
	case 2:
		spe_pmu->min_period = 512;
		break;
	case 3:
		spe_pmu->min_period = 768;
		break;
	case 4:
		spe_pmu->min_period = 1024;
		break;
	case 5:
		spe_pmu->min_period = 1536;
		break;
	case 6:
		spe_pmu->min_period = 2048;
		break;
	case 7:
		spe_pmu->min_period = 3072;
		break;
	default:
		dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
			 fld);
		fallthrough;
	case 8:
		spe_pmu->min_period = 4096;
	}

	/* Maximum record size. If it's out-of-range, then fail the probe */
	fld = reg >> SYS_PMSIDR_EL1_MAXSIZE_SHIFT & SYS_PMSIDR_EL1_MAXSIZE_MASK;
	spe_pmu->max_record_sz = 1 << fld;
	if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) {
		dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n",
			fld, smp_processor_id());
		return;
	}

	fld = reg >> SYS_PMSIDR_EL1_COUNTSIZE_SHIFT & SYS_PMSIDR_EL1_COUNTSIZE_MASK;
	switch (fld) {
	default:
		dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
			 fld);
		fallthrough;
	case 2:
		spe_pmu->counter_sz = 12;
	}

	dev_info(dev,
		 "probed for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
		 cpumask_pr_args(&spe_pmu->supported_cpus),
		 spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);

	spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
}

static void __arm_spe_pmu_reset_local(void)
{
	/*
	 * This is probably overkill, as we have no idea where we're
	 * draining any buffered data to...
	 */
	arm_spe_pmu_disable_and_drain_local();

	/* Reset the buffer base pointer */
	write_sysreg_s(0, SYS_PMBPTR_EL1);
	isb();

	/* Clear any pending management interrupts */
	write_sysreg_s(0, SYS_PMBSR_EL1);
	isb();
}

static void __arm_spe_pmu_setup_one(void *info)
{
	struct arm_spe_pmu *spe_pmu = info;

	__arm_spe_pmu_reset_local();
	enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE);
}

static void __arm_spe_pmu_stop_one(void *info)
{
	struct arm_spe_pmu *spe_pmu = info;

	disable_percpu_irq(spe_pmu->irq);
	__arm_spe_pmu_reset_local();
}

static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node)
{
	struct arm_spe_pmu *spe_pmu;

	spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
		return 0;

	__arm_spe_pmu_setup_one(spe_pmu);
	return 0;
}

static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
	struct arm_spe_pmu *spe_pmu;

	spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
		return 0;

	__arm_spe_pmu_stop_one(spe_pmu);
	return 0;
}

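/*
 * One-time device initialisation: probe the hardware on a supported
 * CPU, request the per-CPU buffer management interrupt and register
 * for hotplug callbacks so that late-arriving CPUs get set up too.
 */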
static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
{
	int ret;
	cpumask_t *mask = &spe_pmu->supported_cpus;

	/* Make sure we probe the hardware on a relevant CPU */
	ret = smp_call_function_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, 1);
	if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED))
		return -ENXIO;

	/* Request our PPIs (note that the IRQ is still disabled) */
	ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME,
				 spe_pmu->handle);
	if (ret)
		return ret;

	/*
	 * Register our hotplug notifier now so we don't miss any events.
	 * This will enable the IRQ for any supported CPUs that are already
	 * up.
	 */
	ret = cpuhp_state_add_instance(arm_spe_pmu_online,
				       &spe_pmu->hotplug_node);
	if (ret)
		free_percpu_irq(spe_pmu->irq, spe_pmu->handle);

	return ret;
}

static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu)
{
	cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node);
	free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
}

/* Driver and device probing */
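/*
 * SPE signals its buffer management interrupt as a PPI, so a per-CPU
 * IRQ is mandatory; the PPI partition also tells us which CPUs this
 * device instance supports.
 */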
static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
{
	struct platform_device *pdev = spe_pmu->pdev;
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return -ENXIO;

	if (!irq_is_percpu(irq)) {
		dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq);
		return -EINVAL;
	}

	if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) {
		dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq);
		return -EINVAL;
	}

	spe_pmu->irq = irq;
	return 0;
}

static const struct of_device_id arm_spe_pmu_of_match[] = {
	{ .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 },
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, arm_spe_pmu_of_match);

static const struct platform_device_id arm_spe_match[] = {
	{ ARMV8_SPE_PDEV_NAME, 0 },
	{ }
};
MODULE_DEVICE_TABLE(platform, arm_spe_match);

static int arm_spe_pmu_device_probe(struct platform_device *pdev)
{
	int ret;
	struct arm_spe_pmu *spe_pmu;
	struct device *dev = &pdev->dev;

	/*
	 * If kernelspace is unmapped when running at EL0, then the SPE
	 * buffer will fault and prematurely terminate the AUX session.
	 */
	if (arm64_kernel_unmapped_at_el0()) {
		dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n");
		return -EPERM;
	}

	spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
	if (!spe_pmu) {
		dev_err(dev, "failed to allocate spe_pmu\n");
		return -ENOMEM;
	}

	spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle));
	if (!spe_pmu->handle)
		return -ENOMEM;

	spe_pmu->pdev = pdev;
	platform_set_drvdata(pdev, spe_pmu);

	ret = arm_spe_pmu_irq_probe(spe_pmu);
	if (ret)
		goto out_free_handle;

	ret = arm_spe_pmu_dev_init(spe_pmu);
	if (ret)
		goto out_free_handle;

	ret = arm_spe_pmu_perf_init(spe_pmu);
	if (ret)
		goto out_teardown_dev;

	return 0;

out_teardown_dev:
	arm_spe_pmu_dev_teardown(spe_pmu);
out_free_handle:
	free_percpu(spe_pmu->handle);
	return ret;
}

static int arm_spe_pmu_device_remove(struct platform_device *pdev)
{
	struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);

	arm_spe_pmu_perf_destroy(spe_pmu);
	arm_spe_pmu_dev_teardown(spe_pmu);
	free_percpu(spe_pmu->handle);
	return 0;
}

static struct platform_driver arm_spe_pmu_driver = {
	.id_table = arm_spe_match,
	.driver = {
		.name = DRVNAME,
		.of_match_table = of_match_ptr(arm_spe_pmu_of_match),
		.suppress_bind_attrs = true,
	},
	.probe = arm_spe_pmu_device_probe,
	.remove = arm_spe_pmu_device_remove,
};

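/*
 * The dynamic hotplug state must be set up before the driver is
 * registered, since probing a device immediately adds a hotplug
 * instance to that state.
 */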
static int __init arm_spe_pmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
				      arm_spe_pmu_cpu_startup,
				      arm_spe_pmu_cpu_teardown);
	if (ret < 0)
		return ret;
	arm_spe_pmu_online = ret;

	ret = platform_driver_register(&arm_spe_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(arm_spe_pmu_online);

	return ret;
}

static void __exit arm_spe_pmu_exit(void)
{
	platform_driver_unregister(&arm_spe_pmu_driver);
	cpuhp_remove_multi_state(arm_spe_pmu_online);
}

module_init(arm_spe_pmu_init);
module_exit(arm_spe_pmu_exit);

MODULE_DESCRIPTION("Perf driver for the ARMv8.2 Statistical Profiling Extension");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");