   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Perf support for the Statistical Profiling Extension, introduced as
   4 * part of ARMv8.2.
   5 *
   6 * Copyright (C) 2016 ARM Limited
   7 *
   8 * Author: Will Deacon <will.deacon@arm.com>
   9 */
  10
  11#define PMUNAME					"arm_spe"
  12#define DRVNAME					PMUNAME "_pmu"
  13#define pr_fmt(fmt)				DRVNAME ": " fmt
  14
  15#include <linux/bitfield.h>
  16#include <linux/bitops.h>
  17#include <linux/bug.h>
  18#include <linux/capability.h>
  19#include <linux/cpuhotplug.h>
  20#include <linux/cpumask.h>
  21#include <linux/device.h>
  22#include <linux/errno.h>
  23#include <linux/interrupt.h>
  24#include <linux/irq.h>
  25#include <linux/kernel.h>
  26#include <linux/list.h>
  27#include <linux/module.h>
  28#include <linux/of.h>
  29#include <linux/perf_event.h>
  30#include <linux/perf/arm_pmu.h>
  31#include <linux/platform_device.h>
  32#include <linux/printk.h>
  33#include <linux/slab.h>
  34#include <linux/smp.h>
  35#include <linux/vmalloc.h>
  36
  37#include <asm/barrier.h>
  38#include <asm/cpufeature.h>
  39#include <asm/mmu.h>
  40#include <asm/sysreg.h>
  41
  42/*
  43 * Cache if the event is allowed to trace Context information.
   44 * This allows us to perform the check, i.e., perf_allow_kernel(),
  45 * in the context of the event owner, once, during the event_init().
  46 */
  47#define SPE_PMU_HW_FLAGS_CX			0x00001
  48
  49static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_CX);
  50
  51static void set_spe_event_has_cx(struct perf_event *event)
  52{
  53	if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && !perf_allow_kernel(&event->attr))
  54		event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
  55}
  56
  57static bool get_spe_event_has_cx(struct perf_event *event)
  58{
  59	return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX);
  60}
  61
  62#define ARM_SPE_BUF_PAD_BYTE			0
  63
  64struct arm_spe_pmu_buf {
  65	int					nr_pages;
  66	bool					snapshot;
  67	void					*base;
  68};
  69
  70struct arm_spe_pmu {
  71	struct pmu				pmu;
  72	struct platform_device			*pdev;
  73	cpumask_t				supported_cpus;
  74	struct hlist_node			hotplug_node;
  75
  76	int					irq; /* PPI */
  77	u16					pmsver;
  78	u16					min_period;
  79	u16					counter_sz;
  80
  81#define SPE_PMU_FEAT_FILT_EVT			(1UL << 0)
  82#define SPE_PMU_FEAT_FILT_TYP			(1UL << 1)
  83#define SPE_PMU_FEAT_FILT_LAT			(1UL << 2)
  84#define SPE_PMU_FEAT_ARCH_INST			(1UL << 3)
  85#define SPE_PMU_FEAT_LDS			(1UL << 4)
  86#define SPE_PMU_FEAT_ERND			(1UL << 5)
  87#define SPE_PMU_FEAT_INV_FILT_EVT		(1UL << 6)
  88#define SPE_PMU_FEAT_DEV_PROBED			(1UL << 63)
  89	u64					features;
  90
  91	u16					max_record_sz;
  92	u16					align;
  93	struct perf_output_handle __percpu	*handle;
  94};
  95
  96#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))
  97
  98/* Convert a free-running index from perf into an SPE buffer offset */
  99#define PERF_IDX2OFF(idx, buf)	((idx) % ((buf)->nr_pages << PAGE_SHIFT))
 100
 101/* Keep track of our dynamic hotplug state */
 102static enum cpuhp_state arm_spe_pmu_online;
 103
 104enum arm_spe_pmu_buf_fault_action {
 105	SPE_PMU_BUF_FAULT_ACT_SPURIOUS,
 106	SPE_PMU_BUF_FAULT_ACT_FATAL,
 107	SPE_PMU_BUF_FAULT_ACT_OK,
 108};
 109
 110/* This sysfs gunk was really good fun to write. */
 111enum arm_spe_pmu_capabilities {
 112	SPE_PMU_CAP_ARCH_INST = 0,
 113	SPE_PMU_CAP_ERND,
 114	SPE_PMU_CAP_FEAT_MAX,
 115	SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX,
 116	SPE_PMU_CAP_MIN_IVAL,
 117};
 118
 119static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
 120	[SPE_PMU_CAP_ARCH_INST]	= SPE_PMU_FEAT_ARCH_INST,
 121	[SPE_PMU_CAP_ERND]	= SPE_PMU_FEAT_ERND,
 122};
 123
 124static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
 125{
 126	if (cap < SPE_PMU_CAP_FEAT_MAX)
 127		return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]);
 128
 129	switch (cap) {
 130	case SPE_PMU_CAP_CNT_SZ:
 131		return spe_pmu->counter_sz;
 132	case SPE_PMU_CAP_MIN_IVAL:
 133		return spe_pmu->min_period;
 134	default:
 135		WARN(1, "unknown cap %d\n", cap);
 136	}
 137
 138	return 0;
 139}
 140
 141static ssize_t arm_spe_pmu_cap_show(struct device *dev,
 142				    struct device_attribute *attr,
 143				    char *buf)
 144{
 145	struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
 146	struct dev_ext_attribute *ea =
 147		container_of(attr, struct dev_ext_attribute, attr);
 148	int cap = (long)ea->var;
 149
 150	return sysfs_emit(buf, "%u\n", arm_spe_pmu_cap_get(spe_pmu, cap));
 151}
 152
 153#define SPE_EXT_ATTR_ENTRY(_name, _func, _var)				\
 154	&((struct dev_ext_attribute[]) {				\
 155		{ __ATTR(_name, S_IRUGO, _func, NULL), (void *)_var }	\
 156	})[0].attr.attr
 157
 158#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var)				\
 159	SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var)
 160
 161static struct attribute *arm_spe_pmu_cap_attr[] = {
 162	SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST),
 163	SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND),
 164	SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ),
 165	SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL),
 166	NULL,
 167};
 168
 169static const struct attribute_group arm_spe_pmu_cap_group = {
 170	.name	= "caps",
 171	.attrs	= arm_spe_pmu_cap_attr,
 172};
 173
 174/* User ABI */
 175#define ATTR_CFG_FLD_ts_enable_CFG		config	/* PMSCR_EL1.TS */
 176#define ATTR_CFG_FLD_ts_enable_LO		0
 177#define ATTR_CFG_FLD_ts_enable_HI		0
 178#define ATTR_CFG_FLD_pa_enable_CFG		config	/* PMSCR_EL1.PA */
 179#define ATTR_CFG_FLD_pa_enable_LO		1
 180#define ATTR_CFG_FLD_pa_enable_HI		1
 181#define ATTR_CFG_FLD_pct_enable_CFG		config	/* PMSCR_EL1.PCT */
 182#define ATTR_CFG_FLD_pct_enable_LO		2
 183#define ATTR_CFG_FLD_pct_enable_HI		2
 184#define ATTR_CFG_FLD_jitter_CFG			config	/* PMSIRR_EL1.RND */
 185#define ATTR_CFG_FLD_jitter_LO			16
 186#define ATTR_CFG_FLD_jitter_HI			16
 187#define ATTR_CFG_FLD_branch_filter_CFG		config	/* PMSFCR_EL1.B */
 188#define ATTR_CFG_FLD_branch_filter_LO		32
 189#define ATTR_CFG_FLD_branch_filter_HI		32
 190#define ATTR_CFG_FLD_load_filter_CFG		config	/* PMSFCR_EL1.LD */
 191#define ATTR_CFG_FLD_load_filter_LO		33
 192#define ATTR_CFG_FLD_load_filter_HI		33
 193#define ATTR_CFG_FLD_store_filter_CFG		config	/* PMSFCR_EL1.ST */
 194#define ATTR_CFG_FLD_store_filter_LO		34
 195#define ATTR_CFG_FLD_store_filter_HI		34
 196
 197#define ATTR_CFG_FLD_event_filter_CFG		config1	/* PMSEVFR_EL1 */
 198#define ATTR_CFG_FLD_event_filter_LO		0
 199#define ATTR_CFG_FLD_event_filter_HI		63
 200
 201#define ATTR_CFG_FLD_min_latency_CFG		config2	/* PMSLATFR_EL1.MINLAT */
 202#define ATTR_CFG_FLD_min_latency_LO		0
 203#define ATTR_CFG_FLD_min_latency_HI		11
 204
 205#define ATTR_CFG_FLD_inv_event_filter_CFG	config3	/* PMSNEVFR_EL1 */
 206#define ATTR_CFG_FLD_inv_event_filter_LO	0
 207#define ATTR_CFG_FLD_inv_event_filter_HI	63
 208
 209GEN_PMU_FORMAT_ATTR(ts_enable);
 210GEN_PMU_FORMAT_ATTR(pa_enable);
 211GEN_PMU_FORMAT_ATTR(pct_enable);
 212GEN_PMU_FORMAT_ATTR(jitter);
 213GEN_PMU_FORMAT_ATTR(branch_filter);
 214GEN_PMU_FORMAT_ATTR(load_filter);
 215GEN_PMU_FORMAT_ATTR(store_filter);
 216GEN_PMU_FORMAT_ATTR(event_filter);
 217GEN_PMU_FORMAT_ATTR(inv_event_filter);
 218GEN_PMU_FORMAT_ATTR(min_latency);
 219
 220static struct attribute *arm_spe_pmu_formats_attr[] = {
 221	&format_attr_ts_enable.attr,
 222	&format_attr_pa_enable.attr,
 223	&format_attr_pct_enable.attr,
 224	&format_attr_jitter.attr,
 225	&format_attr_branch_filter.attr,
 226	&format_attr_load_filter.attr,
 227	&format_attr_store_filter.attr,
 228	&format_attr_event_filter.attr,
 229	&format_attr_inv_event_filter.attr,
 230	&format_attr_min_latency.attr,
 231	NULL,
 232};
 233
 234static umode_t arm_spe_pmu_format_attr_is_visible(struct kobject *kobj,
 235						  struct attribute *attr,
 236						  int unused)
  237{
 238	struct device *dev = kobj_to_dev(kobj);
 239	struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
 240
 241	if (attr == &format_attr_inv_event_filter.attr && !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
 242		return 0;
 243
 244	return attr->mode;
 245}
 246
 247static const struct attribute_group arm_spe_pmu_format_group = {
 248	.name	= "format",
 249	.is_visible = arm_spe_pmu_format_attr_is_visible,
 250	.attrs	= arm_spe_pmu_formats_attr,
 251};
 252
 253static ssize_t cpumask_show(struct device *dev,
 254			    struct device_attribute *attr, char *buf)
 255{
 256	struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
 257
 258	return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus);
 259}
 260static DEVICE_ATTR_RO(cpumask);
 261
 262static struct attribute *arm_spe_pmu_attrs[] = {
 263	&dev_attr_cpumask.attr,
 264	NULL,
 265};
 266
 267static const struct attribute_group arm_spe_pmu_group = {
 268	.attrs	= arm_spe_pmu_attrs,
 269};
 270
 271static const struct attribute_group *arm_spe_pmu_attr_groups[] = {
 272	&arm_spe_pmu_group,
 273	&arm_spe_pmu_cap_group,
 274	&arm_spe_pmu_format_group,
 275	NULL,
 276};
 277
 278/* Convert between user ABI and register values */
 279static u64 arm_spe_event_to_pmscr(struct perf_event *event)
 280{
 281	struct perf_event_attr *attr = &event->attr;
 282	u64 reg = 0;
 283
 284	reg |= FIELD_PREP(PMSCR_EL1_TS, ATTR_CFG_GET_FLD(attr, ts_enable));
 285	reg |= FIELD_PREP(PMSCR_EL1_PA, ATTR_CFG_GET_FLD(attr, pa_enable));
 286	reg |= FIELD_PREP(PMSCR_EL1_PCT, ATTR_CFG_GET_FLD(attr, pct_enable));
 287
 288	if (!attr->exclude_user)
 289		reg |= PMSCR_EL1_E0SPE;
 290
 291	if (!attr->exclude_kernel)
 292		reg |= PMSCR_EL1_E1SPE;
 293
 294	if (get_spe_event_has_cx(event))
 295		reg |= PMSCR_EL1_CX;
 296
 297	return reg;
 298}
 299
 300static void arm_spe_event_sanitise_period(struct perf_event *event)
 301{
 302	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
 303	u64 period = event->hw.sample_period;
 304	u64 max_period = PMSIRR_EL1_INTERVAL_MASK;
 305
 306	if (period < spe_pmu->min_period)
 307		period = spe_pmu->min_period;
 308	else if (period > max_period)
 309		period = max_period;
 310	else
 311		period &= max_period;
 312
 313	event->hw.sample_period = period;
 314}
 315
 316static u64 arm_spe_event_to_pmsirr(struct perf_event *event)
 317{
 318	struct perf_event_attr *attr = &event->attr;
 319	u64 reg = 0;
 320
 321	arm_spe_event_sanitise_period(event);
 322
 323	reg |= FIELD_PREP(PMSIRR_EL1_RND, ATTR_CFG_GET_FLD(attr, jitter));
 324	reg |= event->hw.sample_period;
 325
 326	return reg;
 327}
 328
 329static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
 330{
 331	struct perf_event_attr *attr = &event->attr;
 332	u64 reg = 0;
 333
 334	reg |= FIELD_PREP(PMSFCR_EL1_LD, ATTR_CFG_GET_FLD(attr, load_filter));
 335	reg |= FIELD_PREP(PMSFCR_EL1_ST, ATTR_CFG_GET_FLD(attr, store_filter));
 336	reg |= FIELD_PREP(PMSFCR_EL1_B, ATTR_CFG_GET_FLD(attr, branch_filter));
 337
 338	if (reg)
 339		reg |= PMSFCR_EL1_FT;
 340
 341	if (ATTR_CFG_GET_FLD(attr, event_filter))
 342		reg |= PMSFCR_EL1_FE;
 343
 344	if (ATTR_CFG_GET_FLD(attr, inv_event_filter))
 345		reg |= PMSFCR_EL1_FnE;
 346
 347	if (ATTR_CFG_GET_FLD(attr, min_latency))
 348		reg |= PMSFCR_EL1_FL;
 349
 350	return reg;
 351}
 352
 353static u64 arm_spe_event_to_pmsevfr(struct perf_event *event)
 354{
 355	struct perf_event_attr *attr = &event->attr;
 356	return ATTR_CFG_GET_FLD(attr, event_filter);
 357}
 358
 359static u64 arm_spe_event_to_pmsnevfr(struct perf_event *event)
 360{
 361	struct perf_event_attr *attr = &event->attr;
 362	return ATTR_CFG_GET_FLD(attr, inv_event_filter);
 363}
 364
 365static u64 arm_spe_event_to_pmslatfr(struct perf_event *event)
 366{
 367	struct perf_event_attr *attr = &event->attr;
 368	return FIELD_PREP(PMSLATFR_EL1_MINLAT, ATTR_CFG_GET_FLD(attr, min_latency));
 369}
 370
 371static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len)
 372{
 373	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
 374	u64 head = PERF_IDX2OFF(handle->head, buf);
 375
 376	memset(buf->base + head, ARM_SPE_BUF_PAD_BYTE, len);
 377	if (!buf->snapshot)
 378		perf_aux_output_skip(handle, len);
 379}
 380
 381static u64 arm_spe_pmu_next_snapshot_off(struct perf_output_handle *handle)
 382{
 383	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
 384	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
 385	u64 head = PERF_IDX2OFF(handle->head, buf);
 386	u64 limit = buf->nr_pages * PAGE_SIZE;
 387
 388	/*
 389	 * The trace format isn't parseable in reverse, so clamp
 390	 * the limit to half of the buffer size in snapshot mode
 391	 * so that the worst case is half a buffer of records, as
 392	 * opposed to a single record.
 393	 */
 394	if (head < limit >> 1)
 395		limit >>= 1;
 396
 397	/*
 398	 * If we're within max_record_sz of the limit, we must
 399	 * pad, move the head index and recompute the limit.
 400	 */
 401	if (limit - head < spe_pmu->max_record_sz) {
 402		arm_spe_pmu_pad_buf(handle, limit - head);
 403		handle->head = PERF_IDX2OFF(limit, buf);
 404		limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head;
 405	}
 406
 407	return limit;
 408}
 409
 410static u64 __arm_spe_pmu_next_off(struct perf_output_handle *handle)
 411{
 412	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
 413	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
 414	const u64 bufsize = buf->nr_pages * PAGE_SIZE;
 415	u64 limit = bufsize;
 416	u64 head, tail, wakeup;
 417
 418	/*
 419	 * The head can be misaligned for two reasons:
 420	 *
 421	 * 1. The hardware left PMBPTR pointing to the first byte after
 422	 *    a record when generating a buffer management event.
 423	 *
 424	 * 2. We used perf_aux_output_skip to consume handle->size bytes
 425	 *    and CIRC_SPACE was used to compute the size, which always
 426	 *    leaves one entry free.
 427	 *
 428	 * Deal with this by padding to the next alignment boundary and
 429	 * moving the head index. If we run out of buffer space, we'll
 430	 * reduce handle->size to zero and end up reporting truncation.
 431	 */
 432	head = PERF_IDX2OFF(handle->head, buf);
 433	if (!IS_ALIGNED(head, spe_pmu->align)) {
 434		unsigned long delta = roundup(head, spe_pmu->align) - head;
 435
 436		delta = min(delta, handle->size);
 437		arm_spe_pmu_pad_buf(handle, delta);
 438		head = PERF_IDX2OFF(handle->head, buf);
 439	}
 440
 441	/* If we've run out of free space, then nothing more to do */
 442	if (!handle->size)
 443		goto no_space;
 444
 445	/* Compute the tail and wakeup indices now that we've aligned head */
 446	tail = PERF_IDX2OFF(handle->head + handle->size, buf);
 447	wakeup = PERF_IDX2OFF(handle->wakeup, buf);
 448
 449	/*
 450	 * Avoid clobbering unconsumed data. We know we have space, so
 451	 * if we see head == tail we know that the buffer is empty. If
 452	 * head > tail, then there's nothing to clobber prior to
 453	 * wrapping.
 454	 */
 455	if (head < tail)
 456		limit = round_down(tail, PAGE_SIZE);
 457
 458	/*
 459	 * Wakeup may be arbitrarily far into the future. If it's not in
 460	 * the current generation, either we'll wrap before hitting it,
 461	 * or it's in the past and has been handled already.
 462	 *
 463	 * If there's a wakeup before we wrap, arrange to be woken up by
 464	 * the page boundary following it. Keep the tail boundary if
 465	 * that's lower.
 466	 */
 467	if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
 468		limit = min(limit, round_up(wakeup, PAGE_SIZE));
 469
 470	if (limit > head)
 471		return limit;
 472
 473	arm_spe_pmu_pad_buf(handle, handle->size);
 474no_space:
 475	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
 476	perf_aux_output_end(handle, 0);
 477	return 0;
 478}
 479
 480static u64 arm_spe_pmu_next_off(struct perf_output_handle *handle)
 481{
 482	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
 483	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
 484	u64 limit = __arm_spe_pmu_next_off(handle);
 485	u64 head = PERF_IDX2OFF(handle->head, buf);
 486
 487	/*
 488	 * If the head has come too close to the end of the buffer,
 489	 * then pad to the end and recompute the limit.
 490	 */
 491	if (limit && (limit - head < spe_pmu->max_record_sz)) {
 492		arm_spe_pmu_pad_buf(handle, limit - head);
 493		limit = __arm_spe_pmu_next_off(handle);
 494	}
 495
 496	return limit;
 497}
 498
 499static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
 500					  struct perf_event *event)
 501{
 502	u64 base, limit;
 503	struct arm_spe_pmu_buf *buf;
 504
 505	/* Start a new aux session */
 506	buf = perf_aux_output_begin(handle, event);
 507	if (!buf) {
 508		event->hw.state |= PERF_HES_STOPPED;
 509		/*
 510		 * We still need to clear the limit pointer, since the
 511		 * profiler might only be disabled by virtue of a fault.
 512		 */
 513		limit = 0;
 514		goto out_write_limit;
 515	}
 516
 517	limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle)
 518			      : arm_spe_pmu_next_off(handle);
 519	if (limit)
 520		limit |= PMBLIMITR_EL1_E;
 521
 522	limit += (u64)buf->base;
 523	base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf);
 524	write_sysreg_s(base, SYS_PMBPTR_EL1);
 525
 526out_write_limit:
 527	write_sysreg_s(limit, SYS_PMBLIMITR_EL1);
 528}
 529
 530static void arm_spe_perf_aux_output_end(struct perf_output_handle *handle)
 531{
 532	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
 533	u64 offset, size;
 534
 535	offset = read_sysreg_s(SYS_PMBPTR_EL1) - (u64)buf->base;
 536	size = offset - PERF_IDX2OFF(handle->head, buf);
 537
 538	if (buf->snapshot)
 539		handle->head = offset;
 540
 541	perf_aux_output_end(handle, size);
 542}
 543
 544static void arm_spe_pmu_disable_and_drain_local(void)
 545{
 546	/* Disable profiling at EL0 and EL1 */
 547	write_sysreg_s(0, SYS_PMSCR_EL1);
 548	isb();
 549
 550	/* Drain any buffered data */
 551	psb_csync();
 552	dsb(nsh);
 553
 554	/* Disable the profiling buffer */
 555	write_sysreg_s(0, SYS_PMBLIMITR_EL1);
 556	isb();
 557}
 558
 559/* IRQ handling */
 560static enum arm_spe_pmu_buf_fault_action
 561arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle)
 562{
 563	const char *err_str;
 564	u64 pmbsr;
 565	enum arm_spe_pmu_buf_fault_action ret;
 566
 567	/*
 568	 * Ensure new profiling data is visible to the CPU and any external
 569	 * aborts have been resolved.
 570	 */
 571	psb_csync();
 572	dsb(nsh);
 573
 574	/* Ensure hardware updates to PMBPTR_EL1 are visible */
 575	isb();
 576
 577	/* Service required? */
 578	pmbsr = read_sysreg_s(SYS_PMBSR_EL1);
 579	if (!FIELD_GET(PMBSR_EL1_S, pmbsr))
 580		return SPE_PMU_BUF_FAULT_ACT_SPURIOUS;
 581
 582	/*
 583	 * If we've lost data, disable profiling and also set the PARTIAL
 584	 * flag to indicate that the last record is corrupted.
 585	 */
 586	if (FIELD_GET(PMBSR_EL1_DL, pmbsr))
 587		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED |
 588					     PERF_AUX_FLAG_PARTIAL);
 589
 590	/* Report collisions to userspace so that it can up the period */
 591	if (FIELD_GET(PMBSR_EL1_COLL, pmbsr))
 592		perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
 593
 594	/* We only expect buffer management events */
 595	switch (FIELD_GET(PMBSR_EL1_EC, pmbsr)) {
 596	case PMBSR_EL1_EC_BUF:
 597		/* Handled below */
 598		break;
 599	case PMBSR_EL1_EC_FAULT_S1:
 600	case PMBSR_EL1_EC_FAULT_S2:
 601		err_str = "Unexpected buffer fault";
 602		goto out_err;
 603	default:
 604		err_str = "Unknown error code";
 605		goto out_err;
 606	}
 607
 608	/* Buffer management event */
 609	switch (FIELD_GET(PMBSR_EL1_BUF_BSC_MASK, pmbsr)) {
 610	case PMBSR_EL1_BUF_BSC_FULL:
 611		ret = SPE_PMU_BUF_FAULT_ACT_OK;
 612		goto out_stop;
 613	default:
 614		err_str = "Unknown buffer status code";
 615	}
 616
 617out_err:
 618	pr_err_ratelimited("%s on CPU %d [PMBSR=0x%016llx, PMBPTR=0x%016llx, PMBLIMITR=0x%016llx]\n",
 619			   err_str, smp_processor_id(), pmbsr,
 620			   read_sysreg_s(SYS_PMBPTR_EL1),
 621			   read_sysreg_s(SYS_PMBLIMITR_EL1));
 622	ret = SPE_PMU_BUF_FAULT_ACT_FATAL;
 623
 624out_stop:
 625	arm_spe_perf_aux_output_end(handle);
 626	return ret;
 627}
 628
 629static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
 630{
 631	struct perf_output_handle *handle = dev;
 632	struct perf_event *event = handle->event;
 633	enum arm_spe_pmu_buf_fault_action act;
 634
 635	if (!perf_get_aux(handle))
 636		return IRQ_NONE;
 637
 638	act = arm_spe_pmu_buf_get_fault_act(handle);
 639	if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
 640		return IRQ_NONE;
 641
 642	/*
 643	 * Ensure perf callbacks have completed, which may disable the
 644	 * profiling buffer in response to a TRUNCATION flag.
 645	 */
 646	irq_work_run();
 647
 648	switch (act) {
 649	case SPE_PMU_BUF_FAULT_ACT_FATAL:
 650		/*
 651		 * If a fatal exception occurred then leaving the profiling
 652		 * buffer enabled is a recipe waiting to happen. Since
 653		 * fatal faults don't always imply truncation, make sure
 654		 * that the profiling buffer is disabled explicitly before
 655		 * clearing the syndrome register.
 656		 */
 657		arm_spe_pmu_disable_and_drain_local();
 658		break;
 659	case SPE_PMU_BUF_FAULT_ACT_OK:
 660		/*
 661		 * We handled the fault (the buffer was full), so resume
 662		 * profiling as long as we didn't detect truncation.
 663		 * PMBPTR might be misaligned, but we'll burn that bridge
 664		 * when we get to it.
 665		 */
 666		if (!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)) {
 667			arm_spe_perf_aux_output_begin(handle, event);
 668			isb();
 669		}
 670		break;
 671	case SPE_PMU_BUF_FAULT_ACT_SPURIOUS:
 672		/* We've seen you before, but GCC has the memory of a sieve. */
 673		break;
 674	}
 675
 676	/* The buffer pointers are now sane, so resume profiling. */
 677	write_sysreg_s(0, SYS_PMBSR_EL1);
 678	return IRQ_HANDLED;
 679}
 680
 681static u64 arm_spe_pmsevfr_res0(u16 pmsver)
 682{
 683	switch (pmsver) {
 684	case ID_AA64DFR0_EL1_PMSVer_IMP:
 685		return PMSEVFR_EL1_RES0_IMP;
 686	case ID_AA64DFR0_EL1_PMSVer_V1P1:
 687		return PMSEVFR_EL1_RES0_V1P1;
 688	case ID_AA64DFR0_EL1_PMSVer_V1P2:
  689	/* Return the highest version we support by default */
 690	default:
 691		return PMSEVFR_EL1_RES0_V1P2;
 692	}
 693}
 694
 695/* Perf callbacks */
 696static int arm_spe_pmu_event_init(struct perf_event *event)
 697{
 698	u64 reg;
 699	struct perf_event_attr *attr = &event->attr;
 700	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
 701
 702	/* This is, of course, deeply driver-specific */
 703	if (attr->type != event->pmu->type)
 704		return -ENOENT;
 705
 706	if (event->cpu >= 0 &&
 707	    !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
 708		return -ENOENT;
 709
 710	if (arm_spe_event_to_pmsevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
 711		return -EOPNOTSUPP;
 712
 713	if (arm_spe_event_to_pmsnevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
 714		return -EOPNOTSUPP;
 715
 716	if (attr->exclude_idle)
 717		return -EOPNOTSUPP;
 718
 719	/*
 720	 * Feedback-directed frequency throttling doesn't work when we
 721	 * have a buffer of samples. We'd need to manually count the
 722	 * samples in the buffer when it fills up and adjust the event
 723	 * count to reflect that. Instead, just force the user to specify
 724	 * a sample period.
 725	 */
 726	if (attr->freq)
 727		return -EINVAL;
 728
 729	reg = arm_spe_event_to_pmsfcr(event);
 730	if ((FIELD_GET(PMSFCR_EL1_FE, reg)) &&
 731	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT))
 732		return -EOPNOTSUPP;
 733
 734	if ((FIELD_GET(PMSFCR_EL1_FnE, reg)) &&
 735	    !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
 736		return -EOPNOTSUPP;
 737
 738	if ((FIELD_GET(PMSFCR_EL1_FT, reg)) &&
 739	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP))
 740		return -EOPNOTSUPP;
 741
 742	if ((FIELD_GET(PMSFCR_EL1_FL, reg)) &&
 743	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
 744		return -EOPNOTSUPP;
 745
 746	set_spe_event_has_cx(event);
 747	reg = arm_spe_event_to_pmscr(event);
 748	if (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT))
 749		return perf_allow_kernel(&event->attr);
 750
 751	return 0;
 752}
 753
 754static void arm_spe_pmu_start(struct perf_event *event, int flags)
 755{
 756	u64 reg;
 757	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
 758	struct hw_perf_event *hwc = &event->hw;
 759	struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);
 760
 761	hwc->state = 0;
 762	arm_spe_perf_aux_output_begin(handle, event);
 763	if (hwc->state)
 764		return;
 765
 766	reg = arm_spe_event_to_pmsfcr(event);
 767	write_sysreg_s(reg, SYS_PMSFCR_EL1);
 768
 769	reg = arm_spe_event_to_pmsevfr(event);
 770	write_sysreg_s(reg, SYS_PMSEVFR_EL1);
 771
 772	if (spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT) {
 773		reg = arm_spe_event_to_pmsnevfr(event);
 774		write_sysreg_s(reg, SYS_PMSNEVFR_EL1);
 775	}
 776
 777	reg = arm_spe_event_to_pmslatfr(event);
 778	write_sysreg_s(reg, SYS_PMSLATFR_EL1);
 779
 780	if (flags & PERF_EF_RELOAD) {
 781		reg = arm_spe_event_to_pmsirr(event);
 782		write_sysreg_s(reg, SYS_PMSIRR_EL1);
 783		isb();
 784		reg = local64_read(&hwc->period_left);
 785		write_sysreg_s(reg, SYS_PMSICR_EL1);
 786	}
 787
 788	reg = arm_spe_event_to_pmscr(event);
 789	isb();
 790	write_sysreg_s(reg, SYS_PMSCR_EL1);
 791}
 792
 793static void arm_spe_pmu_stop(struct perf_event *event, int flags)
 794{
 795	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
 796	struct hw_perf_event *hwc = &event->hw;
 797	struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);
 798
 799	/* If we're already stopped, then nothing to do */
 800	if (hwc->state & PERF_HES_STOPPED)
 801		return;
 802
 803	/* Stop all trace generation */
 804	arm_spe_pmu_disable_and_drain_local();
 805
 806	if (flags & PERF_EF_UPDATE) {
 807		/*
 808		 * If there's a fault pending then ensure we contain it
 809		 * to this buffer, since we might be on the context-switch
 810		 * path.
 811		 */
 812		if (perf_get_aux(handle)) {
 813			enum arm_spe_pmu_buf_fault_action act;
 814
 815			act = arm_spe_pmu_buf_get_fault_act(handle);
 816			if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
 817				arm_spe_perf_aux_output_end(handle);
 818			else
 819				write_sysreg_s(0, SYS_PMBSR_EL1);
 820		}
 821
 822		/*
 823		 * This may also contain ECOUNT, but nobody else should
 824		 * be looking at period_left, since we forbid frequency
 825		 * based sampling.
 826		 */
 827		local64_set(&hwc->period_left, read_sysreg_s(SYS_PMSICR_EL1));
 828		hwc->state |= PERF_HES_UPTODATE;
 829	}
 830
 831	hwc->state |= PERF_HES_STOPPED;
 832}
 833
 834static int arm_spe_pmu_add(struct perf_event *event, int flags)
 835{
 836	int ret = 0;
 837	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
 838	struct hw_perf_event *hwc = &event->hw;
 839	int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu;
 840
 841	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
 842		return -ENOENT;
 843
 844	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
 845
 846	if (flags & PERF_EF_START) {
 847		arm_spe_pmu_start(event, PERF_EF_RELOAD);
 848		if (hwc->state & PERF_HES_STOPPED)
 849			ret = -EINVAL;
 850	}
 851
 852	return ret;
 853}
 854
 855static void arm_spe_pmu_del(struct perf_event *event, int flags)
 856{
 857	arm_spe_pmu_stop(event, PERF_EF_UPDATE);
 858}
 859
 860static void arm_spe_pmu_read(struct perf_event *event)
 861{
 862}
 863
 864static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
 865				   int nr_pages, bool snapshot)
 866{
 867	int i, cpu = event->cpu;
 868	struct page **pglist;
 869	struct arm_spe_pmu_buf *buf;
 870
 871	/* We need at least two pages for this to work. */
 872	if (nr_pages < 2)
 873		return NULL;
 874
 875	/*
 876	 * We require an even number of pages for snapshot mode, so that
 877	 * we can effectively treat the buffer as consisting of two equal
 878	 * parts and give userspace a fighting chance of getting some
 879	 * useful data out of it.
 880	 */
 881	if (snapshot && (nr_pages & 1))
 882		return NULL;
 883
 884	if (cpu == -1)
 885		cpu = raw_smp_processor_id();
 886
 887	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
 888	if (!buf)
 889		return NULL;
 890
 891	pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
 892	if (!pglist)
 893		goto out_free_buf;
 894
 895	for (i = 0; i < nr_pages; ++i)
 896		pglist[i] = virt_to_page(pages[i]);
 897
 898	buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
 899	if (!buf->base)
 900		goto out_free_pglist;
 901
 902	buf->nr_pages	= nr_pages;
 903	buf->snapshot	= snapshot;
 904
 905	kfree(pglist);
 906	return buf;
 907
 908out_free_pglist:
 909	kfree(pglist);
 910out_free_buf:
 911	kfree(buf);
 912	return NULL;
 913}
 914
 915static void arm_spe_pmu_free_aux(void *aux)
 916{
 917	struct arm_spe_pmu_buf *buf = aux;
 918
 919	vunmap(buf->base);
 920	kfree(buf);
 921}
 922
 923/* Initialisation and teardown functions */
 924static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
 925{
 926	static atomic_t pmu_idx = ATOMIC_INIT(-1);
 927
 928	int idx;
 929	char *name;
 930	struct device *dev = &spe_pmu->pdev->dev;
 931
 932	spe_pmu->pmu = (struct pmu) {
 933		.module = THIS_MODULE,
 934		.parent		= &spe_pmu->pdev->dev,
 935		.capabilities	= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
 936		.attr_groups	= arm_spe_pmu_attr_groups,
 937		/*
 938		 * We hitch a ride on the software context here, so that
 939		 * we can support per-task profiling (which is not possible
 940		 * with the invalid context as it doesn't get sched callbacks).
 941		 * This requires that userspace either uses a dummy event for
 942		 * perf_event_open, since the aux buffer is not setup until
 943		 * a subsequent mmap, or creates the profiling event in a
 944		 * disabled state and explicitly PERF_EVENT_IOC_ENABLEs it
 945		 * once the buffer has been created.
 946		 */
 947		.task_ctx_nr	= perf_sw_context,
 948		.event_init	= arm_spe_pmu_event_init,
 949		.add		= arm_spe_pmu_add,
 950		.del		= arm_spe_pmu_del,
 951		.start		= arm_spe_pmu_start,
 952		.stop		= arm_spe_pmu_stop,
 953		.read		= arm_spe_pmu_read,
 954		.setup_aux	= arm_spe_pmu_setup_aux,
 955		.free_aux	= arm_spe_pmu_free_aux,
 956	};
 957
 958	idx = atomic_inc_return(&pmu_idx);
 959	name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
 960	if (!name) {
 961		dev_err(dev, "failed to allocate name for pmu %d\n", idx);
 962		return -ENOMEM;
 963	}
 964
 965	return perf_pmu_register(&spe_pmu->pmu, name, -1);
 966}
 967
 968static void arm_spe_pmu_perf_destroy(struct arm_spe_pmu *spe_pmu)
 969{
 970	perf_pmu_unregister(&spe_pmu->pmu);
 971}
 972
 973static void __arm_spe_pmu_dev_probe(void *info)
 974{
 975	int fld;
 976	u64 reg;
 977	struct arm_spe_pmu *spe_pmu = info;
 978	struct device *dev = &spe_pmu->pdev->dev;
 979
 980	fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
 981						   ID_AA64DFR0_EL1_PMSVer_SHIFT);
 982	if (!fld) {
 983		dev_err(dev,
 984			"unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",
 985			fld, smp_processor_id());
 986		return;
 987	}
 988	spe_pmu->pmsver = (u16)fld;
 989
 990	/* Read PMBIDR first to determine whether or not we have access */
 991	reg = read_sysreg_s(SYS_PMBIDR_EL1);
 992	if (FIELD_GET(PMBIDR_EL1_P, reg)) {
 993		dev_err(dev,
 994			"profiling buffer owned by higher exception level\n");
 995		return;
 996	}
 997
 998	/* Minimum alignment. If it's out-of-range, then fail the probe */
 999	fld = FIELD_GET(PMBIDR_EL1_ALIGN, reg);
1000	spe_pmu->align = 1 << fld;
1001	if (spe_pmu->align > SZ_2K) {
1002		dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n",
1003			fld, smp_processor_id());
1004		return;
1005	}
1006
1007	/* It's now safe to read PMSIDR and figure out what we've got */
1008	reg = read_sysreg_s(SYS_PMSIDR_EL1);
1009	if (FIELD_GET(PMSIDR_EL1_FE, reg))
1010		spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT;
1011
1012	if (FIELD_GET(PMSIDR_EL1_FnE, reg))
1013		spe_pmu->features |= SPE_PMU_FEAT_INV_FILT_EVT;
1014
1015	if (FIELD_GET(PMSIDR_EL1_FT, reg))
1016		spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP;
1017
1018	if (FIELD_GET(PMSIDR_EL1_FL, reg))
1019		spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT;
1020
1021	if (FIELD_GET(PMSIDR_EL1_ARCHINST, reg))
1022		spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST;
1023
1024	if (FIELD_GET(PMSIDR_EL1_LDS, reg))
1025		spe_pmu->features |= SPE_PMU_FEAT_LDS;
1026
1027	if (FIELD_GET(PMSIDR_EL1_ERND, reg))
1028		spe_pmu->features |= SPE_PMU_FEAT_ERND;
1029
1030	/* This field has a spaced out encoding, so just use a look-up */
1031	fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg);
1032	switch (fld) {
1033	case PMSIDR_EL1_INTERVAL_256:
1034		spe_pmu->min_period = 256;
1035		break;
1036	case PMSIDR_EL1_INTERVAL_512:
1037		spe_pmu->min_period = 512;
1038		break;
1039	case PMSIDR_EL1_INTERVAL_768:
1040		spe_pmu->min_period = 768;
1041		break;
1042	case PMSIDR_EL1_INTERVAL_1024:
1043		spe_pmu->min_period = 1024;
1044		break;
1045	case PMSIDR_EL1_INTERVAL_1536:
1046		spe_pmu->min_period = 1536;
1047		break;
1048	case PMSIDR_EL1_INTERVAL_2048:
1049		spe_pmu->min_period = 2048;
1050		break;
1051	case PMSIDR_EL1_INTERVAL_3072:
1052		spe_pmu->min_period = 3072;
1053		break;
1054	default:
1055		dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
1056			 fld);
1057		fallthrough;
1058	case PMSIDR_EL1_INTERVAL_4096:
1059		spe_pmu->min_period = 4096;
1060	}
1061
1062	/* Maximum record size. If it's out-of-range, then fail the probe */
1063	fld = FIELD_GET(PMSIDR_EL1_MAXSIZE, reg);
1064	spe_pmu->max_record_sz = 1 << fld;
1065	if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) {
1066		dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n",
1067			fld, smp_processor_id());
1068		return;
1069	}
1070
1071	fld = FIELD_GET(PMSIDR_EL1_COUNTSIZE, reg);
1072	switch (fld) {
1073	default:
1074		dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
1075			 fld);
1076		fallthrough;
1077	case PMSIDR_EL1_COUNTSIZE_12_BIT_SAT:
1078		spe_pmu->counter_sz = 12;
1079		break;
1080	case PMSIDR_EL1_COUNTSIZE_16_BIT_SAT:
1081		spe_pmu->counter_sz = 16;
1082	}
1083
1084	dev_info(dev,
1085		 "probed SPEv1.%d for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
1086		 spe_pmu->pmsver - 1, cpumask_pr_args(&spe_pmu->supported_cpus),
1087		 spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);
1088
1089	spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
1090}
1091
1092static void __arm_spe_pmu_reset_local(void)
1093{
1094	/*
1095	 * This is probably overkill, as we have no idea where we're
1096	 * draining any buffered data to...
1097	 */
1098	arm_spe_pmu_disable_and_drain_local();
1099
1100	/* Reset the buffer base pointer */
1101	write_sysreg_s(0, SYS_PMBPTR_EL1);
1102	isb();
1103
1104	/* Clear any pending management interrupts */
1105	write_sysreg_s(0, SYS_PMBSR_EL1);
1106	isb();
1107}
1108
1109static void __arm_spe_pmu_setup_one(void *info)
1110{
1111	struct arm_spe_pmu *spe_pmu = info;
1112
1113	__arm_spe_pmu_reset_local();
1114	enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE);
1115}
1116
1117static void __arm_spe_pmu_stop_one(void *info)
1118{
1119	struct arm_spe_pmu *spe_pmu = info;
1120
1121	disable_percpu_irq(spe_pmu->irq);
1122	__arm_spe_pmu_reset_local();
1123}
1124
1125static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node)
1126{
1127	struct arm_spe_pmu *spe_pmu;
1128
1129	spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
1130	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
1131		return 0;
1132
1133	__arm_spe_pmu_setup_one(spe_pmu);
1134	return 0;
1135}
1136
1137static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
1138{
1139	struct arm_spe_pmu *spe_pmu;
1140
1141	spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
1142	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
1143		return 0;
1144
1145	__arm_spe_pmu_stop_one(spe_pmu);
1146	return 0;
1147}
1148
1149static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
1150{
1151	int ret;
1152	cpumask_t *mask = &spe_pmu->supported_cpus;
1153
1154	/* Make sure we probe the hardware on a relevant CPU */
1155	ret = smp_call_function_any(mask,  __arm_spe_pmu_dev_probe, spe_pmu, 1);
1156	if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED))
1157		return -ENXIO;
1158
1159	/* Request our PPIs (note that the IRQ is still disabled) */
1160	ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME,
1161				 spe_pmu->handle);
1162	if (ret)
1163		return ret;
1164
1165	/*
1166	 * Register our hotplug notifier now so we don't miss any events.
1167	 * This will enable the IRQ for any supported CPUs that are already
1168	 * up.
1169	 */
1170	ret = cpuhp_state_add_instance(arm_spe_pmu_online,
1171				       &spe_pmu->hotplug_node);
1172	if (ret)
1173		free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
1174
1175	return ret;
1176}
1177
1178static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu)
1179{
1180	cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node);
1181	free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
1182}
1183
1184/* Driver and device probing */
1185static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
1186{
1187	struct platform_device *pdev = spe_pmu->pdev;
1188	int irq = platform_get_irq(pdev, 0);
1189
1190	if (irq < 0)
1191		return -ENXIO;
1192
1193	if (!irq_is_percpu(irq)) {
1194		dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq);
1195		return -EINVAL;
1196	}
1197
1198	if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) {
1199		dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq);
1200		return -EINVAL;
1201	}
1202
1203	spe_pmu->irq = irq;
1204	return 0;
1205}
1206
1207static const struct of_device_id arm_spe_pmu_of_match[] = {
1208	{ .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 },
1209	{ /* Sentinel */ },
1210};
1211MODULE_DEVICE_TABLE(of, arm_spe_pmu_of_match);
1212
1213static const struct platform_device_id arm_spe_match[] = {
1214	{ ARMV8_SPE_PDEV_NAME, 0},
1215	{ }
1216};
1217MODULE_DEVICE_TABLE(platform, arm_spe_match);
1218
1219static int arm_spe_pmu_device_probe(struct platform_device *pdev)
1220{
1221	int ret;
1222	struct arm_spe_pmu *spe_pmu;
1223	struct device *dev = &pdev->dev;
1224
1225	/*
1226	 * If kernelspace is unmapped when running at EL0, then the SPE
1227	 * buffer will fault and prematurely terminate the AUX session.
1228	 */
1229	if (arm64_kernel_unmapped_at_el0()) {
1230		dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n");
1231		return -EPERM;
1232	}
1233
1234	spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
1235	if (!spe_pmu)
1236		return -ENOMEM;
1237
1238	spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle));
1239	if (!spe_pmu->handle)
1240		return -ENOMEM;
1241
1242	spe_pmu->pdev = pdev;
1243	platform_set_drvdata(pdev, spe_pmu);
1244
1245	ret = arm_spe_pmu_irq_probe(spe_pmu);
1246	if (ret)
1247		goto out_free_handle;
1248
1249	ret = arm_spe_pmu_dev_init(spe_pmu);
1250	if (ret)
1251		goto out_free_handle;
1252
1253	ret = arm_spe_pmu_perf_init(spe_pmu);
1254	if (ret)
1255		goto out_teardown_dev;
1256
1257	return 0;
1258
1259out_teardown_dev:
1260	arm_spe_pmu_dev_teardown(spe_pmu);
1261out_free_handle:
1262	free_percpu(spe_pmu->handle);
1263	return ret;
1264}
1265
1266static void arm_spe_pmu_device_remove(struct platform_device *pdev)
1267{
1268	struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
1269
1270	arm_spe_pmu_perf_destroy(spe_pmu);
1271	arm_spe_pmu_dev_teardown(spe_pmu);
1272	free_percpu(spe_pmu->handle);
1273}
1274
1275static struct platform_driver arm_spe_pmu_driver = {
1276	.id_table = arm_spe_match,
1277	.driver	= {
1278		.name		= DRVNAME,
1279		.of_match_table	= of_match_ptr(arm_spe_pmu_of_match),
1280		.suppress_bind_attrs = true,
1281	},
1282	.probe	= arm_spe_pmu_device_probe,
1283	.remove = arm_spe_pmu_device_remove,
1284};
1285
1286static int __init arm_spe_pmu_init(void)
1287{
1288	int ret;
1289
1290	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
1291				      arm_spe_pmu_cpu_startup,
1292				      arm_spe_pmu_cpu_teardown);
1293	if (ret < 0)
1294		return ret;
1295	arm_spe_pmu_online = ret;
1296
1297	ret = platform_driver_register(&arm_spe_pmu_driver);
1298	if (ret)
1299		cpuhp_remove_multi_state(arm_spe_pmu_online);
1300
1301	return ret;
1302}
1303
1304static void __exit arm_spe_pmu_exit(void)
1305{
1306	platform_driver_unregister(&arm_spe_pmu_driver);
1307	cpuhp_remove_multi_state(arm_spe_pmu_online);
1308}
1309
1310module_init(arm_spe_pmu_init);
1311module_exit(arm_spe_pmu_exit);
1312
1313MODULE_DESCRIPTION("Perf driver for the ARMv8.2 Statistical Profiling Extension");
1314MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
1315MODULE_LICENSE("GPL v2");
 615	}
 616
 617out_err:
 618	pr_err_ratelimited("%s on CPU %d [PMBSR=0x%016llx, PMBPTR=0x%016llx, PMBLIMITR=0x%016llx]\n",
 619			   err_str, smp_processor_id(), pmbsr,
 620			   read_sysreg_s(SYS_PMBPTR_EL1),
 621			   read_sysreg_s(SYS_PMBLIMITR_EL1));
 622	ret = SPE_PMU_BUF_FAULT_ACT_FATAL;
 623
 624out_stop:
 625	arm_spe_perf_aux_output_end(handle);
 626	return ret;
 627}
 628
 629static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
 630{
 631	struct perf_output_handle *handle = dev;
 632	struct perf_event *event = handle->event;
 633	enum arm_spe_pmu_buf_fault_action act;
 634
 635	if (!perf_get_aux(handle))
 636		return IRQ_NONE;
 637
 638	act = arm_spe_pmu_buf_get_fault_act(handle);
 639	if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
 640		return IRQ_NONE;
 641
 642	/*
 643	 * Ensure perf callbacks have completed, which may disable the
 644	 * profiling buffer in response to a TRUNCATION flag.
 645	 */
 646	irq_work_run();
 647
 648	switch (act) {
 649	case SPE_PMU_BUF_FAULT_ACT_FATAL:
 650		/*
 651		 * If a fatal exception occurred then leaving the profiling
 652		 * buffer enabled is a recipe waiting to happen. Since
 653		 * fatal faults don't always imply truncation, make sure
 654		 * that the profiling buffer is disabled explicitly before
 655		 * clearing the syndrome register.
 656		 */
 657		arm_spe_pmu_disable_and_drain_local();
 658		break;
 659	case SPE_PMU_BUF_FAULT_ACT_OK:
 660		/*
 661		 * We handled the fault (the buffer was full), so resume
 662		 * profiling as long as we didn't detect truncation.
 663		 * PMBPTR might be misaligned, but we'll burn that bridge
 664		 * when we get to it.
 665		 */
 666		if (!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)) {
 667			arm_spe_perf_aux_output_begin(handle, event);
 668			isb();
 669		}
 670		break;
 671	case SPE_PMU_BUF_FAULT_ACT_SPURIOUS:
 672		/* We've seen you before, but GCC has the memory of a sieve. */
 673		break;
 674	}
 675
 676	/* The buffer pointers are now sane, so resume profiling. */
 677	write_sysreg_s(0, SYS_PMBSR_EL1);
 678	return IRQ_HANDLED;
 679}
 680
 681static u64 arm_spe_pmsevfr_res0(u16 pmsver)
 682{
 683	switch (pmsver) {
 684	case ID_AA64DFR0_EL1_PMSVer_IMP:
 685		return PMSEVFR_EL1_RES0_IMP;
 686	case ID_AA64DFR0_EL1_PMSVer_V1P1:
 687		return PMSEVFR_EL1_RES0_V1P1;
 688	case ID_AA64DFR0_EL1_PMSVer_V1P2:
 689	/* Return the highest version we support in the default case */
 690	default:
 691		return PMSEVFR_EL1_RES0_V1P2;
 692	}
 693}
 694
 695/* Perf callbacks */
 696static int arm_spe_pmu_event_init(struct perf_event *event)
 697{
 698	u64 reg;
 699	struct perf_event_attr *attr = &event->attr;
 700	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
 701
 702	/* This is, of course, deeply driver-specific */
 703	if (attr->type != event->pmu->type)
 704		return -ENOENT;
 705
 706	if (event->cpu >= 0 &&
 707	    !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
 708		return -ENOENT;
 709
 710	if (arm_spe_event_to_pmsevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
 711		return -EOPNOTSUPP;
 712
 713	if (arm_spe_event_to_pmsnevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
 714		return -EOPNOTSUPP;
 715
 716	if (attr->exclude_idle)
 717		return -EOPNOTSUPP;
 718
 719	/*
 720	 * Feedback-directed frequency throttling doesn't work when we
 721	 * have a buffer of samples. We'd need to manually count the
 722	 * samples in the buffer when it fills up and adjust the event
 723	 * count to reflect that. Instead, just force the user to specify
 724	 * a sample period.
 725	 */
 726	if (attr->freq)
 727		return -EINVAL;
 728
 729	reg = arm_spe_event_to_pmsfcr(event);
 730	if ((FIELD_GET(PMSFCR_EL1_FE, reg)) &&
 731	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT))
 732		return -EOPNOTSUPP;
 733
 734	if ((FIELD_GET(PMSFCR_EL1_FnE, reg)) &&
 735	    !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
 736		return -EOPNOTSUPP;
 737
 738	if ((FIELD_GET(PMSFCR_EL1_FT, reg)) &&
 739	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP))
 740		return -EOPNOTSUPP;
 741
 742	if ((FIELD_GET(PMSFCR_EL1_FL, reg)) &&
 743	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
 744		return -EOPNOTSUPP;
 745
 746	set_spe_event_has_cx(event);
 747	reg = arm_spe_event_to_pmscr(event);
 748	if (!perfmon_capable() &&
 749	    (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT)))
 750		return -EACCES;
 751
 752	return 0;
 753}
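
/*
 * Userspace sketch of a matching perf_event_attr. The PMU instance name
 * ("arm_spe_0"), the period and the helper variable names are illustrative
 * assumptions only; the dynamic event type has to be read from sysfs, e.g.
 * /sys/bus/event_source/devices/arm_spe_0/type. Because attr.freq is
 * rejected above, a fixed sample_period must be supplied:
 *
 *	struct perf_event_attr attr = {
 *		.type		= arm_spe_type,
 *		.size		= sizeof(attr),
 *		.sample_period	= 4096,
 *		.disabled	= 1,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */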
 754
 755static void arm_spe_pmu_start(struct perf_event *event, int flags)
 756{
 757	u64 reg;
 758	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
 759	struct hw_perf_event *hwc = &event->hw;
 760	struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);
 761
 762	hwc->state = 0;
 763	arm_spe_perf_aux_output_begin(handle, event);
 764	if (hwc->state)
 765		return;
 766
 767	reg = arm_spe_event_to_pmsfcr(event);
 768	write_sysreg_s(reg, SYS_PMSFCR_EL1);
 769
 770	reg = arm_spe_event_to_pmsevfr(event);
 771	write_sysreg_s(reg, SYS_PMSEVFR_EL1);
 772
 773	if (spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT) {
 774		reg = arm_spe_event_to_pmsnevfr(event);
 775		write_sysreg_s(reg, SYS_PMSNEVFR_EL1);
 776	}
 777
 778	reg = arm_spe_event_to_pmslatfr(event);
 779	write_sysreg_s(reg, SYS_PMSLATFR_EL1);
 780
 781	if (flags & PERF_EF_RELOAD) {
 782		reg = arm_spe_event_to_pmsirr(event);
 783		write_sysreg_s(reg, SYS_PMSIRR_EL1);
 784		isb();
 785		reg = local64_read(&hwc->period_left);
 786		write_sysreg_s(reg, SYS_PMSICR_EL1);
 787	}
 788
 789	reg = arm_spe_event_to_pmscr(event);
 790	isb();
 791	write_sysreg_s(reg, SYS_PMSCR_EL1);
 792}
 793
 794static void arm_spe_pmu_stop(struct perf_event *event, int flags)
 795{
 796	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
 797	struct hw_perf_event *hwc = &event->hw;
 798	struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);
 799
 800	/* If we're already stopped, then nothing to do */
 801	if (hwc->state & PERF_HES_STOPPED)
 802		return;
 803
 804	/* Stop all trace generation */
 805	arm_spe_pmu_disable_and_drain_local();
 806
 807	if (flags & PERF_EF_UPDATE) {
 808		/*
 809		 * If there's a fault pending then ensure we contain it
 810		 * to this buffer, since we might be on the context-switch
 811		 * path.
 812		 */
 813		if (perf_get_aux(handle)) {
 814			enum arm_spe_pmu_buf_fault_action act;
 815
 816			act = arm_spe_pmu_buf_get_fault_act(handle);
 817			if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
 818				arm_spe_perf_aux_output_end(handle);
 819			else
 820				write_sysreg_s(0, SYS_PMBSR_EL1);
 821		}
 822
 823		/*
 824		 * This may also contain ECOUNT, but nobody else should
 825		 * be looking at period_left, since we forbid frequency
 826		 * based sampling.
 827		 */
 828		local64_set(&hwc->period_left, read_sysreg_s(SYS_PMSICR_EL1));
 829		hwc->state |= PERF_HES_UPTODATE;
 830	}
 831
 832	hwc->state |= PERF_HES_STOPPED;
 833}
 834
 835static int arm_spe_pmu_add(struct perf_event *event, int flags)
 836{
 837	int ret = 0;
 838	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
 839	struct hw_perf_event *hwc = &event->hw;
 840	int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu;
 841
 842	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
 843		return -ENOENT;
 844
 845	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
 846
 847	if (flags & PERF_EF_START) {
 848		arm_spe_pmu_start(event, PERF_EF_RELOAD);
 849		if (hwc->state & PERF_HES_STOPPED)
 850			ret = -EINVAL;
 851	}
 852
 853	return ret;
 854}
 855
 856static void arm_spe_pmu_del(struct perf_event *event, int flags)
 857{
 858	arm_spe_pmu_stop(event, PERF_EF_UPDATE);
 859}
 860
 861static void arm_spe_pmu_read(struct perf_event *event)
 862{
 863}
 864
 865static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
 866				   int nr_pages, bool snapshot)
 867{
 868	int i, cpu = event->cpu;
 869	struct page **pglist;
 870	struct arm_spe_pmu_buf *buf;
 871
 872	/* We need at least two pages for this to work. */
 873	if (nr_pages < 2)
 874		return NULL;
 875
 876	/*
 877	 * We require an even number of pages for snapshot mode, so that
 878	 * we can effectively treat the buffer as consisting of two equal
 879	 * parts and give userspace a fighting chance of getting some
 880	 * useful data out of it.
 881	 */
 882	if (snapshot && (nr_pages & 1))
 883		return NULL;
 884
 885	if (cpu == -1)
 886		cpu = raw_smp_processor_id();
 887
 888	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
 889	if (!buf)
 890		return NULL;
 891
 892	pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
 893	if (!pglist)
 894		goto out_free_buf;
 895
 896	for (i = 0; i < nr_pages; ++i)
 897		pglist[i] = virt_to_page(pages[i]);
 898
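	/*
	 * Build a single, virtually contiguous view of the AUX pages:
	 * PMBPTR_EL1/PMBLIMITR_EL1 hold virtual addresses and the SPE
	 * hardware fills the buffer linearly, so the pages perf hands us
	 * must appear contiguous to the CPU.
	 */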
 899	buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
 900	if (!buf->base)
 901		goto out_free_pglist;
 902
 903	buf->nr_pages	= nr_pages;
 904	buf->snapshot	= snapshot;
 905
 906	kfree(pglist);
 907	return buf;
 908
 909out_free_pglist:
 910	kfree(pglist);
 911out_free_buf:
 912	kfree(buf);
 913	return NULL;
 914}
 915
 916static void arm_spe_pmu_free_aux(void *aux)
 917{
 918	struct arm_spe_pmu_buf *buf = aux;
 919
 920	vunmap(buf->base);
 921	kfree(buf);
 922}
 923
 924/* Initialisation and teardown functions */
 925static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
 926{
 927	static atomic_t pmu_idx = ATOMIC_INIT(-1);
 928
 929	int idx;
 930	char *name;
 931	struct device *dev = &spe_pmu->pdev->dev;
 932
 933	spe_pmu->pmu = (struct pmu) {
 934		.module = THIS_MODULE,
 935		.capabilities	= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
 936		.attr_groups	= arm_spe_pmu_attr_groups,
 937		/*
 938		 * We hitch a ride on the software context here, so that
 939		 * we can support per-task profiling (which is not possible
 940		 * with the invalid context as it doesn't get sched callbacks).
 941		 * This requires that userspace either uses a dummy event for
 942		 * perf_event_open, since the aux buffer is not setup until
 943		 * a subsequent mmap, or creates the profiling event in a
 944		 * disabled state and explicitly PERF_EVENT_IOC_ENABLEs it
 945		 * once the buffer has been created.
 946		 */
 947		.task_ctx_nr	= perf_sw_context,
 948		.event_init	= arm_spe_pmu_event_init,
 949		.add		= arm_spe_pmu_add,
 950		.del		= arm_spe_pmu_del,
 951		.start		= arm_spe_pmu_start,
 952		.stop		= arm_spe_pmu_stop,
 953		.read		= arm_spe_pmu_read,
 954		.setup_aux	= arm_spe_pmu_setup_aux,
 955		.free_aux	= arm_spe_pmu_free_aux,
 956	};
 957
 958	idx = atomic_inc_return(&pmu_idx);
 959	name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
 960	if (!name) {
 961		dev_err(dev, "failed to allocate name for pmu %d\n", idx);
 962		return -ENOMEM;
 963	}
 964
 965	return perf_pmu_register(&spe_pmu->pmu, name, -1);
 966}
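
/*
 * The task_ctx_nr comment above translates into the following userspace
 * sequence (a sketch; "n" data pages, "m" AUX pages and the attr from the
 * earlier sketch are assumptions): open the event disabled, map the ring
 * buffer, publish the AUX geometry, map the AUX area and only then enable:
 *
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	struct perf_event_mmap_page *pc;
 *
 *	pc = mmap(NULL, (1 + n) * page_size, PROT_READ | PROT_WRITE,
 *		  MAP_SHARED, fd, 0);
 *	pc->aux_offset = (1 + n) * page_size;
 *	pc->aux_size   = m * page_size;
 *	aux = mmap(NULL, pc->aux_size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, pc->aux_offset);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *
 * arm_spe_pmu_setup_aux() above requires m >= 2, and an even m when
 * snapshot mode is requested.
 */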
 967
 968static void arm_spe_pmu_perf_destroy(struct arm_spe_pmu *spe_pmu)
 969{
 970	perf_pmu_unregister(&spe_pmu->pmu);
 971}
 972
 973static void __arm_spe_pmu_dev_probe(void *info)
 974{
 975	int fld;
 976	u64 reg;
 977	struct arm_spe_pmu *spe_pmu = info;
 978	struct device *dev = &spe_pmu->pdev->dev;
 979
 980	fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
 981						   ID_AA64DFR0_EL1_PMSVer_SHIFT);
 982	if (!fld) {
 983		dev_err(dev,
 984			"unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",
 985			fld, smp_processor_id());
 986		return;
 987	}
 988	spe_pmu->pmsver = (u16)fld;
 989
 990	/* Read PMBIDR first to determine whether or not we have access */
 991	reg = read_sysreg_s(SYS_PMBIDR_EL1);
 992	if (FIELD_GET(PMBIDR_EL1_P, reg)) {
 993		dev_err(dev,
 994			"profiling buffer owned by higher exception level\n");
 995		return;
 996	}
 997
 998	/* Minimum alignment. If it's out-of-range, then fail the probe */
 999	fld = FIELD_GET(PMBIDR_EL1_ALIGN, reg);
1000	spe_pmu->align = 1 << fld;
1001	if (spe_pmu->align > SZ_2K) {
1002		dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n",
1003			fld, smp_processor_id());
1004		return;
1005	}
1006
1007	/* It's now safe to read PMSIDR and figure out what we've got */
1008	reg = read_sysreg_s(SYS_PMSIDR_EL1);
1009	if (FIELD_GET(PMSIDR_EL1_FE, reg))
1010		spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT;
1011
1012	if (FIELD_GET(PMSIDR_EL1_FnE, reg))
1013		spe_pmu->features |= SPE_PMU_FEAT_INV_FILT_EVT;
1014
1015	if (FIELD_GET(PMSIDR_EL1_FT, reg))
1016		spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP;
1017
1018	if (FIELD_GET(PMSIDR_EL1_FL, reg))
1019		spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT;
1020
1021	if (FIELD_GET(PMSIDR_EL1_ARCHINST, reg))
1022		spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST;
1023
1024	if (FIELD_GET(PMSIDR_EL1_LDS, reg))
1025		spe_pmu->features |= SPE_PMU_FEAT_LDS;
1026
1027	if (FIELD_GET(PMSIDR_EL1_ERND, reg))
1028		spe_pmu->features |= SPE_PMU_FEAT_ERND;
1029
1030	/* This field has a spaced out encoding, so just use a look-up */
1031	fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg);
1032	switch (fld) {
1033	case PMSIDR_EL1_INTERVAL_256:
1034		spe_pmu->min_period = 256;
1035		break;
1036	case PMSIDR_EL1_INTERVAL_512:
1037		spe_pmu->min_period = 512;
1038		break;
1039	case PMSIDR_EL1_INTERVAL_768:
1040		spe_pmu->min_period = 768;
1041		break;
1042	case PMSIDR_EL1_INTERVAL_1024:
1043		spe_pmu->min_period = 1024;
1044		break;
1045	case PMSIDR_EL1_INTERVAL_1536:
1046		spe_pmu->min_period = 1536;
1047		break;
1048	case PMSIDR_EL1_INTERVAL_2048:
1049		spe_pmu->min_period = 2048;
1050		break;
1051	case PMSIDR_EL1_INTERVAL_3072:
1052		spe_pmu->min_period = 3072;
1053		break;
1054	default:
1055		dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
1056			 fld);
1057		fallthrough;
1058	case PMSIDR_EL1_INTERVAL_4096:
1059		spe_pmu->min_period = 4096;
1060	}
1061
1062	/* Maximum record size. If it's out-of-range, then fail the probe */
1063	fld = FIELD_GET(PMSIDR_EL1_MAXSIZE, reg);
1064	spe_pmu->max_record_sz = 1 << fld;
1065	if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) {
1066		dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n",
1067			fld, smp_processor_id());
1068		return;
1069	}
1070
1071	fld = FIELD_GET(PMSIDR_EL1_COUNTSIZE, reg);
1072	switch (fld) {
1073	default:
1074		dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
1075			 fld);
1076		fallthrough;
1077	case PMSIDR_EL1_COUNTSIZE_12_BIT_SAT:
1078		spe_pmu->counter_sz = 12;
1079		break;
1080	case PMSIDR_EL1_COUNTSIZE_16_BIT_SAT:
1081		spe_pmu->counter_sz = 16;
1082	}
1083
1084	dev_info(dev,
1085		 "probed SPEv1.%d for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
1086		 spe_pmu->pmsver - 1, cpumask_pr_args(&spe_pmu->supported_cpus),
1087		 spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);
1088
1089	spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
1090}
1091
1092static void __arm_spe_pmu_reset_local(void)
1093{
1094	/*
1095	 * This is probably overkill, as we have no idea where we're
1096	 * draining any buffered data to...
1097	 */
1098	arm_spe_pmu_disable_and_drain_local();
1099
1100	/* Reset the buffer base pointer */
1101	write_sysreg_s(0, SYS_PMBPTR_EL1);
1102	isb();
1103
1104	/* Clear any pending management interrupts */
1105	write_sysreg_s(0, SYS_PMBSR_EL1);
1106	isb();
1107}
1108
1109static void __arm_spe_pmu_setup_one(void *info)
1110{
1111	struct arm_spe_pmu *spe_pmu = info;
1112
1113	__arm_spe_pmu_reset_local();
1114	enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE);
1115}
1116
1117static void __arm_spe_pmu_stop_one(void *info)
1118{
1119	struct arm_spe_pmu *spe_pmu = info;
1120
1121	disable_percpu_irq(spe_pmu->irq);
1122	__arm_spe_pmu_reset_local();
1123}
1124
1125static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node)
1126{
1127	struct arm_spe_pmu *spe_pmu;
1128
1129	spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
1130	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
1131		return 0;
1132
1133	__arm_spe_pmu_setup_one(spe_pmu);
1134	return 0;
1135}
1136
1137static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
1138{
1139	struct arm_spe_pmu *spe_pmu;
1140
1141	spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
1142	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
1143		return 0;
1144
1145	__arm_spe_pmu_stop_one(spe_pmu);
1146	return 0;
1147}
1148
1149static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
1150{
1151	int ret;
1152	cpumask_t *mask = &spe_pmu->supported_cpus;
1153
1154	/* Make sure we probe the hardware on a relevant CPU */
1155	ret = smp_call_function_any(mask,  __arm_spe_pmu_dev_probe, spe_pmu, 1);
1156	if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED))
1157		return -ENXIO;
1158
1159	/* Request our PPIs (note that the IRQ is still disabled) */
1160	ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME,
1161				 spe_pmu->handle);
1162	if (ret)
1163		return ret;
1164
1165	/*
1166	 * Register our hotplug notifier now so we don't miss any events.
1167	 * This will enable the IRQ for any supported CPUs that are already
1168	 * up.
1169	 */
1170	ret = cpuhp_state_add_instance(arm_spe_pmu_online,
1171				       &spe_pmu->hotplug_node);
1172	if (ret)
1173		free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
1174
1175	return ret;
1176}
1177
1178static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu)
1179{
1180	cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node);
1181	free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
1182}
1183
1184/* Driver and device probing */
1185static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
1186{
1187	struct platform_device *pdev = spe_pmu->pdev;
1188	int irq = platform_get_irq(pdev, 0);
1189
1190	if (irq < 0)
1191		return -ENXIO;
1192
1193	if (!irq_is_percpu(irq)) {
1194		dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq);
1195		return -EINVAL;
1196	}
1197
1198	if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) {
1199		dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq);
1200		return -EINVAL;
1201	}
1202
1203	spe_pmu->irq = irq;
1204	return 0;
1205}
1206
1207static const struct of_device_id arm_spe_pmu_of_match[] = {
1208	{ .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 },
1209	{ /* Sentinel */ },
1210};
1211MODULE_DEVICE_TABLE(of, arm_spe_pmu_of_match);
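
/*
 * Illustrative devicetree fragment matching the compatible string above.
 * This is a sketch only: the PPI number is platform-specific and the
 * authoritative example lives in the SPE PMU binding document.
 *
 *	spe-pmu {
 *		compatible = "arm,statistical-profiling-extension-v1";
 *		interrupts = <GIC_PPI 5 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 *
 * arm_spe_pmu_irq_probe() above rejects anything that is not a per-CPU
 * interrupt (PPI).
 */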
1212
1213static const struct platform_device_id arm_spe_match[] = {
1214	{ ARMV8_SPE_PDEV_NAME, 0},
1215	{ }
1216};
1217MODULE_DEVICE_TABLE(platform, arm_spe_match);
1218
1219static int arm_spe_pmu_device_probe(struct platform_device *pdev)
1220{
1221	int ret;
1222	struct arm_spe_pmu *spe_pmu;
1223	struct device *dev = &pdev->dev;
1224
1225	/*
1226	 * If kernelspace is unmapped when running at EL0, then the SPE
1227	 * buffer will fault and prematurely terminate the AUX session.
1228	 */
1229	if (arm64_kernel_unmapped_at_el0()) {
1230		dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n");
1231		return -EPERM;
1232	}
1233
1234	spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
1235	if (!spe_pmu)
1236		return -ENOMEM;
1237
1238	spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle));
1239	if (!spe_pmu->handle)
1240		return -ENOMEM;
1241
1242	spe_pmu->pdev = pdev;
1243	platform_set_drvdata(pdev, spe_pmu);
1244
1245	ret = arm_spe_pmu_irq_probe(spe_pmu);
1246	if (ret)
1247		goto out_free_handle;
1248
1249	ret = arm_spe_pmu_dev_init(spe_pmu);
1250	if (ret)
1251		goto out_free_handle;
1252
1253	ret = arm_spe_pmu_perf_init(spe_pmu);
1254	if (ret)
1255		goto out_teardown_dev;
1256
1257	return 0;
1258
1259out_teardown_dev:
1260	arm_spe_pmu_dev_teardown(spe_pmu);
1261out_free_handle:
1262	free_percpu(spe_pmu->handle);
1263	return ret;
1264}
1265
 1266static void arm_spe_pmu_device_remove(struct platform_device *pdev)
1267{
1268	struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
1269
1270	arm_spe_pmu_perf_destroy(spe_pmu);
1271	arm_spe_pmu_dev_teardown(spe_pmu);
1272	free_percpu(spe_pmu->handle);
1274}
1275
1276static struct platform_driver arm_spe_pmu_driver = {
1277	.id_table = arm_spe_match,
1278	.driver	= {
1279		.name		= DRVNAME,
1280		.of_match_table	= of_match_ptr(arm_spe_pmu_of_match),
1281		.suppress_bind_attrs = true,
1282	},
1283	.probe	= arm_spe_pmu_device_probe,
1284	.remove	= arm_spe_pmu_device_remove,
1285};
1286
1287static int __init arm_spe_pmu_init(void)
1288{
1289	int ret;
1290
1291	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
1292				      arm_spe_pmu_cpu_startup,
1293				      arm_spe_pmu_cpu_teardown);
1294	if (ret < 0)
1295		return ret;
1296	arm_spe_pmu_online = ret;
1297
1298	ret = platform_driver_register(&arm_spe_pmu_driver);
1299	if (ret)
1300		cpuhp_remove_multi_state(arm_spe_pmu_online);
1301
1302	return ret;
1303}
1304
1305static void __exit arm_spe_pmu_exit(void)
1306{
1307	platform_driver_unregister(&arm_spe_pmu_driver);
1308	cpuhp_remove_multi_state(arm_spe_pmu_online);
1309}
1310
1311module_init(arm_spe_pmu_init);
1312module_exit(arm_spe_pmu_exit);
1313
1314MODULE_DESCRIPTION("Perf driver for the ARMv8.2 Statistical Profiling Extension");
1315MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
1316MODULE_LICENSE("GPL v2");