   1// SPDX-License-Identifier: GPL-2.0
   2// CCI Cache Coherent Interconnect PMU driver
   3// Copyright (C) 2013-2018 Arm Ltd.
   4// Author: Punit Agrawal <punit.agrawal@arm.com>, Suzuki Poulose <suzuki.poulose@arm.com>
   5
   6#include <linux/arm-cci.h>
   7#include <linux/io.h>
   8#include <linux/interrupt.h>
   9#include <linux/module.h>
  10#include <linux/of_address.h>
  11#include <linux/of_device.h>
  12#include <linux/of_irq.h>
  13#include <linux/of_platform.h>
  14#include <linux/perf_event.h>
  15#include <linux/platform_device.h>
  16#include <linux/slab.h>
  17#include <linux/spinlock.h>
  18
  19#define DRIVER_NAME		"ARM-CCI PMU"
  20
  21#define CCI_PMCR		0x0100
  22#define CCI_PID2		0x0fe8
  23
  24#define CCI_PMCR_CEN		0x00000001
  25#define CCI_PMCR_NCNT_MASK	0x0000f800
  26#define CCI_PMCR_NCNT_SHIFT	11
  27
  28#define CCI_PID2_REV_MASK	0xf0
  29#define CCI_PID2_REV_SHIFT	4
  30
  31#define CCI_PMU_EVT_SEL		0x000
  32#define CCI_PMU_CNTR		0x004
  33#define CCI_PMU_CNTR_CTRL	0x008
  34#define CCI_PMU_OVRFLW		0x00c
  35
  36#define CCI_PMU_OVRFLW_FLAG	1
  37
  38#define CCI_PMU_CNTR_SIZE(model)	((model)->cntr_size)
  39#define CCI_PMU_CNTR_BASE(model, idx)	((idx) * CCI_PMU_CNTR_SIZE(model))
  40#define CCI_PMU_CNTR_MASK		((1ULL << 32) - 1)
  41#define CCI_PMU_CNTR_LAST(cci_pmu)	(cci_pmu->num_cntrs - 1)
  42
  43#define CCI_PMU_MAX_HW_CNTRS(model) \
  44	((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)
  45
  46/* Types of interfaces that can generate events */
  47enum {
  48	CCI_IF_SLAVE,
  49	CCI_IF_MASTER,
  50#ifdef CONFIG_ARM_CCI5xx_PMU
  51	CCI_IF_GLOBAL,
  52#endif
  53	CCI_IF_MAX,
  54};
  55
  56#define NUM_HW_CNTRS_CII_4XX	4
  57#define NUM_HW_CNTRS_CII_5XX	8
  58#define NUM_HW_CNTRS_MAX	NUM_HW_CNTRS_CII_5XX
  59
  60#define FIXED_HW_CNTRS_CII_4XX	1
  61#define FIXED_HW_CNTRS_CII_5XX	0
  62#define FIXED_HW_CNTRS_MAX	FIXED_HW_CNTRS_CII_4XX
  63
  64#define HW_CNTRS_MAX		(NUM_HW_CNTRS_MAX + FIXED_HW_CNTRS_MAX)
  65
  66struct event_range {
  67	u32 min;
  68	u32 max;
  69};
  70
  71struct cci_pmu_hw_events {
  72	struct perf_event **events;
  73	unsigned long *used_mask;
  74	raw_spinlock_t pmu_lock;
  75};
  76
  77struct cci_pmu;
  78/*
  79 * struct cci_pmu_model:
  80 * @fixed_hw_cntrs - Number of fixed event counters
  81 * @num_hw_cntrs - Maximum number of programmable event counters
  82 * @cntr_size - Size of an event counter mapping
  83 */
  84struct cci_pmu_model {
  85	char *name;
  86	u32 fixed_hw_cntrs;
  87	u32 num_hw_cntrs;
  88	u32 cntr_size;
  89	struct attribute **format_attrs;
  90	struct attribute **event_attrs;
  91	struct event_range event_ranges[CCI_IF_MAX];
  92	int (*validate_hw_event)(struct cci_pmu *, unsigned long);
  93	int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
  94	void (*write_counters)(struct cci_pmu *, unsigned long *);
  95};
  96
  97static struct cci_pmu_model cci_pmu_models[];
  98
  99struct cci_pmu {
 100	void __iomem *base;
 101	void __iomem *ctrl_base;
 102	struct pmu pmu;
 103	int cpu;
 104	int nr_irqs;
 105	int *irqs;
 106	unsigned long active_irqs;
 107	const struct cci_pmu_model *model;
 108	struct cci_pmu_hw_events hw_events;
 109	struct platform_device *plat_device;
 110	int num_cntrs;
 111	atomic_t active_events;
 112	struct mutex reserve_mutex;
 113};
 114
 115#define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))
 116
 117static struct cci_pmu *g_cci_pmu;
 118
 119enum cci_models {
 120#ifdef CONFIG_ARM_CCI400_PMU
 121	CCI400_R0,
 122	CCI400_R1,
 123#endif
 124#ifdef CONFIG_ARM_CCI5xx_PMU
 125	CCI500_R0,
 126	CCI550_R0,
 127#endif
 128	CCI_MODEL_MAX
 129};
 130
 131static void pmu_write_counters(struct cci_pmu *cci_pmu,
 132				 unsigned long *mask);
 133static ssize_t __maybe_unused cci_pmu_format_show(struct device *dev,
 134			struct device_attribute *attr, char *buf);
 135static ssize_t __maybe_unused cci_pmu_event_show(struct device *dev,
 136			struct device_attribute *attr, char *buf);
 137
 138#define CCI_EXT_ATTR_ENTRY(_name, _func, _config) 				\
 139	&((struct dev_ext_attribute[]) {					\
 140		{ __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config }	\
 141	})[0].attr.attr
 142
 143#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
 144	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
 145#define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \
 146	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config)
 147
 148/* CCI400 PMU Specific definitions */
 149
 150#ifdef CONFIG_ARM_CCI400_PMU
 151
 152/* Port ids */
 153#define CCI400_PORT_S0		0
 154#define CCI400_PORT_S1		1
 155#define CCI400_PORT_S2		2
 156#define CCI400_PORT_S3		3
 157#define CCI400_PORT_S4		4
 158#define CCI400_PORT_M0		5
 159#define CCI400_PORT_M1		6
 160#define CCI400_PORT_M2		7
 161
 162#define CCI400_R1_PX		5
 163
 164/*
 165 * Instead of an event id to monitor CCI cycles, a dedicated counter is
 166 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
 167 * make use of this event in hardware.
 168 */
 169enum cci400_perf_events {
 170	CCI400_PMU_CYCLES = 0xff
 171};
 172
 173#define CCI400_PMU_CYCLE_CNTR_IDX	0
 174#define CCI400_PMU_CNTR0_IDX		1
 175
 176/*
 177 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
 178 * ports and bits 4:0 are event codes. There are different event codes
 179 * associated with each port type.
 180 *
 181 * Additionally, the range of events associated with the port types changed
 182 * between Rev0 and Rev1.
 183 *
 184 * The constants below define the range of valid codes for each port type for
 185 * the different revisions and are used to validate the event to be monitored.
 186 */
 187
 188#define CCI400_PMU_EVENT_MASK		0xffUL
 189#define CCI400_PMU_EVENT_SOURCE_SHIFT	5
 190#define CCI400_PMU_EVENT_SOURCE_MASK	0x7
 191#define CCI400_PMU_EVENT_CODE_SHIFT	0
 192#define CCI400_PMU_EVENT_CODE_MASK	0x1f
 193#define CCI400_PMU_EVENT_SOURCE(event) \
 194	((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \
 195			CCI400_PMU_EVENT_SOURCE_MASK)
 196#define CCI400_PMU_EVENT_CODE(event) \
 197	((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK)
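/*
 * Worked example (illustrative only): the event id 0x2a decomposes as
 * source = (0x2a >> 5) & 0x7 = 0x1 (slave interface S1) and
 * code = 0x2a & 0x1f = 0xa, which the tables below name
 * si_r_data_last_hs_snoop.
 */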
 198
 199#define CCI400_R0_SLAVE_PORT_MIN_EV	0x00
 200#define CCI400_R0_SLAVE_PORT_MAX_EV	0x13
 201#define CCI400_R0_MASTER_PORT_MIN_EV	0x14
 202#define CCI400_R0_MASTER_PORT_MAX_EV	0x1a
 203
 204#define CCI400_R1_SLAVE_PORT_MIN_EV	0x00
 205#define CCI400_R1_SLAVE_PORT_MAX_EV	0x14
 206#define CCI400_R1_MASTER_PORT_MIN_EV	0x00
 207#define CCI400_R1_MASTER_PORT_MAX_EV	0x11
 208
 209#define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \
 210	CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \
 211					(unsigned long)_config)
 212
 213static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
 214			struct device_attribute *attr, char *buf);
 215
 216static struct attribute *cci400_pmu_format_attrs[] = {
 217	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
 218	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
 219	NULL
 220};
 221
 222static struct attribute *cci400_r0_pmu_event_attrs[] = {
 223	/* Slave events */
 224	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
 225	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
 226	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
 227	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
 228	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
 229	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
 230	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
 231	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
 232	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
 233	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
 234	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
 235	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
 236	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
 237	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
 238	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
 239	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
 240	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
 241	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
 242	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
 243	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
 244	/* Master events */
 245	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14),
 246	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15),
 247	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16),
 248	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17),
 249	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18),
 250	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19),
 251	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A),
 252	/* Special event for cycles counter */
 253	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
 254	NULL
 255};
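/*
 * Userspace usage sketch (not part of this file): with the format
 * fields above, counting si_rrq_hs_any on slave port S0 means
 * source = 0x0 and event = 0x0, e.g. with the perf tool:
 *
 *	perf stat -a -e CCI_400/source=0x0,event=0x0/ sleep 1
 *
 * The cycles pseudo-event occupies the whole config field instead:
 * CCI_400/config=0xff/.
 */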
 256
 257static struct attribute *cci400_r1_pmu_event_attrs[] = {
 258	/* Slave events */
 259	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
 260	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
 261	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
 262	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
 263	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
 264	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
 265	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
 266	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
 267	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
 268	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
 269	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
 270	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
 271	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
 272	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
 273	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
 274	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
 275	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
 276	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
 277	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
 278	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
 279	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14),
 280	/* Master events */
 281	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0),
 282	CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1),
 283	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2),
 284	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3),
 285	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4),
 286	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5),
 287	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6),
 288	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7),
 289	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8),
 290	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9),
 291	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA),
 292	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB),
 293	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC),
 294	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD),
 295	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE),
 296	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF),
 297	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10),
 298	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11),
 299	/* Special event for cycles counter */
 300	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
 301	NULL
 302};
 303
 304static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
 305			struct device_attribute *attr, char *buf)
 306{
 307	struct dev_ext_attribute *eattr = container_of(attr,
 308				struct dev_ext_attribute, attr);
 309	return sysfs_emit(buf, "config=0x%lx\n", (unsigned long)eattr->var);
 310}
 311
 312static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
 313				struct cci_pmu_hw_events *hw,
 314				unsigned long cci_event)
 315{
 316	int idx;
 317
 318	/* cycles event idx is fixed */
 319	if (cci_event == CCI400_PMU_CYCLES) {
 320		if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask))
 321			return -EAGAIN;
 322
 323		return CCI400_PMU_CYCLE_CNTR_IDX;
 324	}
 325
 326	for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
 327		if (!test_and_set_bit(idx, hw->used_mask))
 328			return idx;
 329
 330	/* No counters available */
 331	return -EAGAIN;
 332}
 333
 334static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
 335{
 336	u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
 337	u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);
 338	int if_type;
 339
 340	if (hw_event & ~CCI400_PMU_EVENT_MASK)
 341		return -ENOENT;
 342
 343	if (hw_event == CCI400_PMU_CYCLES)
 344		return hw_event;
 345
 346	switch (ev_source) {
 347	case CCI400_PORT_S0:
 348	case CCI400_PORT_S1:
 349	case CCI400_PORT_S2:
 350	case CCI400_PORT_S3:
 351	case CCI400_PORT_S4:
 352		/* Slave Interface */
 353		if_type = CCI_IF_SLAVE;
 354		break;
 355	case CCI400_PORT_M0:
 356	case CCI400_PORT_M1:
 357	case CCI400_PORT_M2:
 358		/* Master Interface */
 359		if_type = CCI_IF_MASTER;
 360		break;
 361	default:
 362		return -ENOENT;
 363	}
 364
 365	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
 366		ev_code <= cci_pmu->model->event_ranges[if_type].max)
 367		return hw_event;
 368
 369	return -ENOENT;
 370}
 371
 372static int probe_cci400_revision(struct cci_pmu *cci_pmu)
 373{
 374	int rev;
 375	rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
 376	rev >>= CCI_PID2_REV_SHIFT;
 377
 378	if (rev < CCI400_R1_PX)
 379		return CCI400_R0;
 380	else
 381		return CCI400_R1;
 382}
 383
 384static const struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
 385{
 386	if (platform_has_secure_cci_access())
 387		return &cci_pmu_models[probe_cci400_revision(cci_pmu)];
 388	return NULL;
 389}
 390#else	/* !CONFIG_ARM_CCI400_PMU */
 391static inline struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
 392{
 393	return NULL;
 394}
 395#endif	/* CONFIG_ARM_CCI400_PMU */
 396
 397#ifdef CONFIG_ARM_CCI5xx_PMU
 398
  399/*
  400 * CCI5xx PMU event id is a 9-bit value made of two parts.
  401 *	 bits [8:5] - Source for the event
  402 *	 bits [4:0] - Event code (specific to type of interface)
  403 */
 406
 407/* Port ids */
 408#define CCI5xx_PORT_S0			0x0
 409#define CCI5xx_PORT_S1			0x1
 410#define CCI5xx_PORT_S2			0x2
 411#define CCI5xx_PORT_S3			0x3
 412#define CCI5xx_PORT_S4			0x4
 413#define CCI5xx_PORT_S5			0x5
 414#define CCI5xx_PORT_S6			0x6
 415
 416#define CCI5xx_PORT_M0			0x8
 417#define CCI5xx_PORT_M1			0x9
 418#define CCI5xx_PORT_M2			0xa
 419#define CCI5xx_PORT_M3			0xb
 420#define CCI5xx_PORT_M4			0xc
 421#define CCI5xx_PORT_M5			0xd
 422#define CCI5xx_PORT_M6			0xe
 423
 424#define CCI5xx_PORT_GLOBAL		0xf
 425
 426#define CCI5xx_PMU_EVENT_MASK		0x1ffUL
 427#define CCI5xx_PMU_EVENT_SOURCE_SHIFT	0x5
 428#define CCI5xx_PMU_EVENT_SOURCE_MASK	0xf
 429#define CCI5xx_PMU_EVENT_CODE_SHIFT	0x0
 430#define CCI5xx_PMU_EVENT_CODE_MASK	0x1f
 431
 432#define CCI5xx_PMU_EVENT_SOURCE(event)	\
 433	((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK)
 434#define CCI5xx_PMU_EVENT_CODE(event)	\
 435	((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK)
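/*
 * Worked example (illustrative only): the event id 0x102 decomposes as
 * source = (0x102 >> 5) & 0xf = 0x8 (master interface M0) and
 * code = 0x102 & 0x1f = 0x2, named mi_rrq_stall in the table below.
 */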
 436
 437#define CCI5xx_SLAVE_PORT_MIN_EV	0x00
 438#define CCI5xx_SLAVE_PORT_MAX_EV	0x1f
 439#define CCI5xx_MASTER_PORT_MIN_EV	0x00
 440#define CCI5xx_MASTER_PORT_MAX_EV	0x06
 441#define CCI5xx_GLOBAL_PORT_MIN_EV	0x00
 442#define CCI5xx_GLOBAL_PORT_MAX_EV	0x0f
  443
 445#define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
 446	CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \
 447					(unsigned long) _config)
 448
 449static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
 450				struct device_attribute *attr, char *buf);
 451
 452static struct attribute *cci5xx_pmu_format_attrs[] = {
 453	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
 454	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
 455	NULL,
 456};
 457
 458static struct attribute *cci5xx_pmu_event_attrs[] = {
 459	/* Slave events */
 460	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0),
 461	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1),
 462	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2),
 463	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3),
 464	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4),
 465	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5),
 466	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6),
 467	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
 468	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8),
 469	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9),
 470	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA),
 471	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB),
 472	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC),
 473	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD),
 474	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE),
 475	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF),
 476	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10),
 477	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11),
 478	CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12),
 479	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13),
 480	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14),
 481	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15),
 482	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16),
 483	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17),
 484	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18),
 485	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19),
 486	CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A),
 487	CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B),
 488	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C),
 489	CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D),
 490	CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E),
 491	CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F),
 492
 493	/* Master events */
 494	CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0),
 495	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1),
 496	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2),
 497	CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3),
 498	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4),
 499	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5),
 500	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6),
 501
 502	/* Global events */
 503	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
 504	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
 505	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
 506	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
 507	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
 508	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
 509	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
 510	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
 511	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
 512	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
 513	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
 514	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
 515	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
 516	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
 517	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE),
 518	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
 519	NULL
 520};
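/*
 * Userspace usage sketch (not part of this file): global events are
 * tied to source 0xf, so counting snoop filter accesses for banks 0
 * and 1 on a CCI-500 would look like:
 *
 *	perf stat -a -e CCI_500/source=0xf,event=0x0/ sleep 1
 */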
 521
 522static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
 523				struct device_attribute *attr, char *buf)
 524{
 525	struct dev_ext_attribute *eattr = container_of(attr,
 526					struct dev_ext_attribute, attr);
  527	/* Global events have a single, fixed source code */
 528	return sysfs_emit(buf, "event=0x%lx,source=0x%x\n",
 529			  (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
 530}
 531
 532/*
 533 * CCI500 provides 8 independent event counters that can count
 534 * any of the events available.
 535 * CCI500 PMU event source ids
 536 *	0x0-0x6 - Slave interfaces
 537 *	0x8-0xD - Master interfaces
 538 *	0xf     - Global Events
 539 *	0x7,0xe - Reserved
 540 */
 541static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
 542					unsigned long hw_event)
 543{
 544	u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
 545	u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
 546	int if_type;
 547
 548	if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
 549		return -ENOENT;
 550
 551	switch (ev_source) {
 552	case CCI5xx_PORT_S0:
 553	case CCI5xx_PORT_S1:
 554	case CCI5xx_PORT_S2:
 555	case CCI5xx_PORT_S3:
 556	case CCI5xx_PORT_S4:
 557	case CCI5xx_PORT_S5:
 558	case CCI5xx_PORT_S6:
 559		if_type = CCI_IF_SLAVE;
 560		break;
 561	case CCI5xx_PORT_M0:
 562	case CCI5xx_PORT_M1:
 563	case CCI5xx_PORT_M2:
 564	case CCI5xx_PORT_M3:
 565	case CCI5xx_PORT_M4:
 566	case CCI5xx_PORT_M5:
 567		if_type = CCI_IF_MASTER;
 568		break;
 569	case CCI5xx_PORT_GLOBAL:
 570		if_type = CCI_IF_GLOBAL;
 571		break;
 572	default:
 573		return -ENOENT;
 574	}
 575
 576	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
 577		ev_code <= cci_pmu->model->event_ranges[if_type].max)
 578		return hw_event;
 579
 580	return -ENOENT;
 581}
 582
 583/*
 584 * CCI550 provides 8 independent event counters that can count
 585 * any of the events available.
 586 * CCI550 PMU event source ids
 587 *	0x0-0x6 - Slave interfaces
 588 *	0x8-0xe - Master interfaces
 589 *	0xf     - Global Events
 590 *	0x7	- Reserved
 591 */
 592static int cci550_validate_hw_event(struct cci_pmu *cci_pmu,
 593					unsigned long hw_event)
 594{
 595	u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
 596	u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
 597	int if_type;
 598
 599	if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
 600		return -ENOENT;
 601
 602	switch (ev_source) {
 603	case CCI5xx_PORT_S0:
 604	case CCI5xx_PORT_S1:
 605	case CCI5xx_PORT_S2:
 606	case CCI5xx_PORT_S3:
 607	case CCI5xx_PORT_S4:
 608	case CCI5xx_PORT_S5:
 609	case CCI5xx_PORT_S6:
 610		if_type = CCI_IF_SLAVE;
 611		break;
 612	case CCI5xx_PORT_M0:
 613	case CCI5xx_PORT_M1:
 614	case CCI5xx_PORT_M2:
 615	case CCI5xx_PORT_M3:
 616	case CCI5xx_PORT_M4:
 617	case CCI5xx_PORT_M5:
 618	case CCI5xx_PORT_M6:
 619		if_type = CCI_IF_MASTER;
 620		break;
 621	case CCI5xx_PORT_GLOBAL:
 622		if_type = CCI_IF_GLOBAL;
 623		break;
 624	default:
 625		return -ENOENT;
 626	}
 627
 628	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
 629		ev_code <= cci_pmu->model->event_ranges[if_type].max)
 630		return hw_event;
 631
 632	return -ENOENT;
 633}
 634
 635#endif	/* CONFIG_ARM_CCI5xx_PMU */
 636
 637/*
 638 * Program the CCI PMU counters which have PERF_HES_ARCH set
 639 * with the event period and mark them ready before we enable
  640 * the PMU.
 641 */
 642static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
 643{
 644	int i;
 645	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
 646	DECLARE_BITMAP(mask, HW_CNTRS_MAX);
 647
 648	bitmap_zero(mask, cci_pmu->num_cntrs);
 649	for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
 650		struct perf_event *event = cci_hw->events[i];
 651
 652		if (WARN_ON(!event))
 653			continue;
 654
 655		/* Leave the events which are not counting */
 656		if (event->hw.state & PERF_HES_STOPPED)
 657			continue;
 658		if (event->hw.state & PERF_HES_ARCH) {
 659			set_bit(i, mask);
 660			event->hw.state &= ~PERF_HES_ARCH;
 661		}
 662	}
 663
 664	pmu_write_counters(cci_pmu, mask);
 665}
 666
 667/* Should be called with cci_pmu->hw_events->pmu_lock held */
 668static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu)
 669{
 670	u32 val;
 671
 672	/* Enable all the PMU counters. */
 673	val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
 674	writel(val, cci_pmu->ctrl_base + CCI_PMCR);
 675}
 676
 677/* Should be called with cci_pmu->hw_events->pmu_lock held */
 678static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
 679{
 680	cci_pmu_sync_counters(cci_pmu);
 681	__cci_pmu_enable_nosync(cci_pmu);
 682}
 683
 684/* Should be called with cci_pmu->hw_events->pmu_lock held */
 685static void __cci_pmu_disable(struct cci_pmu *cci_pmu)
 686{
 687	u32 val;
 688
 689	/* Disable all the PMU counters. */
 690	val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
 691	writel(val, cci_pmu->ctrl_base + CCI_PMCR);
 692}
 693
 694static ssize_t cci_pmu_format_show(struct device *dev,
 695			struct device_attribute *attr, char *buf)
 696{
 697	struct dev_ext_attribute *eattr = container_of(attr,
 698				struct dev_ext_attribute, attr);
 699	return sysfs_emit(buf, "%s\n", (char *)eattr->var);
 700}
 701
 702static ssize_t cci_pmu_event_show(struct device *dev,
 703			struct device_attribute *attr, char *buf)
 704{
 705	struct dev_ext_attribute *eattr = container_of(attr,
 706				struct dev_ext_attribute, attr);
 707	/* source parameter is mandatory for normal PMU events */
 708	return sysfs_emit(buf, "source=?,event=0x%lx\n",
 709			  (unsigned long)eattr->var);
 710}
 711
 712static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
 713{
 714	return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
 715}
 716
 717static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
 718{
 719	return readl_relaxed(cci_pmu->base +
 720			     CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
 721}
 722
 723static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
 724			       int idx, unsigned int offset)
 725{
 726	writel_relaxed(value, cci_pmu->base +
 727		       CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
 728}
 729
 730static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
 731{
 732	pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
 733}
 734
 735static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
 736{
 737	pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
 738}
 739
 740static bool __maybe_unused
 741pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
 742{
 743	return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
 744}
 745
 746static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
 747{
 748	pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
 749}
 750
 751/*
 752 * For all counters on the CCI-PMU, disable any 'enabled' counters,
 753 * saving the changed counters in the mask, so that we can restore
 754 * it later using pmu_restore_counters. The mask is private to the
 755 * caller. We cannot rely on the used_mask maintained by the CCI_PMU
 756 * as it only tells us if the counter is assigned to perf_event or not.
 757 * The state of the perf_event cannot be locked by the PMU layer, hence
 758 * we check the individual counter status (which can be locked by
  759 * cci_pmu->hw_events->pmu_lock).
 760 *
 761 * @mask should be initialised to empty by the caller.
 762 */
 763static void __maybe_unused
 764pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
 765{
 766	int i;
 767
 768	for (i = 0; i < cci_pmu->num_cntrs; i++) {
 769		if (pmu_counter_is_enabled(cci_pmu, i)) {
 770			set_bit(i, mask);
 771			pmu_disable_counter(cci_pmu, i);
 772		}
 773	}
 774}
 775
 776/*
 777 * Restore the status of the counters. Reversal of the pmu_save_counters().
 778 * For each counter set in the mask, enable the counter back.
 779 */
 780static void __maybe_unused
 781pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
 782{
 783	int i;
 784
 785	for_each_set_bit(i, mask, cci_pmu->num_cntrs)
 786		pmu_enable_counter(cci_pmu, i);
 787}
 788
 789/*
 790 * Returns the number of programmable counters actually implemented
  791 * by the CCI.
 792 */
 793static u32 pmu_get_max_counters(struct cci_pmu *cci_pmu)
 794{
 795	return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) &
 796		CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
 797}
 798
 799static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
 800{
 801	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
 802	unsigned long cci_event = event->hw.config_base;
 803	int idx;
 804
 805	if (cci_pmu->model->get_event_idx)
 806		return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
 807
 808	/* Generic code to find an unused idx from the mask */
 809	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
 810		if (!test_and_set_bit(idx, hw->used_mask))
 811			return idx;
 812
 813	/* No counters available */
 814	return -EAGAIN;
 815}
 816
 817static int pmu_map_event(struct perf_event *event)
 818{
 819	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
 820
 821	if (event->attr.type < PERF_TYPE_MAX ||
 822			!cci_pmu->model->validate_hw_event)
 823		return -ENOENT;
 824
  825	return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
 826}
 827
 828static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
 829{
 830	int i;
 831	struct platform_device *pmu_device = cci_pmu->plat_device;
 832
 833	if (unlikely(!pmu_device))
 834		return -ENODEV;
 835
 836	if (cci_pmu->nr_irqs < 1) {
 837		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
 838		return -ENODEV;
 839	}
 840
 841	/*
 842	 * Register all available CCI PMU interrupts. In the interrupt handler
 843	 * we iterate over the counters checking for interrupt source (the
 844	 * overflowing counter) and clear it.
 845	 *
  846	 * This should allow handling of non-unique interrupts for the counters.
 847	 */
 848	for (i = 0; i < cci_pmu->nr_irqs; i++) {
 849		int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
 850				"arm-cci-pmu", cci_pmu);
 851		if (err) {
 852			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
 853				cci_pmu->irqs[i]);
 854			return err;
 855		}
 856
 857		set_bit(i, &cci_pmu->active_irqs);
 858	}
 859
 860	return 0;
 861}
 862
 863static void pmu_free_irq(struct cci_pmu *cci_pmu)
 864{
 865	int i;
 866
 867	for (i = 0; i < cci_pmu->nr_irqs; i++) {
 868		if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
 869			continue;
 870
 871		free_irq(cci_pmu->irqs[i], cci_pmu);
 872	}
 873}
 874
 875static u32 pmu_read_counter(struct perf_event *event)
 876{
 877	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
 878	struct hw_perf_event *hw_counter = &event->hw;
 879	int idx = hw_counter->idx;
 880	u32 value;
 881
 882	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
 883		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
 884		return 0;
 885	}
 886	value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);
 887
 888	return value;
 889}
 890
 891static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
 892{
 893	pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
 894}
 895
 896static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
 897{
 898	int i;
 899	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
 900
 901	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
 902		struct perf_event *event = cci_hw->events[i];
 903
 904		if (WARN_ON(!event))
 905			continue;
 906		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
 907	}
 908}
 909
 910static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
 911{
 912	if (cci_pmu->model->write_counters)
 913		cci_pmu->model->write_counters(cci_pmu, mask);
 914	else
 915		__pmu_write_counters(cci_pmu, mask);
 916}
 917
 918#ifdef CONFIG_ARM_CCI5xx_PMU
 919
 920/*
 921 * CCI-500/CCI-550 has advanced power saving policies, which could gate the
 922 * clocks to the PMU counters, which makes the writes to them ineffective.
 923 * The only way to write to those counters is when the global counters
 924 * are enabled and the particular counter is enabled.
 925 *
 926 * So we do the following :
 927 *
 928 * 1) Disable all the PMU counters, saving their current state
 929 * 2) Enable the global PMU profiling, now that all counters are
 930 *    disabled.
 931 *
 932 * For each counter to be programmed, repeat steps 3-7:
 933 *
 934 * 3) Write an invalid event code to the event control register for the
  935 *    counter, so that the counters are not modified.
 936 * 4) Enable the counter control for the counter.
 937 * 5) Set the counter value
 938 * 6) Disable the counter
 939 * 7) Restore the event in the target counter
 940 *
 941 * 8) Disable the global PMU.
 942 * 9) Restore the status of the rest of the counters.
 943 *
 944 * We choose an event which for CCI-5xx is guaranteed not to count.
 945 * We use the highest possible event code (0x1f) for the master interface 0.
 946 */
 947#define CCI5xx_INVALID_EVENT	((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \
 948				 (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT))
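/*
 * For illustration: with M0 = 0x8 and a code mask of 0x1f, this
 * evaluates to (0x8 << 5) | 0x1f = 0x11f. Code 0x1f is well above
 * CCI5xx_MASTER_PORT_MAX_EV (0x06), so no real master interface event
 * is selected and the counter is guaranteed not to count.
 */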
 949static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
 950{
 951	int i;
 952	DECLARE_BITMAP(saved_mask, HW_CNTRS_MAX);
 953
 954	bitmap_zero(saved_mask, cci_pmu->num_cntrs);
 955	pmu_save_counters(cci_pmu, saved_mask);
 956
 957	/*
 958	 * Now that all the counters are disabled, we can safely turn the PMU on,
 959	 * without syncing the status of the counters
 960	 */
 961	__cci_pmu_enable_nosync(cci_pmu);
 962
 963	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
 964		struct perf_event *event = cci_pmu->hw_events.events[i];
 965
 966		if (WARN_ON(!event))
 967			continue;
 968
 969		pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
 970		pmu_enable_counter(cci_pmu, i);
 971		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
 972		pmu_disable_counter(cci_pmu, i);
 973		pmu_set_event(cci_pmu, i, event->hw.config_base);
 974	}
 975
 976	__cci_pmu_disable(cci_pmu);
 977
 978	pmu_restore_counters(cci_pmu, saved_mask);
 979}
 980
 981#endif	/* CONFIG_ARM_CCI5xx_PMU */
 982
 983static u64 pmu_event_update(struct perf_event *event)
 984{
 985	struct hw_perf_event *hwc = &event->hw;
 986	u64 delta, prev_raw_count, new_raw_count;
 987
 988	do {
 989		prev_raw_count = local64_read(&hwc->prev_count);
 990		new_raw_count = pmu_read_counter(event);
 991	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
 992		 new_raw_count) != prev_raw_count);
 993
 994	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;
 995
 996	local64_add(delta, &event->count);
 997
 998	return new_raw_count;
 999}
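/*
 * Worked example of the wraparound arithmetic above (illustrative):
 * if prev_count was 0xfffffff0 and the 32-bit counter wrapped to
 * 0x00000010, then (0x10 - 0xfffffff0) & CCI_PMU_CNTR_MASK = 0x20,
 * the number of events that actually occurred.
 */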
1000
1001static void pmu_read(struct perf_event *event)
1002{
1003	pmu_event_update(event);
1004}
1005
1006static void pmu_event_set_period(struct perf_event *event)
1007{
1008	struct hw_perf_event *hwc = &event->hw;
1009	/*
1010	 * The CCI PMU counters have a period of 2^32. To account for the
 1011	 * possibility of extreme interrupt latency we program for a period of
1012	 * half that. Hopefully we can handle the interrupt before another 2^31
1013	 * events occur and the counter overtakes its previous value.
1014	 */
1015	u64 val = 1ULL << 31;
1016	local64_set(&hwc->prev_count, val);
1017
1018	/*
 1019	 * The CCI PMU uses PERF_HES_ARCH to keep track of the counters whose
 1020	 * values need to be synced with the software state before the PMU is
1021	 * enabled.
1022	 * Mark this counter for sync.
1023	 */
1024	hwc->state |= PERF_HES_ARCH;
1025}
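/*
 * In numbers (illustrative): programming the counter to 2^31 leaves
 * 2^31 events until the 2^32 overflow; after the overflow interrupt
 * fires, another 2^31 events may elapse before the counter overtakes
 * prev_count and pmu_event_update() would lose a full wrap.
 */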
1026
1027static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
1028{
1029	struct cci_pmu *cci_pmu = dev;
1030	struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
1031	int idx, handled = IRQ_NONE;
1032
1033	raw_spin_lock(&events->pmu_lock);
1034
1035	/* Disable the PMU while we walk through the counters */
1036	__cci_pmu_disable(cci_pmu);
1037	/*
1038	 * Iterate over counters and update the corresponding perf events.
1039	 * This should work regardless of whether we have per-counter overflow
1040	 * interrupt or a combined overflow interrupt.
1041	 */
1042	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
1043		struct perf_event *event = events->events[idx];
1044
1045		if (!event)
1046			continue;
1047
1048		/* Did this counter overflow? */
1049		if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
1050		      CCI_PMU_OVRFLW_FLAG))
1051			continue;
1052
1053		pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
1054							CCI_PMU_OVRFLW);
1055
1056		pmu_event_update(event);
1057		pmu_event_set_period(event);
1058		handled = IRQ_HANDLED;
1059	}
1060
1061	/* Enable the PMU and sync possibly overflowed counters */
1062	__cci_pmu_enable_sync(cci_pmu);
1063	raw_spin_unlock(&events->pmu_lock);
1064
1065	return IRQ_RETVAL(handled);
1066}
1067
1068static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
1069{
1070	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
1071	if (ret) {
1072		pmu_free_irq(cci_pmu);
1073		return ret;
1074	}
1075	return 0;
1076}
1077
1078static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
1079{
1080	pmu_free_irq(cci_pmu);
1081}
1082
1083static void hw_perf_event_destroy(struct perf_event *event)
1084{
1085	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1086	atomic_t *active_events = &cci_pmu->active_events;
1087	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
1088
1089	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
1090		cci_pmu_put_hw(cci_pmu);
1091		mutex_unlock(reserve_mutex);
1092	}
1093}
1094
1095static void cci_pmu_enable(struct pmu *pmu)
1096{
1097	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1098	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1099	bool enabled = !bitmap_empty(hw_events->used_mask, cci_pmu->num_cntrs);
1100	unsigned long flags;
1101
1102	if (!enabled)
1103		return;
1104
1105	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
1106	__cci_pmu_enable_sync(cci_pmu);
1107	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
1109}
1110
1111static void cci_pmu_disable(struct pmu *pmu)
1112{
1113	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1114	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1115	unsigned long flags;
1116
1117	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
1118	__cci_pmu_disable(cci_pmu);
1119	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
1120}
1121
1122/*
1123 * Check if the idx represents a non-programmable counter.
1124 * All the fixed event counters are mapped before the programmable
1125 * counters.
1126 */
1127static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
1128{
1129	return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
1130}
1131
1132static void cci_pmu_start(struct perf_event *event, int pmu_flags)
1133{
1134	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1135	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1136	struct hw_perf_event *hwc = &event->hw;
1137	int idx = hwc->idx;
1138	unsigned long flags;
1139
1140	/*
1141	 * To handle interrupt latency, we always reprogram the period
1142	 * regardless of PERF_EF_RELOAD.
1143	 */
1144	if (pmu_flags & PERF_EF_RELOAD)
1145		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
1146
1147	hwc->state = 0;
1148
1149	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
1150		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
1151		return;
1152	}
1153
1154	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
1155
1156	/* Configure the counter unless you are counting a fixed event */
1157	if (!pmu_fixed_hw_idx(cci_pmu, idx))
1158		pmu_set_event(cci_pmu, idx, hwc->config_base);
1159
1160	pmu_event_set_period(event);
1161	pmu_enable_counter(cci_pmu, idx);
1162
1163	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
1164}
1165
1166static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
1167{
1168	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1169	struct hw_perf_event *hwc = &event->hw;
1170	int idx = hwc->idx;
1171
1172	if (hwc->state & PERF_HES_STOPPED)
1173		return;
1174
1175	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
1176		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
1177		return;
1178	}
1179
1180	/*
1181	 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
1182	 * cci_pmu_start()
1183	 */
1184	pmu_disable_counter(cci_pmu, idx);
1185	pmu_event_update(event);
1186	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
1187}
1188
1189static int cci_pmu_add(struct perf_event *event, int flags)
1190{
1191	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1192	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1193	struct hw_perf_event *hwc = &event->hw;
1194	int idx;
1195
 1196	/* If we don't have space for the counter then finish early. */
1197	idx = pmu_get_event_idx(hw_events, event);
1198	if (idx < 0)
1199		return idx;
1200
1201	event->hw.idx = idx;
1202	hw_events->events[idx] = event;
1203
1204	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
1205	if (flags & PERF_EF_START)
1206		cci_pmu_start(event, PERF_EF_RELOAD);
1207
1208	/* Propagate our changes to the userspace mapping. */
1209	perf_event_update_userpage(event);
1210
1211	return 0;
1212}
1213
1214static void cci_pmu_del(struct perf_event *event, int flags)
1215{
1216	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1217	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1218	struct hw_perf_event *hwc = &event->hw;
1219	int idx = hwc->idx;
1220
1221	cci_pmu_stop(event, PERF_EF_UPDATE);
1222	hw_events->events[idx] = NULL;
1223	clear_bit(idx, hw_events->used_mask);
1224
1225	perf_event_update_userpage(event);
1226}
1227
1228static int validate_event(struct pmu *cci_pmu,
1229			  struct cci_pmu_hw_events *hw_events,
1230			  struct perf_event *event)
1231{
1232	if (is_software_event(event))
1233		return 1;
1234
1235	/*
1236	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
1237	 * core perf code won't check that the pmu->ctx == leader->ctx
1238	 * until after pmu->event_init(event).
1239	 */
1240	if (event->pmu != cci_pmu)
1241		return 0;
1242
1243	if (event->state < PERF_EVENT_STATE_OFF)
1244		return 1;
1245
1246	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
1247		return 1;
1248
1249	return pmu_get_event_idx(hw_events, event) >= 0;
1250}
1251
1252static int validate_group(struct perf_event *event)
1253{
1254	struct perf_event *sibling, *leader = event->group_leader;
1255	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1256	unsigned long mask[BITS_TO_LONGS(HW_CNTRS_MAX)];
1257	struct cci_pmu_hw_events fake_pmu = {
1258		/*
1259		 * Initialise the fake PMU. We only need to populate the
1260		 * used_mask for the purposes of validation.
1261		 */
1262		.used_mask = mask,
1263	};
1264	bitmap_zero(mask, cci_pmu->num_cntrs);
1265
1266	if (!validate_event(event->pmu, &fake_pmu, leader))
1267		return -EINVAL;
1268
1269	for_each_sibling_event(sibling, leader) {
1270		if (!validate_event(event->pmu, &fake_pmu, sibling))
1271			return -EINVAL;
1272	}
1273
1274	if (!validate_event(event->pmu, &fake_pmu, event))
1275		return -EINVAL;
1276
1277	return 0;
1278}
1279
1280static int __hw_perf_event_init(struct perf_event *event)
1281{
1282	struct hw_perf_event *hwc = &event->hw;
1283	int mapping;
1284
1285	mapping = pmu_map_event(event);
1286
1287	if (mapping < 0) {
1288		pr_debug("event %x:%llx not supported\n", event->attr.type,
1289			 event->attr.config);
1290		return mapping;
1291	}
1292
1293	/*
1294	 * We don't assign an index until we actually place the event onto
1295	 * hardware. Use -1 to signify that we haven't decided where to put it
1296	 * yet.
1297	 */
1298	hwc->idx		= -1;
1299	hwc->config_base	= 0;
1300	hwc->config		= 0;
1301	hwc->event_base		= 0;
1302
1303	/*
1304	 * Store the event encoding into the config_base field.
1305	 */
1306	hwc->config_base	    |= (unsigned long)mapping;
1307
1308	if (event->group_leader != event) {
1309		if (validate_group(event) != 0)
1310			return -EINVAL;
1311	}
1312
1313	return 0;
1314}
1315
1316static int cci_pmu_event_init(struct perf_event *event)
1317{
1318	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1319	atomic_t *active_events = &cci_pmu->active_events;
1320	int err = 0;
1321
1322	if (event->attr.type != event->pmu->type)
1323		return -ENOENT;
1324
1325	/* Shared by all CPUs, no meaningful state to sample */
1326	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
1327		return -EOPNOTSUPP;
1328
1329	/*
1330	 * Following the example set by other "uncore" PMUs, we accept any CPU
1331	 * and rewrite its affinity dynamically rather than having perf core
1332	 * handle cpu == -1 and pid == -1 for this case.
1333	 *
1334	 * The perf core will pin online CPUs for the duration of this call and
1335	 * the event being installed into its context, so the PMU's CPU can't
1336	 * change under our feet.
1337	 */
1338	if (event->cpu < 0)
1339		return -EINVAL;
1340	event->cpu = cci_pmu->cpu;
1341
1342	event->destroy = hw_perf_event_destroy;
1343	if (!atomic_inc_not_zero(active_events)) {
1344		mutex_lock(&cci_pmu->reserve_mutex);
1345		if (atomic_read(active_events) == 0)
1346			err = cci_pmu_get_hw(cci_pmu);
1347		if (!err)
1348			atomic_inc(active_events);
1349		mutex_unlock(&cci_pmu->reserve_mutex);
1350	}
1351	if (err)
1352		return err;
1353
1354	err = __hw_perf_event_init(event);
1355	if (err)
1356		hw_perf_event_destroy(event);
1357
1358	return err;
1359}
1360
1361static ssize_t pmu_cpumask_attr_show(struct device *dev,
1362				     struct device_attribute *attr, char *buf)
1363{
1364	struct pmu *pmu = dev_get_drvdata(dev);
1365	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1366
1367	return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu));
1368}
1369
1370static struct device_attribute pmu_cpumask_attr =
1371	__ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL);
1372
1373static struct attribute *pmu_attrs[] = {
1374	&pmu_cpumask_attr.attr,
1375	NULL,
1376};
1377
1378static const struct attribute_group pmu_attr_group = {
1379	.attrs = pmu_attrs,
1380};
1381
1382static struct attribute_group pmu_format_attr_group = {
1383	.name = "format",
1384	.attrs = NULL,		/* Filled in cci_pmu_init_attrs */
1385};
1386
1387static struct attribute_group pmu_event_attr_group = {
1388	.name = "events",
1389	.attrs = NULL,		/* Filled in cci_pmu_init_attrs */
1390};
1391
1392static const struct attribute_group *pmu_attr_groups[] = {
1393	&pmu_attr_group,
1394	&pmu_format_attr_group,
1395	&pmu_event_attr_group,
1396	NULL
1397};
1398
1399static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
1400{
1401	const struct cci_pmu_model *model = cci_pmu->model;
1402	char *name = model->name;
1403	u32 num_cntrs;
1404
1405	if (WARN_ON(model->num_hw_cntrs > NUM_HW_CNTRS_MAX))
1406		return -EINVAL;
1407	if (WARN_ON(model->fixed_hw_cntrs > FIXED_HW_CNTRS_MAX))
1408		return -EINVAL;
1409
1410	pmu_event_attr_group.attrs = model->event_attrs;
1411	pmu_format_attr_group.attrs = model->format_attrs;
1412
1413	cci_pmu->pmu = (struct pmu) {
1414		.module		= THIS_MODULE,
1415		.name		= cci_pmu->model->name,
1416		.task_ctx_nr	= perf_invalid_context,
1417		.pmu_enable	= cci_pmu_enable,
1418		.pmu_disable	= cci_pmu_disable,
1419		.event_init	= cci_pmu_event_init,
1420		.add		= cci_pmu_add,
1421		.del		= cci_pmu_del,
1422		.start		= cci_pmu_start,
1423		.stop		= cci_pmu_stop,
1424		.read		= pmu_read,
1425		.attr_groups	= pmu_attr_groups,
1426		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
1427	};
1428
1429	cci_pmu->plat_device = pdev;
1430	num_cntrs = pmu_get_max_counters(cci_pmu);
1431	if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
1432		dev_warn(&pdev->dev,
 1433			"PMU implements more counters (%d) than supported by the model (%d), truncated\n",
1435			num_cntrs, cci_pmu->model->num_hw_cntrs);
1436		num_cntrs = cci_pmu->model->num_hw_cntrs;
1437	}
1438	cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;
1439
1440	return perf_pmu_register(&cci_pmu->pmu, name, -1);
1441}
1442
1443static int cci_pmu_offline_cpu(unsigned int cpu)
1444{
1445	int target;
1446
1447	if (!g_cci_pmu || cpu != g_cci_pmu->cpu)
1448		return 0;
1449
1450	target = cpumask_any_but(cpu_online_mask, cpu);
1451	if (target >= nr_cpu_ids)
1452		return 0;
1453
1454	perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target);
1455	g_cci_pmu->cpu = target;
1456	return 0;
1457}
1458
1459static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
1460#ifdef CONFIG_ARM_CCI400_PMU
1461	[CCI400_R0] = {
1462		.name = "CCI_400",
1463		.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
1464		.num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
1465		.cntr_size = SZ_4K,
1466		.format_attrs = cci400_pmu_format_attrs,
1467		.event_attrs = cci400_r0_pmu_event_attrs,
1468		.event_ranges = {
1469			[CCI_IF_SLAVE] = {
1470				CCI400_R0_SLAVE_PORT_MIN_EV,
1471				CCI400_R0_SLAVE_PORT_MAX_EV,
1472			},
1473			[CCI_IF_MASTER] = {
1474				CCI400_R0_MASTER_PORT_MIN_EV,
1475				CCI400_R0_MASTER_PORT_MAX_EV,
1476			},
1477		},
1478		.validate_hw_event = cci400_validate_hw_event,
1479		.get_event_idx = cci400_get_event_idx,
1480	},
1481	[CCI400_R1] = {
1482		.name = "CCI_400_r1",
1483		.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
1484		.num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
1485		.cntr_size = SZ_4K,
1486		.format_attrs = cci400_pmu_format_attrs,
1487		.event_attrs = cci400_r1_pmu_event_attrs,
1488		.event_ranges = {
1489			[CCI_IF_SLAVE] = {
1490				CCI400_R1_SLAVE_PORT_MIN_EV,
1491				CCI400_R1_SLAVE_PORT_MAX_EV,
1492			},
1493			[CCI_IF_MASTER] = {
1494				CCI400_R1_MASTER_PORT_MIN_EV,
1495				CCI400_R1_MASTER_PORT_MAX_EV,
1496			},
1497		},
1498		.validate_hw_event = cci400_validate_hw_event,
1499		.get_event_idx = cci400_get_event_idx,
1500	},
1501#endif
1502#ifdef CONFIG_ARM_CCI5xx_PMU
1503	[CCI500_R0] = {
1504		.name = "CCI_500",
1505		.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
1506		.num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
1507		.cntr_size = SZ_64K,
1508		.format_attrs = cci5xx_pmu_format_attrs,
1509		.event_attrs = cci5xx_pmu_event_attrs,
1510		.event_ranges = {
1511			[CCI_IF_SLAVE] = {
1512				CCI5xx_SLAVE_PORT_MIN_EV,
1513				CCI5xx_SLAVE_PORT_MAX_EV,
1514			},
1515			[CCI_IF_MASTER] = {
1516				CCI5xx_MASTER_PORT_MIN_EV,
1517				CCI5xx_MASTER_PORT_MAX_EV,
1518			},
1519			[CCI_IF_GLOBAL] = {
1520				CCI5xx_GLOBAL_PORT_MIN_EV,
1521				CCI5xx_GLOBAL_PORT_MAX_EV,
1522			},
1523		},
1524		.validate_hw_event = cci500_validate_hw_event,
1525		.write_counters	= cci5xx_pmu_write_counters,
1526	},
1527	[CCI550_R0] = {
1528		.name = "CCI_550",
1529		.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
1530		.num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
1531		.cntr_size = SZ_64K,
1532		.format_attrs = cci5xx_pmu_format_attrs,
1533		.event_attrs = cci5xx_pmu_event_attrs,
1534		.event_ranges = {
1535			[CCI_IF_SLAVE] = {
1536				CCI5xx_SLAVE_PORT_MIN_EV,
1537				CCI5xx_SLAVE_PORT_MAX_EV,
1538			},
1539			[CCI_IF_MASTER] = {
1540				CCI5xx_MASTER_PORT_MIN_EV,
1541				CCI5xx_MASTER_PORT_MAX_EV,
1542			},
1543			[CCI_IF_GLOBAL] = {
1544				CCI5xx_GLOBAL_PORT_MIN_EV,
1545				CCI5xx_GLOBAL_PORT_MAX_EV,
1546			},
1547		},
1548		.validate_hw_event = cci550_validate_hw_event,
1549		.write_counters	= cci5xx_pmu_write_counters,
1550	},
1551#endif
1552};
1553
1554static const struct of_device_id arm_cci_pmu_matches[] = {
1555#ifdef CONFIG_ARM_CCI400_PMU
1556	{
1557		.compatible = "arm,cci-400-pmu",
1558		.data	= NULL,
1559	},
1560	{
1561		.compatible = "arm,cci-400-pmu,r0",
1562		.data	= &cci_pmu_models[CCI400_R0],
1563	},
1564	{
1565		.compatible = "arm,cci-400-pmu,r1",
1566		.data	= &cci_pmu_models[CCI400_R1],
1567	},
1568#endif
1569#ifdef CONFIG_ARM_CCI5xx_PMU
1570	{
1571		.compatible = "arm,cci-500-pmu,r0",
1572		.data = &cci_pmu_models[CCI500_R0],
1573	},
1574	{
1575		.compatible = "arm,cci-550-pmu,r0",
1576		.data = &cci_pmu_models[CCI550_R0],
1577	},
1578#endif
1579	{},
1580};
1581MODULE_DEVICE_TABLE(of, arm_cci_pmu_matches);
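/*
 * A hypothetical device-tree fragment (illustrative only; see the CCI
 * binding documentation for the authoritative format) matching the
 * "arm,cci-400-pmu,r1" entry above, as a subnode of the CCI node:
 *
 *	pmu@9000 {
 *		compatible = "arm,cci-400-pmu,r1";
 *		reg = <0x9000 0x5000>;
 *		interrupts = <0 101 4>, <0 102 4>, <0 103 4>,
 *			     <0 104 4>, <0 105 4>;
 *	};
 *
 * One interrupt per counter (1 fixed + 4 programmable); interrupts may
 * also be shared, which the probe path below handles by de-duplicating.
 */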
1582
1583static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
1584{
1585	int i;
1586
1587	for (i = 0; i < nr_irqs; i++)
1588		if (irq == irqs[i])
1589			return true;
1590
1591	return false;
1592}
1593
1594static struct cci_pmu *cci_pmu_alloc(struct device *dev)
1595{
1596	struct cci_pmu *cci_pmu;
1597	const struct cci_pmu_model *model;
1598
1599	/*
 1600	 * All allocations are devm_*, hence we don't have to free them
 1601	 * explicitly on an error path; they are released automatically
 1602	 * on driver detach.
1603	 */
1604	cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL);
1605	if (!cci_pmu)
1606		return ERR_PTR(-ENOMEM);
1607
1608	cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data;
1609
1610	model = of_device_get_match_data(dev);
1611	if (!model) {
1612		dev_warn(dev,
 1613			 "DEPRECATED compatible property, requires secure access to CCI registers\n");
1614		model = probe_cci_model(cci_pmu);
1615	}
1616	if (!model) {
1617		dev_warn(dev, "CCI PMU version not supported\n");
1618		return ERR_PTR(-ENODEV);
1619	}
1620
1621	cci_pmu->model = model;
1622	cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model),
1623					sizeof(*cci_pmu->irqs), GFP_KERNEL);
1624	if (!cci_pmu->irqs)
1625		return ERR_PTR(-ENOMEM);
1626	cci_pmu->hw_events.events = devm_kcalloc(dev,
1627					     CCI_PMU_MAX_HW_CNTRS(model),
1628					     sizeof(*cci_pmu->hw_events.events),
1629					     GFP_KERNEL);
1630	if (!cci_pmu->hw_events.events)
1631		return ERR_PTR(-ENOMEM);
1632	cci_pmu->hw_events.used_mask = devm_bitmap_zalloc(dev,
1633							  CCI_PMU_MAX_HW_CNTRS(model),
1634							  GFP_KERNEL);
1635	if (!cci_pmu->hw_events.used_mask)
1636		return ERR_PTR(-ENOMEM);
1637
1638	return cci_pmu;
1639}
1640
1641static int cci_pmu_probe(struct platform_device *pdev)
1642{
1643	struct cci_pmu *cci_pmu;
1644	int i, ret, irq;
1645
1646	cci_pmu = cci_pmu_alloc(&pdev->dev);
1647	if (IS_ERR(cci_pmu))
1648		return PTR_ERR(cci_pmu);
1649
1650	cci_pmu->base = devm_platform_ioremap_resource(pdev, 0);
1651	if (IS_ERR(cci_pmu->base))
 1652		return PTR_ERR(cci_pmu->base);
1653
1654	/*
1655	 * CCI PMU has one overflow interrupt per counter; but some may be tied
1656	 * together to a common interrupt.
1657	 */
1658	cci_pmu->nr_irqs = 0;
1659	for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
1660		irq = platform_get_irq(pdev, i);
1661		if (irq < 0)
1662			break;
1663
1664		if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
1665			continue;
1666
1667		cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
1668	}
1669
1670	/*
1671	 * Ensure that the device tree has as many interrupts as the number
1672	 * of counters.
1673	 */
1674	if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
 1675		dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
1676			i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
1677		return -EINVAL;
1678	}
1679
1680	raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
1681	mutex_init(&cci_pmu->reserve_mutex);
1682	atomic_set(&cci_pmu->active_events, 0);
1683
1684	cci_pmu->cpu = raw_smp_processor_id();
1685	g_cci_pmu = cci_pmu;
1686	cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
1687				  "perf/arm/cci:online", NULL,
1688				  cci_pmu_offline_cpu);
1689
1690	ret = cci_pmu_init(cci_pmu, pdev);
1691	if (ret)
1692		goto error_pmu_init;
1693
 1694	pr_info("ARM %s PMU driver probed\n", cci_pmu->model->name);
1695	return 0;
1696
1697error_pmu_init:
1698	cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
1699	g_cci_pmu = NULL;
1700	return ret;
1701}
1702
1703static int cci_pmu_remove(struct platform_device *pdev)
1704{
1705	if (!g_cci_pmu)
1706		return 0;
1707
1708	cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
1709	perf_pmu_unregister(&g_cci_pmu->pmu);
1710	g_cci_pmu = NULL;
1711
1712	return 0;
1713}
1714
1715static struct platform_driver cci_pmu_driver = {
1716	.driver = {
1717		   .name = DRIVER_NAME,
1718		   .of_match_table = arm_cci_pmu_matches,
1719		   .suppress_bind_attrs = true,
1720		  },
1721	.probe = cci_pmu_probe,
1722	.remove = cci_pmu_remove,
1723};
1724
1725module_platform_driver(cci_pmu_driver);
1726MODULE_LICENSE("GPL v2");
1727MODULE_DESCRIPTION("ARM CCI PMU support");