   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *
   4 * Copyright (C) 2014 ARM Limited
   5 */
   6
   7#include <linux/ctype.h>
   8#include <linux/hrtimer.h>
   9#include <linux/idr.h>
  10#include <linux/interrupt.h>
  11#include <linux/io.h>
  12#include <linux/module.h>
  13#include <linux/mod_devicetable.h>
  14#include <linux/perf_event.h>
  15#include <linux/platform_device.h>
  16#include <linux/slab.h>
  17
  18#define CCN_NUM_XP_PORTS 2
  19#define CCN_NUM_VCS 4
  20#define CCN_NUM_REGIONS	256
  21#define CCN_REGION_SIZE	0x10000
  22
  23#define CCN_ALL_OLY_ID			0xff00
  24#define CCN_ALL_OLY_ID__OLY_ID__SHIFT			0
  25#define CCN_ALL_OLY_ID__OLY_ID__MASK			0x1f
  26#define CCN_ALL_OLY_ID__NODE_ID__SHIFT			8
  27#define CCN_ALL_OLY_ID__NODE_ID__MASK			0x3f
  28
  29#define CCN_MN_ERRINT_STATUS		0x0008
  30#define CCN_MN_ERRINT_STATUS__INTREQ__DESSERT		0x11
  31#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE	0x02
  32#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED	0x20
  33#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE	0x22
  34#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE	0x04
  35#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED	0x40
  36#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE	0x44
  37#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE	0x08
  38#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED	0x80
  39#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE	0x88
  40#define CCN_MN_OLY_COMP_LIST_63_0	0x01e0
  41#define CCN_MN_ERR_SIG_VAL_63_0		0x0300
  42#define CCN_MN_ERR_SIG_VAL_63_0__DT			(1 << 1)
  43
  44#define CCN_DT_ACTIVE_DSM		0x0000
  45#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n)		((n) * 8)
  46#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK			0xff
  47#define CCN_DT_CTL			0x0028
  48#define CCN_DT_CTL__DT_EN				(1 << 0)
  49#define CCN_DT_PMEVCNT(n)		(0x0100 + (n) * 0x8)
  50#define CCN_DT_PMCCNTR			0x0140
  51#define CCN_DT_PMCCNTRSR		0x0190
  52#define CCN_DT_PMOVSR			0x0198
  53#define CCN_DT_PMOVSR_CLR		0x01a0
  54#define CCN_DT_PMOVSR_CLR__MASK				0x1f
  55#define CCN_DT_PMCR			0x01a8
  56#define CCN_DT_PMCR__OVFL_INTR_EN			(1 << 6)
  57#define CCN_DT_PMCR__PMU_EN				(1 << 0)
  58#define CCN_DT_PMSR			0x01b0
  59#define CCN_DT_PMSR_REQ			0x01b8
  60#define CCN_DT_PMSR_CLR			0x01c0
  61
  62#define CCN_HNF_PMU_EVENT_SEL		0x0600
  63#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n)		((n) * 4)
  64#define CCN_HNF_PMU_EVENT_SEL__ID__MASK			0xf
  65
  66#define CCN_XP_DT_CONFIG		0x0300
  67#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n)		((n) * 4)
  68#define CCN_XP_DT_CONFIG__DT_CFG__MASK			0xf
  69#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH		0x0
  70#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1	0x1
  71#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n)		(0x2 + (n))
  72#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n)	(0x4 + (n))
  73#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n))
  74#define CCN_XP_DT_INTERFACE_SEL		0x0308
  75#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n)	(0 + (n) * 8)
  76#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK	0x1
  77#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n)	(1 + (n) * 8)
  78#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK	0x1
  79#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n)	(2 + (n) * 8)
  80#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK	0x3
  81#define CCN_XP_DT_CMP_VAL_L(n)		(0x0310 + (n) * 0x40)
  82#define CCN_XP_DT_CMP_VAL_H(n)		(0x0318 + (n) * 0x40)
  83#define CCN_XP_DT_CMP_MASK_L(n)		(0x0320 + (n) * 0x40)
  84#define CCN_XP_DT_CMP_MASK_H(n)		(0x0328 + (n) * 0x40)
  85#define CCN_XP_DT_CONTROL		0x0370
  86#define CCN_XP_DT_CONTROL__DT_ENABLE			(1 << 0)
  87#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n)		(12 + (n) * 4)
  88#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK		0xf
  89#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS		0xf
  90#define CCN_XP_PMU_EVENT_SEL		0x0600
  91#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n)		((n) * 7)
  92#define CCN_XP_PMU_EVENT_SEL__ID__MASK			0x3f
  93
  94#define CCN_SBAS_PMU_EVENT_SEL		0x0600
  95#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n)		((n) * 4)
  96#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK		0xf
  97
  98#define CCN_RNI_PMU_EVENT_SEL		0x0600
  99#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n)		((n) * 4)
 100#define CCN_RNI_PMU_EVENT_SEL__ID__MASK			0xf
 101
 102#define CCN_TYPE_MN	0x01
 103#define CCN_TYPE_DT	0x02
 104#define CCN_TYPE_HNF	0x04
 105#define CCN_TYPE_HNI	0x05
 106#define CCN_TYPE_XP	0x08
 107#define CCN_TYPE_SBSX	0x0c
 108#define CCN_TYPE_SBAS	0x10
 109#define CCN_TYPE_RNI_1P	0x14
 110#define CCN_TYPE_RNI_2P	0x15
 111#define CCN_TYPE_RNI_3P	0x16
 112#define CCN_TYPE_RND_1P	0x18 /* RN-D = RN-I + DVM */
 113#define CCN_TYPE_RND_2P	0x19
 114#define CCN_TYPE_RND_3P	0x1a
 115#define CCN_TYPE_CYCLES	0xff /* Pseudotype */
 116
 117#define CCN_EVENT_WATCHPOINT 0xfe /* Pseudoevent */
 118
 119#define CCN_NUM_PMU_EVENTS		4
 120#define CCN_NUM_XP_WATCHPOINTS		2 /* See DT.dbg_id.num_watchpoints */
 121#define CCN_NUM_PMU_EVENT_COUNTERS	8 /* See DT.dbg_id.num_pmucntr */
 122#define CCN_IDX_PMU_CYCLE_COUNTER	CCN_NUM_PMU_EVENT_COUNTERS
 123
 124#define CCN_NUM_PREDEFINED_MASKS	4
 125#define CCN_IDX_MASK_ANY		(CCN_NUM_PMU_EVENT_COUNTERS + 0)
 126#define CCN_IDX_MASK_EXACT		(CCN_NUM_PMU_EVENT_COUNTERS + 1)
 127#define CCN_IDX_MASK_ORDER		(CCN_NUM_PMU_EVENT_COUNTERS + 2)
 128#define CCN_IDX_MASK_OPCODE		(CCN_NUM_PMU_EVENT_COUNTERS + 3)
 129
 130struct arm_ccn_component {
 131	void __iomem *base;
 132	u32 type;
 133
 134	DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS);
 135	union {
 136		struct {
 137			DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS);
 138		} xp;
 139	};
 140};
 141
 142#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \
 143	struct arm_ccn_dt, pmu), struct arm_ccn, dt)
 144
 145struct arm_ccn_dt {
 146	int id;
 147	void __iomem *base;
 148
 149	spinlock_t config_lock;
 150
 151	DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1);
 152	struct {
 153		struct arm_ccn_component *source;
 154		struct perf_event *event;
 155	} pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1];
 156
 157	struct {
 158	       u64 l, h;
 159	} cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS];
 160
 161	struct hrtimer hrtimer;
 162
 163	unsigned int cpu;
 164	struct hlist_node node;
 165
 166	struct pmu pmu;
 167};
 168
 169struct arm_ccn {
 170	struct device *dev;
 171	void __iomem *base;
 172	unsigned int irq;
 173
 174	unsigned sbas_present:1;
 175	unsigned sbsx_present:1;
 176
 177	int num_nodes;
 178	struct arm_ccn_component *node;
 179
 180	int num_xps;
 181	struct arm_ccn_component *xp;
 182
 183	struct arm_ccn_dt dt;
 184	int mn_id;
 185};
 186
 187static int arm_ccn_node_to_xp(int node)
 188{
 189	return node / CCN_NUM_XP_PORTS;
 190}
 191
 192static int arm_ccn_node_to_xp_port(int node)
 193{
 194	return node % CCN_NUM_XP_PORTS;
 195}
 196
 197
 198/*
 199 * Bit shifts and masks in these defines must be kept in sync with
 200 * arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below!
 201 */
 202#define CCN_CONFIG_NODE(_config)	(((_config) >> 0) & 0xff)
 203#define CCN_CONFIG_XP(_config)		(((_config) >> 0) & 0xff)
 204#define CCN_CONFIG_TYPE(_config)	(((_config) >> 8) & 0xff)
 205#define CCN_CONFIG_EVENT(_config)	(((_config) >> 16) & 0xff)
 206#define CCN_CONFIG_PORT(_config)	(((_config) >> 24) & 0x3)
 207#define CCN_CONFIG_BUS(_config)		(((_config) >> 24) & 0x3)
 208#define CCN_CONFIG_VC(_config)		(((_config) >> 26) & 0x7)
 209#define CCN_CONFIG_DIR(_config)		(((_config) >> 29) & 0x1)
 210#define CCN_CONFIG_MASK(_config)	(((_config) >> 30) & 0xf)
 211
 212static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
 213{
 214	*config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24));
 215	*config |= (node_xp << 0) | (type << 8) | (port << 24);
 216}
 217
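/*
 * Worked example (illustrative, not part of the original driver):
 * config = 0x30104 decodes as node/xp = 0x04, type = 0x01 (CCN_TYPE_MN)
 * and event = 0x03; the port/bus, vc, dir and mask fields are all zero.
 */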
 218#define CCN_FORMAT_ATTR(_name, _config) \
 219	struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \
 220			{ __ATTR(_name, S_IRUGO, device_show_string, \
 221			NULL), _config }
 222
 223static CCN_FORMAT_ATTR(node, "config:0-7");
 224static CCN_FORMAT_ATTR(xp, "config:0-7");
 225static CCN_FORMAT_ATTR(type, "config:8-15");
 226static CCN_FORMAT_ATTR(event, "config:16-23");
 227static CCN_FORMAT_ATTR(port, "config:24-25");
 228static CCN_FORMAT_ATTR(bus, "config:24-25");
 229static CCN_FORMAT_ATTR(vc, "config:26-28");
 230static CCN_FORMAT_ATTR(dir, "config:29-29");
 231static CCN_FORMAT_ATTR(mask, "config:30-33");
 232static CCN_FORMAT_ATTR(cmp_l, "config1:0-62");
 233static CCN_FORMAT_ATTR(cmp_h, "config2:0-59");
 234
 235static struct attribute *arm_ccn_pmu_format_attrs[] = {
 236	&arm_ccn_pmu_format_attr_node.attr.attr,
 237	&arm_ccn_pmu_format_attr_xp.attr.attr,
 238	&arm_ccn_pmu_format_attr_type.attr.attr,
 239	&arm_ccn_pmu_format_attr_event.attr.attr,
 240	&arm_ccn_pmu_format_attr_port.attr.attr,
 241	&arm_ccn_pmu_format_attr_bus.attr.attr,
 242	&arm_ccn_pmu_format_attr_vc.attr.attr,
 243	&arm_ccn_pmu_format_attr_dir.attr.attr,
 244	&arm_ccn_pmu_format_attr_mask.attr.attr,
 245	&arm_ccn_pmu_format_attr_cmp_l.attr.attr,
 246	&arm_ccn_pmu_format_attr_cmp_h.attr.attr,
 247	NULL
 248};
 249
 250static const struct attribute_group arm_ccn_pmu_format_attr_group = {
 251	.name = "format",
 252	.attrs = arm_ccn_pmu_format_attrs,
 253};
 254
 255
 256struct arm_ccn_pmu_event {
 257	struct device_attribute attr;
 258	u32 type;
 259	u32 event;
 260	int num_ports;
 261	int num_vcs;
 262	const char *def;
 263	int mask;
 264};
 265
 266#define CCN_EVENT_ATTR(_name) \
 267	__ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL)
 268
  269/*
  270 * Events defined in the TRM for MN, HN-I and SBSX nodes are actually
  271 * watchpoints set on the ports of the XPs they are connected to. For the
  272 * sake of usability they are explicitly defined here (and translated
  273 * into a relevant watchpoint in arm_ccn_pmu_event_init()) so the user
  274 * can easily request them without deep knowledge of the flit format.
  275 */
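/*
 * Usage sketch (illustrative; the PMU instance name "ccn" and the node ID
 * are assumptions, not from the original source):
 *   perf stat -a -e ccn/hni_txdatflits,node=8/ -- sleep 1
 * is rewritten by arm_ccn_pmu_event_init() into a TX data-flit watchpoint
 * on the XP port that HN-I node 8 is attached to.
 */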
 276
 277#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \
 278		.type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \
 279		.num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \
 280		.def = _def, .mask = _mask, }
 281
 282#define CCN_EVENT_HNI(_name, _def, _mask) { \
 283		.attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \
 284		.event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
 285		.num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }
 286
 287#define CCN_EVENT_SBSX(_name, _def, _mask) { \
 288		.attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \
 289		.event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
 290		.num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }
 291
 292#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \
 293		.type = CCN_TYPE_HNF, .event = _event, }
 294
 295#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \
 296		.type = CCN_TYPE_XP, .event = _event, \
 297		.num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, }
 298
  299/*
  300 * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type IDs
  301 * depending on the configuration. One of them is picked to represent
  302 * the whole group, as they all share the same event types.
  303 */
 304#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \
 305		.type = CCN_TYPE_RNI_3P, .event = _event, }
 306
 307#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \
 308		.type = CCN_TYPE_SBAS, .event = _event, }
 309
 310#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \
 311		.type = CCN_TYPE_CYCLES }
 312
 313
 314static ssize_t arm_ccn_pmu_event_show(struct device *dev,
 315		struct device_attribute *attr, char *buf)
 316{
 317	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
 318	struct arm_ccn_pmu_event *event = container_of(attr,
 319			struct arm_ccn_pmu_event, attr);
 320	int res;
 321
 322	res = sysfs_emit(buf, "type=0x%x", event->type);
 323	if (event->event)
 324		res += sysfs_emit_at(buf, res, ",event=0x%x", event->event);
 325	if (event->def)
 326		res += sysfs_emit_at(buf, res, ",%s", event->def);
 327	if (event->mask)
 328		res += sysfs_emit_at(buf, res, ",mask=0x%x", event->mask);
 329
 330	/* Arguments required by an event */
 331	switch (event->type) {
 332	case CCN_TYPE_CYCLES:
 333		break;
 334	case CCN_TYPE_XP:
 335		res += sysfs_emit_at(buf, res, ",xp=?,vc=?");
 336		if (event->event == CCN_EVENT_WATCHPOINT)
 337			res += sysfs_emit_at(buf, res,
 338					",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?");
 339		else
 340			res += sysfs_emit_at(buf, res, ",bus=?");
 341
 342		break;
 343	case CCN_TYPE_MN:
 344		res += sysfs_emit_at(buf, res, ",node=%d", ccn->mn_id);
 345		break;
 346	default:
 347		res += sysfs_emit_at(buf, res, ",node=?");
 348		break;
 349	}
 350
 351	res += sysfs_emit_at(buf, res, "\n");
 352
 353	return res;
 354}
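/*
 * Example output (illustrative): for the xp_watchpoint event defined
 * below, the routine above renders
 *   type=0x8,event=0xfe,xp=?,vc=?,port=?,dir=?,cmp_l=?,cmp_h=?,mask=?
 * where each "?" marks an argument the user still has to supply.
 */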
 355
 356static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
 357				     struct attribute *attr, int index)
 358{
 359	struct device *dev = kobj_to_dev(kobj);
 360	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
 361	struct device_attribute *dev_attr = container_of(attr,
 362			struct device_attribute, attr);
 363	struct arm_ccn_pmu_event *event = container_of(dev_attr,
 364			struct arm_ccn_pmu_event, attr);
 365
 366	if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present)
 367		return 0;
 368	if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present)
 369		return 0;
 370
 371	return attr->mode;
 372}
 373
 374static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
 375	CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
 376	CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
 377	CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
 378	CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
 379	CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
 380	CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
 381	CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
 382	CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
 383			CCN_IDX_MASK_ORDER),
 384	CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
 385	CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
 386	CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
 387	CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
 388	CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
 389			CCN_IDX_MASK_ORDER),
 390	CCN_EVENT_HNF(cache_miss, 0x1),
 391	CCN_EVENT_HNF(l3_sf_cache_access, 0x02),
 392	CCN_EVENT_HNF(cache_fill, 0x3),
 393	CCN_EVENT_HNF(pocq_retry, 0x4),
 394	CCN_EVENT_HNF(pocq_reqs_recvd, 0x5),
 395	CCN_EVENT_HNF(sf_hit, 0x6),
 396	CCN_EVENT_HNF(sf_evictions, 0x7),
 397	CCN_EVENT_HNF(snoops_sent, 0x8),
 398	CCN_EVENT_HNF(snoops_broadcast, 0x9),
 399	CCN_EVENT_HNF(l3_eviction, 0xa),
 400	CCN_EVENT_HNF(l3_fill_invalid_way, 0xb),
 401	CCN_EVENT_HNF(mc_retries, 0xc),
 402	CCN_EVENT_HNF(mc_reqs, 0xd),
 403	CCN_EVENT_HNF(qos_hh_retry, 0xe),
 404	CCN_EVENT_RNI(rdata_beats_p0, 0x1),
 405	CCN_EVENT_RNI(rdata_beats_p1, 0x2),
 406	CCN_EVENT_RNI(rdata_beats_p2, 0x3),
 407	CCN_EVENT_RNI(rxdat_flits, 0x4),
 408	CCN_EVENT_RNI(txdat_flits, 0x5),
 409	CCN_EVENT_RNI(txreq_flits, 0x6),
 410	CCN_EVENT_RNI(txreq_flits_retried, 0x7),
 411	CCN_EVENT_RNI(rrt_full, 0x8),
 412	CCN_EVENT_RNI(wrt_full, 0x9),
 413	CCN_EVENT_RNI(txreq_flits_replayed, 0xa),
 414	CCN_EVENT_XP(upload_starvation, 0x1),
 415	CCN_EVENT_XP(download_starvation, 0x2),
 416	CCN_EVENT_XP(respin, 0x3),
 417	CCN_EVENT_XP(valid_flit, 0x4),
 418	CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT),
 419	CCN_EVENT_SBAS(rdata_beats_p0, 0x1),
 420	CCN_EVENT_SBAS(rxdat_flits, 0x4),
 421	CCN_EVENT_SBAS(txdat_flits, 0x5),
 422	CCN_EVENT_SBAS(txreq_flits, 0x6),
 423	CCN_EVENT_SBAS(txreq_flits_retried, 0x7),
 424	CCN_EVENT_SBAS(rrt_full, 0x8),
 425	CCN_EVENT_SBAS(wrt_full, 0x9),
 426	CCN_EVENT_SBAS(txreq_flits_replayed, 0xa),
 427	CCN_EVENT_CYCLES(cycles),
 428};
 429
 430/* Populated in arm_ccn_init() */
 431static struct attribute
 432		*arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];
 433
 434static const struct attribute_group arm_ccn_pmu_events_attr_group = {
 435	.name = "events",
 436	.is_visible = arm_ccn_pmu_events_is_visible,
 437	.attrs = arm_ccn_pmu_events_attrs,
 438};
 439
 440
 441static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name)
 442{
 443	unsigned long i;
 444
 445	if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1]))
 446		return NULL;
 447	i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a';
 448
 449	switch (name[1]) {
 450	case 'l':
 451		return &ccn->dt.cmp_mask[i].l;
 452	case 'h':
 453		return &ccn->dt.cmp_mask[i].h;
 454	default:
 455		return NULL;
 456	}
 457}
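/*
 * Naming sketch (illustrative): the attribute name is a hex counter index
 * followed by 'l' or 'h', so "3h" selects cmp_mask[3].h and "bl" selects
 * cmp_mask[0xb].l, the low word of the predefined OPCODE mask.
 */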
 458
 459static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev,
 460		struct device_attribute *attr, char *buf)
 461{
 462	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
 463	u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
 464
 465	return mask ? sysfs_emit(buf, "0x%016llx\n", *mask) : -EINVAL;
 466}
 467
 468static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev,
 469		struct device_attribute *attr, const char *buf, size_t count)
 470{
 471	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
 472	u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
 473	int err = -EINVAL;
 474
 475	if (mask)
 476		err = kstrtoull(buf, 0, mask);
 477
 478	return err ? err : count;
 479}
 480
 481#define CCN_CMP_MASK_ATTR(_name) \
 482	struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
 483			__ATTR(_name, S_IRUGO | S_IWUSR, \
 484			arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store)
 485
 486#define CCN_CMP_MASK_ATTR_RO(_name) \
 487	struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
 488			__ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL)
 489
 490static CCN_CMP_MASK_ATTR(0l);
 491static CCN_CMP_MASK_ATTR(0h);
 492static CCN_CMP_MASK_ATTR(1l);
 493static CCN_CMP_MASK_ATTR(1h);
 494static CCN_CMP_MASK_ATTR(2l);
 495static CCN_CMP_MASK_ATTR(2h);
 496static CCN_CMP_MASK_ATTR(3l);
 497static CCN_CMP_MASK_ATTR(3h);
 498static CCN_CMP_MASK_ATTR(4l);
 499static CCN_CMP_MASK_ATTR(4h);
 500static CCN_CMP_MASK_ATTR(5l);
 501static CCN_CMP_MASK_ATTR(5h);
 502static CCN_CMP_MASK_ATTR(6l);
 503static CCN_CMP_MASK_ATTR(6h);
 504static CCN_CMP_MASK_ATTR(7l);
 505static CCN_CMP_MASK_ATTR(7h);
 506static CCN_CMP_MASK_ATTR_RO(8l);
 507static CCN_CMP_MASK_ATTR_RO(8h);
 508static CCN_CMP_MASK_ATTR_RO(9l);
 509static CCN_CMP_MASK_ATTR_RO(9h);
 510static CCN_CMP_MASK_ATTR_RO(al);
 511static CCN_CMP_MASK_ATTR_RO(ah);
 512static CCN_CMP_MASK_ATTR_RO(bl);
 513static CCN_CMP_MASK_ATTR_RO(bh);
 514
 515static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
 516	&arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr,
 517	&arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr,
 518	&arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr,
 519	&arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr,
 520	&arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr,
 521	&arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr,
 522	&arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr,
 523	&arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr,
 524	&arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr,
 525	&arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr,
 526	&arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr,
 527	&arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr,
 528	NULL
 529};
 530
 531static const struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
 532	.name = "cmp_mask",
 533	.attrs = arm_ccn_pmu_cmp_mask_attrs,
 534};
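/*
 * Sysfs usage sketch (illustrative path, assuming a PMU instance named
 * "ccn"):
 *   echo 0xff00000000000000 > \
 *           /sys/bus/event_source/devices/ccn/cmp_mask/0l
 * Indices 8-b are the predefined masks and are exposed read-only.
 */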
 535
 536static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev,
 537				     struct device_attribute *attr, char *buf)
 538{
 539	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
 540
 541	return cpumap_print_to_pagebuf(true, buf, cpumask_of(ccn->dt.cpu));
 542}
 543
 544static struct device_attribute arm_ccn_pmu_cpumask_attr =
 545		__ATTR(cpumask, S_IRUGO, arm_ccn_pmu_cpumask_show, NULL);
 546
 547static struct attribute *arm_ccn_pmu_cpumask_attrs[] = {
 548	&arm_ccn_pmu_cpumask_attr.attr,
 549	NULL,
 550};
 551
 552static const struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
 553	.attrs = arm_ccn_pmu_cpumask_attrs,
 554};
 555
  556/*
  557 * The default poll period is 10ms, which is way over the top anyway:
  558 * even in the worst case scenario (an event on every cycle), with a
  559 * 1GHz clocked bus, the smallest, 32 bit counter will take more than
  560 * 4s to overflow.
  561 */
 562static unsigned int arm_ccn_pmu_poll_period_us = 10000;
 563module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint,
 564		S_IRUGO | S_IWUSR);
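/*
 * Sanity check of the figure above (illustrative arithmetic): a 32 bit
 * counter incremented on every cycle of a 1 GHz clock wraps after
 * 2^32 / 10^9 Hz ~= 4.29 s, so a 10 ms poll period leaves a margin of
 * roughly 400x.
 */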
 565
 566static ktime_t arm_ccn_pmu_timer_period(void)
 567{
 568	return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000);
 569}
 570
 571
 572static const struct attribute_group *arm_ccn_pmu_attr_groups[] = {
 573	&arm_ccn_pmu_events_attr_group,
 574	&arm_ccn_pmu_format_attr_group,
 575	&arm_ccn_pmu_cmp_mask_attr_group,
 576	&arm_ccn_pmu_cpumask_attr_group,
 577	NULL
 578};
 579
 580
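/*
 * Note (added for clarity): find_first_zero_bit() is not atomic, so the
 * loop below re-checks the claimed bit with test_and_set_bit() and
 * retries if another CPU won the race for it.
 */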
 581static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size)
 582{
 583	int bit;
 584
 585	do {
 586		bit = find_first_zero_bit(bitmap, size);
 587		if (bit >= size)
 588			return -EAGAIN;
 589	} while (test_and_set_bit(bit, bitmap));
 590
 591	return bit;
 592}
 593
 594/* All RN-I and RN-D nodes have identical PMUs */
 595static int arm_ccn_pmu_type_eq(u32 a, u32 b)
 596{
 597	if (a == b)
 598		return 1;
 599
 600	switch (a) {
 601	case CCN_TYPE_RNI_1P:
 602	case CCN_TYPE_RNI_2P:
 603	case CCN_TYPE_RNI_3P:
 604	case CCN_TYPE_RND_1P:
 605	case CCN_TYPE_RND_2P:
 606	case CCN_TYPE_RND_3P:
 607		switch (b) {
 608		case CCN_TYPE_RNI_1P:
 609		case CCN_TYPE_RNI_2P:
 610		case CCN_TYPE_RNI_3P:
 611		case CCN_TYPE_RND_1P:
 612		case CCN_TYPE_RND_2P:
 613		case CCN_TYPE_RND_3P:
 614			return 1;
 615		}
 616		break;
 617	}
 618
 619	return 0;
 620}
 621
 622static int arm_ccn_pmu_event_alloc(struct perf_event *event)
 623{
 624	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 625	struct hw_perf_event *hw = &event->hw;
 626	u32 node_xp, type, event_id;
 627	struct arm_ccn_component *source;
 628	int bit;
 629
 630	node_xp = CCN_CONFIG_NODE(event->attr.config);
 631	type = CCN_CONFIG_TYPE(event->attr.config);
 632	event_id = CCN_CONFIG_EVENT(event->attr.config);
 633
 634	/* Allocate the cycle counter */
 635	if (type == CCN_TYPE_CYCLES) {
 636		if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
 637				ccn->dt.pmu_counters_mask))
 638			return -EAGAIN;
 639
 640		hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
 641		ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;
 642
 643		return 0;
 644	}
 645
 646	/* Allocate an event counter */
 647	hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
 648			CCN_NUM_PMU_EVENT_COUNTERS);
 649	if (hw->idx < 0) {
 650		dev_dbg(ccn->dev, "No more counters available!\n");
 651		return -EAGAIN;
 652	}
 653
 654	if (type == CCN_TYPE_XP)
 655		source = &ccn->xp[node_xp];
 656	else
 657		source = &ccn->node[node_xp];
 658	ccn->dt.pmu_counters[hw->idx].source = source;
 659
 660	/* Allocate an event source or a watchpoint */
 661	if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
 662		bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
 663				CCN_NUM_XP_WATCHPOINTS);
 664	else
 665		bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
 666				CCN_NUM_PMU_EVENTS);
 667	if (bit < 0) {
 668		dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
 669				node_xp);
 670		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
 671		return -EAGAIN;
 672	}
 673	hw->config_base = bit;
 674
 675	ccn->dt.pmu_counters[hw->idx].event = event;
 676
 677	return 0;
 678}
 679
 680static void arm_ccn_pmu_event_release(struct perf_event *event)
 681{
 682	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 683	struct hw_perf_event *hw = &event->hw;
 684
 685	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
 686		clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
 687	} else {
 688		struct arm_ccn_component *source =
 689				ccn->dt.pmu_counters[hw->idx].source;
 690
 691		if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
 692				CCN_CONFIG_EVENT(event->attr.config) ==
 693				CCN_EVENT_WATCHPOINT)
 694			clear_bit(hw->config_base, source->xp.dt_cmp_mask);
 695		else
 696			clear_bit(hw->config_base, source->pmu_events_mask);
 697		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
 698	}
 699
 700	ccn->dt.pmu_counters[hw->idx].source = NULL;
 701	ccn->dt.pmu_counters[hw->idx].event = NULL;
 702}
 703
 704static int arm_ccn_pmu_event_init(struct perf_event *event)
 705{
 706	struct arm_ccn *ccn;
 707	struct hw_perf_event *hw = &event->hw;
 708	u32 node_xp, type, event_id;
 709	int valid;
 710	int i;
 711	struct perf_event *sibling;
 712
 713	if (event->attr.type != event->pmu->type)
 714		return -ENOENT;
 715
 716	ccn = pmu_to_arm_ccn(event->pmu);
 717
 718	if (hw->sample_period) {
 719		dev_dbg(ccn->dev, "Sampling not supported!\n");
 720		return -EOPNOTSUPP;
 721	}
 722
 723	if (has_branch_stack(event)) {
 724		dev_dbg(ccn->dev, "Can't exclude execution levels!\n");
 725		return -EINVAL;
 726	}
 727
 728	if (event->cpu < 0) {
 729		dev_dbg(ccn->dev, "Can't provide per-task data!\n");
 730		return -EOPNOTSUPP;
 731	}
  732	/*
  733	 * Many perf core operations (e.g. event rotation) operate on a
  734	 * single CPU context. This is obvious for CPU PMUs, where one
  735	 * expects the same sets of events being observed on all CPUs,
  736	 * but can lead to issues for off-core PMUs, like CCN, where each
  737	 * event could be theoretically assigned to a different CPU. To
  738	 * mitigate this, we enforce CPU assignment to one, selected
  739	 * processor (the one described in the "cpumask" attribute).
  740	 */
 741	event->cpu = ccn->dt.cpu;
 742
 743	node_xp = CCN_CONFIG_NODE(event->attr.config);
 744	type = CCN_CONFIG_TYPE(event->attr.config);
 745	event_id = CCN_CONFIG_EVENT(event->attr.config);
 746
 747	/* Validate node/xp vs topology */
 748	switch (type) {
 749	case CCN_TYPE_MN:
 750		if (node_xp != ccn->mn_id) {
 751			dev_dbg(ccn->dev, "Invalid MN ID %d!\n", node_xp);
 752			return -EINVAL;
 753		}
 754		break;
 755	case CCN_TYPE_XP:
 756		if (node_xp >= ccn->num_xps) {
 757			dev_dbg(ccn->dev, "Invalid XP ID %d!\n", node_xp);
 758			return -EINVAL;
 759		}
 760		break;
 761	case CCN_TYPE_CYCLES:
 762		break;
 763	default:
 764		if (node_xp >= ccn->num_nodes) {
 765			dev_dbg(ccn->dev, "Invalid node ID %d!\n", node_xp);
 766			return -EINVAL;
 767		}
 768		if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
 769			dev_dbg(ccn->dev, "Invalid type 0x%x for node %d!\n",
 770					type, node_xp);
 771			return -EINVAL;
 772		}
 773		break;
 774	}
 775
 776	/* Validate event ID vs available for the type */
 777	for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid;
 778			i++) {
 779		struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i];
 780		u32 port = CCN_CONFIG_PORT(event->attr.config);
 781		u32 vc = CCN_CONFIG_VC(event->attr.config);
 782
 783		if (!arm_ccn_pmu_type_eq(type, e->type))
 784			continue;
 785		if (event_id != e->event)
 786			continue;
 787		if (e->num_ports && port >= e->num_ports) {
 788			dev_dbg(ccn->dev, "Invalid port %d for node/XP %d!\n",
 789					port, node_xp);
 790			return -EINVAL;
 791		}
 792		if (e->num_vcs && vc >= e->num_vcs) {
 793			dev_dbg(ccn->dev, "Invalid vc %d for node/XP %d!\n",
 794					vc, node_xp);
 795			return -EINVAL;
 796		}
 797		valid = 1;
 798	}
 799	if (!valid) {
 800		dev_dbg(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
 801				event_id, node_xp);
 802		return -EINVAL;
 803	}
 804
 805	/* Watchpoint-based event for a node is actually set on XP */
 806	if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) {
 807		u32 port;
 808
 809		type = CCN_TYPE_XP;
 810		port = arm_ccn_node_to_xp_port(node_xp);
 811		node_xp = arm_ccn_node_to_xp(node_xp);
 812
 813		arm_ccn_pmu_config_set(&event->attr.config,
 814				node_xp, type, port);
 815	}
 816
  817	/*
  818	 * We must NOT create groups containing mixed PMUs, although software
  819	 * events are acceptable (for example to create a CCN group that is
  820	 * read periodically when an hrtimer, aka cpu-clock, leader triggers).
  821	 */
 822	if (event->group_leader->pmu != event->pmu &&
 823			!is_software_event(event->group_leader))
 824		return -EINVAL;
 825
 826	for_each_sibling_event(sibling, event->group_leader) {
 827		if (sibling->pmu != event->pmu &&
 828				!is_software_event(sibling))
 829			return -EINVAL;
 830	}
 831
 832	return 0;
 833}
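/*
 * Grouping sketch (illustrative command line; the instance name "ccn" is
 * an assumption): a software leader such as cpu-clock may drive a CCN
 * group, e.g.
 *   perf stat -a -C 0 -e '{cpu-clock,ccn/cycles/}' -- sleep 1
 * whereas grouping events of two different hardware PMUs is rejected by
 * the checks above.
 */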
 834
 835static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx)
 836{
 837	u64 res;
 838
 839	if (idx == CCN_IDX_PMU_CYCLE_COUNTER) {
 840#ifdef readq
 841		res = readq(ccn->dt.base + CCN_DT_PMCCNTR);
 842#else
 843		/* 40 bit counter, can do snapshot and read in two parts */
 844		writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ);
 845		while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1))
 846			;
 847		writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
 848		res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff;
 849		res <<= 32;
 850		res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR);
 851#endif
 852	} else {
 853		res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx));
 854	}
 855
 856	return res;
 857}
 858
 859static void arm_ccn_pmu_event_update(struct perf_event *event)
 860{
 861	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 862	struct hw_perf_event *hw = &event->hw;
 863	u64 prev_count, new_count, mask;
 864
 865	do {
 866		prev_count = local64_read(&hw->prev_count);
 867		new_count = arm_ccn_pmu_read_counter(ccn, hw->idx);
 868	} while (local64_xchg(&hw->prev_count, new_count) != prev_count);
 869
 870	mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1;
 871
 872	local64_add((new_count - prev_count) & mask, &event->count);
 873}
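/*
 * Wrap-around example (illustrative): for a 32 bit event counter,
 * prev_count = 0xfffffff0 and new_count = 0x00000010 give
 * (new_count - prev_count) & 0xffffffff = 0x20, i.e. 32 new events
 * despite the counter having wrapped in between.
 */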
 874
 875static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
 876{
 877	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 878	struct hw_perf_event *hw = &event->hw;
 879	struct arm_ccn_component *xp;
 880	u32 val, dt_cfg;
 881
 882	/* Nothing to do for cycle counter */
 883	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
 884		return;
 885
 886	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
 887		xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
 888	else
 889		xp = &ccn->xp[arm_ccn_node_to_xp(
 890				CCN_CONFIG_NODE(event->attr.config))];
 891
 892	if (enable)
 893		dt_cfg = hw->event_base;
 894	else
 895		dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH;
 896
 897	spin_lock(&ccn->dt.config_lock);
 898
 899	val = readl(xp->base + CCN_XP_DT_CONFIG);
 900	val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK <<
 901			CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx));
 902	val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx);
 903	writel(val, xp->base + CCN_XP_DT_CONFIG);
 904
 905	spin_unlock(&ccn->dt.config_lock);
 906}
 907
 908static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
 909{
 910	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 911	struct hw_perf_event *hw = &event->hw;
 912
 913	local64_set(&event->hw.prev_count,
 914			arm_ccn_pmu_read_counter(ccn, hw->idx));
 915	hw->state = 0;
 916
 917	/* Set the DT bus input, engaging the counter */
 918	arm_ccn_pmu_xp_dt_config(event, 1);
 919}
 920
 921static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
 922{
 923	struct hw_perf_event *hw = &event->hw;
 924
 925	/* Disable counting, setting the DT bus to pass-through mode */
 926	arm_ccn_pmu_xp_dt_config(event, 0);
 927
 928	if (flags & PERF_EF_UPDATE)
 929		arm_ccn_pmu_event_update(event);
 930
 931	hw->state |= PERF_HES_STOPPED;
 932}
 933
 934static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
 935{
 936	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 937	struct hw_perf_event *hw = &event->hw;
 938	struct arm_ccn_component *source =
 939			ccn->dt.pmu_counters[hw->idx].source;
 940	unsigned long wp = hw->config_base;
 941	u32 val;
 942	u64 cmp_l = event->attr.config1;
 943	u64 cmp_h = event->attr.config2;
 944	u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l;
 945	u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h;
 946
 947	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp);
 948
 949	/* Direction (RX/TX), device (port) & virtual channel */
 950	val = readl(source->base + CCN_XP_DT_INTERFACE_SEL);
 951	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK <<
 952			CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp));
 953	val |= CCN_CONFIG_DIR(event->attr.config) <<
 954			CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp);
 955	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK <<
 956			CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp));
 957	val |= CCN_CONFIG_PORT(event->attr.config) <<
 958			CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp);
 959	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK <<
 960			CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp));
 961	val |= CCN_CONFIG_VC(event->attr.config) <<
 962			CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp);
 963	writel(val, source->base + CCN_XP_DT_INTERFACE_SEL);
 964
 965	/* Comparison values */
 966	writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
 967	writel((cmp_l >> 32) & 0x7fffffff,
 968			source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
 969	writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
 970	writel((cmp_h >> 32) & 0x0fffffff,
 971			source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4);
 972
 973	/* Mask */
 974	writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
 975	writel((mask_l >> 32) & 0x7fffffff,
 976			source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
 977	writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
 978	writel((mask_h >> 32) & 0x0fffffff,
 979			source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4);
 980}
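/*
 * Mask semantics sketch (inferred from the predefined masks, stated as an
 * assumption rather than from the original source): a set mask bit makes
 * the corresponding flit bit a don't-care, so the "any" mask (all ones)
 * matches every flit, while the "order" mask compares only bit 15 of the
 * high comparison word.
 */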
 981
 982static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
 983{
 984	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 985	struct hw_perf_event *hw = &event->hw;
 986	struct arm_ccn_component *source =
 987			ccn->dt.pmu_counters[hw->idx].source;
 988	u32 val, id;
 989
 990	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);
 991
 992	id = (CCN_CONFIG_VC(event->attr.config) << 4) |
 993			(CCN_CONFIG_BUS(event->attr.config) << 3) |
 994			(CCN_CONFIG_EVENT(event->attr.config) << 0);
 995
 996	val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
 997	val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK <<
 998			CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
 999	val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
1000	writel(val, source->base + CCN_XP_PMU_EVENT_SEL);
1001}
1002
1003static void arm_ccn_pmu_node_event_config(struct perf_event *event)
1004{
1005	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
1006	struct hw_perf_event *hw = &event->hw;
1007	struct arm_ccn_component *source =
1008			ccn->dt.pmu_counters[hw->idx].source;
1009	u32 type = CCN_CONFIG_TYPE(event->attr.config);
1010	u32 val, port;
1011
1012	port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config));
1013	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port,
1014			hw->config_base);
1015
1016	/* These *_event_sel regs should be identical, but let's make sure... */
1017	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL);
1018	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL);
1019	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) !=
1020			CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1));
1021	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) !=
1022			CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1));
1023	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK !=
1024			CCN_SBAS_PMU_EVENT_SEL__ID__MASK);
1025	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK !=
1026			CCN_RNI_PMU_EVENT_SEL__ID__MASK);
1027	if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS &&
1028			!arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P)))
1029		return;
1030
1031	/* Set the event id for the pre-allocated counter */
1032	val = readl(source->base + CCN_HNF_PMU_EVENT_SEL);
1033	val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK <<
1034		CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
1035	val |= CCN_CONFIG_EVENT(event->attr.config) <<
1036		CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
1037	writel(val, source->base + CCN_HNF_PMU_EVENT_SEL);
1038}
1039
1040static void arm_ccn_pmu_event_config(struct perf_event *event)
1041{
1042	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
1043	struct hw_perf_event *hw = &event->hw;
1044	u32 xp, offset, val;
1045
1046	/* Cycle counter requires no setup */
1047	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
1048		return;
1049
1050	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
1051		xp = CCN_CONFIG_XP(event->attr.config);
1052	else
1053		xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config));
1054
1055	spin_lock(&ccn->dt.config_lock);
1056
1057	/* Set the DT bus "distance" register */
1058	offset = (hw->idx / 4) * 4;
1059	val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
1060	val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK <<
1061			CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4));
1062	val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4);
1063	writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
1064
1065	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) {
1066		if (CCN_CONFIG_EVENT(event->attr.config) ==
1067				CCN_EVENT_WATCHPOINT)
1068			arm_ccn_pmu_xp_watchpoint_config(event);
1069		else
1070			arm_ccn_pmu_xp_event_config(event);
1071	} else {
1072		arm_ccn_pmu_node_event_config(event);
1073	}
1074
1075	spin_unlock(&ccn->dt.config_lock);
1076}
1077
1078static int arm_ccn_pmu_active_counters(struct arm_ccn *ccn)
1079{
1080	return bitmap_weight(ccn->dt.pmu_counters_mask,
1081			     CCN_NUM_PMU_EVENT_COUNTERS + 1);
1082}
1083
1084static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
1085{
1086	int err;
1087	struct hw_perf_event *hw = &event->hw;
1088	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
1089
1090	err = arm_ccn_pmu_event_alloc(event);
1091	if (err)
1092		return err;
1093
1094	/*
1095	 * Pin the timer, so that the overflows are handled by the chosen
1096	 * event->cpu (this is the same one as presented in "cpumask"
1097	 * attribute).
1098	 */
1099	if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1)
1100		hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
1101			      HRTIMER_MODE_REL_PINNED);
1102
1103	arm_ccn_pmu_event_config(event);
1104
1105	hw->state = PERF_HES_STOPPED;
1106
1107	if (flags & PERF_EF_START)
1108		arm_ccn_pmu_event_start(event, PERF_EF_UPDATE);
1109
1110	return 0;
1111}
1112
1113static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
1114{
1115	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
1116
1117	arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
1118
1119	arm_ccn_pmu_event_release(event);
1120
1121	if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0)
1122		hrtimer_cancel(&ccn->dt.hrtimer);
1123}
1124
1125static void arm_ccn_pmu_event_read(struct perf_event *event)
1126{
1127	arm_ccn_pmu_event_update(event);
1128}
1129
1130static void arm_ccn_pmu_enable(struct pmu *pmu)
1131{
1132	struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
1133
1134	u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
1135	val |= CCN_DT_PMCR__PMU_EN;
1136	writel(val, ccn->dt.base + CCN_DT_PMCR);
1137}
1138
1139static void arm_ccn_pmu_disable(struct pmu *pmu)
1140{
1141	struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
1142
1143	u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
1144	val &= ~CCN_DT_PMCR__PMU_EN;
1145	writel(val, ccn->dt.base + CCN_DT_PMCR);
1146}
1147
1148static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
1149{
1150	u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
1151	int idx;
1152
1153	if (!pmovsr)
1154		return IRQ_NONE;
1155
1156	writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR);
1157
1158	BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS);
1159
1160	for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) {
1161		struct perf_event *event = dt->pmu_counters[idx].event;
1162		int overflowed = pmovsr & BIT(idx);
1163
1164		WARN_ON_ONCE(overflowed && !event &&
1165				idx != CCN_IDX_PMU_CYCLE_COUNTER);
1166
1167		if (!event || !overflowed)
1168			continue;
1169
1170		arm_ccn_pmu_event_update(event);
1171	}
1172
1173	return IRQ_HANDLED;
1174}
1175
1176static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
1177{
1178	struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt,
1179			hrtimer);
1180	unsigned long flags;
1181
1182	local_irq_save(flags);
1183	arm_ccn_pmu_overflow_handler(dt);
1184	local_irq_restore(flags);
1185
1186	hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period());
1187	return HRTIMER_RESTART;
1188}
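/*
 * Note (added for clarity): when no overflow interrupt is available, this
 * timer emulates it by calling the overflow handler above, which polls
 * CCN_DT_PMOVSR, at the pmu_poll_period_us rate.
 */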
1189
1190
1191static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
1192{
1193	struct arm_ccn_dt *dt = hlist_entry_safe(node, struct arm_ccn_dt, node);
1194	struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
1195	unsigned int target;
1196
1197	if (cpu != dt->cpu)
1198		return 0;
1199	target = cpumask_any_but(cpu_online_mask, cpu);
1200	if (target >= nr_cpu_ids)
1201		return 0;
1202	perf_pmu_migrate_context(&dt->pmu, cpu, target);
1203	dt->cpu = target;
1204	if (ccn->irq)
1205		WARN_ON(irq_set_affinity(ccn->irq, cpumask_of(dt->cpu)));
1206	return 0;
1207}
1208
1209static DEFINE_IDA(arm_ccn_pmu_ida);
1210
1211static int arm_ccn_pmu_init(struct arm_ccn *ccn)
1212{
1213	int i;
1214	char *name;
1215	int err;
1216
1217	/* Initialize DT subsystem */
1218	ccn->dt.base = ccn->base + CCN_REGION_SIZE;
1219	spin_lock_init(&ccn->dt.config_lock);
1220	writel(CCN_DT_PMOVSR_CLR__MASK, ccn->dt.base + CCN_DT_PMOVSR_CLR);
1221	writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL);
1222	writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN,
1223			ccn->dt.base + CCN_DT_PMCR);
1224	writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
1225	for (i = 0; i < ccn->num_xps; i++) {
1226		writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG);
1227		writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
1228				CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) |
1229				(CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
1230				CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) |
1231				CCN_XP_DT_CONTROL__DT_ENABLE,
1232				ccn->xp[i].base + CCN_XP_DT_CONTROL);
1233	}
1234	ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0;
1235	ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0;
1236	ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0;
1237	ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0;
1238	ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0;
1239	ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15);
1240	ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0;
1241	ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);
1242
 1243	/* Get a convenient /sys/bus/event_source/devices/ name */
1244	ccn->dt.id = ida_alloc(&arm_ccn_pmu_ida, GFP_KERNEL);
1245	if (ccn->dt.id == 0) {
1246		name = "ccn";
1247	} else {
1248		name = devm_kasprintf(ccn->dev, GFP_KERNEL, "ccn_%d",
1249				      ccn->dt.id);
1250		if (!name) {
1251			err = -ENOMEM;
1252			goto error_choose_name;
1253		}
1254	}
1255
1256	/* Perf driver registration */
1257	ccn->dt.pmu = (struct pmu) {
1258		.module = THIS_MODULE,
1259		.parent = ccn->dev,
1260		.attr_groups = arm_ccn_pmu_attr_groups,
1261		.task_ctx_nr = perf_invalid_context,
1262		.event_init = arm_ccn_pmu_event_init,
1263		.add = arm_ccn_pmu_event_add,
1264		.del = arm_ccn_pmu_event_del,
1265		.start = arm_ccn_pmu_event_start,
1266		.stop = arm_ccn_pmu_event_stop,
1267		.read = arm_ccn_pmu_event_read,
1268		.pmu_enable = arm_ccn_pmu_enable,
1269		.pmu_disable = arm_ccn_pmu_disable,
1270		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
1271	};
1272
1273	/* No overflow interrupt? Have to use a timer instead. */
1274	if (!ccn->irq) {
1275		dev_info(ccn->dev, "No access to interrupts, using timer.\n");
1276		hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
1277				HRTIMER_MODE_REL);
1278		ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
1279	}
1280
1281	/* Pick one CPU which we will use to collect data from CCN... */
1282	ccn->dt.cpu = raw_smp_processor_id();
1283
1284	/* Also make sure that the overflow interrupt is handled by this CPU */
1285	if (ccn->irq) {
1286		err = irq_set_affinity(ccn->irq, cpumask_of(ccn->dt.cpu));
1287		if (err) {
1288			dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
1289			goto error_set_affinity;
1290		}
1291	}
1292
1293	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
1294					 &ccn->dt.node);
1295
1296	err = perf_pmu_register(&ccn->dt.pmu, name, -1);
1297	if (err)
1298		goto error_pmu_register;
1299
1300	return 0;
1301
1302error_pmu_register:
1303	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
1304					    &ccn->dt.node);
1305error_set_affinity:
1306error_choose_name:
1307	ida_free(&arm_ccn_pmu_ida, ccn->dt.id);
1308	for (i = 0; i < ccn->num_xps; i++)
1309		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
1310	writel(0, ccn->dt.base + CCN_DT_PMCR);
1311	return err;
1312}
1313
1314static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
1315{
1316	int i;
1317
1318	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
1319					    &ccn->dt.node);
1320	for (i = 0; i < ccn->num_xps; i++)
1321		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
1322	writel(0, ccn->dt.base + CCN_DT_PMCR);
1323	perf_pmu_unregister(&ccn->dt.pmu);
1324	ida_free(&arm_ccn_pmu_ida, ccn->dt.id);
1325}
1326
1327static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
1328		int (*callback)(struct arm_ccn *ccn, int region,
1329		void __iomem *base, u32 type, u32 id))
1330{
1331	int region;
1332
1333	for (region = 0; region < CCN_NUM_REGIONS; region++) {
1334		u32 val, type, id;
1335		void __iomem *base;
1336		int err;
1337
1338		val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 +
1339				4 * (region / 32));
1340		if (!(val & (1 << (region % 32))))
1341			continue;
1342
1343		base = ccn->base + region * CCN_REGION_SIZE;
1344		val = readl(base + CCN_ALL_OLY_ID);
1345		type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) &
1346				CCN_ALL_OLY_ID__OLY_ID__MASK;
1347		id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) &
1348				CCN_ALL_OLY_ID__NODE_ID__MASK;
1349
1350		err = callback(ccn, region, base, type, id);
1351		if (err)
1352			return err;
1353	}
1354
1355	return 0;
1356}
1357
1358static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region,
1359		void __iomem *base, u32 type, u32 id)
1360{
1361
1362	if (type == CCN_TYPE_XP && id >= ccn->num_xps)
1363		ccn->num_xps = id + 1;
1364	else if (id >= ccn->num_nodes)
1365		ccn->num_nodes = id + 1;
1366
1367	return 0;
1368}
1369
1370static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
1371		void __iomem *base, u32 type, u32 id)
1372{
1373	struct arm_ccn_component *component;
1374
1375	dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type);
1376
1377	switch (type) {
1378	case CCN_TYPE_MN:
1379		ccn->mn_id = id;
1380		return 0;
1381	case CCN_TYPE_DT:
1382		return 0;
1383	case CCN_TYPE_XP:
1384		component = &ccn->xp[id];
1385		break;
1386	case CCN_TYPE_SBSX:
1387		ccn->sbsx_present = 1;
1388		component = &ccn->node[id];
1389		break;
1390	case CCN_TYPE_SBAS:
1391		ccn->sbas_present = 1;
1392		fallthrough;
1393	default:
1394		component = &ccn->node[id];
1395		break;
1396	}
1397
1398	component->base = base;
1399	component->type = type;
1400
1401	return 0;
1402}
1403
1404
1405static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn,
1406		const u32 *err_sig_val)
1407{
 1408	/* This should really be handled by firmware... */
1409	dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n",
1410			err_sig_val[5], err_sig_val[4], err_sig_val[3],
1411			err_sig_val[2], err_sig_val[1], err_sig_val[0]);
1412	dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n");
1413	writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE,
1414			ccn->base + CCN_MN_ERRINT_STATUS);
1415
1416	return IRQ_HANDLED;
1417}
1418
1419
1420static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id)
1421{
1422	irqreturn_t res = IRQ_NONE;
1423	struct arm_ccn *ccn = dev_id;
1424	u32 err_sig_val[6];
1425	u32 err_or;
1426	int i;
1427
1428	/* PMU overflow is a special case */
1429	err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0);
1430	if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) {
1431		err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT;
1432		res = arm_ccn_pmu_overflow_handler(&ccn->dt);
1433	}
1434
1435	/* Have to read all err_sig_vals to clear them */
1436	for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) {
1437		err_sig_val[i] = readl(ccn->base +
1438				CCN_MN_ERR_SIG_VAL_63_0 + i * 4);
1439		err_or |= err_sig_val[i];
1440	}
1441	if (err_or)
1442		res |= arm_ccn_error_handler(ccn, err_sig_val);
1443
1444	if (res != IRQ_NONE)
1445		writel(CCN_MN_ERRINT_STATUS__INTREQ__DESSERT,
1446				ccn->base + CCN_MN_ERRINT_STATUS);
1447
1448	return res;
1449}
1450
1451
1452static int arm_ccn_probe(struct platform_device *pdev)
1453{
1454	struct arm_ccn *ccn;
1455	int irq;
1456	int err;
1457
1458	ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL);
1459	if (!ccn)
1460		return -ENOMEM;
1461	ccn->dev = &pdev->dev;
1462	platform_set_drvdata(pdev, ccn);
1463
1464	ccn->base = devm_platform_ioremap_resource(pdev, 0);
1465	if (IS_ERR(ccn->base))
1466		return PTR_ERR(ccn->base);
1467
1468	irq = platform_get_irq(pdev, 0);
1469	if (irq < 0)
1470		return irq;
1471
1472	/* Check if we can use the interrupt */
1473	writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE,
1474			ccn->base + CCN_MN_ERRINT_STATUS);
1475	if (readl(ccn->base + CCN_MN_ERRINT_STATUS) &
1476			CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) {
1477		/* Can set 'disable' bits, so can acknowledge interrupts */
1478		writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
1479				ccn->base + CCN_MN_ERRINT_STATUS);
1480		err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler,
1481				       IRQF_NOBALANCING | IRQF_NO_THREAD,
1482				       dev_name(ccn->dev), ccn);
1483		if (err)
1484			return err;
1485
1486		ccn->irq = irq;
1487	}
1488
1489
1490	/* Build topology */
1491
1492	err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num);
1493	if (err)
1494		return err;
1495
1496	ccn->node = devm_kcalloc(ccn->dev, ccn->num_nodes, sizeof(*ccn->node),
1497				 GFP_KERNEL);
1498	ccn->xp = devm_kcalloc(ccn->dev, ccn->num_xps, sizeof(*ccn->node),
1499			       GFP_KERNEL);
1500	if (!ccn->node || !ccn->xp)
1501		return -ENOMEM;
1502
1503	err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes);
1504	if (err)
1505		return err;
1506
1507	return arm_ccn_pmu_init(ccn);
1508}
1509
1510static void arm_ccn_remove(struct platform_device *pdev)
1511{
1512	struct arm_ccn *ccn = platform_get_drvdata(pdev);
1513
1514	arm_ccn_pmu_cleanup(ccn);
1515}
1516
1517static const struct of_device_id arm_ccn_match[] = {
1518	{ .compatible = "arm,ccn-502", },
1519	{ .compatible = "arm,ccn-504", },
1520	{ .compatible = "arm,ccn-512", },
1521	{},
1522};
1523MODULE_DEVICE_TABLE(of, arm_ccn_match);
1524
1525static struct platform_driver arm_ccn_driver = {
1526	.driver = {
1527		.name = "arm-ccn",
1528		.of_match_table = arm_ccn_match,
1529		.suppress_bind_attrs = true,
1530	},
1531	.probe = arm_ccn_probe,
1532	.remove = arm_ccn_remove,
1533};
1534
1535static int __init arm_ccn_init(void)
1536{
1537	int i, ret;
1538
1539	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE,
1540				      "perf/arm/ccn:online", NULL,
1541				      arm_ccn_pmu_offline_cpu);
1542	if (ret)
1543		return ret;
1544
1545	for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
1546		arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
1547
1548	ret = platform_driver_register(&arm_ccn_driver);
1549	if (ret)
1550		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
1551	return ret;
1552}
1553
1554static void __exit arm_ccn_exit(void)
1555{
1556	platform_driver_unregister(&arm_ccn_driver);
1557	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
1558}
1559
1560module_init(arm_ccn_init);
1561module_exit(arm_ccn_exit);
1562
1563MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
1564MODULE_DESCRIPTION("ARM CCN (Cache Coherent Network) Performance Monitor Driver");
1565MODULE_LICENSE("GPL v2");
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *
   4 * Copyright (C) 2014 ARM Limited
   5 */
   6
   7#include <linux/ctype.h>
   8#include <linux/hrtimer.h>
   9#include <linux/idr.h>
  10#include <linux/interrupt.h>
  11#include <linux/io.h>
  12#include <linux/module.h>
  13#include <linux/mod_devicetable.h>
  14#include <linux/perf_event.h>
  15#include <linux/platform_device.h>
  16#include <linux/slab.h>
  17
  18#define CCN_NUM_XP_PORTS 2
  19#define CCN_NUM_VCS 4
  20#define CCN_NUM_REGIONS	256
  21#define CCN_REGION_SIZE	0x10000
  22
  23#define CCN_ALL_OLY_ID			0xff00
  24#define CCN_ALL_OLY_ID__OLY_ID__SHIFT			0
  25#define CCN_ALL_OLY_ID__OLY_ID__MASK			0x1f
  26#define CCN_ALL_OLY_ID__NODE_ID__SHIFT			8
  27#define CCN_ALL_OLY_ID__NODE_ID__MASK			0x3f
  28
  29#define CCN_MN_ERRINT_STATUS		0x0008
  30#define CCN_MN_ERRINT_STATUS__INTREQ__DESSERT		0x11
  31#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE	0x02
  32#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED	0x20
  33#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE	0x22
  34#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE	0x04
  35#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED	0x40
  36#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE	0x44
  37#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE	0x08
  38#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED	0x80
  39#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE	0x88
  40#define CCN_MN_OLY_COMP_LIST_63_0	0x01e0
  41#define CCN_MN_ERR_SIG_VAL_63_0		0x0300
  42#define CCN_MN_ERR_SIG_VAL_63_0__DT			(1 << 1)
  43
  44#define CCN_DT_ACTIVE_DSM		0x0000
  45#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n)		((n) * 8)
  46#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK			0xff
  47#define CCN_DT_CTL			0x0028
  48#define CCN_DT_CTL__DT_EN				(1 << 0)
  49#define CCN_DT_PMEVCNT(n)		(0x0100 + (n) * 0x8)
  50#define CCN_DT_PMCCNTR			0x0140
  51#define CCN_DT_PMCCNTRSR		0x0190
  52#define CCN_DT_PMOVSR			0x0198
  53#define CCN_DT_PMOVSR_CLR		0x01a0
  54#define CCN_DT_PMOVSR_CLR__MASK				0x1f
  55#define CCN_DT_PMCR			0x01a8
  56#define CCN_DT_PMCR__OVFL_INTR_EN			(1 << 6)
  57#define CCN_DT_PMCR__PMU_EN				(1 << 0)
  58#define CCN_DT_PMSR			0x01b0
  59#define CCN_DT_PMSR_REQ			0x01b8
  60#define CCN_DT_PMSR_CLR			0x01c0
  61
  62#define CCN_HNF_PMU_EVENT_SEL		0x0600
  63#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n)		((n) * 4)
  64#define CCN_HNF_PMU_EVENT_SEL__ID__MASK			0xf
  65
  66#define CCN_XP_DT_CONFIG		0x0300
  67#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n)		((n) * 4)
  68#define CCN_XP_DT_CONFIG__DT_CFG__MASK			0xf
  69#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH		0x0
  70#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1	0x1
  71#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n)		(0x2 + (n))
  72#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n)	(0x4 + (n))
  73#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n))
  74#define CCN_XP_DT_INTERFACE_SEL		0x0308
  75#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n)	(0 + (n) * 8)
  76#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK	0x1
  77#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n)	(1 + (n) * 8)
  78#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK	0x1
  79#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n)	(2 + (n) * 8)
  80#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK	0x3
  81#define CCN_XP_DT_CMP_VAL_L(n)		(0x0310 + (n) * 0x40)
  82#define CCN_XP_DT_CMP_VAL_H(n)		(0x0318 + (n) * 0x40)
  83#define CCN_XP_DT_CMP_MASK_L(n)		(0x0320 + (n) * 0x40)
  84#define CCN_XP_DT_CMP_MASK_H(n)		(0x0328 + (n) * 0x40)
  85#define CCN_XP_DT_CONTROL		0x0370
  86#define CCN_XP_DT_CONTROL__DT_ENABLE			(1 << 0)
  87#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n)		(12 + (n) * 4)
  88#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK		0xf
  89#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS		0xf
  90#define CCN_XP_PMU_EVENT_SEL		0x0600
  91#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n)		((n) * 7)
  92#define CCN_XP_PMU_EVENT_SEL__ID__MASK			0x3f
  93
  94#define CCN_SBAS_PMU_EVENT_SEL		0x0600
  95#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n)		((n) * 4)
  96#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK		0xf
  97
  98#define CCN_RNI_PMU_EVENT_SEL		0x0600
  99#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n)		((n) * 4)
 100#define CCN_RNI_PMU_EVENT_SEL__ID__MASK			0xf
 101
 102#define CCN_TYPE_MN	0x01
 103#define CCN_TYPE_DT	0x02
 104#define CCN_TYPE_HNF	0x04
 105#define CCN_TYPE_HNI	0x05
 106#define CCN_TYPE_XP	0x08
 107#define CCN_TYPE_SBSX	0x0c
 108#define CCN_TYPE_SBAS	0x10
 109#define CCN_TYPE_RNI_1P	0x14
 110#define CCN_TYPE_RNI_2P	0x15
 111#define CCN_TYPE_RNI_3P	0x16
 112#define CCN_TYPE_RND_1P	0x18 /* RN-D = RN-I + DVM */
 113#define CCN_TYPE_RND_2P	0x19
 114#define CCN_TYPE_RND_3P	0x1a
 115#define CCN_TYPE_CYCLES	0xff /* Pseudotype */
 116
 117#define CCN_EVENT_WATCHPOINT 0xfe /* Pseudoevent */
 118
 119#define CCN_NUM_PMU_EVENTS		4
 120#define CCN_NUM_XP_WATCHPOINTS		2 /* See DT.dbg_id.num_watchpoints */
 121#define CCN_NUM_PMU_EVENT_COUNTERS	8 /* See DT.dbg_id.num_pmucntr */
 122#define CCN_IDX_PMU_CYCLE_COUNTER	CCN_NUM_PMU_EVENT_COUNTERS
 123
 124#define CCN_NUM_PREDEFINED_MASKS	4
 125#define CCN_IDX_MASK_ANY		(CCN_NUM_PMU_EVENT_COUNTERS + 0)
 126#define CCN_IDX_MASK_EXACT		(CCN_NUM_PMU_EVENT_COUNTERS + 1)
 127#define CCN_IDX_MASK_ORDER		(CCN_NUM_PMU_EVENT_COUNTERS + 2)
 128#define CCN_IDX_MASK_OPCODE		(CCN_NUM_PMU_EVENT_COUNTERS + 3)
 129
 130struct arm_ccn_component {
 131	void __iomem *base;
 132	u32 type;
 133
 134	DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS);
 135	union {
 136		struct {
 137			DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS);
 138		} xp;
 139	};
 140};
 141
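     /*
      * A pmu pointer handed in by perf is the one embedded in arm_ccn_dt;
      * two container_of() steps walk back out to the containing arm_ccn.
      */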
 142#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \
 143	struct arm_ccn_dt, pmu), struct arm_ccn, dt)
 144
 145struct arm_ccn_dt {
 146	int id;
 147	void __iomem *base;
 148
 149	spinlock_t config_lock;
 150
 151	DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1);
 152	struct {
 153		struct arm_ccn_component *source;
 154		struct perf_event *event;
 155	} pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1];
 156
 157	struct {
 158	       u64 l, h;
 159	} cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS];
 160
 161	struct hrtimer hrtimer;
 162
 163	unsigned int cpu;
 164	struct hlist_node node;
 165
 166	struct pmu pmu;
 167};
 168
 169struct arm_ccn {
 170	struct device *dev;
 171	void __iomem *base;
 172	unsigned int irq;
 173
 174	unsigned sbas_present:1;
 175	unsigned sbsx_present:1;
 176
 177	int num_nodes;
 178	struct arm_ccn_component *node;
 179
 180	int num_xps;
 181	struct arm_ccn_component *xp;
 182
 183	struct arm_ccn_dt dt;
 184	int mn_id;
 185};
 186
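     /*
      * Each node is connected to one of the two ports of a crosspoint
      * (XP), so node N sits on port (N % CCN_NUM_XP_PORTS) of
      * XP (N / CCN_NUM_XP_PORTS).
      */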
 187static int arm_ccn_node_to_xp(int node)
 188{
 189	return node / CCN_NUM_XP_PORTS;
 190}
 191
 192static int arm_ccn_node_to_xp_port(int node)
 193{
 194	return node % CCN_NUM_XP_PORTS;
 195}
 196
 197
 198/*
 199 * Bit shifts and masks in these defines must be kept in sync with
 200 * arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below!
 201 */
 202#define CCN_CONFIG_NODE(_config)	(((_config) >> 0) & 0xff)
 203#define CCN_CONFIG_XP(_config)		(((_config) >> 0) & 0xff)
 204#define CCN_CONFIG_TYPE(_config)	(((_config) >> 8) & 0xff)
 205#define CCN_CONFIG_EVENT(_config)	(((_config) >> 16) & 0xff)
 206#define CCN_CONFIG_PORT(_config)	(((_config) >> 24) & 0x3)
 207#define CCN_CONFIG_BUS(_config)		(((_config) >> 24) & 0x3)
 208#define CCN_CONFIG_VC(_config)		(((_config) >> 26) & 0x7)
 209#define CCN_CONFIG_DIR(_config)		(((_config) >> 29) & 0x1)
 210#define CCN_CONFIG_MASK(_config)	(((_config) >> 30) & 0xf)
 211
 212static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
 213{
 214	*config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24));
 215	*config |= (node_xp << 0) | (type << 8) | (port << 24);
 216}
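     /*
      * Worked example (illustrative values): event 0x4 (xp_valid_flit)
      * on XP 3, bus 1, vc 2 encodes as config = (3 << 0) | (0x08 << 8) |
      * (0x4 << 16) | (1 << 24) | (2 << 26) = 0x09040803, the same value
      * perf derives from "ccn/xp_valid_flit,xp=3,bus=1,vc=2/".
      */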
 217
 218static ssize_t arm_ccn_pmu_format_show(struct device *dev,
 219		struct device_attribute *attr, char *buf)
 220{
 221	struct dev_ext_attribute *ea = container_of(attr,
 222			struct dev_ext_attribute, attr);
 223
 224	return sysfs_emit(buf, "%s\n", (char *)ea->var);
 225}
 226
 227#define CCN_FORMAT_ATTR(_name, _config) \
 228	struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \
 229			{ __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \
 230			NULL), _config }
 231
 232static CCN_FORMAT_ATTR(node, "config:0-7");
 233static CCN_FORMAT_ATTR(xp, "config:0-7");
 234static CCN_FORMAT_ATTR(type, "config:8-15");
 235static CCN_FORMAT_ATTR(event, "config:16-23");
 236static CCN_FORMAT_ATTR(port, "config:24-25");
 237static CCN_FORMAT_ATTR(bus, "config:24-25");
 238static CCN_FORMAT_ATTR(vc, "config:26-28");
 239static CCN_FORMAT_ATTR(dir, "config:29-29");
 240static CCN_FORMAT_ATTR(mask, "config:30-33");
 241static CCN_FORMAT_ATTR(cmp_l, "config1:0-62");
 242static CCN_FORMAT_ATTR(cmp_h, "config2:0-59");
 243
 244static struct attribute *arm_ccn_pmu_format_attrs[] = {
 245	&arm_ccn_pmu_format_attr_node.attr.attr,
 246	&arm_ccn_pmu_format_attr_xp.attr.attr,
 247	&arm_ccn_pmu_format_attr_type.attr.attr,
 248	&arm_ccn_pmu_format_attr_event.attr.attr,
 249	&arm_ccn_pmu_format_attr_port.attr.attr,
 250	&arm_ccn_pmu_format_attr_bus.attr.attr,
 251	&arm_ccn_pmu_format_attr_vc.attr.attr,
 252	&arm_ccn_pmu_format_attr_dir.attr.attr,
 253	&arm_ccn_pmu_format_attr_mask.attr.attr,
 254	&arm_ccn_pmu_format_attr_cmp_l.attr.attr,
 255	&arm_ccn_pmu_format_attr_cmp_h.attr.attr,
 256	NULL
 257};
 258
 259static const struct attribute_group arm_ccn_pmu_format_attr_group = {
 260	.name = "format",
 261	.attrs = arm_ccn_pmu_format_attrs,
 262};
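     /*
      * These appear as, e.g., /sys/bus/event_source/devices/ccn/format/xp
      * containing "config:0-7", which is how the perf tool learns to encode
      * an "xp=N" token into the right bits of attr.config.
      */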
 263
 264
 265struct arm_ccn_pmu_event {
 266	struct device_attribute attr;
 267	u32 type;
 268	u32 event;
 269	int num_ports;
 270	int num_vcs;
 271	const char *def;
 272	int mask;
 273};
 274
 275#define CCN_EVENT_ATTR(_name) \
 276	__ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL)
 277
 278/*
  279 * Events defined in the TRM for MN, HN-I and SBSX are actually watchpoints
  280 * set on ports of the XP they are connected to. For usability's sake they are
 281 * explicitly defined here (and translated into a relevant watchpoint in
 282 * arm_ccn_pmu_event_init()) so the user can easily request them without deep
 283 * knowledge of the flit format.
 284 */
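     /*
      * For example, "mn_dvmop" below (dir=1,vc=0,cmp_h=0x2800 with the
      * OPCODE mask) is armed as a TX watchpoint on the XP port the MN is
      * attached to, via arm_ccn_node_to_xp() and arm_ccn_node_to_xp_port().
      */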
 285
 286#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \
 287		.type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \
 288		.num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \
 289		.def = _def, .mask = _mask, }
 290
 291#define CCN_EVENT_HNI(_name, _def, _mask) { \
 292		.attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \
 293		.event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
 294		.num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }
 295
 296#define CCN_EVENT_SBSX(_name, _def, _mask) { \
 297		.attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \
 298		.event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
 299		.num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }
 300
 301#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \
 302		.type = CCN_TYPE_HNF, .event = _event, }
 303
 304#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \
 305		.type = CCN_TYPE_XP, .event = _event, \
 306		.num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, }
 307
 308/*
 309 * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type ID depending
 310 * on configuration. One of them is picked to represent the whole group,
 311 * as they all share the same event types.
 312 */
 313#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \
 314		.type = CCN_TYPE_RNI_3P, .event = _event, }
 315
 316#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \
 317		.type = CCN_TYPE_SBAS, .event = _event, }
 318
 319#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \
 320		.type = CCN_TYPE_CYCLES }
 321
 322
 323static ssize_t arm_ccn_pmu_event_show(struct device *dev,
 324		struct device_attribute *attr, char *buf)
 325{
 326	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
 327	struct arm_ccn_pmu_event *event = container_of(attr,
 328			struct arm_ccn_pmu_event, attr);
 329	int res;
 330
 331	res = sysfs_emit(buf, "type=0x%x", event->type);
 332	if (event->event)
 333		res += sysfs_emit_at(buf, res, ",event=0x%x", event->event);
 334	if (event->def)
 335		res += sysfs_emit_at(buf, res, ",%s", event->def);
 336	if (event->mask)
 337		res += sysfs_emit_at(buf, res, ",mask=0x%x", event->mask);
 338
 339	/* Arguments required by an event */
 340	switch (event->type) {
 341	case CCN_TYPE_CYCLES:
 342		break;
 343	case CCN_TYPE_XP:
 344		res += sysfs_emit_at(buf, res, ",xp=?,vc=?");
 345		if (event->event == CCN_EVENT_WATCHPOINT)
 346			res += sysfs_emit_at(buf, res,
 347					",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?");
 348		else
 349			res += sysfs_emit_at(buf, res, ",bus=?");
 350
 351		break;
 352	case CCN_TYPE_MN:
 353		res += sysfs_emit_at(buf, res, ",node=%d", ccn->mn_id);
 354		break;
 355	default:
 356		res += sysfs_emit_at(buf, res, ",node=?");
 357		break;
 358	}
 359
 360	res += sysfs_emit_at(buf, res, "\n");
 361
 362	return res;
 363}
 364
 365static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
 366				     struct attribute *attr, int index)
 367{
 368	struct device *dev = kobj_to_dev(kobj);
 369	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
 370	struct device_attribute *dev_attr = container_of(attr,
 371			struct device_attribute, attr);
 372	struct arm_ccn_pmu_event *event = container_of(dev_attr,
 373			struct arm_ccn_pmu_event, attr);
 374
 375	if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present)
 376		return 0;
 377	if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present)
 378		return 0;
 379
 380	return attr->mode;
 381}
 382
 383static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
 384	CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
 385	CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
 386	CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
 387	CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
 388	CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
 389	CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
 390	CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
 391	CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
 392			CCN_IDX_MASK_ORDER),
 393	CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
 394	CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
 395	CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
 396	CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
 397	CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
 398			CCN_IDX_MASK_ORDER),
 399	CCN_EVENT_HNF(cache_miss, 0x1),
 400	CCN_EVENT_HNF(l3_sf_cache_access, 0x02),
 401	CCN_EVENT_HNF(cache_fill, 0x3),
 402	CCN_EVENT_HNF(pocq_retry, 0x4),
 403	CCN_EVENT_HNF(pocq_reqs_recvd, 0x5),
 404	CCN_EVENT_HNF(sf_hit, 0x6),
 405	CCN_EVENT_HNF(sf_evictions, 0x7),
 406	CCN_EVENT_HNF(snoops_sent, 0x8),
 407	CCN_EVENT_HNF(snoops_broadcast, 0x9),
 408	CCN_EVENT_HNF(l3_eviction, 0xa),
 409	CCN_EVENT_HNF(l3_fill_invalid_way, 0xb),
 410	CCN_EVENT_HNF(mc_retries, 0xc),
 411	CCN_EVENT_HNF(mc_reqs, 0xd),
 412	CCN_EVENT_HNF(qos_hh_retry, 0xe),
 413	CCN_EVENT_RNI(rdata_beats_p0, 0x1),
 414	CCN_EVENT_RNI(rdata_beats_p1, 0x2),
 415	CCN_EVENT_RNI(rdata_beats_p2, 0x3),
 416	CCN_EVENT_RNI(rxdat_flits, 0x4),
 417	CCN_EVENT_RNI(txdat_flits, 0x5),
 418	CCN_EVENT_RNI(txreq_flits, 0x6),
 419	CCN_EVENT_RNI(txreq_flits_retried, 0x7),
 420	CCN_EVENT_RNI(rrt_full, 0x8),
 421	CCN_EVENT_RNI(wrt_full, 0x9),
 422	CCN_EVENT_RNI(txreq_flits_replayed, 0xa),
 423	CCN_EVENT_XP(upload_starvation, 0x1),
 424	CCN_EVENT_XP(download_starvation, 0x2),
 425	CCN_EVENT_XP(respin, 0x3),
 426	CCN_EVENT_XP(valid_flit, 0x4),
 427	CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT),
 428	CCN_EVENT_SBAS(rdata_beats_p0, 0x1),
 429	CCN_EVENT_SBAS(rxdat_flits, 0x4),
 430	CCN_EVENT_SBAS(txdat_flits, 0x5),
 431	CCN_EVENT_SBAS(txreq_flits, 0x6),
 432	CCN_EVENT_SBAS(txreq_flits_retried, 0x7),
 433	CCN_EVENT_SBAS(rrt_full, 0x8),
 434	CCN_EVENT_SBAS(wrt_full, 0x9),
 435	CCN_EVENT_SBAS(txreq_flits_replayed, 0xa),
 436	CCN_EVENT_CYCLES(cycles),
 437};
 438
 439/* Populated in arm_ccn_init() */
 440static struct attribute
 441		*arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];
 442
 443static const struct attribute_group arm_ccn_pmu_events_attr_group = {
 444	.name = "events",
 445	.is_visible = arm_ccn_pmu_events_is_visible,
 446	.attrs = arm_ccn_pmu_events_attrs,
 447};
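     /*
      * Together with the "format" group above this allows, e.g.:
      *   perf stat -e ccn/hnf_cache_miss,node=4/ -a sleep 1
      * (node number illustrative); the required "node=?"/"xp=?" arguments
      * are advertised by arm_ccn_pmu_event_show().
      */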
 448
 449
 450static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name)
 451{
 452	unsigned long i;
 453
 454	if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1]))
 455		return NULL;
 456	i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a';
 457
 458	switch (name[1]) {
 459	case 'l':
 460		return &ccn->dt.cmp_mask[i].l;
 461	case 'h':
 462		return &ccn->dt.cmp_mask[i].h;
 463	default:
 464		return NULL;
 465	}
 466}
 467
 468static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev,
 469		struct device_attribute *attr, char *buf)
 470{
 471	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
 472	u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
 473
 474	return mask ? sysfs_emit(buf, "0x%016llx\n", *mask) : -EINVAL;
 475}
 476
 477static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev,
 478		struct device_attribute *attr, const char *buf, size_t count)
 479{
 480	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
 481	u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
 482	int err = -EINVAL;
 483
 484	if (mask)
 485		err = kstrtoull(buf, 0, mask);
 486
 487	return err ? err : count;
 488}
 489
 490#define CCN_CMP_MASK_ATTR(_name) \
 491	struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
 492			__ATTR(_name, S_IRUGO | S_IWUSR, \
 493			arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store)
 494
 495#define CCN_CMP_MASK_ATTR_RO(_name) \
 496	struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
 497			__ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL)
 498
 499static CCN_CMP_MASK_ATTR(0l);
 500static CCN_CMP_MASK_ATTR(0h);
 501static CCN_CMP_MASK_ATTR(1l);
 502static CCN_CMP_MASK_ATTR(1h);
 503static CCN_CMP_MASK_ATTR(2l);
 504static CCN_CMP_MASK_ATTR(2h);
 505static CCN_CMP_MASK_ATTR(3l);
 506static CCN_CMP_MASK_ATTR(3h);
 507static CCN_CMP_MASK_ATTR(4l);
 508static CCN_CMP_MASK_ATTR(4h);
 509static CCN_CMP_MASK_ATTR(5l);
 510static CCN_CMP_MASK_ATTR(5h);
 511static CCN_CMP_MASK_ATTR(6l);
 512static CCN_CMP_MASK_ATTR(6h);
 513static CCN_CMP_MASK_ATTR(7l);
 514static CCN_CMP_MASK_ATTR(7h);
 515static CCN_CMP_MASK_ATTR_RO(8l);
 516static CCN_CMP_MASK_ATTR_RO(8h);
 517static CCN_CMP_MASK_ATTR_RO(9l);
 518static CCN_CMP_MASK_ATTR_RO(9h);
 519static CCN_CMP_MASK_ATTR_RO(al);
 520static CCN_CMP_MASK_ATTR_RO(ah);
 521static CCN_CMP_MASK_ATTR_RO(bl);
 522static CCN_CMP_MASK_ATTR_RO(bh);
 523
 524static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
 525	&arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr,
 526	&arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr,
 527	&arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr,
 528	&arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr,
 529	&arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr,
 530	&arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr,
 531	&arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr,
 532	&arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr,
 533	&arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr,
 534	&arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr,
 535	&arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr,
 536	&arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr,
 537	NULL
 538};
 539
 540static const struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
 541	.name = "cmp_mask",
 542	.attrs = arm_ccn_pmu_cmp_mask_attrs,
 543};
 544
 545static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev,
 546				     struct device_attribute *attr, char *buf)
 547{
 548	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
 549
 550	return cpumap_print_to_pagebuf(true, buf, cpumask_of(ccn->dt.cpu));
 551}
 552
 553static struct device_attribute arm_ccn_pmu_cpumask_attr =
 554		__ATTR(cpumask, S_IRUGO, arm_ccn_pmu_cpumask_show, NULL);
 555
 556static struct attribute *arm_ccn_pmu_cpumask_attrs[] = {
 557	&arm_ccn_pmu_cpumask_attr.attr,
 558	NULL,
 559};
 560
 561static const struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
 562	.attrs = arm_ccn_pmu_cpumask_attrs,
 563};
 564
 565/*
  566 * Default poll period is 10ms, which is more than frequent enough:
  567 * even in the worst case scenario (an event every cycle) on a 1GHz
  568 * clocked bus, the smallest, 32-bit counter only overflows after
  569 * 2^32 cycles / 10^9 Hz, i.e. more than 4s.
 570 */
 571static unsigned int arm_ccn_pmu_poll_period_us = 10000;
 572module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint,
 573		S_IRUGO | S_IWUSR);
 574
 575static ktime_t arm_ccn_pmu_timer_period(void)
 576{
 577	return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000);
 578}
 579
 580
 581static const struct attribute_group *arm_ccn_pmu_attr_groups[] = {
 582	&arm_ccn_pmu_events_attr_group,
 583	&arm_ccn_pmu_format_attr_group,
 584	&arm_ccn_pmu_cmp_mask_attr_group,
 585	&arm_ccn_pmu_cpumask_attr_group,
 586	NULL
 587};
 588
 589
 590static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size)
 591{
 592	int bit;
 593
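     	/*
     	 * Find a free bit and claim it atomically; if somebody else
     	 * grabbed it between the find and the set, simply look again.
     	 */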
 594	do {
 595		bit = find_first_zero_bit(bitmap, size);
 596		if (bit >= size)
 597			return -EAGAIN;
 598	} while (test_and_set_bit(bit, bitmap));
 599
 600	return bit;
 601}
 602
 603/* All RN-I and RN-D nodes have identical PMUs */
 604static int arm_ccn_pmu_type_eq(u32 a, u32 b)
 605{
 606	if (a == b)
 607		return 1;
 608
 609	switch (a) {
 610	case CCN_TYPE_RNI_1P:
 611	case CCN_TYPE_RNI_2P:
 612	case CCN_TYPE_RNI_3P:
 613	case CCN_TYPE_RND_1P:
 614	case CCN_TYPE_RND_2P:
 615	case CCN_TYPE_RND_3P:
 616		switch (b) {
 617		case CCN_TYPE_RNI_1P:
 618		case CCN_TYPE_RNI_2P:
 619		case CCN_TYPE_RNI_3P:
 620		case CCN_TYPE_RND_1P:
 621		case CCN_TYPE_RND_2P:
 622		case CCN_TYPE_RND_3P:
 623			return 1;
 624		}
 625		break;
 626	}
 627
 628	return 0;
 629}
 630
 631static int arm_ccn_pmu_event_alloc(struct perf_event *event)
 632{
 633	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 634	struct hw_perf_event *hw = &event->hw;
 635	u32 node_xp, type, event_id;
 636	struct arm_ccn_component *source;
 637	int bit;
 638
 639	node_xp = CCN_CONFIG_NODE(event->attr.config);
 640	type = CCN_CONFIG_TYPE(event->attr.config);
 641	event_id = CCN_CONFIG_EVENT(event->attr.config);
 642
 643	/* Allocate the cycle counter */
 644	if (type == CCN_TYPE_CYCLES) {
 645		if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
 646				ccn->dt.pmu_counters_mask))
 647			return -EAGAIN;
 648
 649		hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
 650		ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;
 651
 652		return 0;
 653	}
 654
 655	/* Allocate an event counter */
 656	hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
 657			CCN_NUM_PMU_EVENT_COUNTERS);
 658	if (hw->idx < 0) {
 659		dev_dbg(ccn->dev, "No more counters available!\n");
 660		return -EAGAIN;
 661	}
 662
 663	if (type == CCN_TYPE_XP)
 664		source = &ccn->xp[node_xp];
 665	else
 666		source = &ccn->node[node_xp];
 667	ccn->dt.pmu_counters[hw->idx].source = source;
 668
 669	/* Allocate an event source or a watchpoint */
 670	if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
 671		bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
 672				CCN_NUM_XP_WATCHPOINTS);
 673	else
 674		bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
 675				CCN_NUM_PMU_EVENTS);
 676	if (bit < 0) {
 677		dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
 678				node_xp);
 679		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
 680		return -EAGAIN;
 681	}
 682	hw->config_base = bit;
 683
 684	ccn->dt.pmu_counters[hw->idx].event = event;
 685
 686	return 0;
 687}
 688
 689static void arm_ccn_pmu_event_release(struct perf_event *event)
 690{
 691	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 692	struct hw_perf_event *hw = &event->hw;
 693
 694	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
 695		clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
 696	} else {
 697		struct arm_ccn_component *source =
 698				ccn->dt.pmu_counters[hw->idx].source;
 699
 700		if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
 701				CCN_CONFIG_EVENT(event->attr.config) ==
 702				CCN_EVENT_WATCHPOINT)
 703			clear_bit(hw->config_base, source->xp.dt_cmp_mask);
 704		else
 705			clear_bit(hw->config_base, source->pmu_events_mask);
 706		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
 707	}
 708
 709	ccn->dt.pmu_counters[hw->idx].source = NULL;
 710	ccn->dt.pmu_counters[hw->idx].event = NULL;
 711}
 712
 713static int arm_ccn_pmu_event_init(struct perf_event *event)
 714{
 715	struct arm_ccn *ccn;
 716	struct hw_perf_event *hw = &event->hw;
 717	u32 node_xp, type, event_id;
 718	int valid;
 719	int i;
 720	struct perf_event *sibling;
 721
 722	if (event->attr.type != event->pmu->type)
 723		return -ENOENT;
 724
 725	ccn = pmu_to_arm_ccn(event->pmu);
 726
 727	if (hw->sample_period) {
 728		dev_dbg(ccn->dev, "Sampling not supported!\n");
 729		return -EOPNOTSUPP;
 730	}
 731
 732	if (has_branch_stack(event)) {
  733		dev_dbg(ccn->dev, "Branch stack sampling is not supported!\n");
 734		return -EINVAL;
 735	}
 736
 737	if (event->cpu < 0) {
 738		dev_dbg(ccn->dev, "Can't provide per-task data!\n");
 739		return -EOPNOTSUPP;
 740	}
 741	/*
  742	 * Many perf core operations (e.g. event rotation) operate on a
 743	 * single CPU context. This is obvious for CPU PMUs, where one
 744	 * expects the same sets of events being observed on all CPUs,
 745	 * but can lead to issues for off-core PMUs, like CCN, where each
 746	 * event could be theoretically assigned to a different CPU. To
 747	 * mitigate this, we enforce CPU assignment to one, selected
 748	 * processor (the one described in the "cpumask" attribute).
 749	 */
 750	event->cpu = ccn->dt.cpu;
 751
 752	node_xp = CCN_CONFIG_NODE(event->attr.config);
 753	type = CCN_CONFIG_TYPE(event->attr.config);
 754	event_id = CCN_CONFIG_EVENT(event->attr.config);
 755
 756	/* Validate node/xp vs topology */
 757	switch (type) {
 758	case CCN_TYPE_MN:
 759		if (node_xp != ccn->mn_id) {
 760			dev_dbg(ccn->dev, "Invalid MN ID %d!\n", node_xp);
 761			return -EINVAL;
 762		}
 763		break;
 764	case CCN_TYPE_XP:
 765		if (node_xp >= ccn->num_xps) {
 766			dev_dbg(ccn->dev, "Invalid XP ID %d!\n", node_xp);
 767			return -EINVAL;
 768		}
 769		break;
 770	case CCN_TYPE_CYCLES:
 771		break;
 772	default:
 773		if (node_xp >= ccn->num_nodes) {
 774			dev_dbg(ccn->dev, "Invalid node ID %d!\n", node_xp);
 775			return -EINVAL;
 776		}
 777		if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
 778			dev_dbg(ccn->dev, "Invalid type 0x%x for node %d!\n",
 779					type, node_xp);
 780			return -EINVAL;
 781		}
 782		break;
 783	}
 784
 785	/* Validate event ID vs available for the type */
 786	for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid;
 787			i++) {
 788		struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i];
 789		u32 port = CCN_CONFIG_PORT(event->attr.config);
 790		u32 vc = CCN_CONFIG_VC(event->attr.config);
 791
 792		if (!arm_ccn_pmu_type_eq(type, e->type))
 793			continue;
 794		if (event_id != e->event)
 795			continue;
 796		if (e->num_ports && port >= e->num_ports) {
 797			dev_dbg(ccn->dev, "Invalid port %d for node/XP %d!\n",
 798					port, node_xp);
 799			return -EINVAL;
 800		}
 801		if (e->num_vcs && vc >= e->num_vcs) {
 802			dev_dbg(ccn->dev, "Invalid vc %d for node/XP %d!\n",
 803					vc, node_xp);
 804			return -EINVAL;
 805		}
 806		valid = 1;
 807	}
 808	if (!valid) {
 809		dev_dbg(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
 810				event_id, node_xp);
 811		return -EINVAL;
 812	}
 813
 814	/* Watchpoint-based event for a node is actually set on XP */
 815	if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) {
 816		u32 port;
 817
 818		type = CCN_TYPE_XP;
 819		port = arm_ccn_node_to_xp_port(node_xp);
 820		node_xp = arm_ccn_node_to_xp(node_xp);
 821
 822		arm_ccn_pmu_config_set(&event->attr.config,
 823				node_xp, type, port);
 824	}
 825
 826	/*
 827	 * We must NOT create groups containing mixed PMUs, although software
  828	 * events are acceptable (for example, to create a CCN group that is
  829	 * read periodically whenever its hrtimer-based cpu-clock leader fires).
 830	 */
 831	if (event->group_leader->pmu != event->pmu &&
 832			!is_software_event(event->group_leader))
 833		return -EINVAL;
 834
 835	for_each_sibling_event(sibling, event->group_leader) {
 836		if (sibling->pmu != event->pmu &&
 837				!is_software_event(sibling))
 838			return -EINVAL;
 839	}
 840
 841	return 0;
 842}
 843
 844static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx)
 845{
 846	u64 res;
 847
 848	if (idx == CCN_IDX_PMU_CYCLE_COUNTER) {
 849#ifdef readq
 850		res = readq(ccn->dt.base + CCN_DT_PMCCNTR);
 851#else
  852		/* 40-bit counter: take a snapshot and read it in two parts */
 853		writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ);
 854		while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1))
 855			;
 856		writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
 857		res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff;
 858		res <<= 32;
 859		res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR);
 860#endif
 861	} else {
 862		res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx));
 863	}
 864
 865	return res;
 866}
 867
 868static void arm_ccn_pmu_event_update(struct perf_event *event)
 869{
 870	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 871	struct hw_perf_event *hw = &event->hw;
 872	u64 prev_count, new_count, mask;
 873
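     	/*
     	 * Publish the freshly read value with an atomic exchange; retry
     	 * if another updater raced with us. The delta is masked to the
     	 * counter width (40 bits for the cycle counter, 32 otherwise),
     	 * so a single wrap-around between updates is accounted correctly.
     	 */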
 874	do {
 875		prev_count = local64_read(&hw->prev_count);
 876		new_count = arm_ccn_pmu_read_counter(ccn, hw->idx);
 877	} while (local64_xchg(&hw->prev_count, new_count) != prev_count);
 878
 879	mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1;
 880
 881	local64_add((new_count - prev_count) & mask, &event->count);
 882}
 883
 884static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
 885{
 886	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 887	struct hw_perf_event *hw = &event->hw;
 888	struct arm_ccn_component *xp;
 889	u32 val, dt_cfg;
 890
 891	/* Nothing to do for cycle counter */
 892	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
 893		return;
 894
 895	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
 896		xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
 897	else
 898		xp = &ccn->xp[arm_ccn_node_to_xp(
 899				CCN_CONFIG_NODE(event->attr.config))];
 900
 901	if (enable)
 902		dt_cfg = hw->event_base;
 903	else
 904		dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH;
 905
 906	spin_lock(&ccn->dt.config_lock);
 907
 908	val = readl(xp->base + CCN_XP_DT_CONFIG);
 909	val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK <<
 910			CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx));
 911	val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx);
 912	writel(val, xp->base + CCN_XP_DT_CONFIG);
 913
 914	spin_unlock(&ccn->dt.config_lock);
 915}
 916
 917static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
 918{
 919	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 920	struct hw_perf_event *hw = &event->hw;
 921
 922	local64_set(&event->hw.prev_count,
 923			arm_ccn_pmu_read_counter(ccn, hw->idx));
 924	hw->state = 0;
 925
 926	/* Set the DT bus input, engaging the counter */
 927	arm_ccn_pmu_xp_dt_config(event, 1);
 928}
 929
 930static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
 931{
 932	struct hw_perf_event *hw = &event->hw;
 933
 934	/* Disable counting, setting the DT bus to pass-through mode */
 935	arm_ccn_pmu_xp_dt_config(event, 0);
 936
 937	if (flags & PERF_EF_UPDATE)
 938		arm_ccn_pmu_event_update(event);
 939
 940	hw->state |= PERF_HES_STOPPED;
 941}
 942
 943static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
 944{
 945	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 946	struct hw_perf_event *hw = &event->hw;
 947	struct arm_ccn_component *source =
 948			ccn->dt.pmu_counters[hw->idx].source;
 949	unsigned long wp = hw->config_base;
 950	u32 val;
 951	u64 cmp_l = event->attr.config1;
 952	u64 cmp_h = event->attr.config2;
 953	u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l;
 954	u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h;
 955
 956	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp);
 957
 958	/* Direction (RX/TX), device (port) & virtual channel */
 959	val = readl(source->base + CCN_XP_DT_INTERFACE_SEL);
 960	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK <<
 961			CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp));
 962	val |= CCN_CONFIG_DIR(event->attr.config) <<
 963			CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp);
 964	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK <<
 965			CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp));
 966	val |= CCN_CONFIG_PORT(event->attr.config) <<
 967			CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp);
 968	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK <<
 969			CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp));
 970	val |= CCN_CONFIG_VC(event->attr.config) <<
 971			CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp);
 972	writel(val, source->base + CCN_XP_DT_INTERFACE_SEL);
 973
  974	/* Comparison values (63-bit cmp_l, 60-bit cmp_h, hence the masks) */
 975	writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
 976	writel((cmp_l >> 32) & 0x7fffffff,
 977			source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
 978	writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
 979	writel((cmp_h >> 32) & 0x0fffffff,
 980			source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4);
 981
 982	/* Mask */
 983	writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
 984	writel((mask_l >> 32) & 0x7fffffff,
 985			source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
 986	writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
 987	writel((mask_h >> 32) & 0x0fffffff,
 988			source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4);
 989}
 990
 991static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
 992{
 993	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 994	struct hw_perf_event *hw = &event->hw;
 995	struct arm_ccn_component *source =
 996			ccn->dt.pmu_counters[hw->idx].source;
 997	u32 val, id;
 998
 999	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);
1000
1001	id = (CCN_CONFIG_VC(event->attr.config) << 4) |
1002			(CCN_CONFIG_BUS(event->attr.config) << 3) |
1003			(CCN_CONFIG_EVENT(event->attr.config) << 0);
1004
1005	val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
1006	val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK <<
1007			CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
1008	val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
1009	writel(val, source->base + CCN_XP_PMU_EVENT_SEL);
1010}
1011
1012static void arm_ccn_pmu_node_event_config(struct perf_event *event)
1013{
1014	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
1015	struct hw_perf_event *hw = &event->hw;
1016	struct arm_ccn_component *source =
1017			ccn->dt.pmu_counters[hw->idx].source;
1018	u32 type = CCN_CONFIG_TYPE(event->attr.config);
1019	u32 val, port;
1020
1021	port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config));
1022	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port,
1023			hw->config_base);
1024
1025	/* These *_event_sel regs should be identical, but let's make sure... */
1026	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL);
1027	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL);
1028	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) !=
1029			CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1));
1030	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) !=
1031			CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1));
1032	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK !=
1033			CCN_SBAS_PMU_EVENT_SEL__ID__MASK);
1034	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK !=
1035			CCN_RNI_PMU_EVENT_SEL__ID__MASK);
1036	if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS &&
1037			!arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P)))
1038		return;
1039
1040	/* Set the event id for the pre-allocated counter */
1041	val = readl(source->base + CCN_HNF_PMU_EVENT_SEL);
1042	val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK <<
1043		CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
1044	val |= CCN_CONFIG_EVENT(event->attr.config) <<
1045		CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
1046	writel(val, source->base + CCN_HNF_PMU_EVENT_SEL);
1047}
1048
1049static void arm_ccn_pmu_event_config(struct perf_event *event)
1050{
1051	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
1052	struct hw_perf_event *hw = &event->hw;
1053	u32 xp, offset, val;
1054
1055	/* Cycle counter requires no setup */
1056	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
1057		return;
1058
1059	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
1060		xp = CCN_CONFIG_XP(event->attr.config);
1061	else
1062		xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config));
1063
1064	spin_lock(&ccn->dt.config_lock);
1065
1066	/* Set the DT bus "distance" register */
1067	offset = (hw->idx / 4) * 4;
1068	val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
1069	val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK <<
1070			CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4));
1071	val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4);
1072	writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
1073
1074	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) {
1075		if (CCN_CONFIG_EVENT(event->attr.config) ==
1076				CCN_EVENT_WATCHPOINT)
1077			arm_ccn_pmu_xp_watchpoint_config(event);
1078		else
1079			arm_ccn_pmu_xp_event_config(event);
1080	} else {
1081		arm_ccn_pmu_node_event_config(event);
1082	}
1083
1084	spin_unlock(&ccn->dt.config_lock);
1085}
1086
1087static int arm_ccn_pmu_active_counters(struct arm_ccn *ccn)
1088{
1089	return bitmap_weight(ccn->dt.pmu_counters_mask,
1090			     CCN_NUM_PMU_EVENT_COUNTERS + 1);
1091}
1092
1093static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
1094{
1095	int err;
1096	struct hw_perf_event *hw = &event->hw;
1097	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
1098
1099	err = arm_ccn_pmu_event_alloc(event);
1100	if (err)
1101		return err;
1102
1103	/*
1104	 * Pin the timer, so that the overflows are handled by the chosen
 1105	 * event->cpu (the same one as presented in the "cpumask"
 1106	 * attribute).
1107	 */
1108	if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1)
1109		hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
1110			      HRTIMER_MODE_REL_PINNED);
1111
1112	arm_ccn_pmu_event_config(event);
1113
1114	hw->state = PERF_HES_STOPPED;
1115
1116	if (flags & PERF_EF_START)
1117		arm_ccn_pmu_event_start(event, PERF_EF_UPDATE);
1118
1119	return 0;
1120}
1121
1122static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
1123{
1124	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
1125
1126	arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
1127
1128	arm_ccn_pmu_event_release(event);
1129
1130	if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0)
1131		hrtimer_cancel(&ccn->dt.hrtimer);
1132}
1133
1134static void arm_ccn_pmu_event_read(struct perf_event *event)
1135{
1136	arm_ccn_pmu_event_update(event);
1137}
1138
1139static void arm_ccn_pmu_enable(struct pmu *pmu)
1140{
1141	struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
1142
1143	u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
1144	val |= CCN_DT_PMCR__PMU_EN;
1145	writel(val, ccn->dt.base + CCN_DT_PMCR);
1146}
1147
1148static void arm_ccn_pmu_disable(struct pmu *pmu)
1149{
1150	struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
1151
1152	u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
1153	val &= ~CCN_DT_PMCR__PMU_EN;
1154	writel(val, ccn->dt.base + CCN_DT_PMCR);
1155}
1156
1157static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
1158{
1159	u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
1160	int idx;
1161
1162	if (!pmovsr)
1163		return IRQ_NONE;
1164
1165	writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR);
1166
1167	BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS);
1168
1169	for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) {
1170		struct perf_event *event = dt->pmu_counters[idx].event;
1171		int overflowed = pmovsr & BIT(idx);
1172
1173		WARN_ON_ONCE(overflowed && !event &&
1174				idx != CCN_IDX_PMU_CYCLE_COUNTER);
1175
1176		if (!event || !overflowed)
1177			continue;
1178
1179		arm_ccn_pmu_event_update(event);
1180	}
1181
1182	return IRQ_HANDLED;
1183}
1184
1185static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
1186{
1187	struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt,
1188			hrtimer);
1189	unsigned long flags;
1190
1191	local_irq_save(flags);
1192	arm_ccn_pmu_overflow_handler(dt);
1193	local_irq_restore(flags);
1194
1195	hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period());
1196	return HRTIMER_RESTART;
1197}
1198
1199
1200static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
1201{
1202	struct arm_ccn_dt *dt = hlist_entry_safe(node, struct arm_ccn_dt, node);
1203	struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
1204	unsigned int target;
1205
1206	if (cpu != dt->cpu)
1207		return 0;
1208	target = cpumask_any_but(cpu_online_mask, cpu);
1209	if (target >= nr_cpu_ids)
1210		return 0;
1211	perf_pmu_migrate_context(&dt->pmu, cpu, target);
1212	dt->cpu = target;
1213	if (ccn->irq)
1214		WARN_ON(irq_set_affinity(ccn->irq, cpumask_of(dt->cpu)));
1215	return 0;
1216}
1217
1218static DEFINE_IDA(arm_ccn_pmu_ida);
1219
1220static int arm_ccn_pmu_init(struct arm_ccn *ccn)
1221{
1222	int i;
1223	char *name;
1224	int err;
1225
1226	/* Initialize DT subsystem */
1227	ccn->dt.base = ccn->base + CCN_REGION_SIZE;
1228	spin_lock_init(&ccn->dt.config_lock);
1229	writel(CCN_DT_PMOVSR_CLR__MASK, ccn->dt.base + CCN_DT_PMOVSR_CLR);
1230	writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL);
1231	writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN,
1232			ccn->dt.base + CCN_DT_PMCR);
1233	writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
1234	for (i = 0; i < ccn->num_xps; i++) {
1235		writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG);
1236		writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
1237				CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) |
1238				(CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
1239				CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) |
1240				CCN_XP_DT_CONTROL__DT_ENABLE,
1241				ccn->xp[i].base + CCN_XP_DT_CONTROL);
1242	}
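     	/*
     	 * Predefined comparison masks; a set bit marks a flit bit that the
     	 * watchpoint comparator ignores. ANY matches every flit, EXACT
     	 * compares all bits, ORDER looks only at cmp_h bit 15 (cf. the
     	 * rxreqflits_order events) and OPCODE only at the opcode field in
     	 * cmp_h bits 9-13 (cf. the mn_* barrier/DVM events).
     	 */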
1243	ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0;
1244	ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0;
1245	ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0;
1246	ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0;
1247	ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0;
1248	ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15);
1249	ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0;
1250	ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);
1251
 1252	/* Get a convenient /sys/bus/event_source/devices/ name */
1253	ccn->dt.id = ida_alloc(&arm_ccn_pmu_ida, GFP_KERNEL);
1254	if (ccn->dt.id == 0) {
1255		name = "ccn";
1256	} else {
1257		name = devm_kasprintf(ccn->dev, GFP_KERNEL, "ccn_%d",
1258				      ccn->dt.id);
1259		if (!name) {
1260			err = -ENOMEM;
1261			goto error_choose_name;
1262		}
1263	}
1264
1265	/* Perf driver registration */
1266	ccn->dt.pmu = (struct pmu) {
1267		.module = THIS_MODULE,
1268		.attr_groups = arm_ccn_pmu_attr_groups,
1269		.task_ctx_nr = perf_invalid_context,
1270		.event_init = arm_ccn_pmu_event_init,
1271		.add = arm_ccn_pmu_event_add,
1272		.del = arm_ccn_pmu_event_del,
1273		.start = arm_ccn_pmu_event_start,
1274		.stop = arm_ccn_pmu_event_stop,
1275		.read = arm_ccn_pmu_event_read,
1276		.pmu_enable = arm_ccn_pmu_enable,
1277		.pmu_disable = arm_ccn_pmu_disable,
1278		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
1279	};
1280
1281	/* No overflow interrupt? Have to use a timer instead. */
1282	if (!ccn->irq) {
1283		dev_info(ccn->dev, "No access to interrupts, using timer.\n");
1284		hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
1285				HRTIMER_MODE_REL);
1286		ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
1287	}
1288
1289	/* Pick one CPU which we will use to collect data from CCN... */
1290	ccn->dt.cpu = raw_smp_processor_id();
1291
1292	/* Also make sure that the overflow interrupt is handled by this CPU */
1293	if (ccn->irq) {
1294		err = irq_set_affinity(ccn->irq, cpumask_of(ccn->dt.cpu));
1295		if (err) {
1296			dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
1297			goto error_set_affinity;
1298		}
1299	}
1300
1301	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
1302					 &ccn->dt.node);
1303
1304	err = perf_pmu_register(&ccn->dt.pmu, name, -1);
1305	if (err)
1306		goto error_pmu_register;
1307
1308	return 0;
1309
1310error_pmu_register:
1311	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
1312					    &ccn->dt.node);
1313error_set_affinity:
1314error_choose_name:
1315	ida_free(&arm_ccn_pmu_ida, ccn->dt.id);
1316	for (i = 0; i < ccn->num_xps; i++)
1317		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
1318	writel(0, ccn->dt.base + CCN_DT_PMCR);
1319	return err;
1320}
1321
1322static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
1323{
1324	int i;
1325
1326	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
1327					    &ccn->dt.node);
1328	for (i = 0; i < ccn->num_xps; i++)
1329		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
1330	writel(0, ccn->dt.base + CCN_DT_PMCR);
1331	perf_pmu_unregister(&ccn->dt.pmu);
1332	ida_free(&arm_ccn_pmu_ida, ccn->dt.id);
1333}
1334
1335static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
1336		int (*callback)(struct arm_ccn *ccn, int region,
1337		void __iomem *base, u32 type, u32 id))
1338{
1339	int region;
1340
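     	/*
     	 * The programmers' view is split into 256 64kB regions; the MN's
     	 * component list provides one presence bit per region and every
     	 * present region identifies its component in its OLY_ID register.
     	 */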
1341	for (region = 0; region < CCN_NUM_REGIONS; region++) {
1342		u32 val, type, id;
1343		void __iomem *base;
1344		int err;
1345
1346		val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 +
1347				4 * (region / 32));
1348		if (!(val & (1 << (region % 32))))
1349			continue;
1350
1351		base = ccn->base + region * CCN_REGION_SIZE;
1352		val = readl(base + CCN_ALL_OLY_ID);
1353		type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) &
1354				CCN_ALL_OLY_ID__OLY_ID__MASK;
1355		id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) &
1356				CCN_ALL_OLY_ID__NODE_ID__MASK;
1357
1358		err = callback(ccn, region, base, type, id);
1359		if (err)
1360			return err;
1361	}
1362
1363	return 0;
1364}
1365
1366static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region,
1367		void __iomem *base, u32 type, u32 id)
1368{
1370	if (type == CCN_TYPE_XP && id >= ccn->num_xps)
1371		ccn->num_xps = id + 1;
1372	else if (id >= ccn->num_nodes)
1373		ccn->num_nodes = id + 1;
1374
1375	return 0;
1376}
1377
1378static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
1379		void __iomem *base, u32 type, u32 id)
1380{
1381	struct arm_ccn_component *component;
1382
1383	dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type);
1384
1385	switch (type) {
1386	case CCN_TYPE_MN:
1387		ccn->mn_id = id;
1388		return 0;
1389	case CCN_TYPE_DT:
1390		return 0;
1391	case CCN_TYPE_XP:
1392		component = &ccn->xp[id];
1393		break;
1394	case CCN_TYPE_SBSX:
1395		ccn->sbsx_present = 1;
1396		component = &ccn->node[id];
1397		break;
1398	case CCN_TYPE_SBAS:
1399		ccn->sbas_present = 1;
1400		fallthrough;
1401	default:
1402		component = &ccn->node[id];
1403		break;
1404	}
1405
1406	component->base = base;
1407	component->type = type;
1408
1409	return 0;
1410}
1411
1412
1413static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn,
1414		const u32 *err_sig_val)
1415{
 1416	/* This should really be handled by firmware... */
1417	dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n",
1418			err_sig_val[5], err_sig_val[4], err_sig_val[3],
1419			err_sig_val[2], err_sig_val[1], err_sig_val[0]);
1420	dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n");
1421	writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE,
1422			ccn->base + CCN_MN_ERRINT_STATUS);
1423
1424	return IRQ_HANDLED;
1425}
1426
1427
1428static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id)
1429{
1430	irqreturn_t res = IRQ_NONE;
1431	struct arm_ccn *ccn = dev_id;
1432	u32 err_sig_val[6];
1433	u32 err_or;
1434	int i;
1435
1436	/* PMU overflow is a special case */
1437	err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0);
1438	if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) {
1439		err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT;
1440		res = arm_ccn_pmu_overflow_handler(&ccn->dt);
1441	}
1442
1443	/* Have to read all err_sig_vals to clear them */
1444	for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) {
1445		err_sig_val[i] = readl(ccn->base +
1446				CCN_MN_ERR_SIG_VAL_63_0 + i * 4);
1447		err_or |= err_sig_val[i];
1448	}
1449	if (err_or)
1450		res |= arm_ccn_error_handler(ccn, err_sig_val);
1451
1452	if (res != IRQ_NONE)
 1453		writel(CCN_MN_ERRINT_STATUS__INTREQ__DEASSERT,
1454				ccn->base + CCN_MN_ERRINT_STATUS);
1455
1456	return res;
1457}
1458
1459
1460static int arm_ccn_probe(struct platform_device *pdev)
1461{
1462	struct arm_ccn *ccn;
1463	int irq;
1464	int err;
1465
1466	ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL);
1467	if (!ccn)
1468		return -ENOMEM;
1469	ccn->dev = &pdev->dev;
1470	platform_set_drvdata(pdev, ccn);
1471
1472	ccn->base = devm_platform_ioremap_resource(pdev, 0);
1473	if (IS_ERR(ccn->base))
1474		return PTR_ERR(ccn->base);
1475
1476	irq = platform_get_irq(pdev, 0);
1477	if (irq < 0)
1478		return irq;
1479
1480	/* Check if we can use the interrupt */
1481	writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE,
1482			ccn->base + CCN_MN_ERRINT_STATUS);
1483	if (readl(ccn->base + CCN_MN_ERRINT_STATUS) &
1484			CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) {
1485		/* Can set 'disable' bits, so can acknowledge interrupts */
1486		writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
1487				ccn->base + CCN_MN_ERRINT_STATUS);
1488		err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler,
1489				       IRQF_NOBALANCING | IRQF_NO_THREAD,
1490				       dev_name(ccn->dev), ccn);
1491		if (err)
1492			return err;
1493
1494		ccn->irq = irq;
1495	}
1496
1497
1498	/* Build topology */
1499
1500	err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num);
1501	if (err)
1502		return err;
1503
1504	ccn->node = devm_kcalloc(ccn->dev, ccn->num_nodes, sizeof(*ccn->node),
1505				 GFP_KERNEL);
 1506	ccn->xp = devm_kcalloc(ccn->dev, ccn->num_xps, sizeof(*ccn->xp),
1507			       GFP_KERNEL);
1508	if (!ccn->node || !ccn->xp)
1509		return -ENOMEM;
1510
1511	err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes);
1512	if (err)
1513		return err;
1514
1515	return arm_ccn_pmu_init(ccn);
1516}
1517
1518static int arm_ccn_remove(struct platform_device *pdev)
1519{
1520	struct arm_ccn *ccn = platform_get_drvdata(pdev);
1521
1522	arm_ccn_pmu_cleanup(ccn);
1523
1524	return 0;
1525}
1526
1527static const struct of_device_id arm_ccn_match[] = {
1528	{ .compatible = "arm,ccn-502", },
1529	{ .compatible = "arm,ccn-504", },
1530	{ .compatible = "arm,ccn-512", },
1531	{},
1532};
1533MODULE_DEVICE_TABLE(of, arm_ccn_match);
1534
1535static struct platform_driver arm_ccn_driver = {
1536	.driver = {
1537		.name = "arm-ccn",
1538		.of_match_table = arm_ccn_match,
1539		.suppress_bind_attrs = true,
1540	},
1541	.probe = arm_ccn_probe,
1542	.remove = arm_ccn_remove,
1543};
1544
1545static int __init arm_ccn_init(void)
1546{
1547	int i, ret;
1548
1549	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE,
1550				      "perf/arm/ccn:online", NULL,
1551				      arm_ccn_pmu_offline_cpu);
1552	if (ret)
1553		return ret;
1554
1555	for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
1556		arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
1557
1558	ret = platform_driver_register(&arm_ccn_driver);
1559	if (ret)
1560		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
1561	return ret;
1562}
1563
1564static void __exit arm_ccn_exit(void)
1565{
1566	platform_driver_unregister(&arm_ccn_driver);
1567	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
1568}
1569
1570module_init(arm_ccn_init);
1571module_exit(arm_ccn_exit);
1572
1573MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
1574MODULE_LICENSE("GPL v2");