   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
   4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
   5 */
   6
   7#include <linux/pid_namespace.h>
   8#include <linux/pm_runtime.h>
   9#include <linux/sysfs.h>
  10#include "coresight-etm4x.h"
  11#include "coresight-priv.h"
  12#include "coresight-syscfg.h"
  13
  14static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
  15{
  16	u8 idx;
  17	struct etmv4_config *config = &drvdata->config;
  18
  19	idx = config->addr_idx;
  20
  21	/*
   22	 * TRCACATRn.TYPE bits[1:0]: type of comparison
  23	 * the trace unit performs
  24	 */
  25	if (FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]) == TRCACATRn_TYPE_ADDR) {
  26		if (idx % 2 != 0)
  27			return -EINVAL;
  28
  29		/*
  30		 * We are performing instruction address comparison. Set the
  31		 * relevant bit of ViewInst Include/Exclude Control register
  32		 * for corresponding address comparator pair.
  33		 */
  34		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
  35		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
  36			return -EINVAL;
  37
  38		if (exclude == true) {
  39			/*
  40			 * Set exclude bit and unset the include bit
  41			 * corresponding to comparator pair
  42			 */
  43			config->viiectlr |= BIT(idx / 2 + 16);
  44			config->viiectlr &= ~BIT(idx / 2);
  45		} else {
  46			/*
  47			 * Set include bit and unset exclude bit
  48			 * corresponding to comparator pair
  49			 */
  50			config->viiectlr |= BIT(idx / 2);
  51			config->viiectlr &= ~BIT(idx / 2 + 16);
  52		}
  53	}
  54	return 0;
  55}
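/*
 * Worked example for the include/exclude mapping above (derived from the
 * code, not a definitive statement of the TRM): address comparators are
 * used in even/odd pairs, so pair number = idx / 2.  For the pair {2, 3}
 * programmed as an address range, "exclude" sets BIT(1 + 16) and clears
 * BIT(1) in the ViewInst Include/Exclude Control register, while
 * "include" sets BIT(1) and clears BIT(17).
 */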
  56
  57static ssize_t nr_pe_cmp_show(struct device *dev,
  58			      struct device_attribute *attr,
  59			      char *buf)
  60{
  61	unsigned long val;
  62	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  63
  64	val = drvdata->nr_pe_cmp;
  65	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  66}
  67static DEVICE_ATTR_RO(nr_pe_cmp);
  68
  69static ssize_t nr_addr_cmp_show(struct device *dev,
  70				struct device_attribute *attr,
  71				char *buf)
  72{
  73	unsigned long val;
  74	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  75
  76	val = drvdata->nr_addr_cmp;
  77	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  78}
  79static DEVICE_ATTR_RO(nr_addr_cmp);
  80
  81static ssize_t nr_cntr_show(struct device *dev,
  82			    struct device_attribute *attr,
  83			    char *buf)
  84{
  85	unsigned long val;
  86	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  87
  88	val = drvdata->nr_cntr;
  89	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  90}
  91static DEVICE_ATTR_RO(nr_cntr);
  92
  93static ssize_t nr_ext_inp_show(struct device *dev,
  94			       struct device_attribute *attr,
  95			       char *buf)
  96{
  97	unsigned long val;
  98	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  99
 100	val = drvdata->nr_ext_inp;
 101	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 102}
 103static DEVICE_ATTR_RO(nr_ext_inp);
 104
 105static ssize_t numcidc_show(struct device *dev,
 106			    struct device_attribute *attr,
 107			    char *buf)
 108{
 109	unsigned long val;
 110	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 111
 112	val = drvdata->numcidc;
 113	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 114}
 115static DEVICE_ATTR_RO(numcidc);
 116
 117static ssize_t numvmidc_show(struct device *dev,
 118			     struct device_attribute *attr,
 119			     char *buf)
 120{
 121	unsigned long val;
 122	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 123
 124	val = drvdata->numvmidc;
 125	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 126}
 127static DEVICE_ATTR_RO(numvmidc);
 128
 129static ssize_t nrseqstate_show(struct device *dev,
 130			       struct device_attribute *attr,
 131			       char *buf)
 132{
 133	unsigned long val;
 134	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 135
 136	val = drvdata->nrseqstate;
 137	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 138}
 139static DEVICE_ATTR_RO(nrseqstate);
 140
 141static ssize_t nr_resource_show(struct device *dev,
 142				struct device_attribute *attr,
 143				char *buf)
 144{
 145	unsigned long val;
 146	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 147
 148	val = drvdata->nr_resource;
 149	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 150}
 151static DEVICE_ATTR_RO(nr_resource);
 152
 153static ssize_t nr_ss_cmp_show(struct device *dev,
 154			      struct device_attribute *attr,
 155			      char *buf)
 156{
 157	unsigned long val;
 158	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 159
 160	val = drvdata->nr_ss_cmp;
 161	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 162}
 163static DEVICE_ATTR_RO(nr_ss_cmp);
 164
 165static ssize_t reset_store(struct device *dev,
 166			   struct device_attribute *attr,
 167			   const char *buf, size_t size)
 168{
 169	int i;
 170	unsigned long val;
 171	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 172	struct etmv4_config *config = &drvdata->config;
 173
 174	if (kstrtoul(buf, 16, &val))
 175		return -EINVAL;
 176
 177	spin_lock(&drvdata->spinlock);
 178	if (val)
 179		config->mode = 0x0;
 180
 181	/* Disable data tracing: do not trace load and store data transfers */
 182	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
 183	config->cfg &= ~(TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE);
 184
 185	/* Disable data value and data address tracing */
 186	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
 187			   ETM_MODE_DATA_TRACE_VAL);
 188	config->cfg &= ~(TRCCONFIGR_DA | TRCCONFIGR_DV);
 189
 190	/* Disable all events tracing */
 191	config->eventctrl0 = 0x0;
 192	config->eventctrl1 = 0x0;
 193
 194	/* Disable timestamp event */
 195	config->ts_ctrl = 0x0;
 196
 197	/* Disable stalling */
 198	config->stall_ctrl = 0x0;
 199
  200	/* Reset trace synchronization period to 2^8 = 256 bytes */
 201	if (drvdata->syncpr == false)
 202		config->syncfreq = 0x8;
 203
 204	/*
 205	 * Enable ViewInst to trace everything with start-stop logic in
 206	 * started state. ARM recommends start-stop logic is set before
 207	 * each trace run.
 208	 */
 209	config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
 210	if (drvdata->nr_addr_cmp > 0) {
 211		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
 212		/* SSSTATUS, bit[9] */
 213		config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
 214	}
 215
 216	/* No address range filtering for ViewInst */
 217	config->viiectlr = 0x0;
 218
 219	/* No start-stop filtering for ViewInst */
 220	config->vissctlr = 0x0;
 221	config->vipcssctlr = 0x0;
 222
 223	/* Disable seq events */
 224	for (i = 0; i < drvdata->nrseqstate-1; i++)
 225		config->seq_ctrl[i] = 0x0;
 226	config->seq_rst = 0x0;
 227	config->seq_state = 0x0;
 228
 229	/* Disable external input events */
 230	config->ext_inp = 0x0;
 231
 232	config->cntr_idx = 0x0;
 233	for (i = 0; i < drvdata->nr_cntr; i++) {
 234		config->cntrldvr[i] = 0x0;
 235		config->cntr_ctrl[i] = 0x0;
 236		config->cntr_val[i] = 0x0;
 237	}
 238
 239	config->res_idx = 0x0;
 240	for (i = 2; i < 2 * drvdata->nr_resource; i++)
 241		config->res_ctrl[i] = 0x0;
 242
 243	config->ss_idx = 0x0;
 244	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
 245		config->ss_ctrl[i] = 0x0;
 246		config->ss_pe_cmp[i] = 0x0;
 247	}
 248
 249	config->addr_idx = 0x0;
 250	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
 251		config->addr_val[i] = 0x0;
 252		config->addr_acc[i] = 0x0;
 253		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
 254	}
 255
 256	config->ctxid_idx = 0x0;
 257	for (i = 0; i < drvdata->numcidc; i++)
 258		config->ctxid_pid[i] = 0x0;
 259
 260	config->ctxid_mask0 = 0x0;
 261	config->ctxid_mask1 = 0x0;
 262
 263	config->vmid_idx = 0x0;
 264	for (i = 0; i < drvdata->numvmidc; i++)
 265		config->vmid_val[i] = 0x0;
 266	config->vmid_mask0 = 0x0;
 267	config->vmid_mask1 = 0x0;
 268
 269	drvdata->trcid = drvdata->cpu + 1;
 270
 271	spin_unlock(&drvdata->spinlock);
 272
 273	cscfg_csdev_reset_feats(to_coresight_device(dev));
 274
 275	return size;
 276}
 277static DEVICE_ATTR_WO(reset);
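/*
 * Usage sketch (path and value are illustrative, assuming the usual
 * coresight sysfs layout): writing any hex value returns the stored
 * configuration to the defaults programmed above, e.g.
 *
 *   echo 1 > /sys/bus/coresight/devices/<etm-name>/reset
 */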
 278
 279static ssize_t mode_show(struct device *dev,
 280			 struct device_attribute *attr,
 281			 char *buf)
 282{
 283	unsigned long val;
 284	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 285	struct etmv4_config *config = &drvdata->config;
 286
 287	val = config->mode;
 288	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 289}
 290
 291static ssize_t mode_store(struct device *dev,
 292			  struct device_attribute *attr,
 293			  const char *buf, size_t size)
 294{
 295	unsigned long val, mode;
 296	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 297	struct etmv4_config *config = &drvdata->config;
 298
 299	if (kstrtoul(buf, 16, &val))
 300		return -EINVAL;
 301
 302	spin_lock(&drvdata->spinlock);
 303	config->mode = val & ETMv4_MODE_ALL;
 304
 305	if (drvdata->instrp0 == true) {
 306		/* start by clearing instruction P0 field */
 307		config->cfg  &= ~TRCCONFIGR_INSTP0_LOAD_STORE;
 308		if (config->mode & ETM_MODE_LOAD)
 309			/* 0b01 Trace load instructions as P0 instructions */
 310			config->cfg  |= TRCCONFIGR_INSTP0_LOAD;
 311		if (config->mode & ETM_MODE_STORE)
 312			/* 0b10 Trace store instructions as P0 instructions */
 313			config->cfg  |= TRCCONFIGR_INSTP0_STORE;
 314		if (config->mode & ETM_MODE_LOAD_STORE)
 315			/*
 316			 * 0b11 Trace load and store instructions
 317			 * as P0 instructions
 318			 */
 319			config->cfg  |= TRCCONFIGR_INSTP0_LOAD_STORE;
 320	}
 321
 322	/* bit[3], Branch broadcast mode */
 323	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
 324		config->cfg |= TRCCONFIGR_BB;
 325	else
 326		config->cfg &= ~TRCCONFIGR_BB;
 327
 328	/* bit[4], Cycle counting instruction trace bit */
 329	if ((config->mode & ETMv4_MODE_CYCACC) &&
 330		(drvdata->trccci == true))
 331		config->cfg |= TRCCONFIGR_CCI;
 332	else
 333		config->cfg &= ~TRCCONFIGR_CCI;
 334
 335	/* bit[6], Context ID tracing bit */
 336	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
 337		config->cfg |= TRCCONFIGR_CID;
 338	else
 339		config->cfg &= ~TRCCONFIGR_CID;
 340
 341	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
 342		config->cfg |= TRCCONFIGR_VMID;
 343	else
 344		config->cfg &= ~TRCCONFIGR_VMID;
 345
 346	/* bits[10:8], Conditional instruction tracing bit */
 347	mode = ETM_MODE_COND(config->mode);
 348	if (drvdata->trccond == true) {
 349		config->cfg &= ~TRCCONFIGR_COND_MASK;
 350		config->cfg |= mode << __bf_shf(TRCCONFIGR_COND_MASK);
 351	}
 352
 353	/* bit[11], Global timestamp tracing bit */
 354	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
 355		config->cfg |= TRCCONFIGR_TS;
 356	else
 357		config->cfg &= ~TRCCONFIGR_TS;
 358
 359	/* bit[12], Return stack enable bit */
 360	if ((config->mode & ETM_MODE_RETURNSTACK) &&
 361					(drvdata->retstack == true))
 362		config->cfg |= TRCCONFIGR_RS;
 363	else
 364		config->cfg &= ~TRCCONFIGR_RS;
 365
 366	/* bits[14:13], Q element enable field */
 367	mode = ETM_MODE_QELEM(config->mode);
 368	/* start by clearing QE bits */
 369	config->cfg &= ~(TRCCONFIGR_QE_W_COUNTS | TRCCONFIGR_QE_WO_COUNTS);
 370	/*
 371	 * if supported, Q elements with instruction counts are enabled.
 372	 * Always set the low bit for any requested mode. Valid combos are
 373	 * 0b00, 0b01 and 0b11.
 374	 */
 375	if (mode && drvdata->q_support)
 376		config->cfg |= TRCCONFIGR_QE_W_COUNTS;
 377	/*
 378	 * if supported, Q elements with and without instruction
 379	 * counts are enabled
 380	 */
 381	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
 382		config->cfg |= TRCCONFIGR_QE_WO_COUNTS;
 383
 384	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
 385	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
 386	    (drvdata->atbtrig == true))
 387		config->eventctrl1 |= TRCEVENTCTL1R_ATB;
 388	else
 389		config->eventctrl1 &= ~TRCEVENTCTL1R_ATB;
 390
 391	/* bit[12], Low-power state behavior override bit */
 392	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
 393	    (drvdata->lpoverride == true))
 394		config->eventctrl1 |= TRCEVENTCTL1R_LPOVERRIDE;
 395	else
 396		config->eventctrl1 &= ~TRCEVENTCTL1R_LPOVERRIDE;
 397
 398	/* bit[8], Instruction stall bit */
 399	if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
 400		config->stall_ctrl |= TRCSTALLCTLR_ISTALL;
 401	else
 402		config->stall_ctrl &= ~TRCSTALLCTLR_ISTALL;
 403
 404	/* bit[10], Prioritize instruction trace bit */
 405	if (config->mode & ETM_MODE_INSTPRIO)
 406		config->stall_ctrl |= TRCSTALLCTLR_INSTPRIORITY;
 407	else
 408		config->stall_ctrl &= ~TRCSTALLCTLR_INSTPRIORITY;
 409
 410	/* bit[13], Trace overflow prevention bit */
 411	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
 412		(drvdata->nooverflow == true))
 413		config->stall_ctrl |= TRCSTALLCTLR_NOOVERFLOW;
 414	else
 415		config->stall_ctrl &= ~TRCSTALLCTLR_NOOVERFLOW;
 416
 417	/* bit[9] Start/stop logic control bit */
 418	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
 419		config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
 420	else
 421		config->vinst_ctrl &= ~TRCVICTLR_SSSTATUS;
 422
 423	/* bit[10], Whether a trace unit must trace a Reset exception */
 424	if (config->mode & ETM_MODE_TRACE_RESET)
 425		config->vinst_ctrl |= TRCVICTLR_TRCRESET;
 426	else
 427		config->vinst_ctrl &= ~TRCVICTLR_TRCRESET;
 428
 429	/* bit[11], Whether a trace unit must trace a system error exception */
 430	if ((config->mode & ETM_MODE_TRACE_ERR) &&
 431		(drvdata->trc_error == true))
 432		config->vinst_ctrl |= TRCVICTLR_TRCERR;
 433	else
 434		config->vinst_ctrl &= ~TRCVICTLR_TRCERR;
 435
 436	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
 437		etm4_config_trace_mode(config);
 438
 439	spin_unlock(&drvdata->spinlock);
 440
 441	return size;
 442}
 443static DEVICE_ATTR_RW(mode);
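/*
 * Note on the handling above (derived from the code): 'mode' takes a hex
 * bitmask that is masked with ETMv4_MODE_ALL, and each ETM_MODE_* /
 * ETMv4_MODE_* bit only programs the matching TRCCONFIGR, TRCEVENTCTL1R,
 * TRCSTALLCTLR or TRCVICTLR field when the corresponding capability flag
 * in drvdata (trcbb, trccci, ctxid_size, ...) is set, so bits the trace
 * unit does not support are silently ignored, e.g.
 *
 *   echo <hex bitmask> > mode
 */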
 444
 445static ssize_t pe_show(struct device *dev,
 446		       struct device_attribute *attr,
 447		       char *buf)
 448{
 449	unsigned long val;
 450	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 451	struct etmv4_config *config = &drvdata->config;
 452
 453	val = config->pe_sel;
 454	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 455}
 456
 457static ssize_t pe_store(struct device *dev,
 458			struct device_attribute *attr,
 459			const char *buf, size_t size)
 460{
 461	unsigned long val;
 462	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 463	struct etmv4_config *config = &drvdata->config;
 464
 465	if (kstrtoul(buf, 16, &val))
 466		return -EINVAL;
 467
 468	spin_lock(&drvdata->spinlock);
 469	if (val > drvdata->nr_pe) {
 470		spin_unlock(&drvdata->spinlock);
 471		return -EINVAL;
 472	}
 473
 474	config->pe_sel = val;
 475	spin_unlock(&drvdata->spinlock);
 476	return size;
 477}
 478static DEVICE_ATTR_RW(pe);
 479
 480static ssize_t event_show(struct device *dev,
 481			  struct device_attribute *attr,
 482			  char *buf)
 483{
 484	unsigned long val;
 485	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 486	struct etmv4_config *config = &drvdata->config;
 487
 488	val = config->eventctrl0;
 489	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 490}
 491
 492static ssize_t event_store(struct device *dev,
 493			   struct device_attribute *attr,
 494			   const char *buf, size_t size)
 495{
 496	unsigned long val;
 497	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 498	struct etmv4_config *config = &drvdata->config;
 499
 500	if (kstrtoul(buf, 16, &val))
 501		return -EINVAL;
 502
 503	spin_lock(&drvdata->spinlock);
 504	switch (drvdata->nr_event) {
 505	case 0x0:
 506		/* EVENT0, bits[7:0] */
 507		config->eventctrl0 = val & 0xFF;
 508		break;
 509	case 0x1:
  510		/* EVENT1, bits[15:8] */
 511		config->eventctrl0 = val & 0xFFFF;
 512		break;
 513	case 0x2:
 514		/* EVENT2, bits[23:16] */
 515		config->eventctrl0 = val & 0xFFFFFF;
 516		break;
 517	case 0x3:
 518		/* EVENT3, bits[31:24] */
 519		config->eventctrl0 = val;
 520		break;
 521	default:
 522		break;
 523	}
 524	spin_unlock(&drvdata->spinlock);
 525	return size;
 526}
 527static DEVICE_ATTR_RW(event);
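/*
 * Note on the masking above (derived from the switch, value illustrative):
 * the event control register holds up to four packed 8-bit event selectors
 * and drvdata->nr_event decides how many survive the write - 0x0 keeps
 * bits[7:0], 0x1 bits[15:0], 0x2 bits[23:0] and 0x3 all 32 bits.  On an
 * implementation with at least two events,
 *
 *   echo 0x00000101 > event
 *
 * programs the EVENT0 and EVENT1 selectors.
 */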
 528
 529static ssize_t event_instren_show(struct device *dev,
 530				  struct device_attribute *attr,
 531				  char *buf)
 532{
 533	unsigned long val;
 534	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 535	struct etmv4_config *config = &drvdata->config;
 536
 537	val = FIELD_GET(TRCEVENTCTL1R_INSTEN_MASK, config->eventctrl1);
 538	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 539}
 540
 541static ssize_t event_instren_store(struct device *dev,
 542				   struct device_attribute *attr,
 543				   const char *buf, size_t size)
 544{
 545	unsigned long val;
 546	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 547	struct etmv4_config *config = &drvdata->config;
 548
 549	if (kstrtoul(buf, 16, &val))
 550		return -EINVAL;
 551
 552	spin_lock(&drvdata->spinlock);
 553	/* start by clearing all instruction event enable bits */
 554	config->eventctrl1 &= ~TRCEVENTCTL1R_INSTEN_MASK;
 555	switch (drvdata->nr_event) {
 556	case 0x0:
 557		/* generate Event element for event 1 */
 558		config->eventctrl1 |= val & TRCEVENTCTL1R_INSTEN_1;
 559		break;
 560	case 0x1:
 561		/* generate Event element for event 1 and 2 */
 562		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 | TRCEVENTCTL1R_INSTEN_1);
 563		break;
 564	case 0x2:
 565		/* generate Event element for event 1, 2 and 3 */
 566		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
 567					     TRCEVENTCTL1R_INSTEN_1 |
 568					     TRCEVENTCTL1R_INSTEN_2);
 569		break;
 570	case 0x3:
 571		/* generate Event element for all 4 events */
 572		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
 573					     TRCEVENTCTL1R_INSTEN_1 |
 574					     TRCEVENTCTL1R_INSTEN_2 |
 575					     TRCEVENTCTL1R_INSTEN_3);
 576		break;
 577	default:
 578		break;
 579	}
 580	spin_unlock(&drvdata->spinlock);
 581	return size;
 582}
 583static DEVICE_ATTR_RW(event_instren);
 584
 585static ssize_t event_ts_show(struct device *dev,
 586			     struct device_attribute *attr,
 587			     char *buf)
 588{
 589	unsigned long val;
 590	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 591	struct etmv4_config *config = &drvdata->config;
 592
 593	val = config->ts_ctrl;
 594	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 595}
 596
 597static ssize_t event_ts_store(struct device *dev,
 598			      struct device_attribute *attr,
 599			      const char *buf, size_t size)
 600{
 601	unsigned long val;
 602	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 603	struct etmv4_config *config = &drvdata->config;
 604
 605	if (kstrtoul(buf, 16, &val))
 606		return -EINVAL;
 607	if (!drvdata->ts_size)
 608		return -EINVAL;
 609
 610	config->ts_ctrl = val & ETMv4_EVENT_MASK;
 611	return size;
 612}
 613static DEVICE_ATTR_RW(event_ts);
 614
 615static ssize_t syncfreq_show(struct device *dev,
 616			     struct device_attribute *attr,
 617			     char *buf)
 618{
 619	unsigned long val;
 620	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 621	struct etmv4_config *config = &drvdata->config;
 622
 623	val = config->syncfreq;
 624	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 625}
 626
 627static ssize_t syncfreq_store(struct device *dev,
 628			      struct device_attribute *attr,
 629			      const char *buf, size_t size)
 630{
 631	unsigned long val;
 632	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 633	struct etmv4_config *config = &drvdata->config;
 634
 635	if (kstrtoul(buf, 16, &val))
 636		return -EINVAL;
 637	if (drvdata->syncpr == true)
 638		return -EINVAL;
 639
 640	config->syncfreq = val & ETMv4_SYNC_MASK;
 641	return size;
 642}
 643static DEVICE_ATTR_RW(syncfreq);
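/*
 * Usage sketch (illustrative): the write is rejected when the sync period
 * is fixed (drvdata->syncpr), otherwise the value is masked with
 * ETMv4_SYNC_MASK and used as the 2^n byte period, matching the reset
 * default above, e.g.
 *
 *   echo 0x8 > syncfreq		# 2^8 = 256 byte sync period
 */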
 644
 645static ssize_t cyc_threshold_show(struct device *dev,
 646				  struct device_attribute *attr,
 647				  char *buf)
 648{
 649	unsigned long val;
 650	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 651	struct etmv4_config *config = &drvdata->config;
 652
 653	val = config->ccctlr;
 654	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 655}
 656
 657static ssize_t cyc_threshold_store(struct device *dev,
 658				   struct device_attribute *attr,
 659				   const char *buf, size_t size)
 660{
 661	unsigned long val;
 662	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 663	struct etmv4_config *config = &drvdata->config;
 664
 665	if (kstrtoul(buf, 16, &val))
 666		return -EINVAL;
 667
 668	/* mask off max threshold before checking min value */
 669	val &= ETM_CYC_THRESHOLD_MASK;
 670	if (val < drvdata->ccitmin)
 671		return -EINVAL;
 672
 673	config->ccctlr = val;
 674	return size;
 675}
 676static DEVICE_ATTR_RW(cyc_threshold);
 677
 678static ssize_t bb_ctrl_show(struct device *dev,
 679			    struct device_attribute *attr,
 680			    char *buf)
 681{
 682	unsigned long val;
 683	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 684	struct etmv4_config *config = &drvdata->config;
 685
 686	val = config->bb_ctrl;
 687	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 688}
 689
 690static ssize_t bb_ctrl_store(struct device *dev,
 691			     struct device_attribute *attr,
 692			     const char *buf, size_t size)
 693{
 694	unsigned long val;
 695	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 696	struct etmv4_config *config = &drvdata->config;
 697
 698	if (kstrtoul(buf, 16, &val))
 699		return -EINVAL;
 700	if (drvdata->trcbb == false)
 701		return -EINVAL;
 702	if (!drvdata->nr_addr_cmp)
 703		return -EINVAL;
 704
 705	/*
 706	 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
 707	 * individual range comparators. If include then at least 1
 708	 * range must be selected.
 709	 */
 710	if ((val & TRCBBCTLR_MODE) && (FIELD_GET(TRCBBCTLR_RANGE_MASK, val) == 0))
 711		return -EINVAL;
 712
 713	config->bb_ctrl = val & (TRCBBCTLR_MODE | TRCBBCTLR_RANGE_MASK);
 714	return size;
 715}
 716static DEVICE_ATTR_RW(bb_ctrl);
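/*
 * Usage sketch (value illustrative): bit[8] selects include(1)/exclude(0)
 * and bits[7:0] pick the address range comparator pairs, so, assuming
 * range comparator pair 0 has already been programmed via addr_range,
 *
 *   echo 0x101 > bb_ctrl		# broadcast branches within range pair 0
 */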
 717
 718static ssize_t event_vinst_show(struct device *dev,
 719				struct device_attribute *attr,
 720				char *buf)
 721{
 722	unsigned long val;
 723	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 724	struct etmv4_config *config = &drvdata->config;
 725
 726	val = FIELD_GET(TRCVICTLR_EVENT_MASK, config->vinst_ctrl);
 727	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 728}
 729
 730static ssize_t event_vinst_store(struct device *dev,
 731				 struct device_attribute *attr,
 732				 const char *buf, size_t size)
 733{
 734	unsigned long val;
 735	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 736	struct etmv4_config *config = &drvdata->config;
 737
 738	if (kstrtoul(buf, 16, &val))
 739		return -EINVAL;
 740
 741	spin_lock(&drvdata->spinlock);
 742	val &= TRCVICTLR_EVENT_MASK >> __bf_shf(TRCVICTLR_EVENT_MASK);
 743	config->vinst_ctrl &= ~TRCVICTLR_EVENT_MASK;
 744	config->vinst_ctrl |= FIELD_PREP(TRCVICTLR_EVENT_MASK, val);
 745	spin_unlock(&drvdata->spinlock);
 746	return size;
 747}
 748static DEVICE_ATTR_RW(event_vinst);
 749
 750static ssize_t s_exlevel_vinst_show(struct device *dev,
 751				    struct device_attribute *attr,
 752				    char *buf)
 753{
 754	unsigned long val;
 755	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 756	struct etmv4_config *config = &drvdata->config;
 757
 758	val = FIELD_GET(TRCVICTLR_EXLEVEL_S_MASK, config->vinst_ctrl);
 759	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 760}
 761
 762static ssize_t s_exlevel_vinst_store(struct device *dev,
 763				     struct device_attribute *attr,
 764				     const char *buf, size_t size)
 765{
 766	unsigned long val;
 767	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 768	struct etmv4_config *config = &drvdata->config;
 769
 770	if (kstrtoul(buf, 16, &val))
 771		return -EINVAL;
 772
 773	spin_lock(&drvdata->spinlock);
 774	/* clear all EXLEVEL_S bits  */
 775	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_S_MASK;
 776	/* enable instruction tracing for corresponding exception level */
 777	val &= drvdata->s_ex_level;
 778	config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK);
 779	spin_unlock(&drvdata->spinlock);
 780	return size;
 781}
 782static DEVICE_ATTR_RW(s_exlevel_vinst);
 783
 784static ssize_t ns_exlevel_vinst_show(struct device *dev,
 785				     struct device_attribute *attr,
 786				     char *buf)
 787{
 788	unsigned long val;
 789	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 790	struct etmv4_config *config = &drvdata->config;
 791
 792	/* EXLEVEL_NS, bits[23:20] */
 793	val = FIELD_GET(TRCVICTLR_EXLEVEL_NS_MASK, config->vinst_ctrl);
 794	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 795}
 796
 797static ssize_t ns_exlevel_vinst_store(struct device *dev,
 798				      struct device_attribute *attr,
 799				      const char *buf, size_t size)
 800{
 801	unsigned long val;
 802	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 803	struct etmv4_config *config = &drvdata->config;
 804
 805	if (kstrtoul(buf, 16, &val))
 806		return -EINVAL;
 807
 808	spin_lock(&drvdata->spinlock);
 809	/* clear EXLEVEL_NS bits  */
 810	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK;
 811	/* enable instruction tracing for corresponding exception level */
 812	val &= drvdata->ns_ex_level;
 813	config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK);
 814	spin_unlock(&drvdata->spinlock);
 815	return size;
 816}
 817static DEVICE_ATTR_RW(ns_exlevel_vinst);
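/*
 * Note (derived from the code): for both s_exlevel_vinst and
 * ns_exlevel_vinst the written mask is ANDed with the exception levels the
 * trace unit actually supports (drvdata->s_ex_level / ns_ex_level) before
 * being shifted into the TRCVICTLR EXLEVEL_S / EXLEVEL_NS fields, so
 * unsupported levels cannot be selected from sysfs.
 */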
 818
 819static ssize_t addr_idx_show(struct device *dev,
 820			     struct device_attribute *attr,
 821			     char *buf)
 822{
 823	unsigned long val;
 824	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 825	struct etmv4_config *config = &drvdata->config;
 826
 827	val = config->addr_idx;
 828	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 829}
 830
 831static ssize_t addr_idx_store(struct device *dev,
 832			      struct device_attribute *attr,
 833			      const char *buf, size_t size)
 834{
 835	unsigned long val;
 836	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 837	struct etmv4_config *config = &drvdata->config;
 838
 839	if (kstrtoul(buf, 16, &val))
 840		return -EINVAL;
 841	if (val >= drvdata->nr_addr_cmp * 2)
 842		return -EINVAL;
 843
 844	/*
 845	 * Use spinlock to ensure index doesn't change while it gets
 846	 * dereferenced multiple times within a spinlock block elsewhere.
 847	 */
 848	spin_lock(&drvdata->spinlock);
 849	config->addr_idx = val;
 850	spin_unlock(&drvdata->spinlock);
 851	return size;
 852}
 853static DEVICE_ATTR_RW(addr_idx);
 854
 855static ssize_t addr_instdatatype_show(struct device *dev,
 856				      struct device_attribute *attr,
 857				      char *buf)
 858{
 859	ssize_t len;
 860	u8 val, idx;
 861	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 862	struct etmv4_config *config = &drvdata->config;
 863
 864	spin_lock(&drvdata->spinlock);
 865	idx = config->addr_idx;
 866	val = FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]);
 867	len = scnprintf(buf, PAGE_SIZE, "%s\n",
 868			val == TRCACATRn_TYPE_ADDR ? "instr" :
 869			(val == TRCACATRn_TYPE_DATA_LOAD_ADDR ? "data_load" :
 870			(val == TRCACATRn_TYPE_DATA_STORE_ADDR ? "data_store" :
 871			"data_load_store")));
 872	spin_unlock(&drvdata->spinlock);
 873	return len;
 874}
 875
 876static ssize_t addr_instdatatype_store(struct device *dev,
 877				       struct device_attribute *attr,
 878				       const char *buf, size_t size)
 879{
 880	u8 idx;
 881	char str[20] = "";
 882	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 883	struct etmv4_config *config = &drvdata->config;
 884
 885	if (strlen(buf) >= 20)
 886		return -EINVAL;
 887	if (sscanf(buf, "%s", str) != 1)
 888		return -EINVAL;
 889
 890	spin_lock(&drvdata->spinlock);
 891	idx = config->addr_idx;
 892	if (!strcmp(str, "instr"))
 893		/* TYPE, bits[1:0] */
 894		config->addr_acc[idx] &= ~TRCACATRn_TYPE_MASK;
 895
 896	spin_unlock(&drvdata->spinlock);
 897	return size;
 898}
 899static DEVICE_ATTR_RW(addr_instdatatype);
 900
 901static ssize_t addr_single_show(struct device *dev,
 902				struct device_attribute *attr,
 903				char *buf)
 904{
 905	u8 idx;
 906	unsigned long val;
 907	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 908	struct etmv4_config *config = &drvdata->config;
 909
 910	idx = config->addr_idx;
 911	spin_lock(&drvdata->spinlock);
 912	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
 913	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
 914		spin_unlock(&drvdata->spinlock);
 915		return -EPERM;
 916	}
 917	val = (unsigned long)config->addr_val[idx];
 918	spin_unlock(&drvdata->spinlock);
 919	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 920}
 921
 922static ssize_t addr_single_store(struct device *dev,
 923				 struct device_attribute *attr,
 924				 const char *buf, size_t size)
 925{
 926	u8 idx;
 927	unsigned long val;
 928	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 929	struct etmv4_config *config = &drvdata->config;
 930
 931	if (kstrtoul(buf, 16, &val))
 932		return -EINVAL;
 933
 934	spin_lock(&drvdata->spinlock);
 935	idx = config->addr_idx;
 936	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
 937	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
 938		spin_unlock(&drvdata->spinlock);
 939		return -EPERM;
 940	}
 941
 942	config->addr_val[idx] = (u64)val;
 943	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
 944	spin_unlock(&drvdata->spinlock);
 945	return size;
 946}
 947static DEVICE_ATTR_RW(addr_single);
 948
 949static ssize_t addr_range_show(struct device *dev,
 950			       struct device_attribute *attr,
 951			       char *buf)
 952{
 953	u8 idx;
 954	unsigned long val1, val2;
 955	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 956	struct etmv4_config *config = &drvdata->config;
 957
 958	spin_lock(&drvdata->spinlock);
 959	idx = config->addr_idx;
 960	if (idx % 2 != 0) {
 961		spin_unlock(&drvdata->spinlock);
 962		return -EPERM;
 963	}
 964	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
 965	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
 966	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
 967	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
 968		spin_unlock(&drvdata->spinlock);
 969		return -EPERM;
 970	}
 971
 972	val1 = (unsigned long)config->addr_val[idx];
 973	val2 = (unsigned long)config->addr_val[idx + 1];
 974	spin_unlock(&drvdata->spinlock);
 975	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
 976}
 977
 978static ssize_t addr_range_store(struct device *dev,
 979				struct device_attribute *attr,
 980				const char *buf, size_t size)
 981{
 982	u8 idx;
 983	unsigned long val1, val2;
 984	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 985	struct etmv4_config *config = &drvdata->config;
 986	int elements, exclude;
 987
 988	elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
 989
  990	/* exclude is optional, but we need at least two parameters */
 991	if (elements < 2)
 992		return -EINVAL;
 993	/* lower address comparator cannot have a higher address value */
 994	if (val1 > val2)
 995		return -EINVAL;
 996
 997	spin_lock(&drvdata->spinlock);
 998	idx = config->addr_idx;
 999	if (idx % 2 != 0) {
1000		spin_unlock(&drvdata->spinlock);
1001		return -EPERM;
1002	}
1003
1004	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1005	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1006	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1007	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1008		spin_unlock(&drvdata->spinlock);
1009		return -EPERM;
1010	}
1011
1012	config->addr_val[idx] = (u64)val1;
1013	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1014	config->addr_val[idx + 1] = (u64)val2;
1015	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1016	/*
 1017	 * Program the include or exclude control bits for vinst or vdata
 1018	 * whenever we change an addr comparator to ETM_ADDR_TYPE_RANGE;
 1019	 * use the supplied value, or default to the bit set in 'mode'.
1020	 */
1021	if (elements != 3)
1022		exclude = config->mode & ETM_MODE_EXCLUDE;
1023	etm4_set_mode_exclude(drvdata, exclude ? true : false);
1024
1025	spin_unlock(&drvdata->spinlock);
1026	return size;
1027}
1028static DEVICE_ATTR_RW(addr_range);
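/*
 * Usage sketch (addresses illustrative): select an even comparator index,
 * then write "start end [exclude]" in hex; start must not exceed end, and
 * the optional third field overrides the include/exclude default taken
 * from ETM_MODE_EXCLUDE in 'mode', e.g.
 *
 *   echo 0 > addr_idx
 *   echo 0x400000 0x500000 0 > addr_range	# include this range
 */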
1029
1030static ssize_t addr_start_show(struct device *dev,
1031			       struct device_attribute *attr,
1032			       char *buf)
1033{
1034	u8 idx;
1035	unsigned long val;
1036	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1037	struct etmv4_config *config = &drvdata->config;
1038
1039	spin_lock(&drvdata->spinlock);
1040	idx = config->addr_idx;
1041
1042	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1043	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1044		spin_unlock(&drvdata->spinlock);
1045		return -EPERM;
1046	}
1047
1048	val = (unsigned long)config->addr_val[idx];
1049	spin_unlock(&drvdata->spinlock);
1050	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1051}
1052
1053static ssize_t addr_start_store(struct device *dev,
1054				struct device_attribute *attr,
1055				const char *buf, size_t size)
1056{
1057	u8 idx;
1058	unsigned long val;
1059	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1060	struct etmv4_config *config = &drvdata->config;
1061
1062	if (kstrtoul(buf, 16, &val))
1063		return -EINVAL;
1064
1065	spin_lock(&drvdata->spinlock);
1066	idx = config->addr_idx;
1067	if (!drvdata->nr_addr_cmp) {
1068		spin_unlock(&drvdata->spinlock);
1069		return -EINVAL;
1070	}
1071	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1072	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1073		spin_unlock(&drvdata->spinlock);
1074		return -EPERM;
1075	}
1076
1077	config->addr_val[idx] = (u64)val;
1078	config->addr_type[idx] = ETM_ADDR_TYPE_START;
1079	config->vissctlr |= BIT(idx);
1080	spin_unlock(&drvdata->spinlock);
1081	return size;
1082}
1083static DEVICE_ATTR_RW(addr_start);
1084
1085static ssize_t addr_stop_show(struct device *dev,
1086			      struct device_attribute *attr,
1087			      char *buf)
1088{
1089	u8 idx;
1090	unsigned long val;
1091	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1092	struct etmv4_config *config = &drvdata->config;
1093
1094	spin_lock(&drvdata->spinlock);
1095	idx = config->addr_idx;
1096
1097	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1098	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1099		spin_unlock(&drvdata->spinlock);
1100		return -EPERM;
1101	}
1102
1103	val = (unsigned long)config->addr_val[idx];
1104	spin_unlock(&drvdata->spinlock);
1105	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1106}
1107
1108static ssize_t addr_stop_store(struct device *dev,
1109			       struct device_attribute *attr,
1110			       const char *buf, size_t size)
1111{
1112	u8 idx;
1113	unsigned long val;
1114	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1115	struct etmv4_config *config = &drvdata->config;
1116
1117	if (kstrtoul(buf, 16, &val))
1118		return -EINVAL;
1119
1120	spin_lock(&drvdata->spinlock);
1121	idx = config->addr_idx;
1122	if (!drvdata->nr_addr_cmp) {
1123		spin_unlock(&drvdata->spinlock);
1124		return -EINVAL;
1125	}
1126	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1127	       config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1128		spin_unlock(&drvdata->spinlock);
1129		return -EPERM;
1130	}
1131
1132	config->addr_val[idx] = (u64)val;
1133	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1134	config->vissctlr |= BIT(idx + 16);
1135	spin_unlock(&drvdata->spinlock);
1136	return size;
1137}
1138static DEVICE_ATTR_RW(addr_stop);
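/*
 * Usage sketch (addresses illustrative): start/stop filtering uses one
 * comparator per address - addr_start sets BIT(idx) and addr_stop sets
 * BIT(idx + 16) in the ViewInst start/stop control register, e.g.
 *
 *   echo 0 > addr_idx; echo 0x400000 > addr_start
 *   echo 1 > addr_idx; echo 0x500000 > addr_stop
 */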
1139
1140static ssize_t addr_ctxtype_show(struct device *dev,
1141				 struct device_attribute *attr,
1142				 char *buf)
1143{
1144	ssize_t len;
1145	u8 idx, val;
1146	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1147	struct etmv4_config *config = &drvdata->config;
1148
1149	spin_lock(&drvdata->spinlock);
1150	idx = config->addr_idx;
1151	/* CONTEXTTYPE, bits[3:2] */
1152	val = FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK, config->addr_acc[idx]);
1153	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1154			(val == ETM_CTX_CTXID ? "ctxid" :
1155			(val == ETM_CTX_VMID ? "vmid" : "all")));
1156	spin_unlock(&drvdata->spinlock);
1157	return len;
1158}
1159
1160static ssize_t addr_ctxtype_store(struct device *dev,
1161				  struct device_attribute *attr,
1162				  const char *buf, size_t size)
1163{
1164	u8 idx;
1165	char str[10] = "";
1166	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1167	struct etmv4_config *config = &drvdata->config;
1168
1169	if (strlen(buf) >= 10)
1170		return -EINVAL;
1171	if (sscanf(buf, "%s", str) != 1)
1172		return -EINVAL;
1173
1174	spin_lock(&drvdata->spinlock);
1175	idx = config->addr_idx;
1176	if (!strcmp(str, "none"))
1177		/* start by clearing context type bits */
1178		config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_MASK;
1179	else if (!strcmp(str, "ctxid")) {
 1180		/* 0b01 The trace unit performs a Context ID comparison */
1181		if (drvdata->numcidc) {
1182			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
1183			config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_VMID;
1184		}
1185	} else if (!strcmp(str, "vmid")) {
 1186		/* 0b10 The trace unit performs a VMID comparison */
1187		if (drvdata->numvmidc) {
1188			config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_CTXID;
1189			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
1190		}
1191	} else if (!strcmp(str, "all")) {
1192		/*
1193		 * 0b11 The trace unit performs a Context ID
 1194		 * comparison and a VMID comparison
1195		 */
1196		if (drvdata->numcidc)
1197			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
1198		if (drvdata->numvmidc)
1199			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
1200	}
1201	spin_unlock(&drvdata->spinlock);
1202	return size;
1203}
1204static DEVICE_ATTR_RW(addr_ctxtype);
1205
1206static ssize_t addr_context_show(struct device *dev,
1207				 struct device_attribute *attr,
1208				 char *buf)
1209{
1210	u8 idx;
1211	unsigned long val;
1212	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1213	struct etmv4_config *config = &drvdata->config;
1214
1215	spin_lock(&drvdata->spinlock);
1216	idx = config->addr_idx;
1217	/* context ID comparator bits[6:4] */
1218	val = FIELD_GET(TRCACATRn_CONTEXT_MASK, config->addr_acc[idx]);
1219	spin_unlock(&drvdata->spinlock);
1220	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1221}
1222
1223static ssize_t addr_context_store(struct device *dev,
1224				  struct device_attribute *attr,
1225				  const char *buf, size_t size)
1226{
1227	u8 idx;
1228	unsigned long val;
1229	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1230	struct etmv4_config *config = &drvdata->config;
1231
1232	if (kstrtoul(buf, 16, &val))
1233		return -EINVAL;
1234	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1235		return -EINVAL;
1236	if (val >=  (drvdata->numcidc >= drvdata->numvmidc ?
1237		     drvdata->numcidc : drvdata->numvmidc))
1238		return -EINVAL;
1239
1240	spin_lock(&drvdata->spinlock);
1241	idx = config->addr_idx;
1242	/* clear context ID comparator bits[6:4] */
1243	config->addr_acc[idx] &= ~TRCACATRn_CONTEXT_MASK;
1244	config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_CONTEXT_MASK);
1245	spin_unlock(&drvdata->spinlock);
1246	return size;
1247}
1248static DEVICE_ATTR_RW(addr_context);
1249
1250static ssize_t addr_exlevel_s_ns_show(struct device *dev,
1251				      struct device_attribute *attr,
1252				      char *buf)
1253{
1254	u8 idx;
1255	unsigned long val;
1256	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1257	struct etmv4_config *config = &drvdata->config;
1258
1259	spin_lock(&drvdata->spinlock);
1260	idx = config->addr_idx;
1261	val = FIELD_GET(TRCACATRn_EXLEVEL_MASK, config->addr_acc[idx]);
1262	spin_unlock(&drvdata->spinlock);
1263	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1264}
1265
1266static ssize_t addr_exlevel_s_ns_store(struct device *dev,
1267				       struct device_attribute *attr,
1268				       const char *buf, size_t size)
1269{
1270	u8 idx;
1271	unsigned long val;
1272	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1273	struct etmv4_config *config = &drvdata->config;
1274
1275	if (kstrtoul(buf, 0, &val))
1276		return -EINVAL;
1277
1278	if (val & ~(TRCACATRn_EXLEVEL_MASK >> __bf_shf(TRCACATRn_EXLEVEL_MASK)))
1279		return -EINVAL;
1280
1281	spin_lock(&drvdata->spinlock);
1282	idx = config->addr_idx;
1283	/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1284	config->addr_acc[idx] &= ~TRCACATRn_EXLEVEL_MASK;
1285	config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_EXLEVEL_MASK);
1286	spin_unlock(&drvdata->spinlock);
1287	return size;
1288}
1289static DEVICE_ATTR_RW(addr_exlevel_s_ns);
1290
1291static const char * const addr_type_names[] = {
1292	"unused",
1293	"single",
1294	"range",
1295	"start",
1296	"stop"
1297};
1298
1299static ssize_t addr_cmp_view_show(struct device *dev,
1300				  struct device_attribute *attr, char *buf)
1301{
1302	u8 idx, addr_type;
1303	unsigned long addr_v, addr_v2, addr_ctrl;
1304	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1305	struct etmv4_config *config = &drvdata->config;
1306	int size = 0;
1307	bool exclude = false;
1308
1309	spin_lock(&drvdata->spinlock);
1310	idx = config->addr_idx;
1311	addr_v = config->addr_val[idx];
1312	addr_ctrl = config->addr_acc[idx];
1313	addr_type = config->addr_type[idx];
1314	if (addr_type == ETM_ADDR_TYPE_RANGE) {
1315		if (idx & 0x1) {
1316			idx -= 1;
1317			addr_v2 = addr_v;
1318			addr_v = config->addr_val[idx];
1319		} else {
1320			addr_v2 = config->addr_val[idx + 1];
1321		}
1322		exclude = config->viiectlr & BIT(idx / 2 + 16);
1323	}
1324	spin_unlock(&drvdata->spinlock);
1325	if (addr_type) {
1326		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
1327				 addr_type_names[addr_type], addr_v);
1328		if (addr_type == ETM_ADDR_TYPE_RANGE) {
1329			size += scnprintf(buf + size, PAGE_SIZE - size,
1330					  " %#lx %s", addr_v2,
1331					  exclude ? "exclude" : "include");
1332		}
1333		size += scnprintf(buf + size, PAGE_SIZE - size,
1334				  " ctrl(%#lx)\n", addr_ctrl);
1335	} else {
1336		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
1337	}
1338	return size;
1339}
1340static DEVICE_ATTR_RO(addr_cmp_view);
1341
1342static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
1343					    struct device_attribute *attr,
1344					    char *buf)
1345{
1346	unsigned long val;
1347	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1348	struct etmv4_config *config = &drvdata->config;
1349
1350	if (!drvdata->nr_pe_cmp)
1351		return -EINVAL;
1352	val = config->vipcssctlr;
1353	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1354}
1355static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
1356					     struct device_attribute *attr,
1357					     const char *buf, size_t size)
1358{
1359	unsigned long val;
1360	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1361	struct etmv4_config *config = &drvdata->config;
1362
1363	if (kstrtoul(buf, 16, &val))
1364		return -EINVAL;
1365	if (!drvdata->nr_pe_cmp)
1366		return -EINVAL;
1367
1368	spin_lock(&drvdata->spinlock);
1369	config->vipcssctlr = val;
1370	spin_unlock(&drvdata->spinlock);
1371	return size;
1372}
1373static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
1374
1375static ssize_t seq_idx_show(struct device *dev,
1376			    struct device_attribute *attr,
1377			    char *buf)
1378{
1379	unsigned long val;
1380	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1381	struct etmv4_config *config = &drvdata->config;
1382
1383	val = config->seq_idx;
1384	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1385}
1386
1387static ssize_t seq_idx_store(struct device *dev,
1388			     struct device_attribute *attr,
1389			     const char *buf, size_t size)
1390{
1391	unsigned long val;
1392	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1393	struct etmv4_config *config = &drvdata->config;
1394
1395	if (kstrtoul(buf, 16, &val))
1396		return -EINVAL;
1397	if (val >= drvdata->nrseqstate - 1)
1398		return -EINVAL;
1399
1400	/*
1401	 * Use spinlock to ensure index doesn't change while it gets
1402	 * dereferenced multiple times within a spinlock block elsewhere.
1403	 */
1404	spin_lock(&drvdata->spinlock);
1405	config->seq_idx = val;
1406	spin_unlock(&drvdata->spinlock);
1407	return size;
1408}
1409static DEVICE_ATTR_RW(seq_idx);
1410
1411static ssize_t seq_state_show(struct device *dev,
1412			      struct device_attribute *attr,
1413			      char *buf)
1414{
1415	unsigned long val;
1416	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1417	struct etmv4_config *config = &drvdata->config;
1418
1419	val = config->seq_state;
1420	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1421}
1422
1423static ssize_t seq_state_store(struct device *dev,
1424			       struct device_attribute *attr,
1425			       const char *buf, size_t size)
1426{
1427	unsigned long val;
1428	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1429	struct etmv4_config *config = &drvdata->config;
1430
1431	if (kstrtoul(buf, 16, &val))
1432		return -EINVAL;
1433	if (val >= drvdata->nrseqstate)
1434		return -EINVAL;
1435
1436	config->seq_state = val;
1437	return size;
1438}
1439static DEVICE_ATTR_RW(seq_state);
1440
1441static ssize_t seq_event_show(struct device *dev,
1442			      struct device_attribute *attr,
1443			      char *buf)
1444{
1445	u8 idx;
1446	unsigned long val;
1447	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1448	struct etmv4_config *config = &drvdata->config;
1449
1450	spin_lock(&drvdata->spinlock);
1451	idx = config->seq_idx;
1452	val = config->seq_ctrl[idx];
1453	spin_unlock(&drvdata->spinlock);
1454	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1455}
1456
1457static ssize_t seq_event_store(struct device *dev,
1458			       struct device_attribute *attr,
1459			       const char *buf, size_t size)
1460{
1461	u8 idx;
1462	unsigned long val;
1463	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1464	struct etmv4_config *config = &drvdata->config;
1465
1466	if (kstrtoul(buf, 16, &val))
1467		return -EINVAL;
1468
1469	spin_lock(&drvdata->spinlock);
1470	idx = config->seq_idx;
1471	/* Seq control has two masks B[15:8] F[7:0] */
1472	config->seq_ctrl[idx] = val & 0xFFFF;
1473	spin_unlock(&drvdata->spinlock);
1474	return size;
1475}
1476static DEVICE_ATTR_RW(seq_event);
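/*
 * Usage sketch (values illustrative): seq_idx selects one of the
 * nrseqstate - 1 sequencer state transition registers, each taking a
 * 16-bit value split per the B[15:8] F[7:0] comment above, i.e. backward
 * and forward event masks, e.g.
 *
 *   echo 0 > seq_idx
 *   echo 0x0001 > seq_event		# forward event only, no backward event
 */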
1477
1478static ssize_t seq_reset_event_show(struct device *dev,
1479				    struct device_attribute *attr,
1480				    char *buf)
1481{
1482	unsigned long val;
1483	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1484	struct etmv4_config *config = &drvdata->config;
1485
1486	val = config->seq_rst;
1487	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1488}
1489
1490static ssize_t seq_reset_event_store(struct device *dev,
1491				     struct device_attribute *attr,
1492				     const char *buf, size_t size)
1493{
1494	unsigned long val;
1495	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1496	struct etmv4_config *config = &drvdata->config;
1497
1498	if (kstrtoul(buf, 16, &val))
1499		return -EINVAL;
1500	if (!(drvdata->nrseqstate))
1501		return -EINVAL;
1502
1503	config->seq_rst = val & ETMv4_EVENT_MASK;
1504	return size;
1505}
1506static DEVICE_ATTR_RW(seq_reset_event);
1507
1508static ssize_t cntr_idx_show(struct device *dev,
1509			     struct device_attribute *attr,
1510			     char *buf)
1511{
1512	unsigned long val;
1513	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1514	struct etmv4_config *config = &drvdata->config;
1515
1516	val = config->cntr_idx;
1517	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1518}
1519
1520static ssize_t cntr_idx_store(struct device *dev,
1521			      struct device_attribute *attr,
1522			      const char *buf, size_t size)
1523{
1524	unsigned long val;
1525	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1526	struct etmv4_config *config = &drvdata->config;
1527
1528	if (kstrtoul(buf, 16, &val))
1529		return -EINVAL;
1530	if (val >= drvdata->nr_cntr)
1531		return -EINVAL;
1532
1533	/*
1534	 * Use spinlock to ensure index doesn't change while it gets
1535	 * dereferenced multiple times within a spinlock block elsewhere.
1536	 */
1537	spin_lock(&drvdata->spinlock);
1538	config->cntr_idx = val;
1539	spin_unlock(&drvdata->spinlock);
1540	return size;
1541}
1542static DEVICE_ATTR_RW(cntr_idx);
1543
1544static ssize_t cntrldvr_show(struct device *dev,
1545			     struct device_attribute *attr,
1546			     char *buf)
1547{
1548	u8 idx;
1549	unsigned long val;
1550	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1551	struct etmv4_config *config = &drvdata->config;
1552
1553	spin_lock(&drvdata->spinlock);
1554	idx = config->cntr_idx;
1555	val = config->cntrldvr[idx];
1556	spin_unlock(&drvdata->spinlock);
1557	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1558}
1559
1560static ssize_t cntrldvr_store(struct device *dev,
1561			      struct device_attribute *attr,
1562			      const char *buf, size_t size)
1563{
1564	u8 idx;
1565	unsigned long val;
1566	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1567	struct etmv4_config *config = &drvdata->config;
1568
1569	if (kstrtoul(buf, 16, &val))
1570		return -EINVAL;
1571	if (val > ETM_CNTR_MAX_VAL)
1572		return -EINVAL;
1573
1574	spin_lock(&drvdata->spinlock);
1575	idx = config->cntr_idx;
1576	config->cntrldvr[idx] = val;
1577	spin_unlock(&drvdata->spinlock);
1578	return size;
1579}
1580static DEVICE_ATTR_RW(cntrldvr);
1581
1582static ssize_t cntr_val_show(struct device *dev,
1583			     struct device_attribute *attr,
1584			     char *buf)
1585{
1586	u8 idx;
1587	unsigned long val;
1588	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1589	struct etmv4_config *config = &drvdata->config;
1590
1591	spin_lock(&drvdata->spinlock);
1592	idx = config->cntr_idx;
1593	val = config->cntr_val[idx];
1594	spin_unlock(&drvdata->spinlock);
1595	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1596}
1597
1598static ssize_t cntr_val_store(struct device *dev,
1599			      struct device_attribute *attr,
1600			      const char *buf, size_t size)
1601{
1602	u8 idx;
1603	unsigned long val;
1604	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1605	struct etmv4_config *config = &drvdata->config;
1606
1607	if (kstrtoul(buf, 16, &val))
1608		return -EINVAL;
1609	if (val > ETM_CNTR_MAX_VAL)
1610		return -EINVAL;
1611
1612	spin_lock(&drvdata->spinlock);
1613	idx = config->cntr_idx;
1614	config->cntr_val[idx] = val;
1615	spin_unlock(&drvdata->spinlock);
1616	return size;
1617}
1618static DEVICE_ATTR_RW(cntr_val);
1619
1620static ssize_t cntr_ctrl_show(struct device *dev,
1621			      struct device_attribute *attr,
1622			      char *buf)
1623{
1624	u8 idx;
1625	unsigned long val;
1626	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1627	struct etmv4_config *config = &drvdata->config;
1628
1629	spin_lock(&drvdata->spinlock);
1630	idx = config->cntr_idx;
1631	val = config->cntr_ctrl[idx];
1632	spin_unlock(&drvdata->spinlock);
1633	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1634}
1635
1636static ssize_t cntr_ctrl_store(struct device *dev,
1637			       struct device_attribute *attr,
1638			       const char *buf, size_t size)
1639{
1640	u8 idx;
1641	unsigned long val;
1642	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1643	struct etmv4_config *config = &drvdata->config;
1644
1645	if (kstrtoul(buf, 16, &val))
1646		return -EINVAL;
1647
1648	spin_lock(&drvdata->spinlock);
1649	idx = config->cntr_idx;
1650	config->cntr_ctrl[idx] = val;
1651	spin_unlock(&drvdata->spinlock);
1652	return size;
1653}
1654static DEVICE_ATTR_RW(cntr_ctrl);
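/*
 * Usage sketch (values illustrative): a counter is programmed by selecting
 * it with cntr_idx (< nr_cntr) and then writing its reload value
 * (cntrldvr) and current value (cntr_val) - both capped at
 * ETM_CNTR_MAX_VAL - plus its control register (cntr_ctrl), e.g.
 *
 *   echo 0 > cntr_idx
 *   echo 0x10 > cntrldvr; echo 0x10 > cntr_val; echo 0x1 > cntr_ctrl
 */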
1655
1656static ssize_t res_idx_show(struct device *dev,
1657			    struct device_attribute *attr,
1658			    char *buf)
1659{
1660	unsigned long val;
1661	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1662	struct etmv4_config *config = &drvdata->config;
1663
1664	val = config->res_idx;
1665	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1666}
1667
1668static ssize_t res_idx_store(struct device *dev,
1669			     struct device_attribute *attr,
1670			     const char *buf, size_t size)
1671{
1672	unsigned long val;
1673	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1674	struct etmv4_config *config = &drvdata->config;
1675
1676	if (kstrtoul(buf, 16, &val))
1677		return -EINVAL;
1678	/*
1679	 * Resource selector pair 0 is always implemented and reserved,
1680	 * namely an idx with 0 and 1 is illegal.
1681	 */
1682	if ((val < 2) || (val >= 2 * drvdata->nr_resource))
1683		return -EINVAL;
1684
1685	/*
1686	 * Use spinlock to ensure index doesn't change while it gets
1687	 * dereferenced multiple times within a spinlock block elsewhere.
1688	 */
1689	spin_lock(&drvdata->spinlock);
1690	config->res_idx = val;
1691	spin_unlock(&drvdata->spinlock);
1692	return size;
1693}
1694static DEVICE_ATTR_RW(res_idx);
1695
1696static ssize_t res_ctrl_show(struct device *dev,
1697			     struct device_attribute *attr,
1698			     char *buf)
1699{
1700	u8 idx;
1701	unsigned long val;
1702	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1703	struct etmv4_config *config = &drvdata->config;
1704
1705	spin_lock(&drvdata->spinlock);
1706	idx = config->res_idx;
1707	val = config->res_ctrl[idx];
1708	spin_unlock(&drvdata->spinlock);
1709	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1710}
1711
1712static ssize_t res_ctrl_store(struct device *dev,
1713			      struct device_attribute *attr,
1714			      const char *buf, size_t size)
1715{
1716	u8 idx;
1717	unsigned long val;
1718	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1719	struct etmv4_config *config = &drvdata->config;
1720
1721	if (kstrtoul(buf, 16, &val))
1722		return -EINVAL;
1723
1724	spin_lock(&drvdata->spinlock);
1725	idx = config->res_idx;
1726	/* For odd idx pair inversal bit is RES0 */
1727	if (idx % 2 != 0)
1728		/* PAIRINV, bit[21] */
1729		val &= ~TRCRSCTLRn_PAIRINV;
1730	config->res_ctrl[idx] = val & (TRCRSCTLRn_PAIRINV |
1731				       TRCRSCTLRn_INV |
1732				       TRCRSCTLRn_GROUP_MASK |
1733				       TRCRSCTLRn_SELECT_MASK);
1734	spin_unlock(&drvdata->spinlock);
1735	return size;
1736}
1737static DEVICE_ATTR_RW(res_ctrl);
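/*
 * Note on the index range above (derived from the code): resource selector
 * pair 0 (indices 0 and 1) is reserved, so valid res_idx values run from 2
 * to 2 * drvdata->nr_resource - 1; for odd indices the PAIRINV bit is
 * forced clear and the written value is masked down to the PAIRINV, INV,
 * GROUP and SELECT fields.
 */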
1738
1739static ssize_t sshot_idx_show(struct device *dev,
1740			      struct device_attribute *attr, char *buf)
1741{
1742	unsigned long val;
1743	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1744	struct etmv4_config *config = &drvdata->config;
1745
1746	val = config->ss_idx;
1747	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1748}
1749
1750static ssize_t sshot_idx_store(struct device *dev,
1751			       struct device_attribute *attr,
1752			       const char *buf, size_t size)
1753{
1754	unsigned long val;
1755	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1756	struct etmv4_config *config = &drvdata->config;
1757
1758	if (kstrtoul(buf, 16, &val))
1759		return -EINVAL;
1760	if (val >= drvdata->nr_ss_cmp)
1761		return -EINVAL;
1762
1763	spin_lock(&drvdata->spinlock);
1764	config->ss_idx = val;
1765	spin_unlock(&drvdata->spinlock);
1766	return size;
1767}
1768static DEVICE_ATTR_RW(sshot_idx);
1769
1770static ssize_t sshot_ctrl_show(struct device *dev,
1771			       struct device_attribute *attr,
1772			       char *buf)
1773{
1774	unsigned long val;
1775	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1776	struct etmv4_config *config = &drvdata->config;
1777
1778	spin_lock(&drvdata->spinlock);
1779	val = config->ss_ctrl[config->ss_idx];
1780	spin_unlock(&drvdata->spinlock);
1781	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1782}
1783
1784static ssize_t sshot_ctrl_store(struct device *dev,
1785				struct device_attribute *attr,
1786				const char *buf, size_t size)
1787{
1788	u8 idx;
1789	unsigned long val;
1790	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1791	struct etmv4_config *config = &drvdata->config;
1792
1793	if (kstrtoul(buf, 16, &val))
1794		return -EINVAL;
1795
1796	spin_lock(&drvdata->spinlock);
1797	idx = config->ss_idx;
1798	config->ss_ctrl[idx] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK, val);
1799	/* must clear bit 31 in related status register on programming */
1800	config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
1801	spin_unlock(&drvdata->spinlock);
1802	return size;
1803}
1804static DEVICE_ATTR_RW(sshot_ctrl);
1805
1806static ssize_t sshot_status_show(struct device *dev,
1807				 struct device_attribute *attr, char *buf)
1808{
1809	unsigned long val;
1810	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1811	struct etmv4_config *config = &drvdata->config;
1812
1813	spin_lock(&drvdata->spinlock);
1814	val = config->ss_status[config->ss_idx];
1815	spin_unlock(&drvdata->spinlock);
1816	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1817}
1818static DEVICE_ATTR_RO(sshot_status);
1819
1820static ssize_t sshot_pe_ctrl_show(struct device *dev,
1821				  struct device_attribute *attr,
1822				  char *buf)
1823{
1824	unsigned long val;
1825	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1826	struct etmv4_config *config = &drvdata->config;
1827
1828	spin_lock(&drvdata->spinlock);
1829	val = config->ss_pe_cmp[config->ss_idx];
1830	spin_unlock(&drvdata->spinlock);
1831	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1832}
1833
1834static ssize_t sshot_pe_ctrl_store(struct device *dev,
1835				   struct device_attribute *attr,
1836				   const char *buf, size_t size)
1837{
1838	u8 idx;
1839	unsigned long val;
1840	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1841	struct etmv4_config *config = &drvdata->config;
1842
1843	if (kstrtoul(buf, 16, &val))
1844		return -EINVAL;
1845
1846	spin_lock(&drvdata->spinlock);
1847	idx = config->ss_idx;
1848	config->ss_pe_cmp[idx] = FIELD_PREP(TRCSSPCICRn_PC_MASK, val);
1849	/* must clear bit 31 in related status register on programming */
1850	config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
1851	spin_unlock(&drvdata->spinlock);
1852	return size;
1853}
1854static DEVICE_ATTR_RW(sshot_pe_ctrl);
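/*
 * The sshot_* files above follow one indexed pattern: sshot_idx selects
 * one of the nr_ss_cmp single-shot comparators, and sshot_ctrl,
 * sshot_pe_ctrl and sshot_status then operate on that comparator.
 * Reprogramming either control register also clears the sticky STATUS
 * bit (bit 31) of the matching TRCSSCSRn, as noted in the comments above.
 */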
1855
1856static ssize_t ctxid_idx_show(struct device *dev,
1857			      struct device_attribute *attr,
1858			      char *buf)
1859{
1860	unsigned long val;
1861	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1862	struct etmv4_config *config = &drvdata->config;
1863
1864	val = config->ctxid_idx;
1865	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1866}
1867
1868static ssize_t ctxid_idx_store(struct device *dev,
1869			       struct device_attribute *attr,
1870			       const char *buf, size_t size)
1871{
1872	unsigned long val;
1873	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1874	struct etmv4_config *config = &drvdata->config;
1875
1876	if (kstrtoul(buf, 16, &val))
1877		return -EINVAL;
1878	if (val >= drvdata->numcidc)
1879		return -EINVAL;
1880
1881	/*
1882	 * Use spinlock to ensure index doesn't change while it gets
1883	 * dereferenced multiple times within a spinlock block elsewhere.
1884	 */
1885	spin_lock(&drvdata->spinlock);
1886	config->ctxid_idx = val;
1887	spin_unlock(&drvdata->spinlock);
1888	return size;
1889}
1890static DEVICE_ATTR_RW(ctxid_idx);
1891
1892static ssize_t ctxid_pid_show(struct device *dev,
1893			      struct device_attribute *attr,
1894			      char *buf)
1895{
1896	u8 idx;
1897	unsigned long val;
1898	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1899	struct etmv4_config *config = &drvdata->config;
1900
1901	/*
1902	 * Don't use contextID tracing if coming from a PID namespace.  See
1903	 * comment in ctxid_pid_store().
1904	 */
1905	if (task_active_pid_ns(current) != &init_pid_ns)
1906		return -EINVAL;
1907
1908	spin_lock(&drvdata->spinlock);
1909	idx = config->ctxid_idx;
1910	val = (unsigned long)config->ctxid_pid[idx];
1911	spin_unlock(&drvdata->spinlock);
1912	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1913}
1914
1915static ssize_t ctxid_pid_store(struct device *dev,
1916			       struct device_attribute *attr,
1917			       const char *buf, size_t size)
1918{
1919	u8 idx;
1920	unsigned long pid;
1921	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1922	struct etmv4_config *config = &drvdata->config;
1923
1924	/*
1925	 * When contextID tracing is enabled the tracers will insert the
1926	 * value found in the contextID register in the trace stream.  But if
1927	 * a process is in a PID namespace, the PID it sees for itself won't
1928	 * match the PID the kernel sees, something that makes the feature
1929	 * confusing and can potentially leak kernel-only information.
1930	 * As such, refuse to use the feature if @current is not in the
1931	 * initial PID namespace.
1932	 */
1933	if (task_active_pid_ns(current) != &init_pid_ns)
1934		return -EINVAL;
1935
1936	/*
1937	 * Only implemented when Context ID tracing is supported, i.e. at least
1938	 * one Context ID comparator is implemented and the Context ID size is
1939	 * greater than zero.
1940	 */
1941	if (!drvdata->ctxid_size || !drvdata->numcidc)
1942		return -EINVAL;
1943	if (kstrtoul(buf, 16, &pid))
1944		return -EINVAL;
1945
1946	spin_lock(&drvdata->spinlock);
1947	idx = config->ctxid_idx;
1948	config->ctxid_pid[idx] = (u64)pid;
1949	spin_unlock(&drvdata->spinlock);
1950	return size;
1951}
1952static DEVICE_ATTR_RW(ctxid_pid);
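/*
 * A usage sketch (the PID value and the "etm0" device name are
 * illustrative): pick a comparator with ctxid_idx, then write the
 * kernel-view PID to match on:
 *
 *   echo 0 > /sys/bus/coresight/devices/etm0/ctxid_idx
 *   echo 0x1f90 > /sys/bus/coresight/devices/etm0/ctxid_pid
 *
 * Reads and writes of ctxid_pid (and ctxid_masks below) return -EINVAL
 * when the caller is not in the initial PID namespace, for the reasons
 * given in the comment above.
 */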
1953
1954static ssize_t ctxid_masks_show(struct device *dev,
1955				struct device_attribute *attr,
1956				char *buf)
1957{
1958	unsigned long val1, val2;
1959	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1960	struct etmv4_config *config = &drvdata->config;
1961
1962	/*
1963	 * Don't use contextID tracing if coming from a PID namespace.  See
1964	 * comment in ctxid_pid_store().
1965	 */
1966	if (task_active_pid_ns(current) != &init_pid_ns)
1967		return -EINVAL;
1968
1969	spin_lock(&drvdata->spinlock);
1970	val1 = config->ctxid_mask0;
1971	val2 = config->ctxid_mask1;
1972	spin_unlock(&drvdata->spinlock);
1973	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1974}
1975
1976static ssize_t ctxid_masks_store(struct device *dev,
1977				struct device_attribute *attr,
1978				const char *buf, size_t size)
1979{
1980	u8 i, j, maskbyte;
1981	unsigned long val1, val2, mask;
1982	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1983	struct etmv4_config *config = &drvdata->config;
1984	int nr_inputs;
1985
1986	/*
1987	 * Don't use contextID tracing if coming from a PID namespace.  See
1988	 * comment in ctxid_pid_store().
1989	 */
1990	if (task_active_pid_ns(current) != &init_pid_ns)
1991		return -EINVAL;
1992
1993	/*
1994	 * Only implemented when Context ID tracing is supported, i.e. at least
1995	 * one Context ID comparator is implemented and the Context ID size is
1996	 * greater than zero.
1997	 */
1998	if (!drvdata->ctxid_size || !drvdata->numcidc)
1999		return -EINVAL;
2000	/* one mask if <= 4 comparators, two for up to 8 */
2001	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2002	if ((drvdata->numcidc > 4) && (nr_inputs != 2))
2003		return -EINVAL;
2004
2005	spin_lock(&drvdata->spinlock);
2006	/*
2007	 * each byte[0..3] controls mask value applied to ctxid
2008	 * comparator[0..3]
2009	 */
2010	switch (drvdata->numcidc) {
2011	case 0x1:
2012		/* COMP0, bits[7:0] */
2013		config->ctxid_mask0 = val1 & 0xFF;
2014		break;
2015	case 0x2:
2016		/* COMP1, bits[15:8] */
2017		config->ctxid_mask0 = val1 & 0xFFFF;
2018		break;
2019	case 0x3:
2020		/* COMP2, bits[23:16] */
2021		config->ctxid_mask0 = val1 & 0xFFFFFF;
2022		break;
2023	case 0x4:
2024		 /* COMP3, bits[31:24] */
2025		config->ctxid_mask0 = val1;
2026		break;
2027	case 0x5:
2028		/* COMP4, bits[7:0] */
2029		config->ctxid_mask0 = val1;
2030		config->ctxid_mask1 = val2 & 0xFF;
2031		break;
2032	case 0x6:
2033		/* COMP5, bits[15:8] */
2034		config->ctxid_mask0 = val1;
2035		config->ctxid_mask1 = val2 & 0xFFFF;
2036		break;
2037	case 0x7:
2038		/* COMP6, bits[23:16] */
2039		config->ctxid_mask0 = val1;
2040		config->ctxid_mask1 = val2 & 0xFFFFFF;
2041		break;
2042	case 0x8:
2043		/* COMP7, bits[31:24] */
2044		config->ctxid_mask0 = val1;
2045		config->ctxid_mask1 = val2;
2046		break;
2047	default:
2048		break;
2049	}
2050	/*
2051	 * If software sets a mask bit to 1, it must program the relevant byte
2052	 * of the ctxid comparator value to 0x0, otherwise behavior is unpredictable.
2053	 * For example, if bit[3] of ctxid_mask0 is 1 (byte 0 masks comparator 0),
2054	 * bits[31:24], i.e. byte 3, of the ctxid comparator 0 value must be cleared.
2055	 */
2056	mask = config->ctxid_mask0;
2057	for (i = 0; i < drvdata->numcidc; i++) {
2058		/* mask value of corresponding ctxid comparator */
2059		maskbyte = mask & ETMv4_EVENT_MASK;
2060		/*
2061		 * each bit corresponds to a byte of respective ctxid comparator
2062		 * value register
2063		 */
2064		for (j = 0; j < 8; j++) {
2065			if (maskbyte & 1)
2066				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
2067			maskbyte >>= 1;
2068		}
2069		/* Select the next ctxid comparator mask value */
2070		if (i == 3)
2071			/* ctxid comparators[4-7] */
2072			mask = config->ctxid_mask1;
2073		else
2074			mask >>= 0x8;
2075	}
2076
2077	spin_unlock(&drvdata->spinlock);
2078	return size;
2079}
2080static DEVICE_ATTR_RW(ctxid_masks);
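/*
 * A worked sketch of the mask layout (the value is illustrative): with a
 * single Context ID comparator,
 *
 *   echo 0x3 > ctxid_masks
 *
 * sets bits[1:0] of the byte controlling comparator 0, which masks bytes
 * 0 and 1 of that comparator; the loop above then zeroes the same bytes
 * of the previously programmed ctxid_pid value.
 */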
2081
2082static ssize_t vmid_idx_show(struct device *dev,
2083			     struct device_attribute *attr,
2084			     char *buf)
2085{
2086	unsigned long val;
2087	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2088	struct etmv4_config *config = &drvdata->config;
2089
2090	val = config->vmid_idx;
2091	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2092}
2093
2094static ssize_t vmid_idx_store(struct device *dev,
2095			      struct device_attribute *attr,
2096			      const char *buf, size_t size)
2097{
2098	unsigned long val;
2099	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2100	struct etmv4_config *config = &drvdata->config;
2101
2102	if (kstrtoul(buf, 16, &val))
2103		return -EINVAL;
2104	if (val >= drvdata->numvmidc)
2105		return -EINVAL;
2106
2107	/*
2108	 * Use spinlock to ensure index doesn't change while it gets
2109	 * dereferenced multiple times within a spinlock block elsewhere.
2110	 */
2111	spin_lock(&drvdata->spinlock);
2112	config->vmid_idx = val;
2113	spin_unlock(&drvdata->spinlock);
2114	return size;
2115}
2116static DEVICE_ATTR_RW(vmid_idx);
2117
2118static ssize_t vmid_val_show(struct device *dev,
2119			     struct device_attribute *attr,
2120			     char *buf)
2121{
2122	unsigned long val;
2123	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2124	struct etmv4_config *config = &drvdata->config;
2125
2126	/*
2127	 * Don't use virtual contextID tracing if coming from a PID namespace.
2128	 * See comment in ctxid_pid_store().
2129	 */
2130	if (!task_is_in_init_pid_ns(current))
2131		return -EINVAL;
2132
2133	spin_lock(&drvdata->spinlock);
2134	val = (unsigned long)config->vmid_val[config->vmid_idx];
2135	spin_unlock(&drvdata->spinlock);
2136	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2137}
2138
2139static ssize_t vmid_val_store(struct device *dev,
2140			      struct device_attribute *attr,
2141			      const char *buf, size_t size)
2142{
2143	unsigned long val;
2144	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2145	struct etmv4_config *config = &drvdata->config;
2146
2147	/*
2148	 * Don't use virtual contextID tracing if coming from a PID namespace.
2149	 * See comment in ctxid_pid_store().
2150	 */
2151	if (!task_is_in_init_pid_ns(current))
2152		return -EINVAL;
2153
2154	/*
2155	 * Only implemented when VMID tracing is supported, i.e. at least one
2156	 * VMID comparator is implemented and the VMID size is at least 8 bits.
2157	 */
2158	if (!drvdata->vmid_size || !drvdata->numvmidc)
2159		return -EINVAL;
2160	if (kstrtoul(buf, 16, &val))
2161		return -EINVAL;
2162
2163	spin_lock(&drvdata->spinlock);
2164	config->vmid_val[config->vmid_idx] = (u64)val;
2165	spin_unlock(&drvdata->spinlock);
2166	return size;
2167}
2168static DEVICE_ATTR_RW(vmid_val);
2169
2170static ssize_t vmid_masks_show(struct device *dev,
2171			       struct device_attribute *attr, char *buf)
2172{
2173	unsigned long val1, val2;
2174	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2175	struct etmv4_config *config = &drvdata->config;
2176
2177	/*
2178	 * Don't use virtual contextID tracing if coming from a PID namespace.
2179	 * See comment in ctxid_pid_store().
2180	 */
2181	if (!task_is_in_init_pid_ns(current))
2182		return -EINVAL;
2183
2184	spin_lock(&drvdata->spinlock);
2185	val1 = config->vmid_mask0;
2186	val2 = config->vmid_mask1;
2187	spin_unlock(&drvdata->spinlock);
2188	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2189}
2190
2191static ssize_t vmid_masks_store(struct device *dev,
2192				struct device_attribute *attr,
2193				const char *buf, size_t size)
2194{
2195	u8 i, j, maskbyte;
2196	unsigned long val1, val2, mask;
2197	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2198	struct etmv4_config *config = &drvdata->config;
2199	int nr_inputs;
2200
2201	/*
2202	 * Don't use virtual contextID tracing if coming from a PID namespace.
2203	 * See comment in ctxid_pid_store().
2204	 */
2205	if (!task_is_in_init_pid_ns(current))
2206		return -EINVAL;
2207
2208	/*
2209	 * Only implemented when VMID tracing is supported, i.e. at least one
2210	 * VMID comparator is implemented and the VMID size is at least 8 bits.
2211	 */
2212	if (!drvdata->vmid_size || !drvdata->numvmidc)
2213		return -EINVAL;
2214	/* one mask if <= 4 comparators, two for up to 8 */
2215	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2216	if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
2217		return -EINVAL;
2218
2219	spin_lock(&drvdata->spinlock);
2220
2221	/*
2222	 * each byte[0..3] controls mask value applied to vmid
2223	 * comparator[0..3]
2224	 */
2225	switch (drvdata->numvmidc) {
2226	case 0x1:
2227		/* COMP0, bits[7:0] */
2228		config->vmid_mask0 = val1 & 0xFF;
2229		break;
2230	case 0x2:
2231		/* COMP1, bits[15:8] */
2232		config->vmid_mask0 = val1 & 0xFFFF;
2233		break;
2234	case 0x3:
2235		/* COMP2, bits[23:16] */
2236		config->vmid_mask0 = val1 & 0xFFFFFF;
2237		break;
2238	case 0x4:
2239		/* COMP3, bits[31:24] */
2240		config->vmid_mask0 = val1;
2241		break;
2242	case 0x5:
2243		/* COMP4, bits[7:0] */
2244		config->vmid_mask0 = val1;
2245		config->vmid_mask1 = val2 & 0xFF;
2246		break;
2247	case 0x6:
2248		/* COMP5, bits[15:8] */
2249		config->vmid_mask0 = val1;
2250		config->vmid_mask1 = val2 & 0xFFFF;
2251		break;
2252	case 0x7:
2253		/* COMP6, bits[23:16] */
2254		config->vmid_mask0 = val1;
2255		config->vmid_mask1 = val2 & 0xFFFFFF;
2256		break;
2257	case 0x8:
2258		/* COMP7, bits[31:24] */
2259		config->vmid_mask0 = val1;
2260		config->vmid_mask1 = val2;
2261		break;
2262	default:
2263		break;
2264	}
2265
2266	/*
2267	 * If software sets a mask bit to 1, it must program the relevant byte
2268	 * of the vmid comparator value to 0x0, otherwise behavior is unpredictable.
2269	 * For example, if bit[3] of vmid_mask0 is 1 (byte 0 masks comparator 0),
2270	 * bits[31:24], i.e. byte 3, of the vmid comparator 0 value must be cleared.
2271	 */
2272	mask = config->vmid_mask0;
2273	for (i = 0; i < drvdata->numvmidc; i++) {
2274		/* mask value of corresponding vmid comparator */
2275		maskbyte = mask & ETMv4_EVENT_MASK;
2276		/*
2277		 * each bit corresponds to a byte of respective vmid comparator
2278		 * value register
2279		 */
2280		for (j = 0; j < 8; j++) {
2281			if (maskbyte & 1)
2282				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
2283			maskbyte >>= 1;
2284		}
2285		/* Select the next vmid comparator mask value */
2286		if (i == 3)
2287			/* vmid comparators[4-7] */
2288			mask = config->vmid_mask1;
2289		else
2290			mask >>= 0x8;
2291	}
2292	spin_unlock(&drvdata->spinlock);
2293	return size;
2294}
2295static DEVICE_ATTR_RW(vmid_masks);
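/*
 * The vmid_idx, vmid_val and vmid_masks files mirror the Context ID
 * interface above but program the TRCVMIDCVRn comparators and their byte
 * masks instead, with the same initial-PID-namespace restriction.
 */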
2296
2297static ssize_t cpu_show(struct device *dev,
2298			struct device_attribute *attr, char *buf)
2299{
2300	int val;
2301	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2302
2303	val = drvdata->cpu;
2304	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2305
2306}
2307static DEVICE_ATTR_RO(cpu);
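/*
 * cpu reports the CPU this trace unit is affine to; it is the same CPU
 * that etmv4_cross_read() below targets when reading management
 * registers.
 */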
2308
2309static ssize_t ts_source_show(struct device *dev,
2310			      struct device_attribute *attr,
2311			      char *buf)
2312{
2313	int val;
2314	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2315
2316	if (!drvdata->trfcr) {
2317		val = -1;
2318		goto out;
2319	}
2320
2321	switch (drvdata->trfcr & TRFCR_ELx_TS_MASK) {
2322	case TRFCR_ELx_TS_VIRTUAL:
2323	case TRFCR_ELx_TS_GUEST_PHYSICAL:
2324	case TRFCR_ELx_TS_PHYSICAL:
2325		val = FIELD_GET(TRFCR_ELx_TS_MASK, drvdata->trfcr);
2326		break;
2327	default:
2328		val = -1;
2329		break;
2330	}
2331
2332out:
2333	return sysfs_emit(buf, "%d\n", val);
2334}
2335static DEVICE_ATTR_RO(ts_source);
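/*
 * ts_source reports the raw TS field programmed into TRFCR (virtual,
 * guest physical or physical time), or -1 when TRFCR is absent or the
 * field holds an unexpected value. A sketch, assuming the device is
 * "etm0":
 *
 *   cat /sys/bus/coresight/devices/etm0/ts_source
 */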
2336
2337static struct attribute *coresight_etmv4_attrs[] = {
2338	&dev_attr_nr_pe_cmp.attr,
2339	&dev_attr_nr_addr_cmp.attr,
2340	&dev_attr_nr_cntr.attr,
2341	&dev_attr_nr_ext_inp.attr,
2342	&dev_attr_numcidc.attr,
2343	&dev_attr_numvmidc.attr,
2344	&dev_attr_nrseqstate.attr,
2345	&dev_attr_nr_resource.attr,
2346	&dev_attr_nr_ss_cmp.attr,
2347	&dev_attr_reset.attr,
2348	&dev_attr_mode.attr,
2349	&dev_attr_pe.attr,
2350	&dev_attr_event.attr,
2351	&dev_attr_event_instren.attr,
2352	&dev_attr_event_ts.attr,
2353	&dev_attr_syncfreq.attr,
2354	&dev_attr_cyc_threshold.attr,
2355	&dev_attr_bb_ctrl.attr,
2356	&dev_attr_event_vinst.attr,
2357	&dev_attr_s_exlevel_vinst.attr,
2358	&dev_attr_ns_exlevel_vinst.attr,
2359	&dev_attr_addr_idx.attr,
2360	&dev_attr_addr_instdatatype.attr,
2361	&dev_attr_addr_single.attr,
2362	&dev_attr_addr_range.attr,
2363	&dev_attr_addr_start.attr,
2364	&dev_attr_addr_stop.attr,
2365	&dev_attr_addr_ctxtype.attr,
2366	&dev_attr_addr_context.attr,
2367	&dev_attr_addr_exlevel_s_ns.attr,
2368	&dev_attr_addr_cmp_view.attr,
2369	&dev_attr_vinst_pe_cmp_start_stop.attr,
2370	&dev_attr_sshot_idx.attr,
2371	&dev_attr_sshot_ctrl.attr,
2372	&dev_attr_sshot_pe_ctrl.attr,
2373	&dev_attr_sshot_status.attr,
2374	&dev_attr_seq_idx.attr,
2375	&dev_attr_seq_state.attr,
2376	&dev_attr_seq_event.attr,
2377	&dev_attr_seq_reset_event.attr,
2378	&dev_attr_cntr_idx.attr,
2379	&dev_attr_cntrldvr.attr,
2380	&dev_attr_cntr_val.attr,
2381	&dev_attr_cntr_ctrl.attr,
2382	&dev_attr_res_idx.attr,
2383	&dev_attr_res_ctrl.attr,
2384	&dev_attr_ctxid_idx.attr,
2385	&dev_attr_ctxid_pid.attr,
2386	&dev_attr_ctxid_masks.attr,
2387	&dev_attr_vmid_idx.attr,
2388	&dev_attr_vmid_val.attr,
2389	&dev_attr_vmid_masks.attr,
2390	&dev_attr_cpu.attr,
2391	&dev_attr_ts_source.attr,
2392	NULL,
2393};
2394
2395struct etmv4_reg {
2396	struct coresight_device *csdev;
2397	u32 offset;
2398	u32 data;
2399};
2400
2401static void do_smp_cross_read(void *data)
2402{
2403	struct etmv4_reg *reg = data;
2404
2405	reg->data = etm4x_relaxed_read32(&reg->csdev->access, reg->offset);
2406}
2407
2408static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
2409{
2410	struct etmv4_reg reg;
2411
2412	reg.offset = offset;
2413	reg.csdev = drvdata->csdev;
2414
2415	/*
2416	 * smp cross call ensures the CPU will be powered up before
2417	 * accessing the ETMv4 trace core registers
2418	 */
2419	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
2420	return reg.data;
2421}
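/*
 * With the last argument ("wait") set to 1, smp_call_function_single()
 * only returns once do_smp_cross_read() has run on the target CPU, so
 * reg.data is stable by the time it is returned to the caller.
 */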
2422
2423static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
2424{
2425	struct dev_ext_attribute *eattr;
2426
2427	eattr = container_of(attr, struct dev_ext_attribute, attr);
2428	return (u32)(unsigned long)eattr->var;
2429}
2430
2431static ssize_t coresight_etm4x_reg_show(struct device *dev,
2432					struct device_attribute *d_attr,
2433					char *buf)
2434{
2435	u32 val, offset;
2436	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2437
2438	offset = coresight_etm4x_attr_to_offset(d_attr);
2439
2440	pm_runtime_get_sync(dev->parent);
2441	val = etmv4_cross_read(drvdata, offset);
2442	pm_runtime_put_sync(dev->parent);
2443
2444	return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
2445}
2446
2447static inline bool
2448etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
2449{
2450	switch (offset) {
2451	ETM_COMMON_SYSREG_LIST_CASES
2452		/*
2453		 * Common registers to ETE & ETM4x accessible via system
2454		 * instructions are always implemented.
2455		 */
2456		return true;
2457
2458	ETM4x_ONLY_SYSREG_LIST_CASES
2459		/*
2460		 * We only support etm4x and ete. So if the device is not
2461		 * ETE, it must be ETMv4x.
2462		 */
2463		return !etm4x_is_ete(drvdata);
2464
2465	ETM4x_MMAP_LIST_CASES
2466		/*
2467		 * Registers accessible only through the memory-mapped interface
2468		 * must not be accessed via system instructions.
2469		 * We cannot use drvdata->csdev here, as this function is
2470		 * called during device creation, via coresight_register(),
2471		 * and the csdev is not initialized until that is done.
2472		 * So rely on drvdata->base to detect whether we have a
2473		 * memory-mapped access.
2474		 * Also, ETE doesn't implement memory-mapped access, thus
2475		 * it is sufficient to check that we are using mmio.
2476		 */
2477		return !!drvdata->base;
2478
2479	ETE_ONLY_SYSREG_LIST_CASES
2480		return etm4x_is_ete(drvdata);
2481	}
2482
2483	return false;
2484}
2485
2486/*
2487 * Hide the ETM4x registers that may not be available on the
2488 * hardware.
2489 * There are certain management registers unavailable via system
2490 * instructions. Make those sysfs attributes hidden on such
2491 * systems.
2492 */
2493static umode_t
2494coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
2495				     struct attribute *attr, int unused)
2496{
2497	struct device *dev = kobj_to_dev(kobj);
2498	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2499	struct device_attribute *d_attr;
2500	u32 offset;
2501
2502	d_attr = container_of(attr, struct device_attribute, attr);
2503	offset = coresight_etm4x_attr_to_offset(d_attr);
2504
2505	if (etm4x_register_implemented(drvdata, offset))
2506		return attr->mode;
2507	return 0;
2508}
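/*
 * Returning 0 from an attribute group's .is_visible callback hides the
 * corresponding sysfs file altogether, while returning attr->mode keeps
 * it with its declared permissions. This is how unimplemented management
 * registers vanish from the mgmt/ directory on system-instruction-only
 * configurations.
 */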
2509
2510#define coresight_etm4x_reg(name, offset)				\
2511	&((struct dev_ext_attribute[]) {				\
2512	   {								\
2513		__ATTR(name, 0444, coresight_etm4x_reg_show, NULL),	\
2514		(void *)(unsigned long)offset				\
2515	   }								\
2516	})[0].attr.attr
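/*
 * The macro above builds an anonymous, one-element dev_ext_attribute
 * array and yields a pointer to its embedded struct attribute; the
 * register offset is stashed in ->var so that coresight_etm4x_reg_show()
 * can recover it through coresight_etm4x_attr_to_offset().
 */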
2517
2518static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2519	coresight_etm4x_reg(trcpdcr, TRCPDCR),
2520	coresight_etm4x_reg(trcpdsr, TRCPDSR),
2521	coresight_etm4x_reg(trclsr, TRCLSR),
2522	coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
2523	coresight_etm4x_reg(trcdevid, TRCDEVID),
2524	coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
2525	coresight_etm4x_reg(trcpidr0, TRCPIDR0),
2526	coresight_etm4x_reg(trcpidr1, TRCPIDR1),
2527	coresight_etm4x_reg(trcpidr2, TRCPIDR2),
2528	coresight_etm4x_reg(trcpidr3, TRCPIDR3),
2529	coresight_etm4x_reg(trcoslsr, TRCOSLSR),
2530	coresight_etm4x_reg(trcconfig, TRCCONFIGR),
2531	coresight_etm4x_reg(trctraceid, TRCTRACEIDR),
2532	coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
2533	NULL,
2534};
2535
2536static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2537	coresight_etm4x_reg(trcidr0, TRCIDR0),
2538	coresight_etm4x_reg(trcidr1, TRCIDR1),
2539	coresight_etm4x_reg(trcidr2, TRCIDR2),
2540	coresight_etm4x_reg(trcidr3, TRCIDR3),
2541	coresight_etm4x_reg(trcidr4, TRCIDR4),
2542	coresight_etm4x_reg(trcidr5, TRCIDR5),
2543	/* trcidr[6,7] are reserved */
2544	coresight_etm4x_reg(trcidr8, TRCIDR8),
2545	coresight_etm4x_reg(trcidr9, TRCIDR9),
2546	coresight_etm4x_reg(trcidr10, TRCIDR10),
2547	coresight_etm4x_reg(trcidr11, TRCIDR11),
2548	coresight_etm4x_reg(trcidr12, TRCIDR12),
2549	coresight_etm4x_reg(trcidr13, TRCIDR13),
2550	NULL,
2551};
2552
2553static const struct attribute_group coresight_etmv4_group = {
2554	.attrs = coresight_etmv4_attrs,
2555};
2556
2557static const struct attribute_group coresight_etmv4_mgmt_group = {
2558	.is_visible = coresight_etm4x_attr_reg_implemented,
2559	.attrs = coresight_etmv4_mgmt_attrs,
2560	.name = "mgmt",
2561};
2562
2563static const struct attribute_group coresight_etmv4_trcidr_group = {
2564	.attrs = coresight_etmv4_trcidr_attrs,
2565	.name = "trcidr",
2566};
2567
2568const struct attribute_group *coresight_etmv4_groups[] = {
2569	&coresight_etmv4_group,
2570	&coresight_etmv4_mgmt_group,
2571	&coresight_etmv4_trcidr_group,
2572	NULL,
2573};
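/*
 * A usage sketch, assuming the trace unit registers as "etm0" (the name
 * is platform dependent): the unnamed group appears directly under the
 * device directory, while the named groups create the mgmt/ and trcidr/
 * subdirectories, e.g.
 *
 *   cat /sys/bus/coresight/devices/etm0/trcidr/trcidr0
 *   cat /sys/bus/coresight/devices/etm0/mgmt/trcdevarch
 */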
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
   4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
   5 */
   6
   7#include <linux/pid_namespace.h>
   8#include <linux/pm_runtime.h>
   9#include <linux/sysfs.h>
  10#include "coresight-etm4x.h"
  11#include "coresight-priv.h"
 
  12
  13static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
  14{
  15	u8 idx;
  16	struct etmv4_config *config = &drvdata->config;
  17
  18	idx = config->addr_idx;
  19
  20	/*
  21	 * TRCACATRn.TYPE bit[1:0]: type of comparison
  22	 * the trace unit performs
  23	 */
  24	if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
  25		if (idx % 2 != 0)
  26			return -EINVAL;
  27
  28		/*
  29		 * We are performing instruction address comparison. Set the
  30		 * relevant bit of ViewInst Include/Exclude Control register
  31		 * for corresponding address comparator pair.
  32		 */
  33		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
  34		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
  35			return -EINVAL;
  36
  37		if (exclude == true) {
  38			/*
  39			 * Set exclude bit and unset the include bit
  40			 * corresponding to comparator pair
  41			 */
  42			config->viiectlr |= BIT(idx / 2 + 16);
  43			config->viiectlr &= ~BIT(idx / 2);
  44		} else {
  45			/*
  46			 * Set include bit and unset exclude bit
  47			 * corresponding to comparator pair
  48			 */
  49			config->viiectlr |= BIT(idx / 2);
  50			config->viiectlr &= ~BIT(idx / 2 + 16);
  51		}
  52	}
  53	return 0;
  54}
  55
  56static ssize_t nr_pe_cmp_show(struct device *dev,
  57			      struct device_attribute *attr,
  58			      char *buf)
  59{
  60	unsigned long val;
  61	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  62
  63	val = drvdata->nr_pe_cmp;
  64	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  65}
  66static DEVICE_ATTR_RO(nr_pe_cmp);
  67
  68static ssize_t nr_addr_cmp_show(struct device *dev,
  69				struct device_attribute *attr,
  70				char *buf)
  71{
  72	unsigned long val;
  73	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  74
  75	val = drvdata->nr_addr_cmp;
  76	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  77}
  78static DEVICE_ATTR_RO(nr_addr_cmp);
  79
  80static ssize_t nr_cntr_show(struct device *dev,
  81			    struct device_attribute *attr,
  82			    char *buf)
  83{
  84	unsigned long val;
  85	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  86
  87	val = drvdata->nr_cntr;
  88	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  89}
  90static DEVICE_ATTR_RO(nr_cntr);
  91
  92static ssize_t nr_ext_inp_show(struct device *dev,
  93			       struct device_attribute *attr,
  94			       char *buf)
  95{
  96	unsigned long val;
  97	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  98
  99	val = drvdata->nr_ext_inp;
 100	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 101}
 102static DEVICE_ATTR_RO(nr_ext_inp);
 103
 104static ssize_t numcidc_show(struct device *dev,
 105			    struct device_attribute *attr,
 106			    char *buf)
 107{
 108	unsigned long val;
 109	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 110
 111	val = drvdata->numcidc;
 112	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 113}
 114static DEVICE_ATTR_RO(numcidc);
 115
 116static ssize_t numvmidc_show(struct device *dev,
 117			     struct device_attribute *attr,
 118			     char *buf)
 119{
 120	unsigned long val;
 121	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 122
 123	val = drvdata->numvmidc;
 124	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 125}
 126static DEVICE_ATTR_RO(numvmidc);
 127
 128static ssize_t nrseqstate_show(struct device *dev,
 129			       struct device_attribute *attr,
 130			       char *buf)
 131{
 132	unsigned long val;
 133	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 134
 135	val = drvdata->nrseqstate;
 136	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 137}
 138static DEVICE_ATTR_RO(nrseqstate);
 139
 140static ssize_t nr_resource_show(struct device *dev,
 141				struct device_attribute *attr,
 142				char *buf)
 143{
 144	unsigned long val;
 145	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 146
 147	val = drvdata->nr_resource;
 148	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 149}
 150static DEVICE_ATTR_RO(nr_resource);
 151
 152static ssize_t nr_ss_cmp_show(struct device *dev,
 153			      struct device_attribute *attr,
 154			      char *buf)
 155{
 156	unsigned long val;
 157	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 158
 159	val = drvdata->nr_ss_cmp;
 160	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 161}
 162static DEVICE_ATTR_RO(nr_ss_cmp);
 163
 164static ssize_t reset_store(struct device *dev,
 165			   struct device_attribute *attr,
 166			   const char *buf, size_t size)
 167{
 168	int i;
 169	unsigned long val;
 170	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 171	struct etmv4_config *config = &drvdata->config;
 172
 173	if (kstrtoul(buf, 16, &val))
 174		return -EINVAL;
 175
 176	spin_lock(&drvdata->spinlock);
 177	if (val)
 178		config->mode = 0x0;
 179
 180	/* Disable data tracing: do not trace load and store data transfers */
 181	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
 182	config->cfg &= ~(BIT(1) | BIT(2));
 183
 184	/* Disable data value and data address tracing */
 185	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
 186			   ETM_MODE_DATA_TRACE_VAL);
 187	config->cfg &= ~(BIT(16) | BIT(17));
 188
 189	/* Disable all events tracing */
 190	config->eventctrl0 = 0x0;
 191	config->eventctrl1 = 0x0;
 192
 193	/* Disable timestamp event */
 194	config->ts_ctrl = 0x0;
 195
 196	/* Disable stalling */
 197	config->stall_ctrl = 0x0;
 198
 199	/* Reset trace synchronization period  to 2^8 = 256 bytes*/
 200	if (drvdata->syncpr == false)
 201		config->syncfreq = 0x8;
 202
 203	/*
 204	 * Enable ViewInst to trace everything with start-stop logic in
 205	 * started state. ARM recommends start-stop logic is set before
 206	 * each trace run.
 207	 */
 208	config->vinst_ctrl = BIT(0);
 209	if (drvdata->nr_addr_cmp > 0) {
 210		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
 211		/* SSSTATUS, bit[9] */
 212		config->vinst_ctrl |= BIT(9);
 213	}
 214
 215	/* No address range filtering for ViewInst */
 216	config->viiectlr = 0x0;
 217
 218	/* No start-stop filtering for ViewInst */
 219	config->vissctlr = 0x0;
 220	config->vipcssctlr = 0x0;
 221
 222	/* Disable seq events */
 223	for (i = 0; i < drvdata->nrseqstate-1; i++)
 224		config->seq_ctrl[i] = 0x0;
 225	config->seq_rst = 0x0;
 226	config->seq_state = 0x0;
 227
 228	/* Disable external input events */
 229	config->ext_inp = 0x0;
 230
 231	config->cntr_idx = 0x0;
 232	for (i = 0; i < drvdata->nr_cntr; i++) {
 233		config->cntrldvr[i] = 0x0;
 234		config->cntr_ctrl[i] = 0x0;
 235		config->cntr_val[i] = 0x0;
 236	}
 237
 238	config->res_idx = 0x0;
 239	for (i = 2; i < 2 * drvdata->nr_resource; i++)
 240		config->res_ctrl[i] = 0x0;
 241
 242	config->ss_idx = 0x0;
 243	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
 244		config->ss_ctrl[i] = 0x0;
 245		config->ss_pe_cmp[i] = 0x0;
 246	}
 247
 248	config->addr_idx = 0x0;
 249	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
 250		config->addr_val[i] = 0x0;
 251		config->addr_acc[i] = 0x0;
 252		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
 253	}
 254
 255	config->ctxid_idx = 0x0;
 256	for (i = 0; i < drvdata->numcidc; i++)
 257		config->ctxid_pid[i] = 0x0;
 258
 259	config->ctxid_mask0 = 0x0;
 260	config->ctxid_mask1 = 0x0;
 261
 262	config->vmid_idx = 0x0;
 263	for (i = 0; i < drvdata->numvmidc; i++)
 264		config->vmid_val[i] = 0x0;
 265	config->vmid_mask0 = 0x0;
 266	config->vmid_mask1 = 0x0;
 267
 268	drvdata->trcid = drvdata->cpu + 1;
 269
 270	spin_unlock(&drvdata->spinlock);
 271
 
 
 272	return size;
 273}
 274static DEVICE_ATTR_WO(reset);
 275
 276static ssize_t mode_show(struct device *dev,
 277			 struct device_attribute *attr,
 278			 char *buf)
 279{
 280	unsigned long val;
 281	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 282	struct etmv4_config *config = &drvdata->config;
 283
 284	val = config->mode;
 285	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 286}
 287
 288static ssize_t mode_store(struct device *dev,
 289			  struct device_attribute *attr,
 290			  const char *buf, size_t size)
 291{
 292	unsigned long val, mode;
 293	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 294	struct etmv4_config *config = &drvdata->config;
 295
 296	if (kstrtoul(buf, 16, &val))
 297		return -EINVAL;
 298
 299	spin_lock(&drvdata->spinlock);
 300	config->mode = val & ETMv4_MODE_ALL;
 301
 302	if (drvdata->instrp0 == true) {
 303		/* start by clearing instruction P0 field */
 304		config->cfg  &= ~(BIT(1) | BIT(2));
 305		if (config->mode & ETM_MODE_LOAD)
 306			/* 0b01 Trace load instructions as P0 instructions */
 307			config->cfg  |= BIT(1);
 308		if (config->mode & ETM_MODE_STORE)
 309			/* 0b10 Trace store instructions as P0 instructions */
 310			config->cfg  |= BIT(2);
 311		if (config->mode & ETM_MODE_LOAD_STORE)
 312			/*
 313			 * 0b11 Trace load and store instructions
 314			 * as P0 instructions
 315			 */
 316			config->cfg  |= BIT(1) | BIT(2);
 317	}
 318
 319	/* bit[3], Branch broadcast mode */
 320	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
 321		config->cfg |= BIT(3);
 322	else
 323		config->cfg &= ~BIT(3);
 324
 325	/* bit[4], Cycle counting instruction trace bit */
 326	if ((config->mode & ETMv4_MODE_CYCACC) &&
 327		(drvdata->trccci == true))
 328		config->cfg |= BIT(4);
 329	else
 330		config->cfg &= ~BIT(4);
 331
 332	/* bit[6], Context ID tracing bit */
 333	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
 334		config->cfg |= BIT(6);
 335	else
 336		config->cfg &= ~BIT(6);
 337
 338	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
 339		config->cfg |= BIT(7);
 340	else
 341		config->cfg &= ~BIT(7);
 342
 343	/* bits[10:8], Conditional instruction tracing bit */
 344	mode = ETM_MODE_COND(config->mode);
 345	if (drvdata->trccond == true) {
 346		config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
 347		config->cfg |= mode << 8;
 348	}
 349
 350	/* bit[11], Global timestamp tracing bit */
 351	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
 352		config->cfg |= BIT(11);
 353	else
 354		config->cfg &= ~BIT(11);
 355
 356	/* bit[12], Return stack enable bit */
 357	if ((config->mode & ETM_MODE_RETURNSTACK) &&
 358					(drvdata->retstack == true))
 359		config->cfg |= BIT(12);
 360	else
 361		config->cfg &= ~BIT(12);
 362
 363	/* bits[14:13], Q element enable field */
 364	mode = ETM_MODE_QELEM(config->mode);
 365	/* start by clearing QE bits */
 366	config->cfg &= ~(BIT(13) | BIT(14));
 367	/* if supported, Q elements with instruction counts are enabled */
 368	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
 369		config->cfg |= BIT(13);
 
 
 
 
 370	/*
 371	 * if supported, Q elements with and without instruction
 372	 * counts are enabled
 373	 */
 374	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
 375		config->cfg |= BIT(14);
 376
 377	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
 378	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
 379	    (drvdata->atbtrig == true))
 380		config->eventctrl1 |= BIT(11);
 381	else
 382		config->eventctrl1 &= ~BIT(11);
 383
 384	/* bit[12], Low-power state behavior override bit */
 385	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
 386	    (drvdata->lpoverride == true))
 387		config->eventctrl1 |= BIT(12);
 388	else
 389		config->eventctrl1 &= ~BIT(12);
 390
 391	/* bit[8], Instruction stall bit */
 392	if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
 393		config->stall_ctrl |= BIT(8);
 394	else
 395		config->stall_ctrl &= ~BIT(8);
 396
 397	/* bit[10], Prioritize instruction trace bit */
 398	if (config->mode & ETM_MODE_INSTPRIO)
 399		config->stall_ctrl |= BIT(10);
 400	else
 401		config->stall_ctrl &= ~BIT(10);
 402
 403	/* bit[13], Trace overflow prevention bit */
 404	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
 405		(drvdata->nooverflow == true))
 406		config->stall_ctrl |= BIT(13);
 407	else
 408		config->stall_ctrl &= ~BIT(13);
 409
 410	/* bit[9] Start/stop logic control bit */
 411	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
 412		config->vinst_ctrl |= BIT(9);
 413	else
 414		config->vinst_ctrl &= ~BIT(9);
 415
 416	/* bit[10], Whether a trace unit must trace a Reset exception */
 417	if (config->mode & ETM_MODE_TRACE_RESET)
 418		config->vinst_ctrl |= BIT(10);
 419	else
 420		config->vinst_ctrl &= ~BIT(10);
 421
 422	/* bit[11], Whether a trace unit must trace a system error exception */
 423	if ((config->mode & ETM_MODE_TRACE_ERR) &&
 424		(drvdata->trc_error == true))
 425		config->vinst_ctrl |= BIT(11);
 426	else
 427		config->vinst_ctrl &= ~BIT(11);
 428
 429	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
 430		etm4_config_trace_mode(config);
 431
 432	spin_unlock(&drvdata->spinlock);
 433
 434	return size;
 435}
 436static DEVICE_ATTR_RW(mode);
 437
 438static ssize_t pe_show(struct device *dev,
 439		       struct device_attribute *attr,
 440		       char *buf)
 441{
 442	unsigned long val;
 443	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 444	struct etmv4_config *config = &drvdata->config;
 445
 446	val = config->pe_sel;
 447	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 448}
 449
 450static ssize_t pe_store(struct device *dev,
 451			struct device_attribute *attr,
 452			const char *buf, size_t size)
 453{
 454	unsigned long val;
 455	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 456	struct etmv4_config *config = &drvdata->config;
 457
 458	if (kstrtoul(buf, 16, &val))
 459		return -EINVAL;
 460
 461	spin_lock(&drvdata->spinlock);
 462	if (val > drvdata->nr_pe) {
 463		spin_unlock(&drvdata->spinlock);
 464		return -EINVAL;
 465	}
 466
 467	config->pe_sel = val;
 468	spin_unlock(&drvdata->spinlock);
 469	return size;
 470}
 471static DEVICE_ATTR_RW(pe);
 472
 473static ssize_t event_show(struct device *dev,
 474			  struct device_attribute *attr,
 475			  char *buf)
 476{
 477	unsigned long val;
 478	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 479	struct etmv4_config *config = &drvdata->config;
 480
 481	val = config->eventctrl0;
 482	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 483}
 484
 485static ssize_t event_store(struct device *dev,
 486			   struct device_attribute *attr,
 487			   const char *buf, size_t size)
 488{
 489	unsigned long val;
 490	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 491	struct etmv4_config *config = &drvdata->config;
 492
 493	if (kstrtoul(buf, 16, &val))
 494		return -EINVAL;
 495
 496	spin_lock(&drvdata->spinlock);
 497	switch (drvdata->nr_event) {
 498	case 0x0:
 499		/* EVENT0, bits[7:0] */
 500		config->eventctrl0 = val & 0xFF;
 501		break;
 502	case 0x1:
 503		 /* EVENT1, bits[15:8] */
 504		config->eventctrl0 = val & 0xFFFF;
 505		break;
 506	case 0x2:
 507		/* EVENT2, bits[23:16] */
 508		config->eventctrl0 = val & 0xFFFFFF;
 509		break;
 510	case 0x3:
 511		/* EVENT3, bits[31:24] */
 512		config->eventctrl0 = val;
 513		break;
 514	default:
 515		break;
 516	}
 517	spin_unlock(&drvdata->spinlock);
 518	return size;
 519}
 520static DEVICE_ATTR_RW(event);
 521
 522static ssize_t event_instren_show(struct device *dev,
 523				  struct device_attribute *attr,
 524				  char *buf)
 525{
 526	unsigned long val;
 527	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 528	struct etmv4_config *config = &drvdata->config;
 529
 530	val = BMVAL(config->eventctrl1, 0, 3);
 531	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 532}
 533
 534static ssize_t event_instren_store(struct device *dev,
 535				   struct device_attribute *attr,
 536				   const char *buf, size_t size)
 537{
 538	unsigned long val;
 539	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 540	struct etmv4_config *config = &drvdata->config;
 541
 542	if (kstrtoul(buf, 16, &val))
 543		return -EINVAL;
 544
 545	spin_lock(&drvdata->spinlock);
 546	/* start by clearing all instruction event enable bits */
 547	config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
 548	switch (drvdata->nr_event) {
 549	case 0x0:
 550		/* generate Event element for event 1 */
 551		config->eventctrl1 |= val & BIT(1);
 552		break;
 553	case 0x1:
 554		/* generate Event element for event 1 and 2 */
 555		config->eventctrl1 |= val & (BIT(0) | BIT(1));
 556		break;
 557	case 0x2:
 558		/* generate Event element for event 1, 2 and 3 */
 559		config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
 
 
 560		break;
 561	case 0x3:
 562		/* generate Event element for all 4 events */
 563		config->eventctrl1 |= val & 0xF;
 
 
 
 564		break;
 565	default:
 566		break;
 567	}
 568	spin_unlock(&drvdata->spinlock);
 569	return size;
 570}
 571static DEVICE_ATTR_RW(event_instren);
 572
 573static ssize_t event_ts_show(struct device *dev,
 574			     struct device_attribute *attr,
 575			     char *buf)
 576{
 577	unsigned long val;
 578	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 579	struct etmv4_config *config = &drvdata->config;
 580
 581	val = config->ts_ctrl;
 582	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 583}
 584
 585static ssize_t event_ts_store(struct device *dev,
 586			      struct device_attribute *attr,
 587			      const char *buf, size_t size)
 588{
 589	unsigned long val;
 590	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 591	struct etmv4_config *config = &drvdata->config;
 592
 593	if (kstrtoul(buf, 16, &val))
 594		return -EINVAL;
 595	if (!drvdata->ts_size)
 596		return -EINVAL;
 597
 598	config->ts_ctrl = val & ETMv4_EVENT_MASK;
 599	return size;
 600}
 601static DEVICE_ATTR_RW(event_ts);
 602
 603static ssize_t syncfreq_show(struct device *dev,
 604			     struct device_attribute *attr,
 605			     char *buf)
 606{
 607	unsigned long val;
 608	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 609	struct etmv4_config *config = &drvdata->config;
 610
 611	val = config->syncfreq;
 612	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 613}
 614
 615static ssize_t syncfreq_store(struct device *dev,
 616			      struct device_attribute *attr,
 617			      const char *buf, size_t size)
 618{
 619	unsigned long val;
 620	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 621	struct etmv4_config *config = &drvdata->config;
 622
 623	if (kstrtoul(buf, 16, &val))
 624		return -EINVAL;
 625	if (drvdata->syncpr == true)
 626		return -EINVAL;
 627
 628	config->syncfreq = val & ETMv4_SYNC_MASK;
 629	return size;
 630}
 631static DEVICE_ATTR_RW(syncfreq);
 632
 633static ssize_t cyc_threshold_show(struct device *dev,
 634				  struct device_attribute *attr,
 635				  char *buf)
 636{
 637	unsigned long val;
 638	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 639	struct etmv4_config *config = &drvdata->config;
 640
 641	val = config->ccctlr;
 642	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 643}
 644
 645static ssize_t cyc_threshold_store(struct device *dev,
 646				   struct device_attribute *attr,
 647				   const char *buf, size_t size)
 648{
 649	unsigned long val;
 650	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 651	struct etmv4_config *config = &drvdata->config;
 652
 653	if (kstrtoul(buf, 16, &val))
 654		return -EINVAL;
 655
 656	/* mask off max threshold before checking min value */
 657	val &= ETM_CYC_THRESHOLD_MASK;
 658	if (val < drvdata->ccitmin)
 659		return -EINVAL;
 660
 661	config->ccctlr = val;
 662	return size;
 663}
 664static DEVICE_ATTR_RW(cyc_threshold);
 665
 666static ssize_t bb_ctrl_show(struct device *dev,
 667			    struct device_attribute *attr,
 668			    char *buf)
 669{
 670	unsigned long val;
 671	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 672	struct etmv4_config *config = &drvdata->config;
 673
 674	val = config->bb_ctrl;
 675	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 676}
 677
 678static ssize_t bb_ctrl_store(struct device *dev,
 679			     struct device_attribute *attr,
 680			     const char *buf, size_t size)
 681{
 682	unsigned long val;
 683	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 684	struct etmv4_config *config = &drvdata->config;
 685
 686	if (kstrtoul(buf, 16, &val))
 687		return -EINVAL;
 688	if (drvdata->trcbb == false)
 689		return -EINVAL;
 690	if (!drvdata->nr_addr_cmp)
 691		return -EINVAL;
 692
 693	/*
 694	 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
 695	 * individual range comparators. If include then at least 1
 696	 * range must be selected.
 697	 */
 698	if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
 699		return -EINVAL;
 700
 701	config->bb_ctrl = val & GENMASK(8, 0);
 702	return size;
 703}
 704static DEVICE_ATTR_RW(bb_ctrl);
 705
 706static ssize_t event_vinst_show(struct device *dev,
 707				struct device_attribute *attr,
 708				char *buf)
 709{
 710	unsigned long val;
 711	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 712	struct etmv4_config *config = &drvdata->config;
 713
 714	val = config->vinst_ctrl & ETMv4_EVENT_MASK;
 715	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 716}
 717
 718static ssize_t event_vinst_store(struct device *dev,
 719				 struct device_attribute *attr,
 720				 const char *buf, size_t size)
 721{
 722	unsigned long val;
 723	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 724	struct etmv4_config *config = &drvdata->config;
 725
 726	if (kstrtoul(buf, 16, &val))
 727		return -EINVAL;
 728
 729	spin_lock(&drvdata->spinlock);
 730	val &= ETMv4_EVENT_MASK;
 731	config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
 732	config->vinst_ctrl |= val;
 733	spin_unlock(&drvdata->spinlock);
 734	return size;
 735}
 736static DEVICE_ATTR_RW(event_vinst);
 737
 738static ssize_t s_exlevel_vinst_show(struct device *dev,
 739				    struct device_attribute *attr,
 740				    char *buf)
 741{
 742	unsigned long val;
 743	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 744	struct etmv4_config *config = &drvdata->config;
 745
 746	val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_S_MASK) >> TRCVICTLR_EXLEVEL_S_SHIFT;
 747	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 748}
 749
 750static ssize_t s_exlevel_vinst_store(struct device *dev,
 751				     struct device_attribute *attr,
 752				     const char *buf, size_t size)
 753{
 754	unsigned long val;
 755	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 756	struct etmv4_config *config = &drvdata->config;
 757
 758	if (kstrtoul(buf, 16, &val))
 759		return -EINVAL;
 760
 761	spin_lock(&drvdata->spinlock);
 762	/* clear all EXLEVEL_S bits  */
 763	config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_S_MASK);
 764	/* enable instruction tracing for corresponding exception level */
 765	val &= drvdata->s_ex_level;
 766	config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_S_SHIFT);
 767	spin_unlock(&drvdata->spinlock);
 768	return size;
 769}
 770static DEVICE_ATTR_RW(s_exlevel_vinst);
 771
 772static ssize_t ns_exlevel_vinst_show(struct device *dev,
 773				     struct device_attribute *attr,
 774				     char *buf)
 775{
 776	unsigned long val;
 777	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 778	struct etmv4_config *config = &drvdata->config;
 779
 780	/* EXLEVEL_NS, bits[23:20] */
 781	val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_NS_MASK) >> TRCVICTLR_EXLEVEL_NS_SHIFT;
 782	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 783}
 784
 785static ssize_t ns_exlevel_vinst_store(struct device *dev,
 786				      struct device_attribute *attr,
 787				      const char *buf, size_t size)
 788{
 789	unsigned long val;
 790	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 791	struct etmv4_config *config = &drvdata->config;
 792
 793	if (kstrtoul(buf, 16, &val))
 794		return -EINVAL;
 795
 796	spin_lock(&drvdata->spinlock);
 797	/* clear EXLEVEL_NS bits  */
 798	config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_NS_MASK);
 799	/* enable instruction tracing for corresponding exception level */
 800	val &= drvdata->ns_ex_level;
 801	config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_NS_SHIFT);
 802	spin_unlock(&drvdata->spinlock);
 803	return size;
 804}
 805static DEVICE_ATTR_RW(ns_exlevel_vinst);
 806
 807static ssize_t addr_idx_show(struct device *dev,
 808			     struct device_attribute *attr,
 809			     char *buf)
 810{
 811	unsigned long val;
 812	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 813	struct etmv4_config *config = &drvdata->config;
 814
 815	val = config->addr_idx;
 816	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 817}
 818
 819static ssize_t addr_idx_store(struct device *dev,
 820			      struct device_attribute *attr,
 821			      const char *buf, size_t size)
 822{
 823	unsigned long val;
 824	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 825	struct etmv4_config *config = &drvdata->config;
 826
 827	if (kstrtoul(buf, 16, &val))
 828		return -EINVAL;
 829	if (val >= drvdata->nr_addr_cmp * 2)
 830		return -EINVAL;
 831
 832	/*
 833	 * Use spinlock to ensure index doesn't change while it gets
 834	 * dereferenced multiple times within a spinlock block elsewhere.
 835	 */
 836	spin_lock(&drvdata->spinlock);
 837	config->addr_idx = val;
 838	spin_unlock(&drvdata->spinlock);
 839	return size;
 840}
 841static DEVICE_ATTR_RW(addr_idx);
 842
 843static ssize_t addr_instdatatype_show(struct device *dev,
 844				      struct device_attribute *attr,
 845				      char *buf)
 846{
 847	ssize_t len;
 848	u8 val, idx;
 849	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 850	struct etmv4_config *config = &drvdata->config;
 851
 852	spin_lock(&drvdata->spinlock);
 853	idx = config->addr_idx;
 854	val = BMVAL(config->addr_acc[idx], 0, 1);
 855	len = scnprintf(buf, PAGE_SIZE, "%s\n",
 856			val == ETM_INSTR_ADDR ? "instr" :
 857			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
 858			(val == ETM_DATA_STORE_ADDR ? "data_store" :
 859			"data_load_store")));
 860	spin_unlock(&drvdata->spinlock);
 861	return len;
 862}
 863
 864static ssize_t addr_instdatatype_store(struct device *dev,
 865				       struct device_attribute *attr,
 866				       const char *buf, size_t size)
 867{
 868	u8 idx;
 869	char str[20] = "";
 870	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 871	struct etmv4_config *config = &drvdata->config;
 872
 873	if (strlen(buf) >= 20)
 874		return -EINVAL;
 875	if (sscanf(buf, "%s", str) != 1)
 876		return -EINVAL;
 877
 878	spin_lock(&drvdata->spinlock);
 879	idx = config->addr_idx;
 880	if (!strcmp(str, "instr"))
 881		/* TYPE, bits[1:0] */
 882		config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
 883
 884	spin_unlock(&drvdata->spinlock);
 885	return size;
 886}
 887static DEVICE_ATTR_RW(addr_instdatatype);
 888
 889static ssize_t addr_single_show(struct device *dev,
 890				struct device_attribute *attr,
 891				char *buf)
 892{
 893	u8 idx;
 894	unsigned long val;
 895	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 896	struct etmv4_config *config = &drvdata->config;
 897
 898	idx = config->addr_idx;
 899	spin_lock(&drvdata->spinlock);
 900	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
 901	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
 902		spin_unlock(&drvdata->spinlock);
 903		return -EPERM;
 904	}
 905	val = (unsigned long)config->addr_val[idx];
 906	spin_unlock(&drvdata->spinlock);
 907	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 908}
 909
 910static ssize_t addr_single_store(struct device *dev,
 911				 struct device_attribute *attr,
 912				 const char *buf, size_t size)
 913{
 914	u8 idx;
 915	unsigned long val;
 916	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 917	struct etmv4_config *config = &drvdata->config;
 918
 919	if (kstrtoul(buf, 16, &val))
 920		return -EINVAL;
 921
 922	spin_lock(&drvdata->spinlock);
 923	idx = config->addr_idx;
 924	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
 925	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
 926		spin_unlock(&drvdata->spinlock);
 927		return -EPERM;
 928	}
 929
 930	config->addr_val[idx] = (u64)val;
 931	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
 932	spin_unlock(&drvdata->spinlock);
 933	return size;
 934}
 935static DEVICE_ATTR_RW(addr_single);
 936
 937static ssize_t addr_range_show(struct device *dev,
 938			       struct device_attribute *attr,
 939			       char *buf)
 940{
 941	u8 idx;
 942	unsigned long val1, val2;
 943	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 944	struct etmv4_config *config = &drvdata->config;
 945
 946	spin_lock(&drvdata->spinlock);
 947	idx = config->addr_idx;
 948	if (idx % 2 != 0) {
 949		spin_unlock(&drvdata->spinlock);
 950		return -EPERM;
 951	}
 952	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
 953	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
 954	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
 955	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
 956		spin_unlock(&drvdata->spinlock);
 957		return -EPERM;
 958	}
 959
 960	val1 = (unsigned long)config->addr_val[idx];
 961	val2 = (unsigned long)config->addr_val[idx + 1];
 962	spin_unlock(&drvdata->spinlock);
 963	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
 964}
 965
 966static ssize_t addr_range_store(struct device *dev,
 967				struct device_attribute *attr,
 968				const char *buf, size_t size)
 969{
 970	u8 idx;
 971	unsigned long val1, val2;
 972	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 973	struct etmv4_config *config = &drvdata->config;
 974	int elements, exclude;
 975
 976	elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
 977
 978	/*  exclude is optional, but need at least two parameter */
 979	if (elements < 2)
 980		return -EINVAL;
 981	/* lower address comparator cannot have a higher address value */
 982	if (val1 > val2)
 983		return -EINVAL;
 984
 985	spin_lock(&drvdata->spinlock);
 986	idx = config->addr_idx;
 987	if (idx % 2 != 0) {
 988		spin_unlock(&drvdata->spinlock);
 989		return -EPERM;
 990	}
 991
 992	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
 993	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
 994	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
 995	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
 996		spin_unlock(&drvdata->spinlock);
 997		return -EPERM;
 998	}
 999
1000	config->addr_val[idx] = (u64)val1;
1001	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1002	config->addr_val[idx + 1] = (u64)val2;
1003	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1004	/*
1005	 * Program include or exclude control bits for vinst or vdata
1006	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1007	 * use supplied value, or default to bit set in 'mode'
1008	 */
1009	if (elements != 3)
1010		exclude = config->mode & ETM_MODE_EXCLUDE;
1011	etm4_set_mode_exclude(drvdata, exclude ? true : false);
1012
1013	spin_unlock(&drvdata->spinlock);
1014	return size;
1015}
1016static DEVICE_ATTR_RW(addr_range);
1017
1018static ssize_t addr_start_show(struct device *dev,
1019			       struct device_attribute *attr,
1020			       char *buf)
1021{
1022	u8 idx;
1023	unsigned long val;
1024	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1025	struct etmv4_config *config = &drvdata->config;
1026
1027	spin_lock(&drvdata->spinlock);
1028	idx = config->addr_idx;
1029
1030	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1031	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1032		spin_unlock(&drvdata->spinlock);
1033		return -EPERM;
1034	}
1035
1036	val = (unsigned long)config->addr_val[idx];
1037	spin_unlock(&drvdata->spinlock);
1038	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1039}
1040
1041static ssize_t addr_start_store(struct device *dev,
1042				struct device_attribute *attr,
1043				const char *buf, size_t size)
1044{
1045	u8 idx;
1046	unsigned long val;
1047	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1048	struct etmv4_config *config = &drvdata->config;
1049
1050	if (kstrtoul(buf, 16, &val))
1051		return -EINVAL;
1052
1053	spin_lock(&drvdata->spinlock);
1054	idx = config->addr_idx;
1055	if (!drvdata->nr_addr_cmp) {
1056		spin_unlock(&drvdata->spinlock);
1057		return -EINVAL;
1058	}
1059	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1060	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1061		spin_unlock(&drvdata->spinlock);
1062		return -EPERM;
1063	}
1064
1065	config->addr_val[idx] = (u64)val;
1066	config->addr_type[idx] = ETM_ADDR_TYPE_START;
1067	config->vissctlr |= BIT(idx);
1068	spin_unlock(&drvdata->spinlock);
1069	return size;
1070}
1071static DEVICE_ATTR_RW(addr_start);
1072
1073static ssize_t addr_stop_show(struct device *dev,
1074			      struct device_attribute *attr,
1075			      char *buf)
1076{
1077	u8 idx;
1078	unsigned long val;
1079	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1080	struct etmv4_config *config = &drvdata->config;
1081
1082	spin_lock(&drvdata->spinlock);
1083	idx = config->addr_idx;
1084
1085	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1086	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1087		spin_unlock(&drvdata->spinlock);
1088		return -EPERM;
1089	}
1090
1091	val = (unsigned long)config->addr_val[idx];
1092	spin_unlock(&drvdata->spinlock);
1093	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1094}
1095
1096static ssize_t addr_stop_store(struct device *dev,
1097			       struct device_attribute *attr,
1098			       const char *buf, size_t size)
1099{
1100	u8 idx;
1101	unsigned long val;
1102	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1103	struct etmv4_config *config = &drvdata->config;
1104
1105	if (kstrtoul(buf, 16, &val))
1106		return -EINVAL;
1107
1108	spin_lock(&drvdata->spinlock);
1109	idx = config->addr_idx;
1110	if (!drvdata->nr_addr_cmp) {
1111		spin_unlock(&drvdata->spinlock);
1112		return -EINVAL;
1113	}
1114	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1115	       config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1116		spin_unlock(&drvdata->spinlock);
1117		return -EPERM;
1118	}
1119
1120	config->addr_val[idx] = (u64)val;
1121	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1122	config->vissctlr |= BIT(idx + 16);
1123	spin_unlock(&drvdata->spinlock);
1124	return size;
1125}
1126static DEVICE_ATTR_RW(addr_stop);
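
/*
 * Usage sketch for start/stop filtering with the two attributes above
 * (comparator indices and addresses are illustrative assumptions). Each
 * store sets bit idx or bit idx + 16 of vissctlr for the selected
 * comparator:
 *
 *   echo 2 > addr_idx            # pick a free comparator for the start
 *   echo 0x401000 > addr_start
 *   echo 3 > addr_idx            # and another one for the stop
 *   echo 0x402000 > addr_stop
 */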
1127
1128static ssize_t addr_ctxtype_show(struct device *dev,
1129				 struct device_attribute *attr,
1130				 char *buf)
1131{
1132	ssize_t len;
1133	u8 idx, val;
1134	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1135	struct etmv4_config *config = &drvdata->config;
1136
1137	spin_lock(&drvdata->spinlock);
1138	idx = config->addr_idx;
1139	/* CONTEXTTYPE, bits[3:2] */
1140	val = BMVAL(config->addr_acc[idx], 2, 3);
1141	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1142			(val == ETM_CTX_CTXID ? "ctxid" :
1143			(val == ETM_CTX_VMID ? "vmid" : "all")));
1144	spin_unlock(&drvdata->spinlock);
1145	return len;
1146}
1147
1148static ssize_t addr_ctxtype_store(struct device *dev,
1149				  struct device_attribute *attr,
1150				  const char *buf, size_t size)
1151{
1152	u8 idx;
1153	char str[10] = "";
1154	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1155	struct etmv4_config *config = &drvdata->config;
1156
1157	if (strlen(buf) >= 10)
1158		return -EINVAL;
1159	if (sscanf(buf, "%s", str) != 1)
1160		return -EINVAL;
1161
1162	spin_lock(&drvdata->spinlock);
1163	idx = config->addr_idx;
1164	if (!strcmp(str, "none"))
1165		/* start by clearing context type bits */
1166		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1167	else if (!strcmp(str, "ctxid")) {
 1168		/* 0b01 The trace unit performs a Context ID comparison */
1169		if (drvdata->numcidc) {
1170			config->addr_acc[idx] |= BIT(2);
1171			config->addr_acc[idx] &= ~BIT(3);
1172		}
1173	} else if (!strcmp(str, "vmid")) {
 1174		/* 0b10 The trace unit performs a VMID comparison */
1175		if (drvdata->numvmidc) {
1176			config->addr_acc[idx] &= ~BIT(2);
1177			config->addr_acc[idx] |= BIT(3);
1178		}
1179	} else if (!strcmp(str, "all")) {
1180		/*
1181		 * 0b11 The trace unit performs a Context ID
 1182		 * comparison and a VMID comparison
1183		 */
1184		if (drvdata->numcidc)
1185			config->addr_acc[idx] |= BIT(2);
1186		if (drvdata->numvmidc)
1187			config->addr_acc[idx] |= BIT(3);
1188	}
1189	spin_unlock(&drvdata->spinlock);
1190	return size;
1191}
1192static DEVICE_ATTR_RW(addr_ctxtype);
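
/*
 * Usage sketch, assuming Context ID comparators are implemented (the
 * keyword written below is one of the four accepted by the store
 * handler above):
 *
 *   echo ctxid > addr_ctxtype    # require a Context ID match as well
 *   cat addr_ctxtype             # -> "ctxid"
 */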
1193
1194static ssize_t addr_context_show(struct device *dev,
1195				 struct device_attribute *attr,
1196				 char *buf)
1197{
1198	u8 idx;
1199	unsigned long val;
1200	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1201	struct etmv4_config *config = &drvdata->config;
1202
1203	spin_lock(&drvdata->spinlock);
1204	idx = config->addr_idx;
1205	/* context ID comparator bits[6:4] */
1206	val = BMVAL(config->addr_acc[idx], 4, 6);
1207	spin_unlock(&drvdata->spinlock);
1208	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1209}
1210
1211static ssize_t addr_context_store(struct device *dev,
1212				  struct device_attribute *attr,
1213				  const char *buf, size_t size)
1214{
1215	u8 idx;
1216	unsigned long val;
1217	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1218	struct etmv4_config *config = &drvdata->config;
1219
1220	if (kstrtoul(buf, 16, &val))
1221		return -EINVAL;
1222	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1223		return -EINVAL;
1224	if (val >=  (drvdata->numcidc >= drvdata->numvmidc ?
1225		     drvdata->numcidc : drvdata->numvmidc))
1226		return -EINVAL;
1227
1228	spin_lock(&drvdata->spinlock);
1229	idx = config->addr_idx;
1230	/* clear context ID comparator bits[6:4] */
1231	config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1232	config->addr_acc[idx] |= (val << 4);
1233	spin_unlock(&drvdata->spinlock);
1234	return size;
1235}
1236static DEVICE_ATTR_RW(addr_context);
1237
1238static ssize_t addr_exlevel_s_ns_show(struct device *dev,
1239				      struct device_attribute *attr,
1240				      char *buf)
1241{
1242	u8 idx;
1243	unsigned long val;
1244	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1245	struct etmv4_config *config = &drvdata->config;
1246
1247	spin_lock(&drvdata->spinlock);
1248	idx = config->addr_idx;
1249	val = BMVAL(config->addr_acc[idx], 8, 14);
1250	spin_unlock(&drvdata->spinlock);
1251	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1252}
1253
1254static ssize_t addr_exlevel_s_ns_store(struct device *dev,
1255				       struct device_attribute *attr,
1256				       const char *buf, size_t size)
1257{
1258	u8 idx;
1259	unsigned long val;
1260	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1261	struct etmv4_config *config = &drvdata->config;
1262
1263	if (kstrtoul(buf, 0, &val))
1264		return -EINVAL;
1265
1266	if (val & ~((GENMASK(14, 8) >> 8)))
1267		return -EINVAL;
1268
1269	spin_lock(&drvdata->spinlock);
1270	idx = config->addr_idx;
1271	/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1272	config->addr_acc[idx] &= ~(GENMASK(14, 8));
1273	config->addr_acc[idx] |= (val << 8);
1274	spin_unlock(&drvdata->spinlock);
1275	return size;
1276}
1277static DEVICE_ATTR_RW(addr_exlevel_s_ns);
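
/*
 * Usage sketch (the value is an illustrative assumption). The written
 * value is shifted into TRCACATRn bits[14:8], so bits[3:0] land in the
 * secure EXLEVEL_S field and bits[6:4] in the non-secure EXLEVEL_NS
 * field; for example bit[4] corresponds to non-secure EL0:
 *
 *   echo 0x10 > addr_exlevel_s_ns
 *   cat addr_exlevel_s_ns        # -> 0x10
 */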
1278
1279static const char * const addr_type_names[] = {
1280	"unused",
1281	"single",
1282	"range",
1283	"start",
1284	"stop"
1285};
1286
1287static ssize_t addr_cmp_view_show(struct device *dev,
1288				  struct device_attribute *attr, char *buf)
1289{
1290	u8 idx, addr_type;
1291	unsigned long addr_v, addr_v2, addr_ctrl;
1292	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1293	struct etmv4_config *config = &drvdata->config;
1294	int size = 0;
1295	bool exclude = false;
1296
1297	spin_lock(&drvdata->spinlock);
1298	idx = config->addr_idx;
1299	addr_v = config->addr_val[idx];
1300	addr_ctrl = config->addr_acc[idx];
1301	addr_type = config->addr_type[idx];
1302	if (addr_type == ETM_ADDR_TYPE_RANGE) {
1303		if (idx & 0x1) {
1304			idx -= 1;
1305			addr_v2 = addr_v;
1306			addr_v = config->addr_val[idx];
1307		} else {
1308			addr_v2 = config->addr_val[idx + 1];
1309		}
1310		exclude = config->viiectlr & BIT(idx / 2 + 16);
1311	}
1312	spin_unlock(&drvdata->spinlock);
1313	if (addr_type) {
1314		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
1315				 addr_type_names[addr_type], addr_v);
1316		if (addr_type == ETM_ADDR_TYPE_RANGE) {
1317			size += scnprintf(buf + size, PAGE_SIZE - size,
1318					  " %#lx %s", addr_v2,
1319					  exclude ? "exclude" : "include");
1320		}
1321		size += scnprintf(buf + size, PAGE_SIZE - size,
1322				  " ctrl(%#lx)\n", addr_ctrl);
1323	} else {
1324		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
1325	}
1326	return size;
1327}
1328static DEVICE_ATTR_RO(addr_cmp_view);
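
/*
 * Sample of the summary printed by the read-only attribute above,
 * assuming a range programmed on comparator pair 0/1 (addresses and the
 * control value are illustrative):
 *
 *   cat addr_cmp_view
 *   addr_cmp[0] range 0x401000 0x402000 include ctrl(0)
 */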
1329
1330static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
1331					    struct device_attribute *attr,
1332					    char *buf)
1333{
1334	unsigned long val;
1335	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1336	struct etmv4_config *config = &drvdata->config;
1337
1338	if (!drvdata->nr_pe_cmp)
1339		return -EINVAL;
1340	val = config->vipcssctlr;
1341	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1342}
1343static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
1344					     struct device_attribute *attr,
1345					     const char *buf, size_t size)
1346{
1347	unsigned long val;
1348	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1349	struct etmv4_config *config = &drvdata->config;
1350
1351	if (kstrtoul(buf, 16, &val))
1352		return -EINVAL;
1353	if (!drvdata->nr_pe_cmp)
1354		return -EINVAL;
1355
1356	spin_lock(&drvdata->spinlock);
1357	config->vipcssctlr = val;
1358	spin_unlock(&drvdata->spinlock);
1359	return size;
1360}
1361static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
1362
1363static ssize_t seq_idx_show(struct device *dev,
1364			    struct device_attribute *attr,
1365			    char *buf)
1366{
1367	unsigned long val;
1368	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1369	struct etmv4_config *config = &drvdata->config;
1370
1371	val = config->seq_idx;
1372	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1373}
1374
1375static ssize_t seq_idx_store(struct device *dev,
1376			     struct device_attribute *attr,
1377			     const char *buf, size_t size)
1378{
1379	unsigned long val;
1380	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1381	struct etmv4_config *config = &drvdata->config;
1382
1383	if (kstrtoul(buf, 16, &val))
1384		return -EINVAL;
1385	if (val >= drvdata->nrseqstate - 1)
1386		return -EINVAL;
1387
1388	/*
1389	 * Use spinlock to ensure index doesn't change while it gets
1390	 * dereferenced multiple times within a spinlock block elsewhere.
1391	 */
1392	spin_lock(&drvdata->spinlock);
1393	config->seq_idx = val;
1394	spin_unlock(&drvdata->spinlock);
1395	return size;
1396}
1397static DEVICE_ATTR_RW(seq_idx);
1398
1399static ssize_t seq_state_show(struct device *dev,
1400			      struct device_attribute *attr,
1401			      char *buf)
1402{
1403	unsigned long val;
1404	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1405	struct etmv4_config *config = &drvdata->config;
1406
1407	val = config->seq_state;
1408	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1409}
1410
1411static ssize_t seq_state_store(struct device *dev,
1412			       struct device_attribute *attr,
1413			       const char *buf, size_t size)
1414{
1415	unsigned long val;
1416	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1417	struct etmv4_config *config = &drvdata->config;
1418
1419	if (kstrtoul(buf, 16, &val))
1420		return -EINVAL;
1421	if (val >= drvdata->nrseqstate)
1422		return -EINVAL;
1423
1424	config->seq_state = val;
1425	return size;
1426}
1427static DEVICE_ATTR_RW(seq_state);
1428
1429static ssize_t seq_event_show(struct device *dev,
1430			      struct device_attribute *attr,
1431			      char *buf)
1432{
1433	u8 idx;
1434	unsigned long val;
1435	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1436	struct etmv4_config *config = &drvdata->config;
1437
1438	spin_lock(&drvdata->spinlock);
1439	idx = config->seq_idx;
1440	val = config->seq_ctrl[idx];
1441	spin_unlock(&drvdata->spinlock);
1442	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1443}
1444
1445static ssize_t seq_event_store(struct device *dev,
1446			       struct device_attribute *attr,
1447			       const char *buf, size_t size)
1448{
1449	u8 idx;
1450	unsigned long val;
1451	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1452	struct etmv4_config *config = &drvdata->config;
1453
1454	if (kstrtoul(buf, 16, &val))
1455		return -EINVAL;
1456
1457	spin_lock(&drvdata->spinlock);
1458	idx = config->seq_idx;
1459	/* Seq control has two masks B[15:8] F[7:0] */
1460	config->seq_ctrl[idx] = val & 0xFFFF;
1461	spin_unlock(&drvdata->spinlock);
1462	return size;
1463}
1464static DEVICE_ATTR_RW(seq_event);
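
/*
 * Usage sketch for the sequencer state events (values are illustrative
 * assumptions). Per the store handler above, the low byte F[7:0] and the
 * high byte B[15:8] of seq_event hold the forward and backward event
 * selectors for the state picked by seq_idx:
 *
 *   echo 0 > seq_idx
 *   echo 0x201 > seq_event       # F = event 0x1, B = event 0x2
 */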
1465
1466static ssize_t seq_reset_event_show(struct device *dev,
1467				    struct device_attribute *attr,
1468				    char *buf)
1469{
1470	unsigned long val;
1471	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1472	struct etmv4_config *config = &drvdata->config;
1473
1474	val = config->seq_rst;
1475	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1476}
1477
1478static ssize_t seq_reset_event_store(struct device *dev,
1479				     struct device_attribute *attr,
1480				     const char *buf, size_t size)
1481{
1482	unsigned long val;
1483	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1484	struct etmv4_config *config = &drvdata->config;
1485
1486	if (kstrtoul(buf, 16, &val))
1487		return -EINVAL;
1488	if (!(drvdata->nrseqstate))
1489		return -EINVAL;
1490
1491	config->seq_rst = val & ETMv4_EVENT_MASK;
1492	return size;
1493}
1494static DEVICE_ATTR_RW(seq_reset_event);
1495
1496static ssize_t cntr_idx_show(struct device *dev,
1497			     struct device_attribute *attr,
1498			     char *buf)
1499{
1500	unsigned long val;
1501	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1502	struct etmv4_config *config = &drvdata->config;
1503
1504	val = config->cntr_idx;
1505	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1506}
1507
1508static ssize_t cntr_idx_store(struct device *dev,
1509			      struct device_attribute *attr,
1510			      const char *buf, size_t size)
1511{
1512	unsigned long val;
1513	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1514	struct etmv4_config *config = &drvdata->config;
1515
1516	if (kstrtoul(buf, 16, &val))
1517		return -EINVAL;
1518	if (val >= drvdata->nr_cntr)
1519		return -EINVAL;
1520
1521	/*
1522	 * Use spinlock to ensure index doesn't change while it gets
1523	 * dereferenced multiple times within a spinlock block elsewhere.
1524	 */
1525	spin_lock(&drvdata->spinlock);
1526	config->cntr_idx = val;
1527	spin_unlock(&drvdata->spinlock);
1528	return size;
1529}
1530static DEVICE_ATTR_RW(cntr_idx);
1531
1532static ssize_t cntrldvr_show(struct device *dev,
1533			     struct device_attribute *attr,
1534			     char *buf)
1535{
1536	u8 idx;
1537	unsigned long val;
1538	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1539	struct etmv4_config *config = &drvdata->config;
1540
1541	spin_lock(&drvdata->spinlock);
1542	idx = config->cntr_idx;
1543	val = config->cntrldvr[idx];
1544	spin_unlock(&drvdata->spinlock);
1545	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1546}
1547
1548static ssize_t cntrldvr_store(struct device *dev,
1549			      struct device_attribute *attr,
1550			      const char *buf, size_t size)
1551{
1552	u8 idx;
1553	unsigned long val;
1554	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1555	struct etmv4_config *config = &drvdata->config;
1556
1557	if (kstrtoul(buf, 16, &val))
1558		return -EINVAL;
1559	if (val > ETM_CNTR_MAX_VAL)
1560		return -EINVAL;
1561
1562	spin_lock(&drvdata->spinlock);
1563	idx = config->cntr_idx;
1564	config->cntrldvr[idx] = val;
1565	spin_unlock(&drvdata->spinlock);
1566	return size;
1567}
1568static DEVICE_ATTR_RW(cntrldvr);
1569
1570static ssize_t cntr_val_show(struct device *dev,
1571			     struct device_attribute *attr,
1572			     char *buf)
1573{
1574	u8 idx;
1575	unsigned long val;
1576	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1577	struct etmv4_config *config = &drvdata->config;
1578
1579	spin_lock(&drvdata->spinlock);
1580	idx = config->cntr_idx;
1581	val = config->cntr_val[idx];
1582	spin_unlock(&drvdata->spinlock);
1583	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1584}
1585
1586static ssize_t cntr_val_store(struct device *dev,
1587			      struct device_attribute *attr,
1588			      const char *buf, size_t size)
1589{
1590	u8 idx;
1591	unsigned long val;
1592	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1593	struct etmv4_config *config = &drvdata->config;
1594
1595	if (kstrtoul(buf, 16, &val))
1596		return -EINVAL;
1597	if (val > ETM_CNTR_MAX_VAL)
1598		return -EINVAL;
1599
1600	spin_lock(&drvdata->spinlock);
1601	idx = config->cntr_idx;
1602	config->cntr_val[idx] = val;
1603	spin_unlock(&drvdata->spinlock);
1604	return size;
1605}
1606static DEVICE_ATTR_RW(cntr_val);
1607
1608static ssize_t cntr_ctrl_show(struct device *dev,
1609			      struct device_attribute *attr,
1610			      char *buf)
1611{
1612	u8 idx;
1613	unsigned long val;
1614	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1615	struct etmv4_config *config = &drvdata->config;
1616
1617	spin_lock(&drvdata->spinlock);
1618	idx = config->cntr_idx;
1619	val = config->cntr_ctrl[idx];
1620	spin_unlock(&drvdata->spinlock);
1621	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1622}
1623
1624static ssize_t cntr_ctrl_store(struct device *dev,
1625			       struct device_attribute *attr,
1626			       const char *buf, size_t size)
1627{
1628	u8 idx;
1629	unsigned long val;
1630	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1631	struct etmv4_config *config = &drvdata->config;
1632
1633	if (kstrtoul(buf, 16, &val))
1634		return -EINVAL;
1635
1636	spin_lock(&drvdata->spinlock);
1637	idx = config->cntr_idx;
1638	config->cntr_ctrl[idx] = val;
1639	spin_unlock(&drvdata->spinlock);
1640	return size;
1641}
1642static DEVICE_ATTR_RW(cntr_ctrl);
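
/*
 * Usage sketch for programming one counter (values are illustrative
 * assumptions): select it with cntr_idx, then set its reload value,
 * current value and control register:
 *
 *   echo 0 > cntr_idx
 *   echo 0x100 > cntrldvr        # reload value
 *   echo 0x100 > cntr_val        # current count
 *   echo 0x1 > cntr_ctrl         # TRCCNTCTLRn value, here count event 0x1
 */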
1643
1644static ssize_t res_idx_show(struct device *dev,
1645			    struct device_attribute *attr,
1646			    char *buf)
1647{
1648	unsigned long val;
1649	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1650	struct etmv4_config *config = &drvdata->config;
1651
1652	val = config->res_idx;
1653	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1654}
1655
1656static ssize_t res_idx_store(struct device *dev,
1657			     struct device_attribute *attr,
1658			     const char *buf, size_t size)
1659{
1660	unsigned long val;
1661	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1662	struct etmv4_config *config = &drvdata->config;
1663
1664	if (kstrtoul(buf, 16, &val))
1665		return -EINVAL;
1666	/*
 1667	 * Resource selector pair 0 is always implemented and reserved;
 1668	 * hence an idx of 0 or 1 is invalid.
1669	 */
1670	if ((val < 2) || (val >= 2 * drvdata->nr_resource))
1671		return -EINVAL;
1672
1673	/*
1674	 * Use spinlock to ensure index doesn't change while it gets
1675	 * dereferenced multiple times within a spinlock block elsewhere.
1676	 */
1677	spin_lock(&drvdata->spinlock);
1678	config->res_idx = val;
1679	spin_unlock(&drvdata->spinlock);
1680	return size;
1681}
1682static DEVICE_ATTR_RW(res_idx);
1683
1684static ssize_t res_ctrl_show(struct device *dev,
1685			     struct device_attribute *attr,
1686			     char *buf)
1687{
1688	u8 idx;
1689	unsigned long val;
1690	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1691	struct etmv4_config *config = &drvdata->config;
1692
1693	spin_lock(&drvdata->spinlock);
1694	idx = config->res_idx;
1695	val = config->res_ctrl[idx];
1696	spin_unlock(&drvdata->spinlock);
1697	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1698}
1699
1700static ssize_t res_ctrl_store(struct device *dev,
1701			      struct device_attribute *attr,
1702			      const char *buf, size_t size)
1703{
1704	u8 idx;
1705	unsigned long val;
1706	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1707	struct etmv4_config *config = &drvdata->config;
1708
1709	if (kstrtoul(buf, 16, &val))
1710		return -EINVAL;
1711
1712	spin_lock(&drvdata->spinlock);
1713	idx = config->res_idx;
 1714	/* For an odd idx the pair inversion bit is RES0 */
1715	if (idx % 2 != 0)
1716		/* PAIRINV, bit[21] */
1717		val &= ~BIT(21);
1718	config->res_ctrl[idx] = val & GENMASK(21, 0);
1719	spin_unlock(&drvdata->spinlock);
1720	return size;
1721}
1722static DEVICE_ATTR_RW(res_ctrl);
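
/*
 * Usage sketch (the control value is an illustrative assumption).
 * Resource selector pair 0 is reserved, so res_idx starts at 2; for an
 * odd index the store handler above clears the PAIRINV bit[21] before
 * writing the GROUP/SELECT fields of TRCRSCTLRn:
 *
 *   echo 2 > res_idx
 *   echo 0x10001 > res_ctrl
 */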
1723
1724static ssize_t sshot_idx_show(struct device *dev,
1725			      struct device_attribute *attr, char *buf)
1726{
1727	unsigned long val;
1728	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1729	struct etmv4_config *config = &drvdata->config;
1730
1731	val = config->ss_idx;
1732	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1733}
1734
1735static ssize_t sshot_idx_store(struct device *dev,
1736			       struct device_attribute *attr,
1737			       const char *buf, size_t size)
1738{
1739	unsigned long val;
1740	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1741	struct etmv4_config *config = &drvdata->config;
1742
1743	if (kstrtoul(buf, 16, &val))
1744		return -EINVAL;
1745	if (val >= drvdata->nr_ss_cmp)
1746		return -EINVAL;
1747
1748	spin_lock(&drvdata->spinlock);
1749	config->ss_idx = val;
1750	spin_unlock(&drvdata->spinlock);
1751	return size;
1752}
1753static DEVICE_ATTR_RW(sshot_idx);
1754
1755static ssize_t sshot_ctrl_show(struct device *dev,
1756			       struct device_attribute *attr,
1757			       char *buf)
1758{
1759	unsigned long val;
1760	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1761	struct etmv4_config *config = &drvdata->config;
1762
1763	spin_lock(&drvdata->spinlock);
1764	val = config->ss_ctrl[config->ss_idx];
1765	spin_unlock(&drvdata->spinlock);
1766	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1767}
1768
1769static ssize_t sshot_ctrl_store(struct device *dev,
1770				struct device_attribute *attr,
1771				const char *buf, size_t size)
1772{
1773	u8 idx;
1774	unsigned long val;
1775	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1776	struct etmv4_config *config = &drvdata->config;
1777
1778	if (kstrtoul(buf, 16, &val))
1779		return -EINVAL;
1780
1781	spin_lock(&drvdata->spinlock);
1782	idx = config->ss_idx;
1783	config->ss_ctrl[idx] = val & GENMASK(24, 0);
1784	/* must clear bit 31 in related status register on programming */
1785	config->ss_status[idx] &= ~BIT(31);
1786	spin_unlock(&drvdata->spinlock);
1787	return size;
1788}
1789static DEVICE_ATTR_RW(sshot_ctrl);
1790
1791static ssize_t sshot_status_show(struct device *dev,
1792				 struct device_attribute *attr, char *buf)
1793{
1794	unsigned long val;
1795	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1796	struct etmv4_config *config = &drvdata->config;
1797
1798	spin_lock(&drvdata->spinlock);
1799	val = config->ss_status[config->ss_idx];
1800	spin_unlock(&drvdata->spinlock);
1801	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1802}
1803static DEVICE_ATTR_RO(sshot_status);
1804
1805static ssize_t sshot_pe_ctrl_show(struct device *dev,
1806				  struct device_attribute *attr,
1807				  char *buf)
1808{
1809	unsigned long val;
1810	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1811	struct etmv4_config *config = &drvdata->config;
1812
1813	spin_lock(&drvdata->spinlock);
1814	val = config->ss_pe_cmp[config->ss_idx];
1815	spin_unlock(&drvdata->spinlock);
1816	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1817}
1818
1819static ssize_t sshot_pe_ctrl_store(struct device *dev,
1820				   struct device_attribute *attr,
1821				   const char *buf, size_t size)
1822{
1823	u8 idx;
1824	unsigned long val;
1825	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1826	struct etmv4_config *config = &drvdata->config;
1827
1828	if (kstrtoul(buf, 16, &val))
1829		return -EINVAL;
1830
1831	spin_lock(&drvdata->spinlock);
1832	idx = config->ss_idx;
1833	config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
1834	/* must clear bit 31 in related status register on programming */
1835	config->ss_status[idx] &= ~BIT(31);
1836	spin_unlock(&drvdata->spinlock);
1837	return size;
1838}
1839static DEVICE_ATTR_RW(sshot_pe_ctrl);
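
/*
 * Usage sketch for a single-shot comparator (values are illustrative
 * assumptions). Both store handlers above clear bit 31 of the matching
 * status register so the comparator is re-armed when reprogrammed:
 *
 *   echo 0 > sshot_idx
 *   echo 0x1 > sshot_ctrl        # TRCSSCCRn value for this comparator
 *   cat sshot_status             # bit 31 reads as set once it has fired
 */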
1840
1841static ssize_t ctxid_idx_show(struct device *dev,
1842			      struct device_attribute *attr,
1843			      char *buf)
1844{
1845	unsigned long val;
1846	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1847	struct etmv4_config *config = &drvdata->config;
1848
1849	val = config->ctxid_idx;
1850	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1851}
1852
1853static ssize_t ctxid_idx_store(struct device *dev,
1854			       struct device_attribute *attr,
1855			       const char *buf, size_t size)
1856{
1857	unsigned long val;
1858	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1859	struct etmv4_config *config = &drvdata->config;
1860
1861	if (kstrtoul(buf, 16, &val))
1862		return -EINVAL;
1863	if (val >= drvdata->numcidc)
1864		return -EINVAL;
1865
1866	/*
1867	 * Use spinlock to ensure index doesn't change while it gets
1868	 * dereferenced multiple times within a spinlock block elsewhere.
1869	 */
1870	spin_lock(&drvdata->spinlock);
1871	config->ctxid_idx = val;
1872	spin_unlock(&drvdata->spinlock);
1873	return size;
1874}
1875static DEVICE_ATTR_RW(ctxid_idx);
1876
1877static ssize_t ctxid_pid_show(struct device *dev,
1878			      struct device_attribute *attr,
1879			      char *buf)
1880{
1881	u8 idx;
1882	unsigned long val;
1883	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1884	struct etmv4_config *config = &drvdata->config;
1885
1886	/*
1887	 * Don't use contextID tracing if coming from a PID namespace.  See
1888	 * comment in ctxid_pid_store().
1889	 */
1890	if (task_active_pid_ns(current) != &init_pid_ns)
1891		return -EINVAL;
1892
1893	spin_lock(&drvdata->spinlock);
1894	idx = config->ctxid_idx;
1895	val = (unsigned long)config->ctxid_pid[idx];
1896	spin_unlock(&drvdata->spinlock);
1897	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1898}
1899
1900static ssize_t ctxid_pid_store(struct device *dev,
1901			       struct device_attribute *attr,
1902			       const char *buf, size_t size)
1903{
1904	u8 idx;
1905	unsigned long pid;
1906	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1907	struct etmv4_config *config = &drvdata->config;
1908
1909	/*
1910	 * When contextID tracing is enabled the tracers will insert the
1911	 * value found in the contextID register in the trace stream.  But if
1912	 * a process is in a namespace the PID of that process as seen from the
1913	 * namespace won't be what the kernel sees, something that makes the
1914	 * feature confusing and can potentially leak kernel only information.
1915	 * As such refuse to use the feature if @current is not in the initial
1916	 * PID namespace.
1917	 */
1918	if (task_active_pid_ns(current) != &init_pid_ns)
1919		return -EINVAL;
1920
1921	/*
1922	 * only implemented when ctxid tracing is enabled, i.e. at least one
 1923	 * ctxid comparator is implemented and the ctxid size is greater
 1924	 * than 0 bits
1925	 */
1926	if (!drvdata->ctxid_size || !drvdata->numcidc)
1927		return -EINVAL;
1928	if (kstrtoul(buf, 16, &pid))
1929		return -EINVAL;
1930
1931	spin_lock(&drvdata->spinlock);
1932	idx = config->ctxid_idx;
1933	config->ctxid_pid[idx] = (u64)pid;
1934	spin_unlock(&drvdata->spinlock);
1935	return size;
1936}
1937static DEVICE_ATTR_RW(ctxid_pid);
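
/*
 * Usage sketch (the PID is an illustrative assumption). The store is
 * rejected unless the caller runs in the initial PID namespace and at
 * least one Context ID comparator of non-zero size is implemented:
 *
 *   echo 0 > ctxid_idx
 *   echo 0x1b5e > ctxid_pid      # context ID (PID) to match, in hex
 */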
1938
1939static ssize_t ctxid_masks_show(struct device *dev,
1940				struct device_attribute *attr,
1941				char *buf)
1942{
1943	unsigned long val1, val2;
1944	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1945	struct etmv4_config *config = &drvdata->config;
1946
1947	/*
1948	 * Don't use contextID tracing if coming from a PID namespace.  See
1949	 * comment in ctxid_pid_store().
1950	 */
1951	if (task_active_pid_ns(current) != &init_pid_ns)
1952		return -EINVAL;
1953
1954	spin_lock(&drvdata->spinlock);
1955	val1 = config->ctxid_mask0;
1956	val2 = config->ctxid_mask1;
1957	spin_unlock(&drvdata->spinlock);
1958	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1959}
1960
1961static ssize_t ctxid_masks_store(struct device *dev,
1962				struct device_attribute *attr,
1963				const char *buf, size_t size)
1964{
1965	u8 i, j, maskbyte;
1966	unsigned long val1, val2, mask;
1967	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1968	struct etmv4_config *config = &drvdata->config;
1969	int nr_inputs;
1970
1971	/*
1972	 * Don't use contextID tracing if coming from a PID namespace.  See
1973	 * comment in ctxid_pid_store().
1974	 */
1975	if (task_active_pid_ns(current) != &init_pid_ns)
1976		return -EINVAL;
1977
1978	/*
1979	 * only implemented when ctxid tracing is enabled, i.e. at least one
 1980	 * ctxid comparator is implemented and the ctxid size is greater
 1981	 * than 0 bits
1982	 */
1983	if (!drvdata->ctxid_size || !drvdata->numcidc)
1984		return -EINVAL;
1985	/* one mask if <= 4 comparators, two for up to 8 */
1986	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
1987	if ((drvdata->numcidc > 4) && (nr_inputs != 2))
1988		return -EINVAL;
1989
1990	spin_lock(&drvdata->spinlock);
1991	/*
1992	 * each byte[0..3] controls mask value applied to ctxid
1993	 * comparator[0..3]
1994	 */
1995	switch (drvdata->numcidc) {
1996	case 0x1:
1997		/* COMP0, bits[7:0] */
1998		config->ctxid_mask0 = val1 & 0xFF;
1999		break;
2000	case 0x2:
2001		/* COMP1, bits[15:8] */
2002		config->ctxid_mask0 = val1 & 0xFFFF;
2003		break;
2004	case 0x3:
2005		/* COMP2, bits[23:16] */
2006		config->ctxid_mask0 = val1 & 0xFFFFFF;
2007		break;
2008	case 0x4:
2009		 /* COMP3, bits[31:24] */
2010		config->ctxid_mask0 = val1;
2011		break;
2012	case 0x5:
2013		/* COMP4, bits[7:0] */
2014		config->ctxid_mask0 = val1;
2015		config->ctxid_mask1 = val2 & 0xFF;
2016		break;
2017	case 0x6:
2018		/* COMP5, bits[15:8] */
2019		config->ctxid_mask0 = val1;
2020		config->ctxid_mask1 = val2 & 0xFFFF;
2021		break;
2022	case 0x7:
2023		/* COMP6, bits[23:16] */
2024		config->ctxid_mask0 = val1;
2025		config->ctxid_mask1 = val2 & 0xFFFFFF;
2026		break;
2027	case 0x8:
2028		/* COMP7, bits[31:24] */
2029		config->ctxid_mask0 = val1;
2030		config->ctxid_mask1 = val2;
2031		break;
2032	default:
2033		break;
2034	}
2035	/*
 2036	 * If software sets a mask bit to 1, it must program the relevant byte
 2037	 * of the ctxid comparator value to 0x0, otherwise behavior is unpredictable.
 2038	 * For example, if bit[3] of ctxid_mask0 (the mask byte for comparator 0)
 2039	 * is 1, we must clear bits[31:24] of the ctxid comparator 0 value register.
2040	 */
2041	mask = config->ctxid_mask0;
2042	for (i = 0; i < drvdata->numcidc; i++) {
2043		/* mask value of corresponding ctxid comparator */
2044		maskbyte = mask & ETMv4_EVENT_MASK;
2045		/*
2046		 * each bit corresponds to a byte of respective ctxid comparator
2047		 * value register
2048		 */
2049		for (j = 0; j < 8; j++) {
2050			if (maskbyte & 1)
2051				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
2052			maskbyte >>= 1;
2053		}
2054		/* Select the next ctxid comparator mask value */
2055		if (i == 3)
2056			/* ctxid comparators[4-7] */
2057			mask = config->ctxid_mask1;
2058		else
2059			mask >>= 0x8;
2060	}
2061
2062	spin_unlock(&drvdata->spinlock);
2063	return size;
2064}
2065static DEVICE_ATTR_RW(ctxid_masks);
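
/*
 * Usage sketch, assuming a single Context ID comparator (the mask value
 * is illustrative). Each set bit of a comparator's mask byte ignores one
 * byte of its value, and the store handler zeroes those bytes of
 * ctxid_pid[] accordingly:
 *
 *   echo 0xf0 > ctxid_masks      # ignore the upper four bytes of comparator 0
 */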
2066
2067static ssize_t vmid_idx_show(struct device *dev,
2068			     struct device_attribute *attr,
2069			     char *buf)
2070{
2071	unsigned long val;
2072	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2073	struct etmv4_config *config = &drvdata->config;
2074
2075	val = config->vmid_idx;
2076	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2077}
2078
2079static ssize_t vmid_idx_store(struct device *dev,
2080			      struct device_attribute *attr,
2081			      const char *buf, size_t size)
2082{
2083	unsigned long val;
2084	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2085	struct etmv4_config *config = &drvdata->config;
2086
2087	if (kstrtoul(buf, 16, &val))
2088		return -EINVAL;
2089	if (val >= drvdata->numvmidc)
2090		return -EINVAL;
2091
2092	/*
2093	 * Use spinlock to ensure index doesn't change while it gets
2094	 * dereferenced multiple times within a spinlock block elsewhere.
2095	 */
2096	spin_lock(&drvdata->spinlock);
2097	config->vmid_idx = val;
2098	spin_unlock(&drvdata->spinlock);
2099	return size;
2100}
2101static DEVICE_ATTR_RW(vmid_idx);
2102
2103static ssize_t vmid_val_show(struct device *dev,
2104			     struct device_attribute *attr,
2105			     char *buf)
2106{
2107	unsigned long val;
2108	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2109	struct etmv4_config *config = &drvdata->config;
2110
2111	val = (unsigned long)config->vmid_val[config->vmid_idx];
2112	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2113}
2114
2115static ssize_t vmid_val_store(struct device *dev,
2116			      struct device_attribute *attr,
2117			      const char *buf, size_t size)
2118{
2119	unsigned long val;
2120	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2121	struct etmv4_config *config = &drvdata->config;
2122
2123	/*
2124	 * only implemented when vmid tracing is enabled, i.e. at least one
 2125	 * vmid comparator is implemented and the vmid size is at least 8 bits
2126	 */
2127	if (!drvdata->vmid_size || !drvdata->numvmidc)
2128		return -EINVAL;
2129	if (kstrtoul(buf, 16, &val))
2130		return -EINVAL;
2131
2132	spin_lock(&drvdata->spinlock);
2133	config->vmid_val[config->vmid_idx] = (u64)val;
2134	spin_unlock(&drvdata->spinlock);
2135	return size;
2136}
2137static DEVICE_ATTR_RW(vmid_val);
2138
2139static ssize_t vmid_masks_show(struct device *dev,
2140			       struct device_attribute *attr, char *buf)
2141{
2142	unsigned long val1, val2;
2143	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2144	struct etmv4_config *config = &drvdata->config;
2145
2146	spin_lock(&drvdata->spinlock);
2147	val1 = config->vmid_mask0;
2148	val2 = config->vmid_mask1;
2149	spin_unlock(&drvdata->spinlock);
2150	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2151}
2152
2153static ssize_t vmid_masks_store(struct device *dev,
2154				struct device_attribute *attr,
2155				const char *buf, size_t size)
2156{
2157	u8 i, j, maskbyte;
2158	unsigned long val1, val2, mask;
2159	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2160	struct etmv4_config *config = &drvdata->config;
2161	int nr_inputs;
2162
2163	/*
2164	 * only implemented when vmid tracing is enabled, i.e. at least one
 2165	 * vmid comparator is implemented and the vmid size is at least 8 bits
2166	 */
2167	if (!drvdata->vmid_size || !drvdata->numvmidc)
2168		return -EINVAL;
2169	/* one mask if <= 4 comparators, two for up to 8 */
2170	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2171	if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
2172		return -EINVAL;
2173
2174	spin_lock(&drvdata->spinlock);
2175
2176	/*
2177	 * each byte[0..3] controls mask value applied to vmid
2178	 * comparator[0..3]
2179	 */
2180	switch (drvdata->numvmidc) {
2181	case 0x1:
2182		/* COMP0, bits[7:0] */
2183		config->vmid_mask0 = val1 & 0xFF;
2184		break;
2185	case 0x2:
2186		/* COMP1, bits[15:8] */
2187		config->vmid_mask0 = val1 & 0xFFFF;
2188		break;
2189	case 0x3:
2190		/* COMP2, bits[23:16] */
2191		config->vmid_mask0 = val1 & 0xFFFFFF;
2192		break;
2193	case 0x4:
2194		/* COMP3, bits[31:24] */
2195		config->vmid_mask0 = val1;
2196		break;
2197	case 0x5:
2198		/* COMP4, bits[7:0] */
2199		config->vmid_mask0 = val1;
2200		config->vmid_mask1 = val2 & 0xFF;
2201		break;
2202	case 0x6:
2203		/* COMP5, bits[15:8] */
2204		config->vmid_mask0 = val1;
2205		config->vmid_mask1 = val2 & 0xFFFF;
2206		break;
2207	case 0x7:
2208		/* COMP6, bits[23:16] */
2209		config->vmid_mask0 = val1;
2210		config->vmid_mask1 = val2 & 0xFFFFFF;
2211		break;
2212	case 0x8:
2213		/* COMP7, bits[31:24] */
2214		config->vmid_mask0 = val1;
2215		config->vmid_mask1 = val2;
2216		break;
2217	default:
2218		break;
2219	}
2220
2221	/*
 2222	 * If software sets a mask bit to 1, it must program the relevant byte
 2223	 * of the vmid comparator value to 0x0, otherwise behavior is unpredictable.
 2224	 * For example, if bit[3] of vmid_mask0 (the mask byte for comparator 0)
 2225	 * is 1, we must clear bits[31:24] of the vmid comparator 0 value register.
2226	 */
2227	mask = config->vmid_mask0;
2228	for (i = 0; i < drvdata->numvmidc; i++) {
2229		/* mask value of corresponding vmid comparator */
2230		maskbyte = mask & ETMv4_EVENT_MASK;
2231		/*
2232		 * each bit corresponds to a byte of respective vmid comparator
2233		 * value register
2234		 */
2235		for (j = 0; j < 8; j++) {
2236			if (maskbyte & 1)
2237				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
2238			maskbyte >>= 1;
2239		}
2240		/* Select the next vmid comparator mask value */
2241		if (i == 3)
2242			/* vmid comparators[4-7] */
2243			mask = config->vmid_mask1;
2244		else
2245			mask >>= 0x8;
2246	}
2247	spin_unlock(&drvdata->spinlock);
2248	return size;
2249}
2250static DEVICE_ATTR_RW(vmid_masks);
2251
2252static ssize_t cpu_show(struct device *dev,
2253			struct device_attribute *attr, char *buf)
2254{
2255	int val;
2256	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2257
2258	val = drvdata->cpu;
2259	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2260
2261}
2262static DEVICE_ATTR_RO(cpu);
2263
2264static struct attribute *coresight_etmv4_attrs[] = {
2265	&dev_attr_nr_pe_cmp.attr,
2266	&dev_attr_nr_addr_cmp.attr,
2267	&dev_attr_nr_cntr.attr,
2268	&dev_attr_nr_ext_inp.attr,
2269	&dev_attr_numcidc.attr,
2270	&dev_attr_numvmidc.attr,
2271	&dev_attr_nrseqstate.attr,
2272	&dev_attr_nr_resource.attr,
2273	&dev_attr_nr_ss_cmp.attr,
2274	&dev_attr_reset.attr,
2275	&dev_attr_mode.attr,
2276	&dev_attr_pe.attr,
2277	&dev_attr_event.attr,
2278	&dev_attr_event_instren.attr,
2279	&dev_attr_event_ts.attr,
2280	&dev_attr_syncfreq.attr,
2281	&dev_attr_cyc_threshold.attr,
2282	&dev_attr_bb_ctrl.attr,
2283	&dev_attr_event_vinst.attr,
2284	&dev_attr_s_exlevel_vinst.attr,
2285	&dev_attr_ns_exlevel_vinst.attr,
2286	&dev_attr_addr_idx.attr,
2287	&dev_attr_addr_instdatatype.attr,
2288	&dev_attr_addr_single.attr,
2289	&dev_attr_addr_range.attr,
2290	&dev_attr_addr_start.attr,
2291	&dev_attr_addr_stop.attr,
2292	&dev_attr_addr_ctxtype.attr,
2293	&dev_attr_addr_context.attr,
2294	&dev_attr_addr_exlevel_s_ns.attr,
2295	&dev_attr_addr_cmp_view.attr,
2296	&dev_attr_vinst_pe_cmp_start_stop.attr,
2297	&dev_attr_sshot_idx.attr,
2298	&dev_attr_sshot_ctrl.attr,
2299	&dev_attr_sshot_pe_ctrl.attr,
2300	&dev_attr_sshot_status.attr,
2301	&dev_attr_seq_idx.attr,
2302	&dev_attr_seq_state.attr,
2303	&dev_attr_seq_event.attr,
2304	&dev_attr_seq_reset_event.attr,
2305	&dev_attr_cntr_idx.attr,
2306	&dev_attr_cntrldvr.attr,
2307	&dev_attr_cntr_val.attr,
2308	&dev_attr_cntr_ctrl.attr,
2309	&dev_attr_res_idx.attr,
2310	&dev_attr_res_ctrl.attr,
2311	&dev_attr_ctxid_idx.attr,
2312	&dev_attr_ctxid_pid.attr,
2313	&dev_attr_ctxid_masks.attr,
2314	&dev_attr_vmid_idx.attr,
2315	&dev_attr_vmid_val.attr,
2316	&dev_attr_vmid_masks.attr,
2317	&dev_attr_cpu.attr,
2318	NULL,
2319};
2320
2321struct etmv4_reg {
2322	struct coresight_device *csdev;
2323	u32 offset;
2324	u32 data;
2325};
2326
2327static void do_smp_cross_read(void *data)
2328{
2329	struct etmv4_reg *reg = data;
2330
2331	reg->data = etm4x_relaxed_read32(&reg->csdev->access, reg->offset);
2332}
2333
2334static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
2335{
2336	struct etmv4_reg reg;
2337
2338	reg.offset = offset;
2339	reg.csdev = drvdata->csdev;
2340
2341	/*
2342	 * smp cross call ensures the CPU will be powered up before
2343	 * accessing the ETMv4 trace core registers
2344	 */
2345	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
2346	return reg.data;
2347}
2348
2349static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
2350{
2351	struct dev_ext_attribute *eattr;
2352
2353	eattr = container_of(attr, struct dev_ext_attribute, attr);
2354	return (u32)(unsigned long)eattr->var;
2355}
2356
2357static ssize_t coresight_etm4x_reg_show(struct device *dev,
2358					struct device_attribute *d_attr,
2359					char *buf)
2360{
2361	u32 val, offset;
2362	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2363
2364	offset = coresight_etm4x_attr_to_offset(d_attr);
2365
2366	pm_runtime_get_sync(dev->parent);
2367	val = etmv4_cross_read(drvdata, offset);
2368	pm_runtime_put_sync(dev->parent);
2369
2370	return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
2371}
2372
2373static inline bool
2374etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
2375{
2376	switch (offset) {
2377	ETM_COMMON_SYSREG_LIST_CASES
2378		/*
2379		 * Common registers to ETE & ETM4x accessible via system
2380		 * instructions are always implemented.
2381		 */
2382		return true;
2383
2384	ETM4x_ONLY_SYSREG_LIST_CASES
2385		/*
2386		 * We only support etm4x and ete. So if the device is not
2387		 * ETE, it must be ETMv4x.
2388		 */
2389		return !etm4x_is_ete(drvdata);
2390
2391	ETM4x_MMAP_LIST_CASES
2392		/*
2393		 * Registers accessible only via memory-mapped registers
2394		 * must not be accessed via system instructions.
2395		 * We cannot access the drvdata->csdev here, as this
2396		 * function is called during the device creation, via
2397		 * coresight_register() and the csdev is not initialized
2398		 * until that is done. So rely on the drvdata->base to
2399		 * detect if we have a memory mapped access.
2400		 * Also ETE doesn't implement memory mapped access, thus
2401		 * it is sufficient to check that we are using mmio.
2402		 */
2403		return !!drvdata->base;
2404
2405	ETE_ONLY_SYSREG_LIST_CASES
2406		return etm4x_is_ete(drvdata);
2407	}
2408
2409	return false;
2410}
2411
2412/*
2413 * Hide the ETM4x registers that may not be available on the
2414 * hardware.
2415 * There are certain management registers unavailable via system
2416 * instructions. Make those sysfs attributes hidden on such
2417 * systems.
2418 */
2419static umode_t
2420coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
2421				     struct attribute *attr, int unused)
2422{
2423	struct device *dev = kobj_to_dev(kobj);
2424	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2425	struct device_attribute *d_attr;
2426	u32 offset;
2427
2428	d_attr = container_of(attr, struct device_attribute, attr);
2429	offset = coresight_etm4x_attr_to_offset(d_attr);
2430
2431	if (etm4x_register_implemented(drvdata, offset))
2432		return attr->mode;
2433	return 0;
2434}
2435
2436#define coresight_etm4x_reg(name, offset)				\
2437	&((struct dev_ext_attribute[]) {				\
2438	   {								\
2439		__ATTR(name, 0444, coresight_etm4x_reg_show, NULL),	\
2440		(void *)(unsigned long)offset				\
2441	   }								\
2442	})[0].attr.attr
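
/*
 * The macro above uses a compound-literal array so that every entry in
 * the tables below gets its own anonymous dev_ext_attribute, with the
 * register offset carried in ->var. For instance,
 * coresight_etm4x_reg(trcpdcr, TRCPDCR) produces a read-only "trcpdcr"
 * attribute whose show routine, coresight_etm4x_reg_show(), recovers the
 * TRCPDCR offset via coresight_etm4x_attr_to_offset().
 */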
2443
2444static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2445	coresight_etm4x_reg(trcpdcr, TRCPDCR),
2446	coresight_etm4x_reg(trcpdsr, TRCPDSR),
2447	coresight_etm4x_reg(trclsr, TRCLSR),
2448	coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
2449	coresight_etm4x_reg(trcdevid, TRCDEVID),
2450	coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
2451	coresight_etm4x_reg(trcpidr0, TRCPIDR0),
2452	coresight_etm4x_reg(trcpidr1, TRCPIDR1),
2453	coresight_etm4x_reg(trcpidr2, TRCPIDR2),
2454	coresight_etm4x_reg(trcpidr3, TRCPIDR3),
2455	coresight_etm4x_reg(trcoslsr, TRCOSLSR),
2456	coresight_etm4x_reg(trcconfig, TRCCONFIGR),
2457	coresight_etm4x_reg(trctraceid, TRCTRACEIDR),
2458	coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
2459	NULL,
2460};
2461
2462static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2463	coresight_etm4x_reg(trcidr0, TRCIDR0),
2464	coresight_etm4x_reg(trcidr1, TRCIDR1),
2465	coresight_etm4x_reg(trcidr2, TRCIDR2),
2466	coresight_etm4x_reg(trcidr3, TRCIDR3),
2467	coresight_etm4x_reg(trcidr4, TRCIDR4),
2468	coresight_etm4x_reg(trcidr5, TRCIDR5),
2469	/* trcidr[6,7] are reserved */
2470	coresight_etm4x_reg(trcidr8, TRCIDR8),
2471	coresight_etm4x_reg(trcidr9, TRCIDR9),
2472	coresight_etm4x_reg(trcidr10, TRCIDR10),
2473	coresight_etm4x_reg(trcidr11, TRCIDR11),
2474	coresight_etm4x_reg(trcidr12, TRCIDR12),
2475	coresight_etm4x_reg(trcidr13, TRCIDR13),
2476	NULL,
2477};
2478
2479static const struct attribute_group coresight_etmv4_group = {
2480	.attrs = coresight_etmv4_attrs,
2481};
2482
2483static const struct attribute_group coresight_etmv4_mgmt_group = {
2484	.is_visible = coresight_etm4x_attr_reg_implemented,
2485	.attrs = coresight_etmv4_mgmt_attrs,
2486	.name = "mgmt",
2487};
2488
2489static const struct attribute_group coresight_etmv4_trcidr_group = {
2490	.attrs = coresight_etmv4_trcidr_attrs,
2491	.name = "trcidr",
2492};
2493
2494const struct attribute_group *coresight_etmv4_groups[] = {
2495	&coresight_etmv4_group,
2496	&coresight_etmv4_mgmt_group,
2497	&coresight_etmv4_trcidr_group,
2498	NULL,
2499};