   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
   4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
   5 */
   6
   7#include <linux/pid_namespace.h>
   8#include <linux/pm_runtime.h>
   9#include <linux/sysfs.h>
  10#include "coresight-etm4x.h"
  11#include "coresight-priv.h"
  12#include "coresight-syscfg.h"
  13
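/*
 * Usage sketch (illustrative only, not part of the upstream file): the
 * attributes defined below appear under /sys/bus/coresight/devices/ for
 * each ETMv4 trace unit.  The device name ("etm0" here) and the address
 * values are assumptions for the example:
 *
 *   echo 1 > /sys/bus/coresight/devices/etm0/reset        # back to defaults
 *   echo 0x0 > /sys/bus/coresight/devices/etm0/addr_idx   # select comparator pair 0
 *   echo 0x408000 0x409000 > /sys/bus/coresight/devices/etm0/addr_range
 */
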
  14static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
  15{
  16	u8 idx;
  17	struct etmv4_config *config = &drvdata->config;
  18
  19	idx = config->addr_idx;
  20
  21	/*
  22	 * TRCACATRn.TYPE bit[1:0]: type of comparison
  23	 * the trace unit performs
  24	 */
  25	if (FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]) == TRCACATRn_TYPE_ADDR) {
  26		if (idx % 2 != 0)
  27			return -EINVAL;
  28
  29		/*
  30		 * We are performing instruction address comparison. Set the
  31		 * relevant bit of ViewInst Include/Exclude Control register
  32		 * for corresponding address comparator pair.
  33		 */
  34		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
  35		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
  36			return -EINVAL;
  37
  38		if (exclude == true) {
  39			/*
  40			 * Set exclude bit and unset the include bit
  41			 * corresponding to comparator pair
  42			 */
  43			config->viiectlr |= BIT(idx / 2 + 16);
  44			config->viiectlr &= ~BIT(idx / 2);
  45		} else {
  46			/*
  47			 * Set include bit and unset exclude bit
  48			 * corresponding to comparator pair
  49			 */
  50			config->viiectlr |= BIT(idx / 2);
  51			config->viiectlr &= ~BIT(idx / 2 + 16);
  52		}
  53	}
  54	return 0;
  55}
  56
  57static ssize_t nr_pe_cmp_show(struct device *dev,
  58			      struct device_attribute *attr,
  59			      char *buf)
  60{
  61	unsigned long val;
  62	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  63
  64	val = drvdata->nr_pe_cmp;
  65	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  66}
  67static DEVICE_ATTR_RO(nr_pe_cmp);
  68
  69static ssize_t nr_addr_cmp_show(struct device *dev,
  70				struct device_attribute *attr,
  71				char *buf)
  72{
  73	unsigned long val;
  74	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  75
  76	val = drvdata->nr_addr_cmp;
  77	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  78}
  79static DEVICE_ATTR_RO(nr_addr_cmp);
  80
  81static ssize_t nr_cntr_show(struct device *dev,
  82			    struct device_attribute *attr,
  83			    char *buf)
  84{
  85	unsigned long val;
  86	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  87
  88	val = drvdata->nr_cntr;
  89	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  90}
  91static DEVICE_ATTR_RO(nr_cntr);
  92
  93static ssize_t nr_ext_inp_show(struct device *dev,
  94			       struct device_attribute *attr,
  95			       char *buf)
  96{
  97	unsigned long val;
  98	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  99
 100	val = drvdata->nr_ext_inp;
 101	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 102}
 103static DEVICE_ATTR_RO(nr_ext_inp);
 104
 105static ssize_t numcidc_show(struct device *dev,
 106			    struct device_attribute *attr,
 107			    char *buf)
 108{
 109	unsigned long val;
 110	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 111
 112	val = drvdata->numcidc;
 113	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 114}
 115static DEVICE_ATTR_RO(numcidc);
 116
 117static ssize_t numvmidc_show(struct device *dev,
 118			     struct device_attribute *attr,
 119			     char *buf)
 120{
 121	unsigned long val;
 122	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 123
 124	val = drvdata->numvmidc;
 125	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 126}
 127static DEVICE_ATTR_RO(numvmidc);
 128
 129static ssize_t nrseqstate_show(struct device *dev,
 130			       struct device_attribute *attr,
 131			       char *buf)
 132{
 133	unsigned long val;
 134	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 135
 136	val = drvdata->nrseqstate;
 137	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 138}
 139static DEVICE_ATTR_RO(nrseqstate);
 140
 141static ssize_t nr_resource_show(struct device *dev,
 142				struct device_attribute *attr,
 143				char *buf)
 144{
 145	unsigned long val;
 146	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 147
 148	val = drvdata->nr_resource;
 149	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 150}
 151static DEVICE_ATTR_RO(nr_resource);
 152
 153static ssize_t nr_ss_cmp_show(struct device *dev,
 154			      struct device_attribute *attr,
 155			      char *buf)
 156{
 157	unsigned long val;
 158	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 159
 160	val = drvdata->nr_ss_cmp;
 161	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 162}
 163static DEVICE_ATTR_RO(nr_ss_cmp);
 164
 165static ssize_t reset_store(struct device *dev,
 166			   struct device_attribute *attr,
 167			   const char *buf, size_t size)
 168{
 169	int i;
 170	unsigned long val;
 171	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 172	struct etmv4_config *config = &drvdata->config;
 173
 174	if (kstrtoul(buf, 16, &val))
 175		return -EINVAL;
 176
 177	spin_lock(&drvdata->spinlock);
 178	if (val)
 179		config->mode = 0x0;
 180
 181	/* Disable data tracing: do not trace load and store data transfers */
 182	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
 183	config->cfg &= ~(TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE);
 184
 185	/* Disable data value and data address tracing */
 186	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
 187			   ETM_MODE_DATA_TRACE_VAL);
 188	config->cfg &= ~(TRCCONFIGR_DA | TRCCONFIGR_DV);
 189
 190	/* Disable all events tracing */
 191	config->eventctrl0 = 0x0;
 192	config->eventctrl1 = 0x0;
 193
 194	/* Disable timestamp event */
 195	config->ts_ctrl = 0x0;
 196
 197	/* Disable stalling */
 198	config->stall_ctrl = 0x0;
 199
  200	/* Reset trace synchronization period to 2^8 = 256 bytes */
 201	if (drvdata->syncpr == false)
 202		config->syncfreq = 0x8;
 203
 204	/*
 205	 * Enable ViewInst to trace everything with start-stop logic in
 206	 * started state. ARM recommends start-stop logic is set before
 207	 * each trace run.
 208	 */
 209	config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
 210	if (drvdata->nr_addr_cmp > 0) {
 211		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
 212		/* SSSTATUS, bit[9] */
 213		config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
 214	}
 215
 216	/* No address range filtering for ViewInst */
 217	config->viiectlr = 0x0;
 218
 219	/* No start-stop filtering for ViewInst */
 220	config->vissctlr = 0x0;
 221	config->vipcssctlr = 0x0;
 222
 223	/* Disable seq events */
 224	for (i = 0; i < drvdata->nrseqstate-1; i++)
 225		config->seq_ctrl[i] = 0x0;
 226	config->seq_rst = 0x0;
 227	config->seq_state = 0x0;
 228
 229	/* Disable external input events */
 230	config->ext_inp = 0x0;
 231
 232	config->cntr_idx = 0x0;
 233	for (i = 0; i < drvdata->nr_cntr; i++) {
 234		config->cntrldvr[i] = 0x0;
 235		config->cntr_ctrl[i] = 0x0;
 236		config->cntr_val[i] = 0x0;
 237	}
 238
 239	config->res_idx = 0x0;
 240	for (i = 2; i < 2 * drvdata->nr_resource; i++)
 241		config->res_ctrl[i] = 0x0;
 242
 243	config->ss_idx = 0x0;
 244	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
 245		config->ss_ctrl[i] = 0x0;
 246		config->ss_pe_cmp[i] = 0x0;
 247	}
 248
 249	config->addr_idx = 0x0;
 250	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
 251		config->addr_val[i] = 0x0;
 252		config->addr_acc[i] = 0x0;
 253		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
 254	}
 255
 256	config->ctxid_idx = 0x0;
 257	for (i = 0; i < drvdata->numcidc; i++)
 258		config->ctxid_pid[i] = 0x0;
 259
 260	config->ctxid_mask0 = 0x0;
 261	config->ctxid_mask1 = 0x0;
 262
 263	config->vmid_idx = 0x0;
 264	for (i = 0; i < drvdata->numvmidc; i++)
 265		config->vmid_val[i] = 0x0;
 266	config->vmid_mask0 = 0x0;
 267	config->vmid_mask1 = 0x0;
 268
 269	drvdata->trcid = drvdata->cpu + 1;
 270
 271	spin_unlock(&drvdata->spinlock);
 272
 273	cscfg_csdev_reset_feats(to_coresight_device(dev));
 274
 275	return size;
 276}
 277static DEVICE_ATTR_WO(reset);
 278
 279static ssize_t mode_show(struct device *dev,
 280			 struct device_attribute *attr,
 281			 char *buf)
 282{
 283	unsigned long val;
 284	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 285	struct etmv4_config *config = &drvdata->config;
 286
 287	val = config->mode;
 288	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 289}
 290
 291static ssize_t mode_store(struct device *dev,
 292			  struct device_attribute *attr,
 293			  const char *buf, size_t size)
 294{
 295	unsigned long val, mode;
 296	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 297	struct etmv4_config *config = &drvdata->config;
 298
 299	if (kstrtoul(buf, 16, &val))
 300		return -EINVAL;
 301
 302	spin_lock(&drvdata->spinlock);
 303	config->mode = val & ETMv4_MODE_ALL;
 304
 305	if (drvdata->instrp0 == true) {
 306		/* start by clearing instruction P0 field */
 307		config->cfg  &= ~TRCCONFIGR_INSTP0_LOAD_STORE;
 308		if (config->mode & ETM_MODE_LOAD)
 309			/* 0b01 Trace load instructions as P0 instructions */
 310			config->cfg  |= TRCCONFIGR_INSTP0_LOAD;
 311		if (config->mode & ETM_MODE_STORE)
 312			/* 0b10 Trace store instructions as P0 instructions */
 313			config->cfg  |= TRCCONFIGR_INSTP0_STORE;
 314		if (config->mode & ETM_MODE_LOAD_STORE)
 315			/*
 316			 * 0b11 Trace load and store instructions
 317			 * as P0 instructions
 318			 */
 319			config->cfg  |= TRCCONFIGR_INSTP0_LOAD_STORE;
 320	}
 321
 322	/* bit[3], Branch broadcast mode */
 323	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
 324		config->cfg |= TRCCONFIGR_BB;
 325	else
 326		config->cfg &= ~TRCCONFIGR_BB;
 327
 328	/* bit[4], Cycle counting instruction trace bit */
 329	if ((config->mode & ETMv4_MODE_CYCACC) &&
 330		(drvdata->trccci == true))
 331		config->cfg |= TRCCONFIGR_CCI;
 332	else
 333		config->cfg &= ~TRCCONFIGR_CCI;
 334
 335	/* bit[6], Context ID tracing bit */
 336	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
 337		config->cfg |= TRCCONFIGR_CID;
 338	else
 339		config->cfg &= ~TRCCONFIGR_CID;
 340
 341	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
 342		config->cfg |= TRCCONFIGR_VMID;
 343	else
 344		config->cfg &= ~TRCCONFIGR_VMID;
 345
 346	/* bits[10:8], Conditional instruction tracing bit */
 347	mode = ETM_MODE_COND(config->mode);
 348	if (drvdata->trccond == true) {
 349		config->cfg &= ~TRCCONFIGR_COND_MASK;
 350		config->cfg |= mode << __bf_shf(TRCCONFIGR_COND_MASK);
 351	}
 352
 353	/* bit[11], Global timestamp tracing bit */
 354	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
 355		config->cfg |= TRCCONFIGR_TS;
 356	else
 357		config->cfg &= ~TRCCONFIGR_TS;
 358
 359	/* bit[12], Return stack enable bit */
 360	if ((config->mode & ETM_MODE_RETURNSTACK) &&
 361					(drvdata->retstack == true))
 362		config->cfg |= TRCCONFIGR_RS;
 363	else
 364		config->cfg &= ~TRCCONFIGR_RS;
 365
 366	/* bits[14:13], Q element enable field */
 367	mode = ETM_MODE_QELEM(config->mode);
 368	/* start by clearing QE bits */
 369	config->cfg &= ~(TRCCONFIGR_QE_W_COUNTS | TRCCONFIGR_QE_WO_COUNTS);
 370	/*
 371	 * if supported, Q elements with instruction counts are enabled.
 372	 * Always set the low bit for any requested mode. Valid combos are
 373	 * 0b00, 0b01 and 0b11.
 374	 */
 375	if (mode && drvdata->q_support)
 376		config->cfg |= TRCCONFIGR_QE_W_COUNTS;
 377	/*
 378	 * if supported, Q elements with and without instruction
 379	 * counts are enabled
 380	 */
 381	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
 382		config->cfg |= TRCCONFIGR_QE_WO_COUNTS;
 383
 384	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
 385	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
 386	    (drvdata->atbtrig == true))
 387		config->eventctrl1 |= TRCEVENTCTL1R_ATB;
 388	else
 389		config->eventctrl1 &= ~TRCEVENTCTL1R_ATB;
 390
 391	/* bit[12], Low-power state behavior override bit */
 392	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
 393	    (drvdata->lpoverride == true))
 394		config->eventctrl1 |= TRCEVENTCTL1R_LPOVERRIDE;
 395	else
 396		config->eventctrl1 &= ~TRCEVENTCTL1R_LPOVERRIDE;
 397
 398	/* bit[8], Instruction stall bit */
 399	if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
 400		config->stall_ctrl |= TRCSTALLCTLR_ISTALL;
 401	else
 402		config->stall_ctrl &= ~TRCSTALLCTLR_ISTALL;
 403
 404	/* bit[10], Prioritize instruction trace bit */
 405	if (config->mode & ETM_MODE_INSTPRIO)
 406		config->stall_ctrl |= TRCSTALLCTLR_INSTPRIORITY;
 407	else
 408		config->stall_ctrl &= ~TRCSTALLCTLR_INSTPRIORITY;
 409
 410	/* bit[13], Trace overflow prevention bit */
 411	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
 412		(drvdata->nooverflow == true))
 413		config->stall_ctrl |= TRCSTALLCTLR_NOOVERFLOW;
 414	else
 415		config->stall_ctrl &= ~TRCSTALLCTLR_NOOVERFLOW;
 416
 417	/* bit[9] Start/stop logic control bit */
 418	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
 419		config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
 420	else
 421		config->vinst_ctrl &= ~TRCVICTLR_SSSTATUS;
 422
 423	/* bit[10], Whether a trace unit must trace a Reset exception */
 424	if (config->mode & ETM_MODE_TRACE_RESET)
 425		config->vinst_ctrl |= TRCVICTLR_TRCRESET;
 426	else
 427		config->vinst_ctrl &= ~TRCVICTLR_TRCRESET;
 428
 429	/* bit[11], Whether a trace unit must trace a system error exception */
 430	if ((config->mode & ETM_MODE_TRACE_ERR) &&
 431		(drvdata->trc_error == true))
 432		config->vinst_ctrl |= TRCVICTLR_TRCERR;
 433	else
 434		config->vinst_ctrl &= ~TRCVICTLR_TRCERR;
 435
 436	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
 437		etm4_config_trace_mode(config);
 438
 439	spin_unlock(&drvdata->spinlock);
 440
 441	return size;
 442}
 443static DEVICE_ATTR_RW(mode);
 444
 445static ssize_t pe_show(struct device *dev,
 446		       struct device_attribute *attr,
 447		       char *buf)
 448{
 449	unsigned long val;
 450	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 451	struct etmv4_config *config = &drvdata->config;
 452
 453	val = config->pe_sel;
 454	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 455}
 456
 457static ssize_t pe_store(struct device *dev,
 458			struct device_attribute *attr,
 459			const char *buf, size_t size)
 460{
 461	unsigned long val;
 462	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 463	struct etmv4_config *config = &drvdata->config;
 464
 465	if (kstrtoul(buf, 16, &val))
 466		return -EINVAL;
 467
 468	spin_lock(&drvdata->spinlock);
 469	if (val > drvdata->nr_pe) {
 470		spin_unlock(&drvdata->spinlock);
 471		return -EINVAL;
 472	}
 473
 474	config->pe_sel = val;
 475	spin_unlock(&drvdata->spinlock);
 476	return size;
 477}
 478static DEVICE_ATTR_RW(pe);
 479
 480static ssize_t event_show(struct device *dev,
 481			  struct device_attribute *attr,
 482			  char *buf)
 483{
 484	unsigned long val;
 485	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 486	struct etmv4_config *config = &drvdata->config;
 487
 488	val = config->eventctrl0;
 489	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 490}
 491
 492static ssize_t event_store(struct device *dev,
 493			   struct device_attribute *attr,
 494			   const char *buf, size_t size)
 495{
 496	unsigned long val;
 497	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 498	struct etmv4_config *config = &drvdata->config;
 499
 500	if (kstrtoul(buf, 16, &val))
 501		return -EINVAL;
 502
 503	spin_lock(&drvdata->spinlock);
 504	switch (drvdata->nr_event) {
 505	case 0x0:
 506		/* EVENT0, bits[7:0] */
 507		config->eventctrl0 = val & 0xFF;
 508		break;
 509	case 0x1:
 510		 /* EVENT1, bits[15:8] */
 511		config->eventctrl0 = val & 0xFFFF;
 512		break;
 513	case 0x2:
 514		/* EVENT2, bits[23:16] */
 515		config->eventctrl0 = val & 0xFFFFFF;
 516		break;
 517	case 0x3:
 518		/* EVENT3, bits[31:24] */
 519		config->eventctrl0 = val;
 520		break;
 521	default:
 522		break;
 523	}
 524	spin_unlock(&drvdata->spinlock);
 525	return size;
 526}
 527static DEVICE_ATTR_RW(event);
 528
 529static ssize_t event_instren_show(struct device *dev,
 530				  struct device_attribute *attr,
 531				  char *buf)
 532{
 533	unsigned long val;
 534	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 535	struct etmv4_config *config = &drvdata->config;
 536
 537	val = FIELD_GET(TRCEVENTCTL1R_INSTEN_MASK, config->eventctrl1);
 538	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 539}
 540
 541static ssize_t event_instren_store(struct device *dev,
 542				   struct device_attribute *attr,
 543				   const char *buf, size_t size)
 544{
 545	unsigned long val;
 546	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 547	struct etmv4_config *config = &drvdata->config;
 548
 549	if (kstrtoul(buf, 16, &val))
 550		return -EINVAL;
 551
 552	spin_lock(&drvdata->spinlock);
 553	/* start by clearing all instruction event enable bits */
 554	config->eventctrl1 &= ~TRCEVENTCTL1R_INSTEN_MASK;
 555	switch (drvdata->nr_event) {
 556	case 0x0:
 557		/* generate Event element for event 1 */
 558		config->eventctrl1 |= val & TRCEVENTCTL1R_INSTEN_1;
 559		break;
 560	case 0x1:
 561		/* generate Event element for event 1 and 2 */
 562		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 | TRCEVENTCTL1R_INSTEN_1);
 563		break;
 564	case 0x2:
 565		/* generate Event element for event 1, 2 and 3 */
 566		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
 567					     TRCEVENTCTL1R_INSTEN_1 |
 568					     TRCEVENTCTL1R_INSTEN_2);
 569		break;
 570	case 0x3:
 571		/* generate Event element for all 4 events */
 572		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
 573					     TRCEVENTCTL1R_INSTEN_1 |
 574					     TRCEVENTCTL1R_INSTEN_2 |
 575					     TRCEVENTCTL1R_INSTEN_3);
 576		break;
 577	default:
 578		break;
 579	}
 580	spin_unlock(&drvdata->spinlock);
 581	return size;
 582}
 583static DEVICE_ATTR_RW(event_instren);
 584
 585static ssize_t event_ts_show(struct device *dev,
 586			     struct device_attribute *attr,
 587			     char *buf)
 588{
 589	unsigned long val;
 590	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 591	struct etmv4_config *config = &drvdata->config;
 592
 593	val = config->ts_ctrl;
 594	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 595}
 596
 597static ssize_t event_ts_store(struct device *dev,
 598			      struct device_attribute *attr,
 599			      const char *buf, size_t size)
 600{
 601	unsigned long val;
 602	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 603	struct etmv4_config *config = &drvdata->config;
 604
 605	if (kstrtoul(buf, 16, &val))
 606		return -EINVAL;
 607	if (!drvdata->ts_size)
 608		return -EINVAL;
 609
 610	config->ts_ctrl = val & ETMv4_EVENT_MASK;
 611	return size;
 612}
 613static DEVICE_ATTR_RW(event_ts);
 614
 615static ssize_t syncfreq_show(struct device *dev,
 616			     struct device_attribute *attr,
 617			     char *buf)
 618{
 619	unsigned long val;
 620	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 621	struct etmv4_config *config = &drvdata->config;
 622
 623	val = config->syncfreq;
 624	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 625}
 626
 627static ssize_t syncfreq_store(struct device *dev,
 628			      struct device_attribute *attr,
 629			      const char *buf, size_t size)
 630{
 631	unsigned long val;
 632	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 633	struct etmv4_config *config = &drvdata->config;
 634
 635	if (kstrtoul(buf, 16, &val))
 636		return -EINVAL;
 637	if (drvdata->syncpr == true)
 638		return -EINVAL;
 639
 640	config->syncfreq = val & ETMv4_SYNC_MASK;
 641	return size;
 642}
 643static DEVICE_ATTR_RW(syncfreq);
 644
 645static ssize_t cyc_threshold_show(struct device *dev,
 646				  struct device_attribute *attr,
 647				  char *buf)
 648{
 649	unsigned long val;
 650	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 651	struct etmv4_config *config = &drvdata->config;
 652
 653	val = config->ccctlr;
 654	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 655}
 656
 657static ssize_t cyc_threshold_store(struct device *dev,
 658				   struct device_attribute *attr,
 659				   const char *buf, size_t size)
 660{
 661	unsigned long val;
 662	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 663	struct etmv4_config *config = &drvdata->config;
 664
 665	if (kstrtoul(buf, 16, &val))
 666		return -EINVAL;
 667
 668	/* mask off max threshold before checking min value */
 669	val &= ETM_CYC_THRESHOLD_MASK;
 670	if (val < drvdata->ccitmin)
 671		return -EINVAL;
 672
 673	config->ccctlr = val;
 674	return size;
 675}
 676static DEVICE_ATTR_RW(cyc_threshold);
 677
 678static ssize_t bb_ctrl_show(struct device *dev,
 679			    struct device_attribute *attr,
 680			    char *buf)
 681{
 682	unsigned long val;
 683	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 684	struct etmv4_config *config = &drvdata->config;
 685
 686	val = config->bb_ctrl;
 687	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 688}
 689
 690static ssize_t bb_ctrl_store(struct device *dev,
 691			     struct device_attribute *attr,
 692			     const char *buf, size_t size)
 693{
 694	unsigned long val;
 695	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 696	struct etmv4_config *config = &drvdata->config;
 697
 698	if (kstrtoul(buf, 16, &val))
 699		return -EINVAL;
 700	if (drvdata->trcbb == false)
 701		return -EINVAL;
 702	if (!drvdata->nr_addr_cmp)
 703		return -EINVAL;
 704
 705	/*
 706	 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
 707	 * individual range comparators. If include then at least 1
 708	 * range must be selected.
 709	 */
 710	if ((val & TRCBBCTLR_MODE) && (FIELD_GET(TRCBBCTLR_RANGE_MASK, val) == 0))
 711		return -EINVAL;
 712
 713	config->bb_ctrl = val & (TRCBBCTLR_MODE | TRCBBCTLR_RANGE_MASK);
 714	return size;
 715}
 716static DEVICE_ATTR_RW(bb_ctrl);
 717
 718static ssize_t event_vinst_show(struct device *dev,
 719				struct device_attribute *attr,
 720				char *buf)
 721{
 722	unsigned long val;
 723	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 724	struct etmv4_config *config = &drvdata->config;
 725
 726	val = FIELD_GET(TRCVICTLR_EVENT_MASK, config->vinst_ctrl);
 727	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 728}
 729
 730static ssize_t event_vinst_store(struct device *dev,
 731				 struct device_attribute *attr,
 732				 const char *buf, size_t size)
 733{
 734	unsigned long val;
 735	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 736	struct etmv4_config *config = &drvdata->config;
 737
 738	if (kstrtoul(buf, 16, &val))
 739		return -EINVAL;
 740
 741	spin_lock(&drvdata->spinlock);
 742	val &= TRCVICTLR_EVENT_MASK >> __bf_shf(TRCVICTLR_EVENT_MASK);
 743	config->vinst_ctrl &= ~TRCVICTLR_EVENT_MASK;
 744	config->vinst_ctrl |= FIELD_PREP(TRCVICTLR_EVENT_MASK, val);
 745	spin_unlock(&drvdata->spinlock);
 746	return size;
 747}
 748static DEVICE_ATTR_RW(event_vinst);
 749
 750static ssize_t s_exlevel_vinst_show(struct device *dev,
 751				    struct device_attribute *attr,
 752				    char *buf)
 753{
 754	unsigned long val;
 755	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 756	struct etmv4_config *config = &drvdata->config;
 757
 758	val = FIELD_GET(TRCVICTLR_EXLEVEL_S_MASK, config->vinst_ctrl);
 759	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 760}
 761
 762static ssize_t s_exlevel_vinst_store(struct device *dev,
 763				     struct device_attribute *attr,
 764				     const char *buf, size_t size)
 765{
 766	unsigned long val;
 767	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 768	struct etmv4_config *config = &drvdata->config;
 769
 770	if (kstrtoul(buf, 16, &val))
 771		return -EINVAL;
 772
 773	spin_lock(&drvdata->spinlock);
 774	/* clear all EXLEVEL_S bits  */
 775	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_S_MASK;
 776	/* enable instruction tracing for corresponding exception level */
 777	val &= drvdata->s_ex_level;
 778	config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK);
 779	spin_unlock(&drvdata->spinlock);
 780	return size;
 781}
 782static DEVICE_ATTR_RW(s_exlevel_vinst);
 783
 784static ssize_t ns_exlevel_vinst_show(struct device *dev,
 785				     struct device_attribute *attr,
 786				     char *buf)
 787{
 788	unsigned long val;
 789	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 790	struct etmv4_config *config = &drvdata->config;
 791
 792	/* EXLEVEL_NS, bits[23:20] */
 793	val = FIELD_GET(TRCVICTLR_EXLEVEL_NS_MASK, config->vinst_ctrl);
 794	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 795}
 796
 797static ssize_t ns_exlevel_vinst_store(struct device *dev,
 798				      struct device_attribute *attr,
 799				      const char *buf, size_t size)
 800{
 801	unsigned long val;
 802	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 803	struct etmv4_config *config = &drvdata->config;
 804
 805	if (kstrtoul(buf, 16, &val))
 806		return -EINVAL;
 807
 808	spin_lock(&drvdata->spinlock);
 809	/* clear EXLEVEL_NS bits  */
 810	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK;
 811	/* enable instruction tracing for corresponding exception level */
 812	val &= drvdata->ns_ex_level;
 813	config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK);
 814	spin_unlock(&drvdata->spinlock);
 815	return size;
 816}
 817static DEVICE_ATTR_RW(ns_exlevel_vinst);
 818
 819static ssize_t addr_idx_show(struct device *dev,
 820			     struct device_attribute *attr,
 821			     char *buf)
 822{
 823	unsigned long val;
 824	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 825	struct etmv4_config *config = &drvdata->config;
 826
 827	val = config->addr_idx;
 828	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 829}
 830
 831static ssize_t addr_idx_store(struct device *dev,
 832			      struct device_attribute *attr,
 833			      const char *buf, size_t size)
 834{
 835	unsigned long val;
 836	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 837	struct etmv4_config *config = &drvdata->config;
 838
 839	if (kstrtoul(buf, 16, &val))
 840		return -EINVAL;
 841	if (val >= drvdata->nr_addr_cmp * 2)
 842		return -EINVAL;
 843
 844	/*
 845	 * Use spinlock to ensure index doesn't change while it gets
 846	 * dereferenced multiple times within a spinlock block elsewhere.
 847	 */
 848	spin_lock(&drvdata->spinlock);
 849	config->addr_idx = val;
 850	spin_unlock(&drvdata->spinlock);
 851	return size;
 852}
 853static DEVICE_ATTR_RW(addr_idx);
 854
 855static ssize_t addr_instdatatype_show(struct device *dev,
 856				      struct device_attribute *attr,
 857				      char *buf)
 858{
 859	ssize_t len;
 860	u8 val, idx;
 861	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 862	struct etmv4_config *config = &drvdata->config;
 863
 864	spin_lock(&drvdata->spinlock);
 865	idx = config->addr_idx;
 866	val = FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]);
 867	len = scnprintf(buf, PAGE_SIZE, "%s\n",
 868			val == TRCACATRn_TYPE_ADDR ? "instr" :
 869			(val == TRCACATRn_TYPE_DATA_LOAD_ADDR ? "data_load" :
 870			(val == TRCACATRn_TYPE_DATA_STORE_ADDR ? "data_store" :
 871			"data_load_store")));
 872	spin_unlock(&drvdata->spinlock);
 873	return len;
 874}
 875
 876static ssize_t addr_instdatatype_store(struct device *dev,
 877				       struct device_attribute *attr,
 878				       const char *buf, size_t size)
 879{
 880	u8 idx;
 881	char str[20] = "";
 882	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 883	struct etmv4_config *config = &drvdata->config;
 884
 885	if (strlen(buf) >= 20)
 886		return -EINVAL;
 887	if (sscanf(buf, "%s", str) != 1)
 888		return -EINVAL;
 889
 890	spin_lock(&drvdata->spinlock);
 891	idx = config->addr_idx;
 892	if (!strcmp(str, "instr"))
 893		/* TYPE, bits[1:0] */
 894		config->addr_acc[idx] &= ~TRCACATRn_TYPE_MASK;
 895
 896	spin_unlock(&drvdata->spinlock);
 897	return size;
 898}
 899static DEVICE_ATTR_RW(addr_instdatatype);
 900
 901static ssize_t addr_single_show(struct device *dev,
 902				struct device_attribute *attr,
 903				char *buf)
 904{
 905	u8 idx;
 906	unsigned long val;
 907	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 908	struct etmv4_config *config = &drvdata->config;
 909
  910	spin_lock(&drvdata->spinlock);
  911	idx = config->addr_idx;
 912	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
 913	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
 914		spin_unlock(&drvdata->spinlock);
 915		return -EPERM;
 916	}
 917	val = (unsigned long)config->addr_val[idx];
 918	spin_unlock(&drvdata->spinlock);
 919	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 920}
 921
 922static ssize_t addr_single_store(struct device *dev,
 923				 struct device_attribute *attr,
 924				 const char *buf, size_t size)
 925{
 926	u8 idx;
 927	unsigned long val;
 928	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 929	struct etmv4_config *config = &drvdata->config;
 930
 931	if (kstrtoul(buf, 16, &val))
 932		return -EINVAL;
 933
 934	spin_lock(&drvdata->spinlock);
 935	idx = config->addr_idx;
 936	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
 937	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
 938		spin_unlock(&drvdata->spinlock);
 939		return -EPERM;
 940	}
 941
 942	config->addr_val[idx] = (u64)val;
 943	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
 944	spin_unlock(&drvdata->spinlock);
 945	return size;
 946}
 947static DEVICE_ATTR_RW(addr_single);
 948
 949static ssize_t addr_range_show(struct device *dev,
 950			       struct device_attribute *attr,
 951			       char *buf)
 952{
 953	u8 idx;
 954	unsigned long val1, val2;
 955	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 956	struct etmv4_config *config = &drvdata->config;
 957
 958	spin_lock(&drvdata->spinlock);
 959	idx = config->addr_idx;
 960	if (idx % 2 != 0) {
 961		spin_unlock(&drvdata->spinlock);
 962		return -EPERM;
 963	}
 964	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
 965	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
 966	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
 967	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
 968		spin_unlock(&drvdata->spinlock);
 969		return -EPERM;
 970	}
 971
 972	val1 = (unsigned long)config->addr_val[idx];
 973	val2 = (unsigned long)config->addr_val[idx + 1];
 974	spin_unlock(&drvdata->spinlock);
 975	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
 976}
 977
 978static ssize_t addr_range_store(struct device *dev,
 979				struct device_attribute *attr,
 980				const char *buf, size_t size)
 981{
 982	u8 idx;
 983	unsigned long val1, val2;
 984	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 985	struct etmv4_config *config = &drvdata->config;
 986	int elements, exclude;
 987
 988	elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
 989
  990	/* exclude is optional, but we need at least two parameters */
 991	if (elements < 2)
 992		return -EINVAL;
 993	/* lower address comparator cannot have a higher address value */
 994	if (val1 > val2)
 995		return -EINVAL;
 996
 997	spin_lock(&drvdata->spinlock);
 998	idx = config->addr_idx;
 999	if (idx % 2 != 0) {
1000		spin_unlock(&drvdata->spinlock);
1001		return -EPERM;
1002	}
1003
1004	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1005	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1006	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1007	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1008		spin_unlock(&drvdata->spinlock);
1009		return -EPERM;
1010	}
1011
1012	config->addr_val[idx] = (u64)val1;
1013	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1014	config->addr_val[idx + 1] = (u64)val2;
1015	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1016	/*
1017	 * Program include or exclude control bits for vinst or vdata
 1018	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE;
 1019	 * use the supplied value, or default to the bit set in 'mode'.
1020	 */
1021	if (elements != 3)
1022		exclude = config->mode & ETM_MODE_EXCLUDE;
1023	etm4_set_mode_exclude(drvdata, exclude ? true : false);
1024
1025	spin_unlock(&drvdata->spinlock);
1026	return size;
1027}
1028static DEVICE_ATTR_RW(addr_range);
1029
1030static ssize_t addr_start_show(struct device *dev,
1031			       struct device_attribute *attr,
1032			       char *buf)
1033{
1034	u8 idx;
1035	unsigned long val;
1036	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1037	struct etmv4_config *config = &drvdata->config;
1038
1039	spin_lock(&drvdata->spinlock);
1040	idx = config->addr_idx;
1041
1042	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1043	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1044		spin_unlock(&drvdata->spinlock);
1045		return -EPERM;
1046	}
1047
1048	val = (unsigned long)config->addr_val[idx];
1049	spin_unlock(&drvdata->spinlock);
1050	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1051}
1052
1053static ssize_t addr_start_store(struct device *dev,
1054				struct device_attribute *attr,
1055				const char *buf, size_t size)
1056{
1057	u8 idx;
1058	unsigned long val;
1059	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1060	struct etmv4_config *config = &drvdata->config;
1061
1062	if (kstrtoul(buf, 16, &val))
1063		return -EINVAL;
1064
1065	spin_lock(&drvdata->spinlock);
1066	idx = config->addr_idx;
1067	if (!drvdata->nr_addr_cmp) {
1068		spin_unlock(&drvdata->spinlock);
1069		return -EINVAL;
1070	}
1071	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1072	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1073		spin_unlock(&drvdata->spinlock);
1074		return -EPERM;
1075	}
1076
1077	config->addr_val[idx] = (u64)val;
1078	config->addr_type[idx] = ETM_ADDR_TYPE_START;
1079	config->vissctlr |= BIT(idx);
1080	spin_unlock(&drvdata->spinlock);
1081	return size;
1082}
1083static DEVICE_ATTR_RW(addr_start);
1084
1085static ssize_t addr_stop_show(struct device *dev,
1086			      struct device_attribute *attr,
1087			      char *buf)
1088{
1089	u8 idx;
1090	unsigned long val;
1091	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1092	struct etmv4_config *config = &drvdata->config;
1093
1094	spin_lock(&drvdata->spinlock);
1095	idx = config->addr_idx;
1096
1097	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1098	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1099		spin_unlock(&drvdata->spinlock);
1100		return -EPERM;
1101	}
1102
1103	val = (unsigned long)config->addr_val[idx];
1104	spin_unlock(&drvdata->spinlock);
1105	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1106}
1107
1108static ssize_t addr_stop_store(struct device *dev,
1109			       struct device_attribute *attr,
1110			       const char *buf, size_t size)
1111{
1112	u8 idx;
1113	unsigned long val;
1114	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1115	struct etmv4_config *config = &drvdata->config;
1116
1117	if (kstrtoul(buf, 16, &val))
1118		return -EINVAL;
1119
1120	spin_lock(&drvdata->spinlock);
1121	idx = config->addr_idx;
1122	if (!drvdata->nr_addr_cmp) {
1123		spin_unlock(&drvdata->spinlock);
1124		return -EINVAL;
1125	}
1126	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1127	       config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1128		spin_unlock(&drvdata->spinlock);
1129		return -EPERM;
1130	}
1131
1132	config->addr_val[idx] = (u64)val;
1133	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1134	config->vissctlr |= BIT(idx + 16);
1135	spin_unlock(&drvdata->spinlock);
1136	return size;
1137}
1138static DEVICE_ATTR_RW(addr_stop);
1139
1140static ssize_t addr_ctxtype_show(struct device *dev,
1141				 struct device_attribute *attr,
1142				 char *buf)
1143{
1144	ssize_t len;
1145	u8 idx, val;
1146	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1147	struct etmv4_config *config = &drvdata->config;
1148
1149	spin_lock(&drvdata->spinlock);
1150	idx = config->addr_idx;
1151	/* CONTEXTTYPE, bits[3:2] */
1152	val = FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK, config->addr_acc[idx]);
1153	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1154			(val == ETM_CTX_CTXID ? "ctxid" :
1155			(val == ETM_CTX_VMID ? "vmid" : "all")));
1156	spin_unlock(&drvdata->spinlock);
1157	return len;
1158}
1159
1160static ssize_t addr_ctxtype_store(struct device *dev,
1161				  struct device_attribute *attr,
1162				  const char *buf, size_t size)
1163{
1164	u8 idx;
1165	char str[10] = "";
1166	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1167	struct etmv4_config *config = &drvdata->config;
1168
1169	if (strlen(buf) >= 10)
1170		return -EINVAL;
1171	if (sscanf(buf, "%s", str) != 1)
1172		return -EINVAL;
1173
1174	spin_lock(&drvdata->spinlock);
1175	idx = config->addr_idx;
1176	if (!strcmp(str, "none"))
1177		/* start by clearing context type bits */
1178		config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_MASK;
1179	else if (!strcmp(str, "ctxid")) {
 1180		/* 0b01 The trace unit performs a Context ID comparison */
1181		if (drvdata->numcidc) {
1182			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
1183			config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_VMID;
1184		}
1185	} else if (!strcmp(str, "vmid")) {
 1186		/* 0b10 The trace unit performs a VMID comparison */
1187		if (drvdata->numvmidc) {
1188			config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_CTXID;
1189			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
1190		}
1191	} else if (!strcmp(str, "all")) {
1192		/*
1193		 * 0b11 The trace unit performs a Context ID
 1194		 * comparison and a VMID comparison
1195		 */
1196		if (drvdata->numcidc)
1197			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
1198		if (drvdata->numvmidc)
1199			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
1200	}
1201	spin_unlock(&drvdata->spinlock);
1202	return size;
1203}
1204static DEVICE_ATTR_RW(addr_ctxtype);
1205
1206static ssize_t addr_context_show(struct device *dev,
1207				 struct device_attribute *attr,
1208				 char *buf)
1209{
1210	u8 idx;
1211	unsigned long val;
1212	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1213	struct etmv4_config *config = &drvdata->config;
1214
1215	spin_lock(&drvdata->spinlock);
1216	idx = config->addr_idx;
1217	/* context ID comparator bits[6:4] */
1218	val = FIELD_GET(TRCACATRn_CONTEXT_MASK, config->addr_acc[idx]);
1219	spin_unlock(&drvdata->spinlock);
1220	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1221}
1222
1223static ssize_t addr_context_store(struct device *dev,
1224				  struct device_attribute *attr,
1225				  const char *buf, size_t size)
1226{
1227	u8 idx;
1228	unsigned long val;
1229	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1230	struct etmv4_config *config = &drvdata->config;
1231
1232	if (kstrtoul(buf, 16, &val))
1233		return -EINVAL;
1234	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1235		return -EINVAL;
1236	if (val >=  (drvdata->numcidc >= drvdata->numvmidc ?
1237		     drvdata->numcidc : drvdata->numvmidc))
1238		return -EINVAL;
1239
1240	spin_lock(&drvdata->spinlock);
1241	idx = config->addr_idx;
1242	/* clear context ID comparator bits[6:4] */
1243	config->addr_acc[idx] &= ~TRCACATRn_CONTEXT_MASK;
1244	config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_CONTEXT_MASK);
1245	spin_unlock(&drvdata->spinlock);
1246	return size;
1247}
1248static DEVICE_ATTR_RW(addr_context);
1249
1250static ssize_t addr_exlevel_s_ns_show(struct device *dev,
1251				      struct device_attribute *attr,
1252				      char *buf)
1253{
1254	u8 idx;
1255	unsigned long val;
1256	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1257	struct etmv4_config *config = &drvdata->config;
1258
1259	spin_lock(&drvdata->spinlock);
1260	idx = config->addr_idx;
1261	val = FIELD_GET(TRCACATRn_EXLEVEL_MASK, config->addr_acc[idx]);
1262	spin_unlock(&drvdata->spinlock);
1263	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1264}
1265
1266static ssize_t addr_exlevel_s_ns_store(struct device *dev,
1267				       struct device_attribute *attr,
1268				       const char *buf, size_t size)
1269{
1270	u8 idx;
1271	unsigned long val;
1272	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1273	struct etmv4_config *config = &drvdata->config;
1274
1275	if (kstrtoul(buf, 0, &val))
1276		return -EINVAL;
1277
1278	if (val & ~(TRCACATRn_EXLEVEL_MASK >> __bf_shf(TRCACATRn_EXLEVEL_MASK)))
1279		return -EINVAL;
1280
1281	spin_lock(&drvdata->spinlock);
1282	idx = config->addr_idx;
1283	/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1284	config->addr_acc[idx] &= ~TRCACATRn_EXLEVEL_MASK;
1285	config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_EXLEVEL_MASK);
1286	spin_unlock(&drvdata->spinlock);
1287	return size;
1288}
1289static DEVICE_ATTR_RW(addr_exlevel_s_ns);
1290
1291static const char * const addr_type_names[] = {
1292	"unused",
1293	"single",
1294	"range",
1295	"start",
1296	"stop"
1297};
1298
1299static ssize_t addr_cmp_view_show(struct device *dev,
1300				  struct device_attribute *attr, char *buf)
1301{
1302	u8 idx, addr_type;
1303	unsigned long addr_v, addr_v2, addr_ctrl;
1304	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1305	struct etmv4_config *config = &drvdata->config;
1306	int size = 0;
1307	bool exclude = false;
1308
1309	spin_lock(&drvdata->spinlock);
1310	idx = config->addr_idx;
1311	addr_v = config->addr_val[idx];
1312	addr_ctrl = config->addr_acc[idx];
1313	addr_type = config->addr_type[idx];
1314	if (addr_type == ETM_ADDR_TYPE_RANGE) {
1315		if (idx & 0x1) {
1316			idx -= 1;
1317			addr_v2 = addr_v;
1318			addr_v = config->addr_val[idx];
1319		} else {
1320			addr_v2 = config->addr_val[idx + 1];
1321		}
1322		exclude = config->viiectlr & BIT(idx / 2 + 16);
1323	}
1324	spin_unlock(&drvdata->spinlock);
1325	if (addr_type) {
1326		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
1327				 addr_type_names[addr_type], addr_v);
1328		if (addr_type == ETM_ADDR_TYPE_RANGE) {
1329			size += scnprintf(buf + size, PAGE_SIZE - size,
1330					  " %#lx %s", addr_v2,
1331					  exclude ? "exclude" : "include");
1332		}
1333		size += scnprintf(buf + size, PAGE_SIZE - size,
1334				  " ctrl(%#lx)\n", addr_ctrl);
1335	} else {
1336		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
1337	}
1338	return size;
1339}
1340static DEVICE_ATTR_RO(addr_cmp_view);
1341
1342static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
1343					    struct device_attribute *attr,
1344					    char *buf)
1345{
1346	unsigned long val;
1347	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1348	struct etmv4_config *config = &drvdata->config;
1349
1350	if (!drvdata->nr_pe_cmp)
1351		return -EINVAL;
1352	val = config->vipcssctlr;
1353	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1354}
1355static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
1356					     struct device_attribute *attr,
1357					     const char *buf, size_t size)
1358{
1359	unsigned long val;
1360	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1361	struct etmv4_config *config = &drvdata->config;
1362
1363	if (kstrtoul(buf, 16, &val))
1364		return -EINVAL;
1365	if (!drvdata->nr_pe_cmp)
1366		return -EINVAL;
1367
1368	spin_lock(&drvdata->spinlock);
1369	config->vipcssctlr = val;
1370	spin_unlock(&drvdata->spinlock);
1371	return size;
1372}
1373static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
1374
1375static ssize_t seq_idx_show(struct device *dev,
1376			    struct device_attribute *attr,
1377			    char *buf)
1378{
1379	unsigned long val;
1380	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1381	struct etmv4_config *config = &drvdata->config;
1382
1383	val = config->seq_idx;
1384	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1385}
1386
1387static ssize_t seq_idx_store(struct device *dev,
1388			     struct device_attribute *attr,
1389			     const char *buf, size_t size)
1390{
1391	unsigned long val;
1392	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1393	struct etmv4_config *config = &drvdata->config;
1394
1395	if (kstrtoul(buf, 16, &val))
1396		return -EINVAL;
1397	if (val >= drvdata->nrseqstate - 1)
1398		return -EINVAL;
1399
1400	/*
1401	 * Use spinlock to ensure index doesn't change while it gets
1402	 * dereferenced multiple times within a spinlock block elsewhere.
1403	 */
1404	spin_lock(&drvdata->spinlock);
1405	config->seq_idx = val;
1406	spin_unlock(&drvdata->spinlock);
1407	return size;
1408}
1409static DEVICE_ATTR_RW(seq_idx);
1410
1411static ssize_t seq_state_show(struct device *dev,
1412			      struct device_attribute *attr,
1413			      char *buf)
1414{
1415	unsigned long val;
1416	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1417	struct etmv4_config *config = &drvdata->config;
1418
1419	val = config->seq_state;
1420	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1421}
1422
1423static ssize_t seq_state_store(struct device *dev,
1424			       struct device_attribute *attr,
1425			       const char *buf, size_t size)
1426{
1427	unsigned long val;
1428	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1429	struct etmv4_config *config = &drvdata->config;
1430
1431	if (kstrtoul(buf, 16, &val))
1432		return -EINVAL;
1433	if (val >= drvdata->nrseqstate)
1434		return -EINVAL;
1435
1436	config->seq_state = val;
1437	return size;
1438}
1439static DEVICE_ATTR_RW(seq_state);
1440
1441static ssize_t seq_event_show(struct device *dev,
1442			      struct device_attribute *attr,
1443			      char *buf)
1444{
1445	u8 idx;
1446	unsigned long val;
1447	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1448	struct etmv4_config *config = &drvdata->config;
1449
1450	spin_lock(&drvdata->spinlock);
1451	idx = config->seq_idx;
1452	val = config->seq_ctrl[idx];
1453	spin_unlock(&drvdata->spinlock);
1454	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1455}
1456
1457static ssize_t seq_event_store(struct device *dev,
1458			       struct device_attribute *attr,
1459			       const char *buf, size_t size)
1460{
1461	u8 idx;
1462	unsigned long val;
1463	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1464	struct etmv4_config *config = &drvdata->config;
1465
1466	if (kstrtoul(buf, 16, &val))
1467		return -EINVAL;
1468
1469	spin_lock(&drvdata->spinlock);
1470	idx = config->seq_idx;
1471	/* Seq control has two masks B[15:8] F[7:0] */
1472	config->seq_ctrl[idx] = val & 0xFFFF;
1473	spin_unlock(&drvdata->spinlock);
1474	return size;
1475}
1476static DEVICE_ATTR_RW(seq_event);
1477
1478static ssize_t seq_reset_event_show(struct device *dev,
1479				    struct device_attribute *attr,
1480				    char *buf)
1481{
1482	unsigned long val;
1483	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1484	struct etmv4_config *config = &drvdata->config;
1485
1486	val = config->seq_rst;
1487	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1488}
1489
1490static ssize_t seq_reset_event_store(struct device *dev,
1491				     struct device_attribute *attr,
1492				     const char *buf, size_t size)
1493{
1494	unsigned long val;
1495	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1496	struct etmv4_config *config = &drvdata->config;
1497
1498	if (kstrtoul(buf, 16, &val))
1499		return -EINVAL;
1500	if (!(drvdata->nrseqstate))
1501		return -EINVAL;
1502
1503	config->seq_rst = val & ETMv4_EVENT_MASK;
1504	return size;
1505}
1506static DEVICE_ATTR_RW(seq_reset_event);
1507
1508static ssize_t cntr_idx_show(struct device *dev,
1509			     struct device_attribute *attr,
1510			     char *buf)
1511{
1512	unsigned long val;
1513	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1514	struct etmv4_config *config = &drvdata->config;
1515
1516	val = config->cntr_idx;
1517	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1518}
1519
1520static ssize_t cntr_idx_store(struct device *dev,
1521			      struct device_attribute *attr,
1522			      const char *buf, size_t size)
1523{
1524	unsigned long val;
1525	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1526	struct etmv4_config *config = &drvdata->config;
1527
1528	if (kstrtoul(buf, 16, &val))
1529		return -EINVAL;
1530	if (val >= drvdata->nr_cntr)
1531		return -EINVAL;
1532
1533	/*
1534	 * Use spinlock to ensure index doesn't change while it gets
1535	 * dereferenced multiple times within a spinlock block elsewhere.
1536	 */
1537	spin_lock(&drvdata->spinlock);
1538	config->cntr_idx = val;
1539	spin_unlock(&drvdata->spinlock);
1540	return size;
1541}
1542static DEVICE_ATTR_RW(cntr_idx);
1543
1544static ssize_t cntrldvr_show(struct device *dev,
1545			     struct device_attribute *attr,
1546			     char *buf)
1547{
1548	u8 idx;
1549	unsigned long val;
1550	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1551	struct etmv4_config *config = &drvdata->config;
1552
1553	spin_lock(&drvdata->spinlock);
1554	idx = config->cntr_idx;
1555	val = config->cntrldvr[idx];
1556	spin_unlock(&drvdata->spinlock);
1557	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1558}
1559
1560static ssize_t cntrldvr_store(struct device *dev,
1561			      struct device_attribute *attr,
1562			      const char *buf, size_t size)
1563{
1564	u8 idx;
1565	unsigned long val;
1566	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1567	struct etmv4_config *config = &drvdata->config;
1568
1569	if (kstrtoul(buf, 16, &val))
1570		return -EINVAL;
1571	if (val > ETM_CNTR_MAX_VAL)
1572		return -EINVAL;
1573
1574	spin_lock(&drvdata->spinlock);
1575	idx = config->cntr_idx;
1576	config->cntrldvr[idx] = val;
1577	spin_unlock(&drvdata->spinlock);
1578	return size;
1579}
1580static DEVICE_ATTR_RW(cntrldvr);
1581
1582static ssize_t cntr_val_show(struct device *dev,
1583			     struct device_attribute *attr,
1584			     char *buf)
1585{
1586	u8 idx;
1587	unsigned long val;
1588	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1589	struct etmv4_config *config = &drvdata->config;
1590
1591	spin_lock(&drvdata->spinlock);
1592	idx = config->cntr_idx;
1593	val = config->cntr_val[idx];
1594	spin_unlock(&drvdata->spinlock);
1595	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1596}
1597
1598static ssize_t cntr_val_store(struct device *dev,
1599			      struct device_attribute *attr,
1600			      const char *buf, size_t size)
1601{
1602	u8 idx;
1603	unsigned long val;
1604	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1605	struct etmv4_config *config = &drvdata->config;
1606
1607	if (kstrtoul(buf, 16, &val))
1608		return -EINVAL;
1609	if (val > ETM_CNTR_MAX_VAL)
1610		return -EINVAL;
1611
1612	spin_lock(&drvdata->spinlock);
1613	idx = config->cntr_idx;
1614	config->cntr_val[idx] = val;
1615	spin_unlock(&drvdata->spinlock);
1616	return size;
1617}
1618static DEVICE_ATTR_RW(cntr_val);
1619
1620static ssize_t cntr_ctrl_show(struct device *dev,
1621			      struct device_attribute *attr,
1622			      char *buf)
1623{
1624	u8 idx;
1625	unsigned long val;
1626	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1627	struct etmv4_config *config = &drvdata->config;
1628
1629	spin_lock(&drvdata->spinlock);
1630	idx = config->cntr_idx;
1631	val = config->cntr_ctrl[idx];
1632	spin_unlock(&drvdata->spinlock);
1633	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1634}
1635
1636static ssize_t cntr_ctrl_store(struct device *dev,
1637			       struct device_attribute *attr,
1638			       const char *buf, size_t size)
1639{
1640	u8 idx;
1641	unsigned long val;
1642	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1643	struct etmv4_config *config = &drvdata->config;
1644
1645	if (kstrtoul(buf, 16, &val))
1646		return -EINVAL;
1647
1648	spin_lock(&drvdata->spinlock);
1649	idx = config->cntr_idx;
1650	config->cntr_ctrl[idx] = val;
1651	spin_unlock(&drvdata->spinlock);
1652	return size;
1653}
1654static DEVICE_ATTR_RW(cntr_ctrl);
1655
1656static ssize_t res_idx_show(struct device *dev,
1657			    struct device_attribute *attr,
1658			    char *buf)
1659{
1660	unsigned long val;
1661	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1662	struct etmv4_config *config = &drvdata->config;
1663
1664	val = config->res_idx;
1665	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1666}
1667
1668static ssize_t res_idx_store(struct device *dev,
1669			     struct device_attribute *attr,
1670			     const char *buf, size_t size)
1671{
1672	unsigned long val;
1673	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1674	struct etmv4_config *config = &drvdata->config;
1675
1676	if (kstrtoul(buf, 16, &val))
1677		return -EINVAL;
1678	/*
1679	 * Resource selector pair 0 is always implemented and reserved,
 1680	 * i.e. an idx of 0 or 1 is illegal.
1681	 */
1682	if ((val < 2) || (val >= 2 * drvdata->nr_resource))
1683		return -EINVAL;
1684
1685	/*
1686	 * Use spinlock to ensure index doesn't change while it gets
1687	 * dereferenced multiple times within a spinlock block elsewhere.
1688	 */
1689	spin_lock(&drvdata->spinlock);
1690	config->res_idx = val;
1691	spin_unlock(&drvdata->spinlock);
1692	return size;
1693}
1694static DEVICE_ATTR_RW(res_idx);
1695
1696static ssize_t res_ctrl_show(struct device *dev,
1697			     struct device_attribute *attr,
1698			     char *buf)
1699{
1700	u8 idx;
1701	unsigned long val;
1702	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1703	struct etmv4_config *config = &drvdata->config;
1704
1705	spin_lock(&drvdata->spinlock);
1706	idx = config->res_idx;
1707	val = config->res_ctrl[idx];
1708	spin_unlock(&drvdata->spinlock);
1709	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1710}
1711
1712static ssize_t res_ctrl_store(struct device *dev,
1713			      struct device_attribute *attr,
1714			      const char *buf, size_t size)
1715{
1716	u8 idx;
1717	unsigned long val;
1718	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1719	struct etmv4_config *config = &drvdata->config;
1720
1721	if (kstrtoul(buf, 16, &val))
1722		return -EINVAL;
1723
1724	spin_lock(&drvdata->spinlock);
1725	idx = config->res_idx;
 1726	/* For an odd idx the pair inversion bit is RES0 */
1727	if (idx % 2 != 0)
1728		/* PAIRINV, bit[21] */
1729		val &= ~TRCRSCTLRn_PAIRINV;
1730	config->res_ctrl[idx] = val & (TRCRSCTLRn_PAIRINV |
1731				       TRCRSCTLRn_INV |
1732				       TRCRSCTLRn_GROUP_MASK |
1733				       TRCRSCTLRn_SELECT_MASK);
1734	spin_unlock(&drvdata->spinlock);
1735	return size;
1736}
1737static DEVICE_ATTR_RW(res_ctrl);
1738
1739static ssize_t sshot_idx_show(struct device *dev,
1740			      struct device_attribute *attr, char *buf)
1741{
1742	unsigned long val;
1743	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1744	struct etmv4_config *config = &drvdata->config;
1745
1746	val = config->ss_idx;
1747	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1748}
1749
1750static ssize_t sshot_idx_store(struct device *dev,
1751			       struct device_attribute *attr,
1752			       const char *buf, size_t size)
1753{
1754	unsigned long val;
1755	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1756	struct etmv4_config *config = &drvdata->config;
1757
1758	if (kstrtoul(buf, 16, &val))
1759		return -EINVAL;
1760	if (val >= drvdata->nr_ss_cmp)
1761		return -EINVAL;
1762
1763	spin_lock(&drvdata->spinlock);
1764	config->ss_idx = val;
1765	spin_unlock(&drvdata->spinlock);
1766	return size;
1767}
1768static DEVICE_ATTR_RW(sshot_idx);
1769
1770static ssize_t sshot_ctrl_show(struct device *dev,
1771			       struct device_attribute *attr,
1772			       char *buf)
1773{
1774	unsigned long val;
1775	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1776	struct etmv4_config *config = &drvdata->config;
1777
1778	spin_lock(&drvdata->spinlock);
1779	val = config->ss_ctrl[config->ss_idx];
1780	spin_unlock(&drvdata->spinlock);
1781	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1782}
1783
1784static ssize_t sshot_ctrl_store(struct device *dev,
1785				struct device_attribute *attr,
1786				const char *buf, size_t size)
1787{
1788	u8 idx;
1789	unsigned long val;
1790	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1791	struct etmv4_config *config = &drvdata->config;
1792
1793	if (kstrtoul(buf, 16, &val))
1794		return -EINVAL;
1795
1796	spin_lock(&drvdata->spinlock);
1797	idx = config->ss_idx;
1798	config->ss_ctrl[idx] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK, val);
1799	/* must clear bit 31 in related status register on programming */
1800	config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
1801	spin_unlock(&drvdata->spinlock);
1802	return size;
1803}
1804static DEVICE_ATTR_RW(sshot_ctrl);
1805
1806static ssize_t sshot_status_show(struct device *dev,
1807				 struct device_attribute *attr, char *buf)
1808{
1809	unsigned long val;
1810	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1811	struct etmv4_config *config = &drvdata->config;
1812
1813	spin_lock(&drvdata->spinlock);
1814	val = config->ss_status[config->ss_idx];
1815	spin_unlock(&drvdata->spinlock);
1816	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1817}
1818static DEVICE_ATTR_RO(sshot_status);
1819
1820static ssize_t sshot_pe_ctrl_show(struct device *dev,
1821				  struct device_attribute *attr,
1822				  char *buf)
1823{
1824	unsigned long val;
1825	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1826	struct etmv4_config *config = &drvdata->config;
1827
1828	spin_lock(&drvdata->spinlock);
1829	val = config->ss_pe_cmp[config->ss_idx];
1830	spin_unlock(&drvdata->spinlock);
1831	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1832}
1833
1834static ssize_t sshot_pe_ctrl_store(struct device *dev,
1835				   struct device_attribute *attr,
1836				   const char *buf, size_t size)
1837{
1838	u8 idx;
1839	unsigned long val;
1840	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1841	struct etmv4_config *config = &drvdata->config;
1842
1843	if (kstrtoul(buf, 16, &val))
1844		return -EINVAL;
1845
1846	spin_lock(&drvdata->spinlock);
1847	idx = config->ss_idx;
1848	config->ss_pe_cmp[idx] = FIELD_PREP(TRCSSPCICRn_PC_MASK, val);
1849	/* must clear bit 31 in related status register on programming */
1850	config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
1851	spin_unlock(&drvdata->spinlock);
1852	return size;
1853}
1854static DEVICE_ATTR_RW(sshot_pe_ctrl);
1855
1856static ssize_t ctxid_idx_show(struct device *dev,
1857			      struct device_attribute *attr,
1858			      char *buf)
1859{
1860	unsigned long val;
1861	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1862	struct etmv4_config *config = &drvdata->config;
1863
1864	val = config->ctxid_idx;
1865	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1866}
1867
1868static ssize_t ctxid_idx_store(struct device *dev,
1869			       struct device_attribute *attr,
1870			       const char *buf, size_t size)
1871{
1872	unsigned long val;
1873	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1874	struct etmv4_config *config = &drvdata->config;
1875
1876	if (kstrtoul(buf, 16, &val))
1877		return -EINVAL;
1878	if (val >= drvdata->numcidc)
1879		return -EINVAL;
1880
1881	/*
1882	 * Use spinlock to ensure index doesn't change while it gets
1883	 * dereferenced multiple times within a spinlock block elsewhere.
1884	 */
1885	spin_lock(&drvdata->spinlock);
1886	config->ctxid_idx = val;
1887	spin_unlock(&drvdata->spinlock);
1888	return size;
1889}
1890static DEVICE_ATTR_RW(ctxid_idx);
1891
1892static ssize_t ctxid_pid_show(struct device *dev,
1893			      struct device_attribute *attr,
1894			      char *buf)
1895{
1896	u8 idx;
1897	unsigned long val;
1898	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1899	struct etmv4_config *config = &drvdata->config;
1900
1901	/*
1902	 * Don't use contextID tracing if coming from a PID namespace.  See
1903	 * comment in ctxid_pid_store().
1904	 */
1905	if (task_active_pid_ns(current) != &init_pid_ns)
1906		return -EINVAL;
1907
1908	spin_lock(&drvdata->spinlock);
1909	idx = config->ctxid_idx;
1910	val = (unsigned long)config->ctxid_pid[idx];
1911	spin_unlock(&drvdata->spinlock);
1912	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1913}
1914
1915static ssize_t ctxid_pid_store(struct device *dev,
1916			       struct device_attribute *attr,
1917			       const char *buf, size_t size)
1918{
1919	u8 idx;
1920	unsigned long pid;
1921	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1922	struct etmv4_config *config = &drvdata->config;
1923
1924	/*
1925	 * When contextID tracing is enabled the tracers will insert the
1926	 * value found in the contextID register in the trace stream.  But if
1927	 * a process is in a namespace the PID of that process as seen from the
1928	 * namespace won't be what the kernel sees, something that makes the
1929	 * feature confusing and can potentially leak kernel only information.
1930	 * As such refuse to use the feature if @current is not in the initial
1931	 * PID namespace.
1932	 */
1933	if (task_active_pid_ns(current) != &init_pid_ns)
1934		return -EINVAL;
1935
1936	/*
1937	 * Only implemented when ctxid tracing is enabled, i.e. at least one
1938	 * ctxid comparator is implemented and the ctxid size is greater
1939	 * than 0 bits.
1940	 */
1941	if (!drvdata->ctxid_size || !drvdata->numcidc)
1942		return -EINVAL;
1943	if (kstrtoul(buf, 16, &pid))
1944		return -EINVAL;
1945
1946	spin_lock(&drvdata->spinlock);
1947	idx = config->ctxid_idx;
1948	config->ctxid_pid[idx] = (u64)pid;
1949	spin_unlock(&drvdata->spinlock);
1950	return size;
1951}
1952static DEVICE_ATTR_RW(ctxid_pid);
1953
1954static ssize_t ctxid_masks_show(struct device *dev,
1955				struct device_attribute *attr,
1956				char *buf)
1957{
1958	unsigned long val1, val2;
1959	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1960	struct etmv4_config *config = &drvdata->config;
1961
1962	/*
1963	 * Don't use contextID tracing if coming from a PID namespace.  See
1964	 * comment in ctxid_pid_store().
1965	 */
1966	if (task_active_pid_ns(current) != &init_pid_ns)
1967		return -EINVAL;
1968
1969	spin_lock(&drvdata->spinlock);
1970	val1 = config->ctxid_mask0;
1971	val2 = config->ctxid_mask1;
1972	spin_unlock(&drvdata->spinlock);
1973	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1974}
1975
1976static ssize_t ctxid_masks_store(struct device *dev,
1977				struct device_attribute *attr,
1978				const char *buf, size_t size)
1979{
1980	u8 i, j, maskbyte;
1981	unsigned long val1, val2, mask;
1982	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1983	struct etmv4_config *config = &drvdata->config;
1984	int nr_inputs;
1985
1986	/*
1987	 * Don't use contextID tracing if coming from a PID namespace.  See
1988	 * comment in ctxid_pid_store().
1989	 */
1990	if (task_active_pid_ns(current) != &init_pid_ns)
1991		return -EINVAL;
1992
1993	/*
1994	 * Only implemented when ctxid tracing is enabled, i.e. at least one
1995	 * ctxid comparator is implemented and the ctxid size is greater
1996	 * than 0 bits.
1997	 */
1998	if (!drvdata->ctxid_size || !drvdata->numcidc)
1999		return -EINVAL;
2000	/* one mask if <= 4 comparators, two for up to 8 */
2001	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2002	if ((drvdata->numcidc > 4) && (nr_inputs != 2))
2003		return -EINVAL;
2004
2005	spin_lock(&drvdata->spinlock);
2006	/*
2007	 * each byte[0..3] controls mask value applied to ctxid
2008	 * comparator[0..3]
2009	 */
2010	switch (drvdata->numcidc) {
2011	case 0x1:
2012		/* COMP0, bits[7:0] */
2013		config->ctxid_mask0 = val1 & 0xFF;
2014		break;
2015	case 0x2:
2016		/* COMP1, bits[15:8] */
2017		config->ctxid_mask0 = val1 & 0xFFFF;
2018		break;
2019	case 0x3:
2020		/* COMP2, bits[23:16] */
2021		config->ctxid_mask0 = val1 & 0xFFFFFF;
2022		break;
2023	case 0x4:
2024		 /* COMP3, bits[31:24] */
2025		config->ctxid_mask0 = val1;
2026		break;
2027	case 0x5:
2028		/* COMP4, bits[7:0] */
2029		config->ctxid_mask0 = val1;
2030		config->ctxid_mask1 = val2 & 0xFF;
2031		break;
2032	case 0x6:
2033		/* COMP5, bits[15:8] */
2034		config->ctxid_mask0 = val1;
2035		config->ctxid_mask1 = val2 & 0xFFFF;
2036		break;
2037	case 0x7:
2038		/* COMP6, bits[23:16] */
2039		config->ctxid_mask0 = val1;
2040		config->ctxid_mask1 = val2 & 0xFFFFFF;
2041		break;
2042	case 0x8:
2043		/* COMP7, bits[31:24] */
2044		config->ctxid_mask0 = val1;
2045		config->ctxid_mask1 = val2;
2046		break;
2047	default:
2048		break;
2049	}
2050	/*
2051	 * If software sets a mask bit to 1, it must program the relevant byte
2052	 * of the ctxid comparator value to 0x0, otherwise behavior is
2053	 * unpredictable. For example, if bit[3] of ctxid_mask0 is 1, we must
2054	 * clear bits[31:24] (byte 3) of the ctxid comparator 0 value register.
2055	 */
2056	mask = config->ctxid_mask0;
2057	for (i = 0; i < drvdata->numcidc; i++) {
2058		/* mask value of corresponding ctxid comparator */
2059		maskbyte = mask & ETMv4_EVENT_MASK;
2060		/*
2061		 * each bit corresponds to a byte of respective ctxid comparator
2062		 * value register
2063		 */
2064		for (j = 0; j < 8; j++) {
2065			if (maskbyte & 1)
2066				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
2067			maskbyte >>= 1;
2068		}
2069		/* Select the next ctxid comparator mask value */
2070		if (i == 3)
2071			/* ctxid comparators[4-7] */
2072			mask = config->ctxid_mask1;
2073		else
2074			mask >>= 0x8;
2075	}
2076
2077	spin_unlock(&drvdata->spinlock);
2078	return size;
2079}
2080static DEVICE_ATTR_RW(ctxid_masks);
2081
2082static ssize_t vmid_idx_show(struct device *dev,
2083			     struct device_attribute *attr,
2084			     char *buf)
2085{
2086	unsigned long val;
2087	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2088	struct etmv4_config *config = &drvdata->config;
2089
2090	val = config->vmid_idx;
2091	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2092}
2093
2094static ssize_t vmid_idx_store(struct device *dev,
2095			      struct device_attribute *attr,
2096			      const char *buf, size_t size)
2097{
2098	unsigned long val;
2099	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2100	struct etmv4_config *config = &drvdata->config;
2101
2102	if (kstrtoul(buf, 16, &val))
2103		return -EINVAL;
2104	if (val >= drvdata->numvmidc)
2105		return -EINVAL;
2106
2107	/*
2108	 * Use spinlock to ensure index doesn't change while it gets
2109	 * dereferenced multiple times within a spinlock block elsewhere.
2110	 */
2111	spin_lock(&drvdata->spinlock);
2112	config->vmid_idx = val;
2113	spin_unlock(&drvdata->spinlock);
2114	return size;
2115}
2116static DEVICE_ATTR_RW(vmid_idx);
2117
2118static ssize_t vmid_val_show(struct device *dev,
2119			     struct device_attribute *attr,
2120			     char *buf)
2121{
2122	unsigned long val;
2123	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2124	struct etmv4_config *config = &drvdata->config;
2125
2126	/*
2127	 * Don't use virtual contextID tracing if coming from a PID namespace.
2128	 * See comment in ctxid_pid_store().
2129	 */
2130	if (!task_is_in_init_pid_ns(current))
2131		return -EINVAL;
2132
2133	spin_lock(&drvdata->spinlock);
2134	val = (unsigned long)config->vmid_val[config->vmid_idx];
2135	spin_unlock(&drvdata->spinlock);
2136	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2137}
2138
2139static ssize_t vmid_val_store(struct device *dev,
2140			      struct device_attribute *attr,
2141			      const char *buf, size_t size)
2142{
2143	unsigned long val;
2144	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2145	struct etmv4_config *config = &drvdata->config;
2146
2147	/*
2148	 * Don't use virtual contextID tracing if coming from a PID namespace.
2149	 * See comment in ctxid_pid_store().
2150	 */
2151	if (!task_is_in_init_pid_ns(current))
2152		return -EINVAL;
2153
2154	/*
2155	 * Only implemented when vmid tracing is enabled, i.e. at least one
2156	 * vmid comparator is implemented and the vmid size is at least 8 bits.
2157	 */
2158	if (!drvdata->vmid_size || !drvdata->numvmidc)
2159		return -EINVAL;
2160	if (kstrtoul(buf, 16, &val))
2161		return -EINVAL;
2162
2163	spin_lock(&drvdata->spinlock);
2164	config->vmid_val[config->vmid_idx] = (u64)val;
2165	spin_unlock(&drvdata->spinlock);
2166	return size;
2167}
2168static DEVICE_ATTR_RW(vmid_val);
2169
2170static ssize_t vmid_masks_show(struct device *dev,
2171			       struct device_attribute *attr, char *buf)
2172{
2173	unsigned long val1, val2;
2174	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2175	struct etmv4_config *config = &drvdata->config;
2176
2177	/*
2178	 * Don't use virtual contextID tracing if coming from a PID namespace.
2179	 * See comment in ctxid_pid_store().
2180	 */
2181	if (!task_is_in_init_pid_ns(current))
2182		return -EINVAL;
2183
2184	spin_lock(&drvdata->spinlock);
2185	val1 = config->vmid_mask0;
2186	val2 = config->vmid_mask1;
2187	spin_unlock(&drvdata->spinlock);
2188	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2189}
2190
2191static ssize_t vmid_masks_store(struct device *dev,
2192				struct device_attribute *attr,
2193				const char *buf, size_t size)
2194{
2195	u8 i, j, maskbyte;
2196	unsigned long val1, val2, mask;
2197	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2198	struct etmv4_config *config = &drvdata->config;
2199	int nr_inputs;
2200
2201	/*
2202	 * Don't use virtual contextID tracing if coming from a PID namespace.
2203	 * See comment in ctxid_pid_store().
2204	 */
2205	if (!task_is_in_init_pid_ns(current))
2206		return -EINVAL;
2207
2208	/*
2209	 * Only implemented when vmid tracing is enabled, i.e. at least one
2210	 * vmid comparator is implemented and the vmid size is at least 8 bits.
2211	 */
2212	if (!drvdata->vmid_size || !drvdata->numvmidc)
2213		return -EINVAL;
2214	/* one mask if <= 4 comparators, two for up to 8 */
2215	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2216	if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
2217		return -EINVAL;
2218
2219	spin_lock(&drvdata->spinlock);
2220
2221	/*
2222	 * each byte[0..3] controls mask value applied to vmid
2223	 * comparator[0..3]
2224	 */
2225	switch (drvdata->numvmidc) {
2226	case 0x1:
2227		/* COMP0, bits[7:0] */
2228		config->vmid_mask0 = val1 & 0xFF;
2229		break;
2230	case 0x2:
2231		/* COMP1, bits[15:8] */
2232		config->vmid_mask0 = val1 & 0xFFFF;
2233		break;
2234	case 0x3:
2235		/* COMP2, bits[23:16] */
2236		config->vmid_mask0 = val1 & 0xFFFFFF;
2237		break;
2238	case 0x4:
2239		/* COMP3, bits[31:24] */
2240		config->vmid_mask0 = val1;
2241		break;
2242	case 0x5:
2243		/* COMP4, bits[7:0] */
2244		config->vmid_mask0 = val1;
2245		config->vmid_mask1 = val2 & 0xFF;
2246		break;
2247	case 0x6:
2248		/* COMP5, bits[15:8] */
2249		config->vmid_mask0 = val1;
2250		config->vmid_mask1 = val2 & 0xFFFF;
2251		break;
2252	case 0x7:
2253		/* COMP6, bits[23:16] */
2254		config->vmid_mask0 = val1;
2255		config->vmid_mask1 = val2 & 0xFFFFFF;
2256		break;
2257	case 0x8:
2258		/* COMP7, bits[31:24] */
2259		config->vmid_mask0 = val1;
2260		config->vmid_mask1 = val2;
2261		break;
2262	default:
2263		break;
2264	}
2265
2266	/*
2267	 * If software sets a mask bit to 1, it must program the relevant byte
2268	 * of the vmid comparator value to 0x0, otherwise behavior is
2269	 * unpredictable. For example, if bit[3] of vmid_mask0 is 1, we must
2270	 * clear bits[31:24] (byte 3) of the vmid comparator 0 value register.
2271	 */
2272	mask = config->vmid_mask0;
2273	for (i = 0; i < drvdata->numvmidc; i++) {
2274		/* mask value of corresponding vmid comparator */
2275		maskbyte = mask & ETMv4_EVENT_MASK;
2276		/*
2277		 * each bit corresponds to a byte of respective vmid comparator
2278		 * value register
2279		 */
2280		for (j = 0; j < 8; j++) {
2281			if (maskbyte & 1)
2282				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
2283			maskbyte >>= 1;
2284		}
2285		/* Select the next vmid comparator mask value */
2286		if (i == 3)
2287			/* vmid comparators[4-7] */
2288			mask = config->vmid_mask1;
2289		else
2290			mask >>= 0x8;
2291	}
2292	spin_unlock(&drvdata->spinlock);
2293	return size;
2294}
2295static DEVICE_ATTR_RW(vmid_masks);
2296
2297static ssize_t cpu_show(struct device *dev,
2298			struct device_attribute *attr, char *buf)
2299{
2300	int val;
2301	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2302
2303	val = drvdata->cpu;
2304	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2305
2306}
2307static DEVICE_ATTR_RO(cpu);
2308
2309static ssize_t ts_source_show(struct device *dev,
2310			      struct device_attribute *attr,
2311			      char *buf)
2312{
2313	int val;
2314	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2315
2316	if (!drvdata->trfcr) {
2317		val = -1;
2318		goto out;
2319	}
2320
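	/* Report the timestamp source encoded in TRFCR_ELx.TS; -1 means unknown or not set. */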
2321	switch (drvdata->trfcr & TRFCR_ELx_TS_MASK) {
2322	case TRFCR_ELx_TS_VIRTUAL:
2323	case TRFCR_ELx_TS_GUEST_PHYSICAL:
2324	case TRFCR_ELx_TS_PHYSICAL:
2325		val = FIELD_GET(TRFCR_ELx_TS_MASK, drvdata->trfcr);
2326		break;
2327	default:
2328		val = -1;
2329		break;
2330	}
2331
2332out:
2333	return sysfs_emit(buf, "%d\n", val);
2334}
2335static DEVICE_ATTR_RO(ts_source);
2336
2337static struct attribute *coresight_etmv4_attrs[] = {
2338	&dev_attr_nr_pe_cmp.attr,
2339	&dev_attr_nr_addr_cmp.attr,
2340	&dev_attr_nr_cntr.attr,
2341	&dev_attr_nr_ext_inp.attr,
2342	&dev_attr_numcidc.attr,
2343	&dev_attr_numvmidc.attr,
2344	&dev_attr_nrseqstate.attr,
2345	&dev_attr_nr_resource.attr,
2346	&dev_attr_nr_ss_cmp.attr,
2347	&dev_attr_reset.attr,
2348	&dev_attr_mode.attr,
2349	&dev_attr_pe.attr,
2350	&dev_attr_event.attr,
2351	&dev_attr_event_instren.attr,
2352	&dev_attr_event_ts.attr,
2353	&dev_attr_syncfreq.attr,
2354	&dev_attr_cyc_threshold.attr,
2355	&dev_attr_bb_ctrl.attr,
2356	&dev_attr_event_vinst.attr,
2357	&dev_attr_s_exlevel_vinst.attr,
2358	&dev_attr_ns_exlevel_vinst.attr,
2359	&dev_attr_addr_idx.attr,
2360	&dev_attr_addr_instdatatype.attr,
2361	&dev_attr_addr_single.attr,
2362	&dev_attr_addr_range.attr,
2363	&dev_attr_addr_start.attr,
2364	&dev_attr_addr_stop.attr,
2365	&dev_attr_addr_ctxtype.attr,
2366	&dev_attr_addr_context.attr,
2367	&dev_attr_addr_exlevel_s_ns.attr,
2368	&dev_attr_addr_cmp_view.attr,
2369	&dev_attr_vinst_pe_cmp_start_stop.attr,
2370	&dev_attr_sshot_idx.attr,
2371	&dev_attr_sshot_ctrl.attr,
2372	&dev_attr_sshot_pe_ctrl.attr,
2373	&dev_attr_sshot_status.attr,
2374	&dev_attr_seq_idx.attr,
2375	&dev_attr_seq_state.attr,
2376	&dev_attr_seq_event.attr,
2377	&dev_attr_seq_reset_event.attr,
2378	&dev_attr_cntr_idx.attr,
2379	&dev_attr_cntrldvr.attr,
2380	&dev_attr_cntr_val.attr,
2381	&dev_attr_cntr_ctrl.attr,
2382	&dev_attr_res_idx.attr,
2383	&dev_attr_res_ctrl.attr,
2384	&dev_attr_ctxid_idx.attr,
2385	&dev_attr_ctxid_pid.attr,
2386	&dev_attr_ctxid_masks.attr,
2387	&dev_attr_vmid_idx.attr,
2388	&dev_attr_vmid_val.attr,
2389	&dev_attr_vmid_masks.attr,
2390	&dev_attr_cpu.attr,
2391	&dev_attr_ts_source.attr,
2392	NULL,
2393};
2394
2395struct etmv4_reg {
2396	struct coresight_device *csdev;
2397	u32 offset;
2398	u32 data;
2399};
2400
2401static void do_smp_cross_read(void *data)
2402{
2403	struct etmv4_reg *reg = data;
2404
2405	reg->data = etm4x_relaxed_read32(&reg->csdev->access, reg->offset);
2406}
2407
2408static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
2409{
2410	struct etmv4_reg reg;
2411
2412	reg.offset = offset;
2413	reg.csdev = drvdata->csdev;
2414
2415	/*
2416	 * smp cross call ensures the CPU will be powered up before
2417	 * accessing the ETMv4 trace core registers
2418	 */
2419	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
2420	return reg.data;
2421}
2422
2423static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
2424{
2425	struct dev_ext_attribute *eattr;
2426
2427	eattr = container_of(attr, struct dev_ext_attribute, attr);
2428	return (u32)(unsigned long)eattr->var;
2429}
2430
2431static ssize_t coresight_etm4x_reg_show(struct device *dev,
2432					struct device_attribute *d_attr,
2433					char *buf)
2434{
2435	u32 val, offset;
2436	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2437
2438	offset = coresight_etm4x_attr_to_offset(d_attr);
2439
2440	pm_runtime_get_sync(dev->parent);
2441	val = etmv4_cross_read(drvdata, offset);
2442	pm_runtime_put_sync(dev->parent);
2443
2444	return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
2445}
2446
2447static inline bool
2448etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
2449{
2450	switch (offset) {
2451	ETM_COMMON_SYSREG_LIST_CASES
2452		/*
2453		 * Common registers to ETE & ETM4x accessible via system
2454		 * instructions are always implemented.
2455		 */
2456		return true;
2457
2458	ETM4x_ONLY_SYSREG_LIST_CASES
2459		/*
2460		 * We only support etm4x and ete. So if the device is not
2461		 * ETE, it must be ETMv4x.
2462		 */
2463		return !etm4x_is_ete(drvdata);
2464
2465	ETM4x_MMAP_LIST_CASES
2466		/*
2467		 * Registers accessible only via the memory-mapped interface
2468		 * must not be accessed via system instructions.
2469		 * We cannot use drvdata->csdev here, as this function is
2470		 * called during device creation, via coresight_register(),
2471		 * and the csdev is not initialized until that is done.
2472		 * So rely on drvdata->base to detect whether we have
2473		 * memory-mapped access.
2474		 * Also, ETE doesn't implement memory-mapped access, so it
2475		 * is sufficient to check that we are using mmio.
2476		 */
2477		return !!drvdata->base;
2478
2479	ETE_ONLY_SYSREG_LIST_CASES
2480		return etm4x_is_ete(drvdata);
2481	}
2482
2483	return false;
2484}
2485
2486/*
2487 * Hide the ETM4x registers that may not be available on the
2488 * hardware.
2489 * There are certain management registers unavailable via system
2490 * instructions. Make those sysfs attributes hidden on such
2491 * systems.
2492 */
2493static umode_t
2494coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
2495				     struct attribute *attr, int unused)
2496{
2497	struct device *dev = kobj_to_dev(kobj);
2498	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2499	struct device_attribute *d_attr;
2500	u32 offset;
2501
2502	d_attr = container_of(attr, struct device_attribute, attr);
2503	offset = coresight_etm4x_attr_to_offset(d_attr);
2504
2505	if (etm4x_register_implemented(drvdata, offset))
2506		return attr->mode;
2507	return 0;
2508}
2509
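/*
 * coresight_etm4x_reg() builds an anonymous dev_ext_attribute with the
 * register offset stashed in ->var, which coresight_etm4x_reg_show()
 * recovers through coresight_etm4x_attr_to_offset() above.
 */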
2510#define coresight_etm4x_reg(name, offset)				\
2511	&((struct dev_ext_attribute[]) {				\
2512	   {								\
2513		__ATTR(name, 0444, coresight_etm4x_reg_show, NULL),	\
2514		(void *)(unsigned long)offset				\
2515	   }								\
2516	})[0].attr.attr
2517
2518static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2519	coresight_etm4x_reg(trcpdcr, TRCPDCR),
2520	coresight_etm4x_reg(trcpdsr, TRCPDSR),
2521	coresight_etm4x_reg(trclsr, TRCLSR),
2522	coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
2523	coresight_etm4x_reg(trcdevid, TRCDEVID),
2524	coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
2525	coresight_etm4x_reg(trcpidr0, TRCPIDR0),
2526	coresight_etm4x_reg(trcpidr1, TRCPIDR1),
2527	coresight_etm4x_reg(trcpidr2, TRCPIDR2),
2528	coresight_etm4x_reg(trcpidr3, TRCPIDR3),
2529	coresight_etm4x_reg(trcoslsr, TRCOSLSR),
2530	coresight_etm4x_reg(trcconfig, TRCCONFIGR),
2531	coresight_etm4x_reg(trctraceid, TRCTRACEIDR),
2532	coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
2533	NULL,
2534};
2535
2536static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2537	coresight_etm4x_reg(trcidr0, TRCIDR0),
2538	coresight_etm4x_reg(trcidr1, TRCIDR1),
2539	coresight_etm4x_reg(trcidr2, TRCIDR2),
2540	coresight_etm4x_reg(trcidr3, TRCIDR3),
2541	coresight_etm4x_reg(trcidr4, TRCIDR4),
2542	coresight_etm4x_reg(trcidr5, TRCIDR5),
2543	/* trcidr[6,7] are reserved */
2544	coresight_etm4x_reg(trcidr8, TRCIDR8),
2545	coresight_etm4x_reg(trcidr9, TRCIDR9),
2546	coresight_etm4x_reg(trcidr10, TRCIDR10),
2547	coresight_etm4x_reg(trcidr11, TRCIDR11),
2548	coresight_etm4x_reg(trcidr12, TRCIDR12),
2549	coresight_etm4x_reg(trcidr13, TRCIDR13),
2550	NULL,
2551};
2552
2553static const struct attribute_group coresight_etmv4_group = {
2554	.attrs = coresight_etmv4_attrs,
2555};
2556
2557static const struct attribute_group coresight_etmv4_mgmt_group = {
2558	.is_visible = coresight_etm4x_attr_reg_implemented,
2559	.attrs = coresight_etmv4_mgmt_attrs,
2560	.name = "mgmt",
2561};
2562
2563static const struct attribute_group coresight_etmv4_trcidr_group = {
2564	.attrs = coresight_etmv4_trcidr_attrs,
2565	.name = "trcidr",
2566};
2567
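/*
 * Attribute groups exported under sysfs for each ETMv4/ETE device: the main
 * configuration attributes plus the "mgmt" and "trcidr" register groups
 * defined above.
 */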
2568const struct attribute_group *coresight_etmv4_groups[] = {
2569	&coresight_etmv4_group,
2570	&coresight_etmv4_mgmt_group,
2571	&coresight_etmv4_trcidr_group,
2572	NULL,
2573};
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
   4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
   5 */
   6
   7#include <linux/pid_namespace.h>
   8#include <linux/pm_runtime.h>
   9#include <linux/sysfs.h>
  10#include "coresight-etm4x.h"
  11#include "coresight-priv.h"
 
  12
  13static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
  14{
  15	u8 idx;
  16	struct etmv4_config *config = &drvdata->config;
  17
  18	idx = config->addr_idx;
  19
  20	/*
  21	 * TRCACATRn.TYPE bit[1:0]: type of comparison
  22	 * the trace unit performs
  23	 */
  24	if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
  25		if (idx % 2 != 0)
  26			return -EINVAL;
  27
  28		/*
  29		 * We are performing instruction address comparison. Set the
  30		 * relevant bit of ViewInst Include/Exclude Control register
  31		 * for corresponding address comparator pair.
  32		 */
  33		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
  34		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
  35			return -EINVAL;
  36
  37		if (exclude == true) {
  38			/*
  39			 * Set exclude bit and unset the include bit
  40			 * corresponding to comparator pair
  41			 */
  42			config->viiectlr |= BIT(idx / 2 + 16);
  43			config->viiectlr &= ~BIT(idx / 2);
  44		} else {
  45			/*
  46			 * Set include bit and unset exclude bit
  47			 * corresponding to comparator pair
  48			 */
  49			config->viiectlr |= BIT(idx / 2);
  50			config->viiectlr &= ~BIT(idx / 2 + 16);
  51		}
  52	}
  53	return 0;
  54}
  55
  56static ssize_t nr_pe_cmp_show(struct device *dev,
  57			      struct device_attribute *attr,
  58			      char *buf)
  59{
  60	unsigned long val;
  61	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  62
  63	val = drvdata->nr_pe_cmp;
  64	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  65}
  66static DEVICE_ATTR_RO(nr_pe_cmp);
  67
  68static ssize_t nr_addr_cmp_show(struct device *dev,
  69				struct device_attribute *attr,
  70				char *buf)
  71{
  72	unsigned long val;
  73	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  74
  75	val = drvdata->nr_addr_cmp;
  76	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  77}
  78static DEVICE_ATTR_RO(nr_addr_cmp);
  79
  80static ssize_t nr_cntr_show(struct device *dev,
  81			    struct device_attribute *attr,
  82			    char *buf)
  83{
  84	unsigned long val;
  85	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  86
  87	val = drvdata->nr_cntr;
  88	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  89}
  90static DEVICE_ATTR_RO(nr_cntr);
  91
  92static ssize_t nr_ext_inp_show(struct device *dev,
  93			       struct device_attribute *attr,
  94			       char *buf)
  95{
  96	unsigned long val;
  97	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  98
  99	val = drvdata->nr_ext_inp;
 100	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 101}
 102static DEVICE_ATTR_RO(nr_ext_inp);
 103
 104static ssize_t numcidc_show(struct device *dev,
 105			    struct device_attribute *attr,
 106			    char *buf)
 107{
 108	unsigned long val;
 109	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 110
 111	val = drvdata->numcidc;
 112	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 113}
 114static DEVICE_ATTR_RO(numcidc);
 115
 116static ssize_t numvmidc_show(struct device *dev,
 117			     struct device_attribute *attr,
 118			     char *buf)
 119{
 120	unsigned long val;
 121	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 122
 123	val = drvdata->numvmidc;
 124	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 125}
 126static DEVICE_ATTR_RO(numvmidc);
 127
 128static ssize_t nrseqstate_show(struct device *dev,
 129			       struct device_attribute *attr,
 130			       char *buf)
 131{
 132	unsigned long val;
 133	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 134
 135	val = drvdata->nrseqstate;
 136	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 137}
 138static DEVICE_ATTR_RO(nrseqstate);
 139
 140static ssize_t nr_resource_show(struct device *dev,
 141				struct device_attribute *attr,
 142				char *buf)
 143{
 144	unsigned long val;
 145	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 146
 147	val = drvdata->nr_resource;
 148	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 149}
 150static DEVICE_ATTR_RO(nr_resource);
 151
 152static ssize_t nr_ss_cmp_show(struct device *dev,
 153			      struct device_attribute *attr,
 154			      char *buf)
 155{
 156	unsigned long val;
 157	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 158
 159	val = drvdata->nr_ss_cmp;
 160	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 161}
 162static DEVICE_ATTR_RO(nr_ss_cmp);
 163
 164static ssize_t reset_store(struct device *dev,
 165			   struct device_attribute *attr,
 166			   const char *buf, size_t size)
 167{
 168	int i;
 169	unsigned long val;
 170	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 171	struct etmv4_config *config = &drvdata->config;
 172
 173	if (kstrtoul(buf, 16, &val))
 174		return -EINVAL;
 175
 176	spin_lock(&drvdata->spinlock);
 177	if (val)
 178		config->mode = 0x0;
 179
 180	/* Disable data tracing: do not trace load and store data transfers */
 181	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
 182	config->cfg &= ~(BIT(1) | BIT(2));
 183
 184	/* Disable data value and data address tracing */
 185	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
 186			   ETM_MODE_DATA_TRACE_VAL);
 187	config->cfg &= ~(BIT(16) | BIT(17));
 188
 189	/* Disable all events tracing */
 190	config->eventctrl0 = 0x0;
 191	config->eventctrl1 = 0x0;
 192
 193	/* Disable timestamp event */
 194	config->ts_ctrl = 0x0;
 195
 196	/* Disable stalling */
 197	config->stall_ctrl = 0x0;
 198
 199	/* Reset trace synchronization period  to 2^8 = 256 bytes*/
 200	if (drvdata->syncpr == false)
 201		config->syncfreq = 0x8;
 202
 203	/*
 204	 * Enable ViewInst to trace everything with start-stop logic in
 205	 * started state. ARM recommends start-stop logic is set before
 206	 * each trace run.
 207	 */
 208	config->vinst_ctrl |= BIT(0);
 209	if (drvdata->nr_addr_cmp == true) {
 210		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
 211		/* SSSTATUS, bit[9] */
 212		config->vinst_ctrl |= BIT(9);
 213	}
 214
 215	/* No address range filtering for ViewInst */
 216	config->viiectlr = 0x0;
 217
 218	/* No start-stop filtering for ViewInst */
 219	config->vissctlr = 0x0;
 
 220
 221	/* Disable seq events */
 222	for (i = 0; i < drvdata->nrseqstate-1; i++)
 223		config->seq_ctrl[i] = 0x0;
 224	config->seq_rst = 0x0;
 225	config->seq_state = 0x0;
 226
 227	/* Disable external input events */
 228	config->ext_inp = 0x0;
 229
 230	config->cntr_idx = 0x0;
 231	for (i = 0; i < drvdata->nr_cntr; i++) {
 232		config->cntrldvr[i] = 0x0;
 233		config->cntr_ctrl[i] = 0x0;
 234		config->cntr_val[i] = 0x0;
 235	}
 236
 237	config->res_idx = 0x0;
 238	for (i = 0; i < drvdata->nr_resource; i++)
 239		config->res_ctrl[i] = 0x0;
 240
 
 241	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
 242		config->ss_ctrl[i] = 0x0;
 243		config->ss_pe_cmp[i] = 0x0;
 244	}
 245
 246	config->addr_idx = 0x0;
 247	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
 248		config->addr_val[i] = 0x0;
 249		config->addr_acc[i] = 0x0;
 250		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
 251	}
 252
 253	config->ctxid_idx = 0x0;
 254	for (i = 0; i < drvdata->numcidc; i++)
 255		config->ctxid_pid[i] = 0x0;
 256
 257	config->ctxid_mask0 = 0x0;
 258	config->ctxid_mask1 = 0x0;
 259
 260	config->vmid_idx = 0x0;
 261	for (i = 0; i < drvdata->numvmidc; i++)
 262		config->vmid_val[i] = 0x0;
 263	config->vmid_mask0 = 0x0;
 264	config->vmid_mask1 = 0x0;
 265
 266	drvdata->trcid = drvdata->cpu + 1;
 267
 268	spin_unlock(&drvdata->spinlock);
 269
 
 
 270	return size;
 271}
 272static DEVICE_ATTR_WO(reset);
 273
 274static ssize_t mode_show(struct device *dev,
 275			 struct device_attribute *attr,
 276			 char *buf)
 277{
 278	unsigned long val;
 279	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 280	struct etmv4_config *config = &drvdata->config;
 281
 282	val = config->mode;
 283	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 284}
 285
 286static ssize_t mode_store(struct device *dev,
 287			  struct device_attribute *attr,
 288			  const char *buf, size_t size)
 289{
 290	unsigned long val, mode;
 291	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 292	struct etmv4_config *config = &drvdata->config;
 293
 294	if (kstrtoul(buf, 16, &val))
 295		return -EINVAL;
 296
 297	spin_lock(&drvdata->spinlock);
 298	config->mode = val & ETMv4_MODE_ALL;
 299	etm4_set_mode_exclude(drvdata,
 300			      config->mode & ETM_MODE_EXCLUDE ? true : false);
 301
 302	if (drvdata->instrp0 == true) {
 303		/* start by clearing instruction P0 field */
 304		config->cfg  &= ~(BIT(1) | BIT(2));
 305		if (config->mode & ETM_MODE_LOAD)
 306			/* 0b01 Trace load instructions as P0 instructions */
 307			config->cfg  |= BIT(1);
 308		if (config->mode & ETM_MODE_STORE)
 309			/* 0b10 Trace store instructions as P0 instructions */
 310			config->cfg  |= BIT(2);
 311		if (config->mode & ETM_MODE_LOAD_STORE)
 312			/*
 313			 * 0b11 Trace load and store instructions
 314			 * as P0 instructions
 315			 */
 316			config->cfg  |= BIT(1) | BIT(2);
 317	}
 318
 319	/* bit[3], Branch broadcast mode */
 320	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
 321		config->cfg |= BIT(3);
 322	else
 323		config->cfg &= ~BIT(3);
 324
 325	/* bit[4], Cycle counting instruction trace bit */
 326	if ((config->mode & ETMv4_MODE_CYCACC) &&
 327		(drvdata->trccci == true))
 328		config->cfg |= BIT(4);
 329	else
 330		config->cfg &= ~BIT(4);
 331
 332	/* bit[6], Context ID tracing bit */
 333	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
 334		config->cfg |= BIT(6);
 335	else
 336		config->cfg &= ~BIT(6);
 337
 338	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
 339		config->cfg |= BIT(7);
 340	else
 341		config->cfg &= ~BIT(7);
 342
 343	/* bits[10:8], Conditional instruction tracing bit */
 344	mode = ETM_MODE_COND(config->mode);
 345	if (drvdata->trccond == true) {
 346		config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
 347		config->cfg |= mode << 8;
 348	}
 349
 350	/* bit[11], Global timestamp tracing bit */
 351	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
 352		config->cfg |= BIT(11);
 353	else
 354		config->cfg &= ~BIT(11);
 355
 356	/* bit[12], Return stack enable bit */
 357	if ((config->mode & ETM_MODE_RETURNSTACK) &&
 358					(drvdata->retstack == true))
 359		config->cfg |= BIT(12);
 360	else
 361		config->cfg &= ~BIT(12);
 362
 363	/* bits[14:13], Q element enable field */
 364	mode = ETM_MODE_QELEM(config->mode);
 365	/* start by clearing QE bits */
 366	config->cfg &= ~(BIT(13) | BIT(14));
 367	/* if supported, Q elements with instruction counts are enabled */
 368	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
 369		config->cfg |= BIT(13);
 
 
 
 
 370	/*
 371	 * if supported, Q elements with and without instruction
 372	 * counts are enabled
 373	 */
 374	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
 375		config->cfg |= BIT(14);
 376
 377	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
 378	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
 379	    (drvdata->atbtrig == true))
 380		config->eventctrl1 |= BIT(11);
 381	else
 382		config->eventctrl1 &= ~BIT(11);
 383
 384	/* bit[12], Low-power state behavior override bit */
 385	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
 386	    (drvdata->lpoverride == true))
 387		config->eventctrl1 |= BIT(12);
 388	else
 389		config->eventctrl1 &= ~BIT(12);
 390
 391	/* bit[8], Instruction stall bit */
 392	if (config->mode & ETM_MODE_ISTALL_EN)
 393		config->stall_ctrl |= BIT(8);
 394	else
 395		config->stall_ctrl &= ~BIT(8);
 396
 397	/* bit[10], Prioritize instruction trace bit */
 398	if (config->mode & ETM_MODE_INSTPRIO)
 399		config->stall_ctrl |= BIT(10);
 400	else
 401		config->stall_ctrl &= ~BIT(10);
 402
 403	/* bit[13], Trace overflow prevention bit */
 404	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
 405		(drvdata->nooverflow == true))
 406		config->stall_ctrl |= BIT(13);
 407	else
 408		config->stall_ctrl &= ~BIT(13);
 409
 410	/* bit[9] Start/stop logic control bit */
 411	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
 412		config->vinst_ctrl |= BIT(9);
 413	else
 414		config->vinst_ctrl &= ~BIT(9);
 415
 416	/* bit[10], Whether a trace unit must trace a Reset exception */
 417	if (config->mode & ETM_MODE_TRACE_RESET)
 418		config->vinst_ctrl |= BIT(10);
 419	else
 420		config->vinst_ctrl &= ~BIT(10);
 421
 422	/* bit[11], Whether a trace unit must trace a system error exception */
 423	if ((config->mode & ETM_MODE_TRACE_ERR) &&
 424		(drvdata->trc_error == true))
 425		config->vinst_ctrl |= BIT(11);
 426	else
 427		config->vinst_ctrl &= ~BIT(11);
 428
 429	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
 430		etm4_config_trace_mode(config);
 431
 432	spin_unlock(&drvdata->spinlock);
 433
 434	return size;
 435}
 436static DEVICE_ATTR_RW(mode);
 437
 438static ssize_t pe_show(struct device *dev,
 439		       struct device_attribute *attr,
 440		       char *buf)
 441{
 442	unsigned long val;
 443	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 444	struct etmv4_config *config = &drvdata->config;
 445
 446	val = config->pe_sel;
 447	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 448}
 449
 450static ssize_t pe_store(struct device *dev,
 451			struct device_attribute *attr,
 452			const char *buf, size_t size)
 453{
 454	unsigned long val;
 455	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 456	struct etmv4_config *config = &drvdata->config;
 457
 458	if (kstrtoul(buf, 16, &val))
 459		return -EINVAL;
 460
 461	spin_lock(&drvdata->spinlock);
 462	if (val > drvdata->nr_pe) {
 463		spin_unlock(&drvdata->spinlock);
 464		return -EINVAL;
 465	}
 466
 467	config->pe_sel = val;
 468	spin_unlock(&drvdata->spinlock);
 469	return size;
 470}
 471static DEVICE_ATTR_RW(pe);
 472
 473static ssize_t event_show(struct device *dev,
 474			  struct device_attribute *attr,
 475			  char *buf)
 476{
 477	unsigned long val;
 478	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 479	struct etmv4_config *config = &drvdata->config;
 480
 481	val = config->eventctrl0;
 482	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 483}
 484
 485static ssize_t event_store(struct device *dev,
 486			   struct device_attribute *attr,
 487			   const char *buf, size_t size)
 488{
 489	unsigned long val;
 490	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 491	struct etmv4_config *config = &drvdata->config;
 492
 493	if (kstrtoul(buf, 16, &val))
 494		return -EINVAL;
 495
 496	spin_lock(&drvdata->spinlock);
 497	switch (drvdata->nr_event) {
 498	case 0x0:
 499		/* EVENT0, bits[7:0] */
 500		config->eventctrl0 = val & 0xFF;
 501		break;
 502	case 0x1:
 503		 /* EVENT1, bits[15:8] */
 504		config->eventctrl0 = val & 0xFFFF;
 505		break;
 506	case 0x2:
 507		/* EVENT2, bits[23:16] */
 508		config->eventctrl0 = val & 0xFFFFFF;
 509		break;
 510	case 0x3:
 511		/* EVENT3, bits[31:24] */
 512		config->eventctrl0 = val;
 513		break;
 514	default:
 515		break;
 516	}
 517	spin_unlock(&drvdata->spinlock);
 518	return size;
 519}
 520static DEVICE_ATTR_RW(event);
 521
 522static ssize_t event_instren_show(struct device *dev,
 523				  struct device_attribute *attr,
 524				  char *buf)
 525{
 526	unsigned long val;
 527	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 528	struct etmv4_config *config = &drvdata->config;
 529
 530	val = BMVAL(config->eventctrl1, 0, 3);
 531	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 532}
 533
 534static ssize_t event_instren_store(struct device *dev,
 535				   struct device_attribute *attr,
 536				   const char *buf, size_t size)
 537{
 538	unsigned long val;
 539	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 540	struct etmv4_config *config = &drvdata->config;
 541
 542	if (kstrtoul(buf, 16, &val))
 543		return -EINVAL;
 544
 545	spin_lock(&drvdata->spinlock);
 546	/* start by clearing all instruction event enable bits */
 547	config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
 548	switch (drvdata->nr_event) {
 549	case 0x0:
 550		/* generate Event element for event 1 */
 551		config->eventctrl1 |= val & BIT(1);
 552		break;
 553	case 0x1:
 554		/* generate Event element for event 1 and 2 */
 555		config->eventctrl1 |= val & (BIT(0) | BIT(1));
 556		break;
 557	case 0x2:
 558		/* generate Event element for event 1, 2 and 3 */
 559		config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
 
 
 560		break;
 561	case 0x3:
 562		/* generate Event element for all 4 events */
 563		config->eventctrl1 |= val & 0xF;
 
 
 
 564		break;
 565	default:
 566		break;
 567	}
 568	spin_unlock(&drvdata->spinlock);
 569	return size;
 570}
 571static DEVICE_ATTR_RW(event_instren);
 572
 573static ssize_t event_ts_show(struct device *dev,
 574			     struct device_attribute *attr,
 575			     char *buf)
 576{
 577	unsigned long val;
 578	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 579	struct etmv4_config *config = &drvdata->config;
 580
 581	val = config->ts_ctrl;
 582	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 583}
 584
 585static ssize_t event_ts_store(struct device *dev,
 586			      struct device_attribute *attr,
 587			      const char *buf, size_t size)
 588{
 589	unsigned long val;
 590	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 591	struct etmv4_config *config = &drvdata->config;
 592
 593	if (kstrtoul(buf, 16, &val))
 594		return -EINVAL;
 595	if (!drvdata->ts_size)
 596		return -EINVAL;
 597
 598	config->ts_ctrl = val & ETMv4_EVENT_MASK;
 599	return size;
 600}
 601static DEVICE_ATTR_RW(event_ts);
 602
 603static ssize_t syncfreq_show(struct device *dev,
 604			     struct device_attribute *attr,
 605			     char *buf)
 606{
 607	unsigned long val;
 608	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 609	struct etmv4_config *config = &drvdata->config;
 610
 611	val = config->syncfreq;
 612	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 613}
 614
 615static ssize_t syncfreq_store(struct device *dev,
 616			      struct device_attribute *attr,
 617			      const char *buf, size_t size)
 618{
 619	unsigned long val;
 620	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 621	struct etmv4_config *config = &drvdata->config;
 622
 623	if (kstrtoul(buf, 16, &val))
 624		return -EINVAL;
 625	if (drvdata->syncpr == true)
 626		return -EINVAL;
 627
 628	config->syncfreq = val & ETMv4_SYNC_MASK;
 629	return size;
 630}
 631static DEVICE_ATTR_RW(syncfreq);
 632
 633static ssize_t cyc_threshold_show(struct device *dev,
 634				  struct device_attribute *attr,
 635				  char *buf)
 636{
 637	unsigned long val;
 638	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 639	struct etmv4_config *config = &drvdata->config;
 640
 641	val = config->ccctlr;
 642	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 643}
 644
 645static ssize_t cyc_threshold_store(struct device *dev,
 646				   struct device_attribute *attr,
 647				   const char *buf, size_t size)
 648{
 649	unsigned long val;
 650	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 651	struct etmv4_config *config = &drvdata->config;
 652
 653	if (kstrtoul(buf, 16, &val))
 654		return -EINVAL;
 
 
 
 655	if (val < drvdata->ccitmin)
 656		return -EINVAL;
 657
 658	config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
 659	return size;
 660}
 661static DEVICE_ATTR_RW(cyc_threshold);
 662
 663static ssize_t bb_ctrl_show(struct device *dev,
 664			    struct device_attribute *attr,
 665			    char *buf)
 666{
 667	unsigned long val;
 668	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 669	struct etmv4_config *config = &drvdata->config;
 670
 671	val = config->bb_ctrl;
 672	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 673}
 674
 675static ssize_t bb_ctrl_store(struct device *dev,
 676			     struct device_attribute *attr,
 677			     const char *buf, size_t size)
 678{
 679	unsigned long val;
 680	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 681	struct etmv4_config *config = &drvdata->config;
 682
 683	if (kstrtoul(buf, 16, &val))
 684		return -EINVAL;
 685	if (drvdata->trcbb == false)
 686		return -EINVAL;
 687	if (!drvdata->nr_addr_cmp)
 688		return -EINVAL;
 
 689	/*
 690	 * Bit[7:0] selects which address range comparator is used for
 691	 * branch broadcast control.
 
 692	 */
 693	if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
 694		return -EINVAL;
 695
 696	config->bb_ctrl = val;
 697	return size;
 698}
 699static DEVICE_ATTR_RW(bb_ctrl);
 700
 701static ssize_t event_vinst_show(struct device *dev,
 702				struct device_attribute *attr,
 703				char *buf)
 704{
 705	unsigned long val;
 706	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 707	struct etmv4_config *config = &drvdata->config;
 708
 709	val = config->vinst_ctrl & ETMv4_EVENT_MASK;
 710	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 711}
 712
 713static ssize_t event_vinst_store(struct device *dev,
 714				 struct device_attribute *attr,
 715				 const char *buf, size_t size)
 716{
 717	unsigned long val;
 718	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 719	struct etmv4_config *config = &drvdata->config;
 720
 721	if (kstrtoul(buf, 16, &val))
 722		return -EINVAL;
 723
 724	spin_lock(&drvdata->spinlock);
 725	val &= ETMv4_EVENT_MASK;
 726	config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
 727	config->vinst_ctrl |= val;
 728	spin_unlock(&drvdata->spinlock);
 729	return size;
 730}
 731static DEVICE_ATTR_RW(event_vinst);
 732
 733static ssize_t s_exlevel_vinst_show(struct device *dev,
 734				    struct device_attribute *attr,
 735				    char *buf)
 736{
 737	unsigned long val;
 738	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 739	struct etmv4_config *config = &drvdata->config;
 740
 741	val = BMVAL(config->vinst_ctrl, 16, 19);
 742	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 743}
 744
 745static ssize_t s_exlevel_vinst_store(struct device *dev,
 746				     struct device_attribute *attr,
 747				     const char *buf, size_t size)
 748{
 749	unsigned long val;
 750	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 751	struct etmv4_config *config = &drvdata->config;
 752
 753	if (kstrtoul(buf, 16, &val))
 754		return -EINVAL;
 755
 756	spin_lock(&drvdata->spinlock);
 757	/* clear all EXLEVEL_S bits (bit[18] is never implemented) */
 758	config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
 759	/* enable instruction tracing for corresponding exception level */
 760	val &= drvdata->s_ex_level;
 761	config->vinst_ctrl |= (val << 16);
 762	spin_unlock(&drvdata->spinlock);
 763	return size;
 764}
 765static DEVICE_ATTR_RW(s_exlevel_vinst);
 766
 767static ssize_t ns_exlevel_vinst_show(struct device *dev,
 768				     struct device_attribute *attr,
 769				     char *buf)
 770{
 771	unsigned long val;
 772	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 773	struct etmv4_config *config = &drvdata->config;
 774
 775	/* EXLEVEL_NS, bits[23:20] */
 776	val = BMVAL(config->vinst_ctrl, 20, 23);
 777	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 778}
 779
 780static ssize_t ns_exlevel_vinst_store(struct device *dev,
 781				      struct device_attribute *attr,
 782				      const char *buf, size_t size)
 783{
 784	unsigned long val;
 785	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 786	struct etmv4_config *config = &drvdata->config;
 787
 788	if (kstrtoul(buf, 16, &val))
 789		return -EINVAL;
 790
 791	spin_lock(&drvdata->spinlock);
 792	/* clear EXLEVEL_NS bits (bit[23] is never implemented */
 793	config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
 794	/* enable instruction tracing for corresponding exception level */
 795	val &= drvdata->ns_ex_level;
 796	config->vinst_ctrl |= (val << 20);
 797	spin_unlock(&drvdata->spinlock);
 798	return size;
 799}
 800static DEVICE_ATTR_RW(ns_exlevel_vinst);
 801
 802static ssize_t addr_idx_show(struct device *dev,
 803			     struct device_attribute *attr,
 804			     char *buf)
 805{
 806	unsigned long val;
 807	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 808	struct etmv4_config *config = &drvdata->config;
 809
 810	val = config->addr_idx;
 811	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 812}
 813
 814static ssize_t addr_idx_store(struct device *dev,
 815			      struct device_attribute *attr,
 816			      const char *buf, size_t size)
 817{
 818	unsigned long val;
 819	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 820	struct etmv4_config *config = &drvdata->config;
 821
 822	if (kstrtoul(buf, 16, &val))
 823		return -EINVAL;
 824	if (val >= drvdata->nr_addr_cmp * 2)
 825		return -EINVAL;
 826
 827	/*
 828	 * Use spinlock to ensure index doesn't change while it gets
 829	 * dereferenced multiple times within a spinlock block elsewhere.
 830	 */
 831	spin_lock(&drvdata->spinlock);
 832	config->addr_idx = val;
 833	spin_unlock(&drvdata->spinlock);
 834	return size;
 835}
 836static DEVICE_ATTR_RW(addr_idx);
 837
 838static ssize_t addr_instdatatype_show(struct device *dev,
 839				      struct device_attribute *attr,
 840				      char *buf)
 841{
 842	ssize_t len;
 843	u8 val, idx;
 844	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 845	struct etmv4_config *config = &drvdata->config;
 846
 847	spin_lock(&drvdata->spinlock);
 848	idx = config->addr_idx;
 849	val = BMVAL(config->addr_acc[idx], 0, 1);
 850	len = scnprintf(buf, PAGE_SIZE, "%s\n",
 851			val == ETM_INSTR_ADDR ? "instr" :
 852			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
 853			(val == ETM_DATA_STORE_ADDR ? "data_store" :
 854			"data_load_store")));
 855	spin_unlock(&drvdata->spinlock);
 856	return len;
 857}
 858
 859static ssize_t addr_instdatatype_store(struct device *dev,
 860				       struct device_attribute *attr,
 861				       const char *buf, size_t size)
 862{
 863	u8 idx;
 864	char str[20] = "";
 865	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 866	struct etmv4_config *config = &drvdata->config;
 867
 868	if (strlen(buf) >= 20)
 869		return -EINVAL;
 870	if (sscanf(buf, "%s", str) != 1)
 871		return -EINVAL;
 872
 873	spin_lock(&drvdata->spinlock);
 874	idx = config->addr_idx;
 875	if (!strcmp(str, "instr"))
 876		/* TYPE, bits[1:0] */
 877		config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
 878
 879	spin_unlock(&drvdata->spinlock);
 880	return size;
 881}
 882static DEVICE_ATTR_RW(addr_instdatatype);
 883
 884static ssize_t addr_single_show(struct device *dev,
 885				struct device_attribute *attr,
 886				char *buf)
 887{
 888	u8 idx;
 889	unsigned long val;
 890	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 891	struct etmv4_config *config = &drvdata->config;
 892
 893	idx = config->addr_idx;
 894	spin_lock(&drvdata->spinlock);
 895	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
 896	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
 897		spin_unlock(&drvdata->spinlock);
 898		return -EPERM;
 899	}
 900	val = (unsigned long)config->addr_val[idx];
 901	spin_unlock(&drvdata->spinlock);
 902	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 903}
 904
 905static ssize_t addr_single_store(struct device *dev,
 906				 struct device_attribute *attr,
 907				 const char *buf, size_t size)
 908{
 909	u8 idx;
 910	unsigned long val;
 911	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 912	struct etmv4_config *config = &drvdata->config;
 913
 914	if (kstrtoul(buf, 16, &val))
 915		return -EINVAL;
 916
 917	spin_lock(&drvdata->spinlock);
 918	idx = config->addr_idx;
 919	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
 920	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
 921		spin_unlock(&drvdata->spinlock);
 922		return -EPERM;
 923	}
 924
 925	config->addr_val[idx] = (u64)val;
 926	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
 927	spin_unlock(&drvdata->spinlock);
 928	return size;
 929}
 930static DEVICE_ATTR_RW(addr_single);
 931
 932static ssize_t addr_range_show(struct device *dev,
 933			       struct device_attribute *attr,
 934			       char *buf)
 935{
 936	u8 idx;
 937	unsigned long val1, val2;
 938	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 939	struct etmv4_config *config = &drvdata->config;
 940
 941	spin_lock(&drvdata->spinlock);
 942	idx = config->addr_idx;
 943	if (idx % 2 != 0) {
 944		spin_unlock(&drvdata->spinlock);
 945		return -EPERM;
 946	}
 947	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
 948	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
 949	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
 950	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
 951		spin_unlock(&drvdata->spinlock);
 952		return -EPERM;
 953	}
 954
 955	val1 = (unsigned long)config->addr_val[idx];
 956	val2 = (unsigned long)config->addr_val[idx + 1];
 957	spin_unlock(&drvdata->spinlock);
 958	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
 959}
 960
 961static ssize_t addr_range_store(struct device *dev,
 962				struct device_attribute *attr,
 963				const char *buf, size_t size)
 964{
 965	u8 idx;
 966	unsigned long val1, val2;
 967	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 968	struct etmv4_config *config = &drvdata->config;
 
 
 
 969
 970	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
 
 971		return -EINVAL;
 972	/* lower address comparator cannot have a higher address value */
 973	if (val1 > val2)
 974		return -EINVAL;
 975
 976	spin_lock(&drvdata->spinlock);
 977	idx = config->addr_idx;
 978	if (idx % 2 != 0) {
 979		spin_unlock(&drvdata->spinlock);
 980		return -EPERM;
 981	}
 982
 983	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
 984	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
 985	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
 986	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
 987		spin_unlock(&drvdata->spinlock);
 988		return -EPERM;
 989	}
 990
 991	config->addr_val[idx] = (u64)val1;
 992	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
 993	config->addr_val[idx + 1] = (u64)val2;
 994	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
 995	/*
 996	 * Program include or exclude control bits for vinst or vdata
 997	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
 
 998	 */
 999	etm4_set_mode_exclude(drvdata,
1000			      config->mode & ETM_MODE_EXCLUDE ? true : false);
 
1001
1002	spin_unlock(&drvdata->spinlock);
1003	return size;
1004}
1005static DEVICE_ATTR_RW(addr_range);
1006
1007static ssize_t addr_start_show(struct device *dev,
1008			       struct device_attribute *attr,
1009			       char *buf)
1010{
1011	u8 idx;
1012	unsigned long val;
1013	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1014	struct etmv4_config *config = &drvdata->config;
1015
1016	spin_lock(&drvdata->spinlock);
1017	idx = config->addr_idx;
1018
1019	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1020	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1021		spin_unlock(&drvdata->spinlock);
1022		return -EPERM;
1023	}
1024
1025	val = (unsigned long)config->addr_val[idx];
1026	spin_unlock(&drvdata->spinlock);
1027	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1028}
1029
1030static ssize_t addr_start_store(struct device *dev,
1031				struct device_attribute *attr,
1032				const char *buf, size_t size)
1033{
1034	u8 idx;
1035	unsigned long val;
1036	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1037	struct etmv4_config *config = &drvdata->config;
1038
1039	if (kstrtoul(buf, 16, &val))
1040		return -EINVAL;
1041
1042	spin_lock(&drvdata->spinlock);
1043	idx = config->addr_idx;
1044	if (!drvdata->nr_addr_cmp) {
1045		spin_unlock(&drvdata->spinlock);
1046		return -EINVAL;
1047	}
1048	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1049	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1050		spin_unlock(&drvdata->spinlock);
1051		return -EPERM;
1052	}
1053
1054	config->addr_val[idx] = (u64)val;
1055	config->addr_type[idx] = ETM_ADDR_TYPE_START;
1056	config->vissctlr |= BIT(idx);
1057	/* SSSTATUS, bit[9] - turn on start/stop logic */
1058	config->vinst_ctrl |= BIT(9);
1059	spin_unlock(&drvdata->spinlock);
1060	return size;
1061}
1062static DEVICE_ATTR_RW(addr_start);
1063
1064static ssize_t addr_stop_show(struct device *dev,
1065			      struct device_attribute *attr,
1066			      char *buf)
1067{
1068	u8 idx;
1069	unsigned long val;
1070	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1071	struct etmv4_config *config = &drvdata->config;
1072
1073	spin_lock(&drvdata->spinlock);
1074	idx = config->addr_idx;
1075
1076	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1077	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1078		spin_unlock(&drvdata->spinlock);
1079		return -EPERM;
1080	}
1081
1082	val = (unsigned long)config->addr_val[idx];
1083	spin_unlock(&drvdata->spinlock);
1084	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1085}
1086
1087static ssize_t addr_stop_store(struct device *dev,
1088			       struct device_attribute *attr,
1089			       const char *buf, size_t size)
1090{
1091	u8 idx;
1092	unsigned long val;
1093	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1094	struct etmv4_config *config = &drvdata->config;
1095
1096	if (kstrtoul(buf, 16, &val))
1097		return -EINVAL;
1098
1099	spin_lock(&drvdata->spinlock);
1100	idx = config->addr_idx;
1101	if (!drvdata->nr_addr_cmp) {
1102		spin_unlock(&drvdata->spinlock);
1103		return -EINVAL;
1104	}
1105	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1106	       config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1107		spin_unlock(&drvdata->spinlock);
1108		return -EPERM;
1109	}
1110
1111	config->addr_val[idx] = (u64)val;
1112	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1113	config->vissctlr |= BIT(idx + 16);
1114	/* SSSTATUS, bit[9] - turn on start/stop logic */
1115	config->vinst_ctrl |= BIT(9);
1116	spin_unlock(&drvdata->spinlock);
1117	return size;
1118}
1119static DEVICE_ATTR_RW(addr_stop);
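/*
 * addr_start and addr_stop both act on the comparator selected by addr_idx:
 * the former marks it as a start point (vissctlr bit[idx]), the latter as a
 * stop point (vissctlr bit[idx + 16]), and either one enables the ViewInst
 * start/stop logic by setting SSSTATUS (vinst_ctrl bit[9]).
 */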
1120
1121static ssize_t addr_ctxtype_show(struct device *dev,
1122				 struct device_attribute *attr,
1123				 char *buf)
1124{
1125	ssize_t len;
1126	u8 idx, val;
1127	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1128	struct etmv4_config *config = &drvdata->config;
1129
1130	spin_lock(&drvdata->spinlock);
1131	idx = config->addr_idx;
1132	/* CONTEXTTYPE, bits[3:2] */
1133	val = BMVAL(config->addr_acc[idx], 2, 3);
1134	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1135			(val == ETM_CTX_CTXID ? "ctxid" :
1136			(val == ETM_CTX_VMID ? "vmid" : "all")));
1137	spin_unlock(&drvdata->spinlock);
1138	return len;
1139}
1140
1141static ssize_t addr_ctxtype_store(struct device *dev,
1142				  struct device_attribute *attr,
1143				  const char *buf, size_t size)
1144{
1145	u8 idx;
1146	char str[10] = "";
1147	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1148	struct etmv4_config *config = &drvdata->config;
1149
1150	if (strlen(buf) >= 10)
1151		return -EINVAL;
1152	if (sscanf(buf, "%s", str) != 1)
1153		return -EINVAL;
1154
1155	spin_lock(&drvdata->spinlock);
1156	idx = config->addr_idx;
1157	if (!strcmp(str, "none"))
1158		/* start by clearing context type bits */
1159		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1160	else if (!strcmp(str, "ctxid")) {
1161		/* 0b01 The trace unit performs a Context ID */
1162		if (drvdata->numcidc) {
1163			config->addr_acc[idx] |= BIT(2);
1164			config->addr_acc[idx] &= ~BIT(3);
1165		}
1166	} else if (!strcmp(str, "vmid")) {
1167		/* 0b10 The trace unit performs a VMID */
1168		if (drvdata->numvmidc) {
1169			config->addr_acc[idx] &= ~BIT(2);
1170			config->addr_acc[idx] |= BIT(3);
1171		}
1172	} else if (!strcmp(str, "all")) {
1173		/*
1174		 * 0b11 The trace unit performs a Context ID
1175		 * comparison and a VMID
1176		 */
1177		if (drvdata->numcidc)
1178			config->addr_acc[idx] |= BIT(2);
1179		if (drvdata->numvmidc)
1180			config->addr_acc[idx] |= BIT(3);
1181	}
1182	spin_unlock(&drvdata->spinlock);
1183	return size;
1184}
1185static DEVICE_ATTR_RW(addr_ctxtype);
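/*
 * addr_ctxtype accepts the strings "none", "ctxid", "vmid" or "all" and maps
 * them onto CONTEXTTYPE bits[3:2] of the selected address comparator access
 * register; a request for a comparator type that is not implemented
 * (numcidc/numvmidc == 0) is silently ignored.
 */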
1186
1187static ssize_t addr_context_show(struct device *dev,
1188				 struct device_attribute *attr,
1189				 char *buf)
1190{
1191	u8 idx;
1192	unsigned long val;
1193	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1194	struct etmv4_config *config = &drvdata->config;
1195
1196	spin_lock(&drvdata->spinlock);
1197	idx = config->addr_idx;
1198	/* context ID comparator bits[6:4] */
1199	val = BMVAL(config->addr_acc[idx], 4, 6);
1200	spin_unlock(&drvdata->spinlock);
1201	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1202}
1203
1204static ssize_t addr_context_store(struct device *dev,
1205				  struct device_attribute *attr,
1206				  const char *buf, size_t size)
1207{
1208	u8 idx;
1209	unsigned long val;
1210	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1211	struct etmv4_config *config = &drvdata->config;
1212
1213	if (kstrtoul(buf, 16, &val))
1214		return -EINVAL;
1215	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1216		return -EINVAL;
1217	if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1218		    drvdata->numcidc : drvdata->numvmidc))
1219		return -EINVAL;
1220
1221	spin_lock(&drvdata->spinlock);
1222	idx = config->addr_idx;
1223	/* clear context ID comparator bits[6:4] */
1224	config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1225	config->addr_acc[idx] |= (val << 4);
1226	spin_unlock(&drvdata->spinlock);
1227	return size;
1228}
1229static DEVICE_ATTR_RW(addr_context);
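/*
 * addr_context links the selected address comparator to one of the context
 * ID / VMID comparators by writing its number into bits[6:4] of the access
 * register; the value must be below max(numcidc, numvmidc) and more than one
 * comparator of at least one kind must be implemented.
 */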
1230
1231static ssize_t seq_idx_show(struct device *dev,
1232			    struct device_attribute *attr,
1233			    char *buf)
1234{
1235	unsigned long val;
1236	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1237	struct etmv4_config *config = &drvdata->config;
1238
1239	val = config->seq_idx;
1240	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1241}
1242
1243static ssize_t seq_idx_store(struct device *dev,
1244			     struct device_attribute *attr,
1245			     const char *buf, size_t size)
1246{
1247	unsigned long val;
1248	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1249	struct etmv4_config *config = &drvdata->config;
1250
1251	if (kstrtoul(buf, 16, &val))
1252		return -EINVAL;
1253	if (val >= drvdata->nrseqstate - 1)
1254		return -EINVAL;
1255
1256	/*
1257	 * Use spinlock to ensure index doesn't change while it gets
1258	 * dereferenced multiple times within a spinlock block elsewhere.
1259	 */
1260	spin_lock(&drvdata->spinlock);
1261	config->seq_idx = val;
1262	spin_unlock(&drvdata->spinlock);
1263	return size;
1264}
1265static DEVICE_ATTR_RW(seq_idx);
1266
1267static ssize_t seq_state_show(struct device *dev,
1268			      struct device_attribute *attr,
1269			      char *buf)
1270{
1271	unsigned long val;
1272	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1273	struct etmv4_config *config = &drvdata->config;
1274
1275	val = config->seq_state;
1276	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1277}
1278
1279static ssize_t seq_state_store(struct device *dev,
1280			       struct device_attribute *attr,
1281			       const char *buf, size_t size)
1282{
1283	unsigned long val;
1284	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1285	struct etmv4_config *config = &drvdata->config;
1286
1287	if (kstrtoul(buf, 16, &val))
1288		return -EINVAL;
1289	if (val >= drvdata->nrseqstate)
1290		return -EINVAL;
1291
1292	config->seq_state = val;
1293	return size;
1294}
1295static DEVICE_ATTR_RW(seq_state);
1296
1297static ssize_t seq_event_show(struct device *dev,
1298			      struct device_attribute *attr,
1299			      char *buf)
1300{
1301	u8 idx;
1302	unsigned long val;
1303	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1304	struct etmv4_config *config = &drvdata->config;
1305
1306	spin_lock(&drvdata->spinlock);
1307	idx = config->seq_idx;
1308	val = config->seq_ctrl[idx];
1309	spin_unlock(&drvdata->spinlock);
1310	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1311}
1312
1313static ssize_t seq_event_store(struct device *dev,
1314			       struct device_attribute *attr,
1315			       const char *buf, size_t size)
1316{
1317	u8 idx;
1318	unsigned long val;
1319	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1320	struct etmv4_config *config = &drvdata->config;
1321
1322	if (kstrtoul(buf, 16, &val))
1323		return -EINVAL;
1324
1325	spin_lock(&drvdata->spinlock);
1326	idx = config->seq_idx;
1327	/* RST, bits[7:0] */
1328	config->seq_ctrl[idx] = val & 0xFF;
1329	spin_unlock(&drvdata->spinlock);
1330	return size;
1331}
1332static DEVICE_ATTR_RW(seq_event);
1333
1334static ssize_t seq_reset_event_show(struct device *dev,
1335				    struct device_attribute *attr,
1336				    char *buf)
1337{
1338	unsigned long val;
1339	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1340	struct etmv4_config *config = &drvdata->config;
1341
1342	val = config->seq_rst;
1343	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1344}
1345
1346static ssize_t seq_reset_event_store(struct device *dev,
1347				     struct device_attribute *attr,
1348				     const char *buf, size_t size)
1349{
1350	unsigned long val;
1351	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1352	struct etmv4_config *config = &drvdata->config;
1353
1354	if (kstrtoul(buf, 16, &val))
1355		return -EINVAL;
1356	if (!(drvdata->nrseqstate))
1357		return -EINVAL;
1358
1359	config->seq_rst = val & ETMv4_EVENT_MASK;
1360	return size;
1361}
1362static DEVICE_ATTR_RW(seq_reset_event);
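/*
 * Sequencer programming model as exposed above: seq_idx selects which of the
 * nrseqstate - 1 transition event slots seq_event programs, seq_state sets
 * the sequencer's current state, and seq_reset_event selects the event that
 * returns the sequencer to state 0.
 */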
1363
1364static ssize_t cntr_idx_show(struct device *dev,
1365			     struct device_attribute *attr,
1366			     char *buf)
1367{
1368	unsigned long val;
1369	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1370	struct etmv4_config *config = &drvdata->config;
1371
1372	val = config->cntr_idx;
1373	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1374}
1375
1376static ssize_t cntr_idx_store(struct device *dev,
1377			      struct device_attribute *attr,
1378			      const char *buf, size_t size)
1379{
1380	unsigned long val;
1381	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1382	struct etmv4_config *config = &drvdata->config;
1383
1384	if (kstrtoul(buf, 16, &val))
1385		return -EINVAL;
1386	if (val >= drvdata->nr_cntr)
1387		return -EINVAL;
1388
1389	/*
1390	 * Use spinlock to ensure index doesn't change while it gets
1391	 * dereferenced multiple times within a spinlock block elsewhere.
1392	 */
1393	spin_lock(&drvdata->spinlock);
1394	config->cntr_idx = val;
1395	spin_unlock(&drvdata->spinlock);
1396	return size;
1397}
1398static DEVICE_ATTR_RW(cntr_idx);
1399
1400static ssize_t cntrldvr_show(struct device *dev,
1401			     struct device_attribute *attr,
1402			     char *buf)
1403{
1404	u8 idx;
1405	unsigned long val;
1406	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1407	struct etmv4_config *config = &drvdata->config;
1408
1409	spin_lock(&drvdata->spinlock);
1410	idx = config->cntr_idx;
1411	val = config->cntrldvr[idx];
1412	spin_unlock(&drvdata->spinlock);
1413	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1414}
1415
1416static ssize_t cntrldvr_store(struct device *dev,
1417			      struct device_attribute *attr,
1418			      const char *buf, size_t size)
1419{
1420	u8 idx;
1421	unsigned long val;
1422	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1423	struct etmv4_config *config = &drvdata->config;
1424
1425	if (kstrtoul(buf, 16, &val))
1426		return -EINVAL;
1427	if (val > ETM_CNTR_MAX_VAL)
1428		return -EINVAL;
1429
1430	spin_lock(&drvdata->spinlock);
1431	idx = config->cntr_idx;
1432	config->cntrldvr[idx] = val;
1433	spin_unlock(&drvdata->spinlock);
1434	return size;
1435}
1436static DEVICE_ATTR_RW(cntrldvr);
1437
1438static ssize_t cntr_val_show(struct device *dev,
1439			     struct device_attribute *attr,
1440			     char *buf)
1441{
1442	u8 idx;
1443	unsigned long val;
1444	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1445	struct etmv4_config *config = &drvdata->config;
1446
1447	spin_lock(&drvdata->spinlock);
1448	idx = config->cntr_idx;
1449	val = config->cntr_val[idx];
1450	spin_unlock(&drvdata->spinlock);
1451	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1452}
1453
1454static ssize_t cntr_val_store(struct device *dev,
1455			      struct device_attribute *attr,
1456			      const char *buf, size_t size)
1457{
1458	u8 idx;
1459	unsigned long val;
1460	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1461	struct etmv4_config *config = &drvdata->config;
1462
1463	if (kstrtoul(buf, 16, &val))
1464		return -EINVAL;
1465	if (val > ETM_CNTR_MAX_VAL)
1466		return -EINVAL;
1467
1468	spin_lock(&drvdata->spinlock);
1469	idx = config->cntr_idx;
1470	config->cntr_val[idx] = val;
1471	spin_unlock(&drvdata->spinlock);
1472	return size;
1473}
1474static DEVICE_ATTR_RW(cntr_val);
1475
1476static ssize_t cntr_ctrl_show(struct device *dev,
1477			      struct device_attribute *attr,
1478			      char *buf)
1479{
1480	u8 idx;
1481	unsigned long val;
1482	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1483	struct etmv4_config *config = &drvdata->config;
1484
1485	spin_lock(&drvdata->spinlock);
1486	idx = config->cntr_idx;
1487	val = config->cntr_ctrl[idx];
1488	spin_unlock(&drvdata->spinlock);
1489	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1490}
1491
1492static ssize_t cntr_ctrl_store(struct device *dev,
1493			       struct device_attribute *attr,
1494			       const char *buf, size_t size)
1495{
1496	u8 idx;
1497	unsigned long val;
1498	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1499	struct etmv4_config *config = &drvdata->config;
1500
1501	if (kstrtoul(buf, 16, &val))
1502		return -EINVAL;
1503
1504	spin_lock(&drvdata->spinlock);
1505	idx = config->cntr_idx;
1506	config->cntr_ctrl[idx] = val;
1507	spin_unlock(&drvdata->spinlock);
1508	return size;
1509}
1510static DEVICE_ATTR_RW(cntr_ctrl);
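/*
 * Counter programming model as exposed above: cntr_idx selects one of the
 * nr_cntr counters, cntrldvr holds its reload value, cntr_val its current
 * value (both capped at ETM_CNTR_MAX_VAL), and cntr_ctrl its control
 * register contents.
 */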
1511
1512static ssize_t res_idx_show(struct device *dev,
1513			    struct device_attribute *attr,
1514			    char *buf)
1515{
1516	unsigned long val;
1517	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1518	struct etmv4_config *config = &drvdata->config;
1519
1520	val = config->res_idx;
1521	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1522}
1523
1524static ssize_t res_idx_store(struct device *dev,
1525			     struct device_attribute *attr,
1526			     const char *buf, size_t size)
1527{
1528	unsigned long val;
1529	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1530	struct etmv4_config *config = &drvdata->config;
1531
1532	if (kstrtoul(buf, 16, &val))
1533		return -EINVAL;
1534	/* Resource selector pair 0 is always implemented and reserved */
1535	if ((val == 0) || (val >= drvdata->nr_resource))
1536		return -EINVAL;
1537
1538	/*
1539	 * Use spinlock to ensure index doesn't change while it gets
1540	 * dereferenced multiple times within a spinlock block elsewhere.
1541	 */
1542	spin_lock(&drvdata->spinlock);
1543	config->res_idx = val;
1544	spin_unlock(&drvdata->spinlock);
1545	return size;
1546}
1547static DEVICE_ATTR_RW(res_idx);
1548
1549static ssize_t res_ctrl_show(struct device *dev,
1550			     struct device_attribute *attr,
1551			     char *buf)
1552{
1553	u8 idx;
1554	unsigned long val;
1555	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1556	struct etmv4_config *config = &drvdata->config;
1557
1558	spin_lock(&drvdata->spinlock);
1559	idx = config->res_idx;
1560	val = config->res_ctrl[idx];
1561	spin_unlock(&drvdata->spinlock);
1562	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1563}
1564
1565static ssize_t res_ctrl_store(struct device *dev,
1566			      struct device_attribute *attr,
1567			      const char *buf, size_t size)
1568{
1569	u8 idx;
1570	unsigned long val;
1571	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1572	struct etmv4_config *config = &drvdata->config;
1573
1574	if (kstrtoul(buf, 16, &val))
1575		return -EINVAL;
1576
1577	spin_lock(&drvdata->spinlock);
1578	idx = config->res_idx;
1579	/* For an odd idx the pair inversion bit is RES0 */
1580	if (idx % 2 != 0)
1581		/* PAIRINV, bit[21] */
1582		val &= ~BIT(21);
1583	config->res_ctrl[idx] = val;
1584	spin_unlock(&drvdata->spinlock);
1585	return size;
1586}
1587static DEVICE_ATTR_RW(res_ctrl);
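/*
 * Note on the resource selectors: resource selector pair 0 is reserved, so
 * res_idx rejects 0 and anything at or above nr_resource, and for
 * odd-numbered selectors the PAIRINV bit (bit[21]) is forced to zero before
 * the value is written to res_ctrl.
 */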
1588
1589static ssize_t ctxid_idx_show(struct device *dev,
1590			      struct device_attribute *attr,
1591			      char *buf)
1592{
1593	unsigned long val;
1594	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1595	struct etmv4_config *config = &drvdata->config;
1596
1597	val = config->ctxid_idx;
1598	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1599}
1600
1601static ssize_t ctxid_idx_store(struct device *dev,
1602			       struct device_attribute *attr,
1603			       const char *buf, size_t size)
1604{
1605	unsigned long val;
1606	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1607	struct etmv4_config *config = &drvdata->config;
1608
1609	if (kstrtoul(buf, 16, &val))
1610		return -EINVAL;
1611	if (val >= drvdata->numcidc)
1612		return -EINVAL;
1613
1614	/*
1615	 * Use spinlock to ensure index doesn't change while it gets
1616	 * dereferenced multiple times within a spinlock block elsewhere.
1617	 */
1618	spin_lock(&drvdata->spinlock);
1619	config->ctxid_idx = val;
1620	spin_unlock(&drvdata->spinlock);
1621	return size;
1622}
1623static DEVICE_ATTR_RW(ctxid_idx);
1624
1625static ssize_t ctxid_pid_show(struct device *dev,
1626			      struct device_attribute *attr,
1627			      char *buf)
1628{
1629	u8 idx;
1630	unsigned long val;
1631	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1632	struct etmv4_config *config = &drvdata->config;
1633
1634	/*
1635	 * Don't use contextID tracing if coming from a PID namespace.  See
1636	 * comment in ctxid_pid_store().
1637	 */
1638	if (task_active_pid_ns(current) != &init_pid_ns)
1639		return -EINVAL;
1640
1641	spin_lock(&drvdata->spinlock);
1642	idx = config->ctxid_idx;
1643	val = (unsigned long)config->ctxid_pid[idx];
1644	spin_unlock(&drvdata->spinlock);
1645	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1646}
1647
1648static ssize_t ctxid_pid_store(struct device *dev,
1649			       struct device_attribute *attr,
1650			       const char *buf, size_t size)
1651{
1652	u8 idx;
1653	unsigned long pid;
1654	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1655	struct etmv4_config *config = &drvdata->config;
1656
1657	/*
1658	 * When contextID tracing is enabled the tracers will insert the
1659	 * value found in the contextID register in the trace stream.  But if
1660	 * a process is in a namespace the PID of that process as seen from the
1661	 * namespace won't be what the kernel sees, something that makes the
1662	 * feature confusing and can potentially leak kernel only information.
1663	 * As such refuse to use the feature if @current is not in the initial
1664	 * PID namespace.
1665	 */
1666	if (task_active_pid_ns(current) != &init_pid_ns)
1667		return -EINVAL;
1668
1669	/*
1670	 * only implemented when ctxid tracing is enabled, i.e. at least one
1671	 * ctxid comparator is implemented and ctxid is greater than 0 bits
1672	 * in length
1673	 */
1674	if (!drvdata->ctxid_size || !drvdata->numcidc)
1675		return -EINVAL;
1676	if (kstrtoul(buf, 16, &pid))
1677		return -EINVAL;
1678
1679	spin_lock(&drvdata->spinlock);
1680	idx = config->ctxid_idx;
1681	config->ctxid_pid[idx] = (u64)pid;
1682	spin_unlock(&drvdata->spinlock);
1683	return size;
1684}
1685static DEVICE_ATTR_RW(ctxid_pid);
1686
1687static ssize_t ctxid_masks_show(struct device *dev,
1688				struct device_attribute *attr,
1689				char *buf)
1690{
1691	unsigned long val1, val2;
1692	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1693	struct etmv4_config *config = &drvdata->config;
1694
1695	/*
1696	 * Don't use contextID tracing if coming from a PID namespace.  See
1697	 * comment in ctxid_pid_store().
1698	 */
1699	if (task_active_pid_ns(current) != &init_pid_ns)
1700		return -EINVAL;
1701
1702	spin_lock(&drvdata->spinlock);
1703	val1 = config->ctxid_mask0;
1704	val2 = config->ctxid_mask1;
1705	spin_unlock(&drvdata->spinlock);
1706	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1707}
1708
1709static ssize_t ctxid_masks_store(struct device *dev,
1710				struct device_attribute *attr,
1711				const char *buf, size_t size)
1712{
1713	u8 i, j, maskbyte;
1714	unsigned long val1, val2, mask;
1715	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1716	struct etmv4_config *config = &drvdata->config;
1717
1718	/*
1719	 * Don't use contextID tracing if coming from a PID namespace.  See
1720	 * comment in ctxid_pid_store().
1721	 */
1722	if (task_active_pid_ns(current) != &init_pid_ns)
1723		return -EINVAL;
1724
1725	/*
1726	 * only implemented when ctxid tracing is enabled, i.e. at least one
1727	 * ctxid comparator is implemented and ctxid is greater than 0 bits
1728	 * in length
1729	 */
1730	if (!drvdata->ctxid_size || !drvdata->numcidc)
1731		return -EINVAL;
1732	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1733		return -EINVAL;
1734
1735	spin_lock(&drvdata->spinlock);
1736	/*
1737	 * each byte[0..3] controls mask value applied to ctxid
1738	 * comparator[0..3]
1739	 */
1740	switch (drvdata->numcidc) {
1741	case 0x1:
1742		/* COMP0, bits[7:0] */
1743		config->ctxid_mask0 = val1 & 0xFF;
1744		break;
1745	case 0x2:
1746		/* COMP1, bits[15:8] */
1747		config->ctxid_mask0 = val1 & 0xFFFF;
1748		break;
1749	case 0x3:
1750		/* COMP2, bits[23:16] */
1751		config->ctxid_mask0 = val1 & 0xFFFFFF;
1752		break;
1753	case 0x4:
1754		/* COMP3, bits[31:24] */
1755		config->ctxid_mask0 = val1;
1756		break;
1757	case 0x5:
1758		/* COMP4, bits[7:0] */
1759		config->ctxid_mask0 = val1;
1760		config->ctxid_mask1 = val2 & 0xFF;
1761		break;
1762	case 0x6:
1763		/* COMP5, bits[15:8] */
1764		config->ctxid_mask0 = val1;
1765		config->ctxid_mask1 = val2 & 0xFFFF;
1766		break;
1767	case 0x7:
1768		/* COMP6, bits[23:16] */
1769		config->ctxid_mask0 = val1;
1770		config->ctxid_mask1 = val2 & 0xFFFFFF;
1771		break;
1772	case 0x8:
1773		/* COMP7, bits[31:24] */
1774		config->ctxid_mask0 = val1;
1775		config->ctxid_mask1 = val2;
1776		break;
1777	default:
1778		break;
1779	}
1780	/*
1781	 * If software sets a mask bit to 1, it must program the relevant byte
1782	 * of the ctxid comparator value to 0x0, otherwise behavior is
1783	 * unpredictable.  For example, if bit[3] of ctxid_mask0 is 1, bits[31:24]
1784	 * (i.e. byte 3) of the ctxid comparator 0 value register must be cleared.
1785	 */
1786	mask = config->ctxid_mask0;
1787	for (i = 0; i < drvdata->numcidc; i++) {
1788		/* mask value of corresponding ctxid comparator */
1789		maskbyte = mask & ETMv4_EVENT_MASK;
1790		/*
1791		 * each bit corresponds to a byte of respective ctxid comparator
1792		 * value register
1793		 */
1794		for (j = 0; j < 8; j++) {
1795			if (maskbyte & 1)
1796				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
1797			maskbyte >>= 1;
1798		}
1799		/* Select the next ctxid comparator mask value */
1800		if (i == 3)
1801			/* ctxid comparators[4-7] */
1802			mask = config->ctxid_mask1;
1803		else
1804			mask >>= 0x8;
1805	}
1806
1807	spin_unlock(&drvdata->spinlock);
1808	return size;
1809}
1810static DEVICE_ATTR_RW(ctxid_masks);
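/*
 * ctxid_masks expects two hexadecimal words (for ctxid_mask0 and
 * ctxid_mask1); only the bytes covering implemented comparators are kept,
 * each byte being a per-comparator mask, and for every mask bit that is set
 * the corresponding byte of the affected ctxid_pid value is cleared so the
 * two stay consistent.
 */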
1811
1812static ssize_t vmid_idx_show(struct device *dev,
1813			     struct device_attribute *attr,
1814			     char *buf)
1815{
1816	unsigned long val;
1817	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1818	struct etmv4_config *config = &drvdata->config;
1819
1820	val = config->vmid_idx;
1821	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1822}
1823
1824static ssize_t vmid_idx_store(struct device *dev,
1825			      struct device_attribute *attr,
1826			      const char *buf, size_t size)
1827{
1828	unsigned long val;
1829	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1830	struct etmv4_config *config = &drvdata->config;
1831
1832	if (kstrtoul(buf, 16, &val))
1833		return -EINVAL;
1834	if (val >= drvdata->numvmidc)
1835		return -EINVAL;
1836
1837	/*
1838	 * Use spinlock to ensure index doesn't change while it gets
1839	 * dereferenced multiple times within a spinlock block elsewhere.
1840	 */
1841	spin_lock(&drvdata->spinlock);
1842	config->vmid_idx = val;
1843	spin_unlock(&drvdata->spinlock);
1844	return size;
1845}
1846static DEVICE_ATTR_RW(vmid_idx);
1847
1848static ssize_t vmid_val_show(struct device *dev,
1849			     struct device_attribute *attr,
1850			     char *buf)
1851{
1852	unsigned long val;
1853	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1854	struct etmv4_config *config = &drvdata->config;
1855
1856	val = (unsigned long)config->vmid_val[config->vmid_idx];
1857	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1858}
1859
1860static ssize_t vmid_val_store(struct device *dev,
1861			      struct device_attribute *attr,
1862			      const char *buf, size_t size)
1863{
1864	unsigned long val;
1865	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1866	struct etmv4_config *config = &drvdata->config;
1867
1868	/*
1869	 * only implemented when vmid tracing is enabled, i.e. at least one
1870	 * vmid comparator is implemented and at least 8 bit vmid size
1871	 */
1872	if (!drvdata->vmid_size || !drvdata->numvmidc)
1873		return -EINVAL;
1874	if (kstrtoul(buf, 16, &val))
1875		return -EINVAL;
1876
1877	spin_lock(&drvdata->spinlock);
1878	config->vmid_val[config->vmid_idx] = (u64)val;
1879	spin_unlock(&drvdata->spinlock);
1880	return size;
1881}
1882static DEVICE_ATTR_RW(vmid_val);
1883
1884static ssize_t vmid_masks_show(struct device *dev,
1885			       struct device_attribute *attr, char *buf)
1886{
1887	unsigned long val1, val2;
1888	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1889	struct etmv4_config *config = &drvdata->config;
1890
1891	spin_lock(&drvdata->spinlock);
1892	val1 = config->vmid_mask0;
1893	val2 = config->vmid_mask1;
1894	spin_unlock(&drvdata->spinlock);
1895	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1896}
1897
1898static ssize_t vmid_masks_store(struct device *dev,
1899				struct device_attribute *attr,
1900				const char *buf, size_t size)
1901{
1902	u8 i, j, maskbyte;
1903	unsigned long val1, val2, mask;
1904	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1905	struct etmv4_config *config = &drvdata->config;
1906
1907	/*
1908	 * only implemented when vmid tracing is enabled, i.e. at least one
1909	 * vmid comparator is implemented and at least 8 bit vmid size
1910	 */
1911	if (!drvdata->vmid_size || !drvdata->numvmidc)
1912		return -EINVAL;
1913	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1914		return -EINVAL;
1915
1916	spin_lock(&drvdata->spinlock);
1917
1918	/*
1919	 * each byte[0..3] controls mask value applied to vmid
1920	 * comparator[0..3]
1921	 */
1922	switch (drvdata->numvmidc) {
1923	case 0x1:
1924		/* COMP0, bits[7:0] */
1925		config->vmid_mask0 = val1 & 0xFF;
1926		break;
1927	case 0x2:
1928		/* COMP1, bits[15:8] */
1929		config->vmid_mask0 = val1 & 0xFFFF;
1930		break;
1931	case 0x3:
1932		/* COMP2, bits[23:16] */
1933		config->vmid_mask0 = val1 & 0xFFFFFF;
1934		break;
1935	case 0x4:
1936		/* COMP3, bits[31:24] */
1937		config->vmid_mask0 = val1;
1938		break;
1939	case 0x5:
1940		/* COMP4, bits[7:0] */
1941		config->vmid_mask0 = val1;
1942		config->vmid_mask1 = val2 & 0xFF;
1943		break;
1944	case 0x6:
1945		/* COMP5, bits[15:8] */
1946		config->vmid_mask0 = val1;
1947		config->vmid_mask1 = val2 & 0xFFFF;
1948		break;
1949	case 0x7:
1950		/* COMP6, bits[23:16] */
1951		config->vmid_mask0 = val1;
1952		config->vmid_mask1 = val2 & 0xFFFFFF;
1953		break;
1954	case 0x8:
1955		/* COMP7, bits[31:24] */
1956		config->vmid_mask0 = val1;
1957		config->vmid_mask1 = val2;
1958		break;
1959	default:
1960		break;
1961	}
1962
1963	/*
1964	 * If software sets a mask bit to 1, it must program the relevant byte
1965	 * of the vmid comparator value to 0x0, otherwise behavior is
1966	 * unpredictable.  For example, if bit[3] of vmid_mask0 is 1, bits[31:24]
1967	 * (i.e. byte 3) of the vmid comparator 0 value register must be cleared.
1968	 */
1969	mask = config->vmid_mask0;
1970	for (i = 0; i < drvdata->numvmidc; i++) {
1971		/* mask value of corresponding vmid comparator */
1972		maskbyte = mask & ETMv4_EVENT_MASK;
1973		/*
1974		 * each bit corresponds to a byte of respective vmid comparator
1975		 * value register
1976		 */
1977		for (j = 0; j < 8; j++) {
1978			if (maskbyte & 1)
1979				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
1980			maskbyte >>= 1;
1981		}
1982		/* Select the next vmid comparator mask value */
1983		if (i == 3)
1984			/* vmid comparators[4-7] */
1985			mask = config->vmid_mask1;
1986		else
1987			mask >>= 0x8;
1988	}
1989	spin_unlock(&drvdata->spinlock);
1990	return size;
1991}
1992static DEVICE_ATTR_RW(vmid_masks);
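/*
 * The vmid_idx/vmid_val/vmid_masks attributes mirror their ctxid
 * counterparts but operate on the VMID comparators, and are only usable
 * when at least one VMID comparator is implemented and the VMID size is
 * non-zero.
 */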
1993
1994static ssize_t cpu_show(struct device *dev,
1995			struct device_attribute *attr, char *buf)
1996{
1997	int val;
1998	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1999
2000	val = drvdata->cpu;
2001	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2002
2003}
2004static DEVICE_ATTR_RO(cpu);
2005
2006static struct attribute *coresight_etmv4_attrs[] = {
2007	&dev_attr_nr_pe_cmp.attr,
2008	&dev_attr_nr_addr_cmp.attr,
2009	&dev_attr_nr_cntr.attr,
2010	&dev_attr_nr_ext_inp.attr,
2011	&dev_attr_numcidc.attr,
2012	&dev_attr_numvmidc.attr,
2013	&dev_attr_nrseqstate.attr,
2014	&dev_attr_nr_resource.attr,
2015	&dev_attr_nr_ss_cmp.attr,
2016	&dev_attr_reset.attr,
2017	&dev_attr_mode.attr,
2018	&dev_attr_pe.attr,
2019	&dev_attr_event.attr,
2020	&dev_attr_event_instren.attr,
2021	&dev_attr_event_ts.attr,
2022	&dev_attr_syncfreq.attr,
2023	&dev_attr_cyc_threshold.attr,
2024	&dev_attr_bb_ctrl.attr,
2025	&dev_attr_event_vinst.attr,
2026	&dev_attr_s_exlevel_vinst.attr,
2027	&dev_attr_ns_exlevel_vinst.attr,
2028	&dev_attr_addr_idx.attr,
2029	&dev_attr_addr_instdatatype.attr,
2030	&dev_attr_addr_single.attr,
2031	&dev_attr_addr_range.attr,
2032	&dev_attr_addr_start.attr,
2033	&dev_attr_addr_stop.attr,
2034	&dev_attr_addr_ctxtype.attr,
2035	&dev_attr_addr_context.attr,
2036	&dev_attr_seq_idx.attr,
2037	&dev_attr_seq_state.attr,
2038	&dev_attr_seq_event.attr,
2039	&dev_attr_seq_reset_event.attr,
2040	&dev_attr_cntr_idx.attr,
2041	&dev_attr_cntrldvr.attr,
2042	&dev_attr_cntr_val.attr,
2043	&dev_attr_cntr_ctrl.attr,
2044	&dev_attr_res_idx.attr,
2045	&dev_attr_res_ctrl.attr,
2046	&dev_attr_ctxid_idx.attr,
2047	&dev_attr_ctxid_pid.attr,
2048	&dev_attr_ctxid_masks.attr,
2049	&dev_attr_vmid_idx.attr,
2050	&dev_attr_vmid_val.attr,
2051	&dev_attr_vmid_masks.attr,
2052	&dev_attr_cpu.attr,
2053	NULL,
2054};
2055
2056struct etmv4_reg {
2057	void __iomem *addr;
2058	u32 data;
2059};
2060
2061static void do_smp_cross_read(void *data)
2062{
2063	struct etmv4_reg *reg = data;
2064
2065	reg->data = readl_relaxed(reg->addr);
2066}
2067
2068static u32 etmv4_cross_read(const struct device *dev, u32 offset)
2069{
2070	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
2071	struct etmv4_reg reg;
2072
2073	reg.addr = drvdata->base + offset;
2074	/*
2075	 * smp cross call ensures the CPU will be powered up before
2076	 * accessing the ETMv4 trace core registers
2077	 */
2078	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
2079	return reg.data;
2080}
2081
2082#define coresight_etm4x_reg(name, offset)			\
2083	coresight_simple_reg32(struct etmv4_drvdata, name, offset)
2084
2085#define coresight_etm4x_cross_read(name, offset)			\
2086	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
2087			      name, offset)
2088
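/*
 * Each invocation below generates a dev_attr_<name> sysfs attribute that
 * dumps the named management register; the _cross_read variants go through
 * etmv4_cross_read() so the access is performed on the CPU that owns this
 * ETM, while the plain coresight_etm4x_reg() variants read the register in
 * place.
 */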
2089coresight_etm4x_reg(trcpdcr, TRCPDCR);
2090coresight_etm4x_reg(trcpdsr, TRCPDSR);
2091coresight_etm4x_reg(trclsr, TRCLSR);
2092coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
2093coresight_etm4x_reg(trcdevid, TRCDEVID);
2094coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
2095coresight_etm4x_reg(trcpidr0, TRCPIDR0);
2096coresight_etm4x_reg(trcpidr1, TRCPIDR1);
2097coresight_etm4x_reg(trcpidr2, TRCPIDR2);
2098coresight_etm4x_reg(trcpidr3, TRCPIDR3);
2099coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
2100coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
2101coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
2102
2103static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2104	&dev_attr_trcoslsr.attr,
2105	&dev_attr_trcpdcr.attr,
2106	&dev_attr_trcpdsr.attr,
2107	&dev_attr_trclsr.attr,
2108	&dev_attr_trcconfig.attr,
2109	&dev_attr_trctraceid.attr,
2110	&dev_attr_trcauthstatus.attr,
2111	&dev_attr_trcdevid.attr,
2112	&dev_attr_trcdevtype.attr,
2113	&dev_attr_trcpidr0.attr,
2114	&dev_attr_trcpidr1.attr,
2115	&dev_attr_trcpidr2.attr,
2116	&dev_attr_trcpidr3.attr,
2117	NULL,
2118};
2119
2120coresight_etm4x_cross_read(trcidr0, TRCIDR0);
2121coresight_etm4x_cross_read(trcidr1, TRCIDR1);
2122coresight_etm4x_cross_read(trcidr2, TRCIDR2);
2123coresight_etm4x_cross_read(trcidr3, TRCIDR3);
2124coresight_etm4x_cross_read(trcidr4, TRCIDR4);
2125coresight_etm4x_cross_read(trcidr5, TRCIDR5);
2126/* trcidr[6,7] are reserved */
2127coresight_etm4x_cross_read(trcidr8, TRCIDR8);
2128coresight_etm4x_cross_read(trcidr9, TRCIDR9);
2129coresight_etm4x_cross_read(trcidr10, TRCIDR10);
2130coresight_etm4x_cross_read(trcidr11, TRCIDR11);
2131coresight_etm4x_cross_read(trcidr12, TRCIDR12);
2132coresight_etm4x_cross_read(trcidr13, TRCIDR13);
2133
2134static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2135	&dev_attr_trcidr0.attr,
2136	&dev_attr_trcidr1.attr,
2137	&dev_attr_trcidr2.attr,
2138	&dev_attr_trcidr3.attr,
2139	&dev_attr_trcidr4.attr,
2140	&dev_attr_trcidr5.attr,
2141	/* trcidr[6,7] are reserved */
2142	&dev_attr_trcidr8.attr,
2143	&dev_attr_trcidr9.attr,
2144	&dev_attr_trcidr10.attr,
2145	&dev_attr_trcidr11.attr,
2146	&dev_attr_trcidr12.attr,
2147	&dev_attr_trcidr13.attr,
2148	NULL,
2149};
2150
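/*
 * The three attribute groups below make up coresight_etmv4_groups: the
 * unnamed group holds the configuration attributes at the top level of the
 * device's sysfs node, while the "mgmt" and "trcidr" groups appear as
 * subdirectories containing the management and ID register dumps.
 */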
2151static const struct attribute_group coresight_etmv4_group = {
2152	.attrs = coresight_etmv4_attrs,
2153};
2154
2155static const struct attribute_group coresight_etmv4_mgmt_group = {
2156	.attrs = coresight_etmv4_mgmt_attrs,
2157	.name = "mgmt",
2158};
2159
2160static const struct attribute_group coresight_etmv4_trcidr_group = {
2161	.attrs = coresight_etmv4_trcidr_attrs,
2162	.name = "trcidr",
2163};
2164
2165const struct attribute_group *coresight_etmv4_groups[] = {
2166	&coresight_etmv4_group,
2167	&coresight_etmv4_mgmt_group,
2168	&coresight_etmv4_trcidr_group,
2169	NULL,
2170};