1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5 */
6
7#include <linux/pid_namespace.h>
8#include <linux/pm_runtime.h>
9#include <linux/sysfs.h>
10#include "coresight-etm4x.h"
11#include "coresight-priv.h"
12#include "coresight-syscfg.h"
13
14static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
15{
16 u8 idx;
17 struct etmv4_config *config = &drvdata->config;
18
19 idx = config->addr_idx;
20
21 /*
22 * TRCACATRn.TYPE bit[1:0]: type of comparison
23 * the trace unit performs
24 */
25 if (FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]) == TRCACATRn_TYPE_ADDR) {
26 if (idx % 2 != 0)
27 return -EINVAL;
28
29 /*
30 * We are performing instruction address comparison. Set the
31 * relevant bit of ViewInst Include/Exclude Control register
32 * for corresponding address comparator pair.
33 */
34 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
35 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
36 return -EINVAL;
37
38 if (exclude == true) {
39 /*
40 * Set exclude bit and unset the include bit
41 * corresponding to comparator pair
42 */
43 config->viiectlr |= BIT(idx / 2 + 16);
44 config->viiectlr &= ~BIT(idx / 2);
45 } else {
46 /*
47 * Set include bit and unset exclude bit
48 * corresponding to comparator pair
49 */
50 config->viiectlr |= BIT(idx / 2);
51 config->viiectlr &= ~BIT(idx / 2 + 16);
52 }
53 }
54 return 0;
55}
56
57static ssize_t nr_pe_cmp_show(struct device *dev,
58 struct device_attribute *attr,
59 char *buf)
60{
61 unsigned long val;
62 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
63
64 val = drvdata->nr_pe_cmp;
65 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
66}
67static DEVICE_ATTR_RO(nr_pe_cmp);
68
69static ssize_t nr_addr_cmp_show(struct device *dev,
70 struct device_attribute *attr,
71 char *buf)
72{
73 unsigned long val;
74 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
75
76 val = drvdata->nr_addr_cmp;
77 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
78}
79static DEVICE_ATTR_RO(nr_addr_cmp);
80
81static ssize_t nr_cntr_show(struct device *dev,
82 struct device_attribute *attr,
83 char *buf)
84{
85 unsigned long val;
86 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
87
88 val = drvdata->nr_cntr;
89 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
90}
91static DEVICE_ATTR_RO(nr_cntr);
92
93static ssize_t nr_ext_inp_show(struct device *dev,
94 struct device_attribute *attr,
95 char *buf)
96{
97 unsigned long val;
98 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
99
100 val = drvdata->nr_ext_inp;
101 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
102}
103static DEVICE_ATTR_RO(nr_ext_inp);
104
105static ssize_t numcidc_show(struct device *dev,
106 struct device_attribute *attr,
107 char *buf)
108{
109 unsigned long val;
110 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
111
112 val = drvdata->numcidc;
113 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
114}
115static DEVICE_ATTR_RO(numcidc);
116
117static ssize_t numvmidc_show(struct device *dev,
118 struct device_attribute *attr,
119 char *buf)
120{
121 unsigned long val;
122 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
123
124 val = drvdata->numvmidc;
125 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
126}
127static DEVICE_ATTR_RO(numvmidc);
128
129static ssize_t nrseqstate_show(struct device *dev,
130 struct device_attribute *attr,
131 char *buf)
132{
133 unsigned long val;
134 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
135
136 val = drvdata->nrseqstate;
137 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
138}
139static DEVICE_ATTR_RO(nrseqstate);
140
141static ssize_t nr_resource_show(struct device *dev,
142 struct device_attribute *attr,
143 char *buf)
144{
145 unsigned long val;
146 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
147
148 val = drvdata->nr_resource;
149 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
150}
151static DEVICE_ATTR_RO(nr_resource);
152
153static ssize_t nr_ss_cmp_show(struct device *dev,
154 struct device_attribute *attr,
155 char *buf)
156{
157 unsigned long val;
158 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
159
160 val = drvdata->nr_ss_cmp;
161 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
162}
163static DEVICE_ATTR_RO(nr_ss_cmp);
164
/*
 * reset_store - restore the default sysfs trace configuration.
 *
 * Any parsable hex value triggers the reset of &drvdata->config to its
 * baseline (instruction-only tracing, no filtering, everything disabled);
 * a non-zero value additionally clears config->mode.  The trace ID is
 * released and any active syscfg features on the device are reset.
 */
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val)
		config->mode = 0x0;

	/* Disable data tracing: do not trace load and store data transfers */
	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
	config->cfg &= ~(TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE);

	/* Disable data value and data address tracing */
	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
			  ETM_MODE_DATA_TRACE_VAL);
	config->cfg &= ~(TRCCONFIGR_DA | TRCCONFIGR_DV);

	/* Disable all events tracing */
	config->eventctrl0 = 0x0;
	config->eventctrl1 = 0x0;

	/* Disable timestamp event */
	config->ts_ctrl = 0x0;

	/* Disable stalling */
	config->stall_ctrl = 0x0;

	/* Reset trace synchronization period to 2^8 = 256 bytes*/
	if (drvdata->syncpr == false)
		config->syncfreq = 0x8;

	/*
	 * Enable ViewInst to trace everything with start-stop logic in
	 * started state. ARM recommends start-stop logic is set before
	 * each trace run.
	 */
	config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
	if (drvdata->nr_addr_cmp > 0) {
		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
		/* SSSTATUS, bit[9] */
		config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
	}

	/* No address range filtering for ViewInst */
	config->viiectlr = 0x0;

	/* No start-stop filtering for ViewInst */
	config->vissctlr = 0x0;
	config->vipcssctlr = 0x0;

	/* Disable seq events */
	for (i = 0; i < drvdata->nrseqstate-1; i++)
		config->seq_ctrl[i] = 0x0;
	config->seq_rst = 0x0;
	config->seq_state = 0x0;

	/* Disable external input events */
	config->ext_inp = 0x0;

	config->cntr_idx = 0x0;
	for (i = 0; i < drvdata->nr_cntr; i++) {
		config->cntrldvr[i] = 0x0;
		config->cntr_ctrl[i] = 0x0;
		config->cntr_val[i] = 0x0;
	}

	/* res_ctrl[0] and res_ctrl[1] are deliberately left untouched */
	config->res_idx = 0x0;
	for (i = 2; i < 2 * drvdata->nr_resource; i++)
		config->res_ctrl[i] = 0x0;

	config->ss_idx = 0x0;
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		config->ss_ctrl[i] = 0x0;
		config->ss_pe_cmp[i] = 0x0;
	}

	/* Address comparators come in pairs, hence nr_addr_cmp * 2 entries */
	config->addr_idx = 0x0;
	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		config->addr_val[i] = 0x0;
		config->addr_acc[i] = 0x0;
		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
	}

	config->ctxid_idx = 0x0;
	for (i = 0; i < drvdata->numcidc; i++)
		config->ctxid_pid[i] = 0x0;

	config->ctxid_mask0 = 0x0;
	config->ctxid_mask1 = 0x0;

	config->vmid_idx = 0x0;
	for (i = 0; i < drvdata->numvmidc; i++)
		config->vmid_val[i] = 0x0;
	config->vmid_mask0 = 0x0;
	config->vmid_mask1 = 0x0;

	spin_unlock(&drvdata->spinlock);

	/* for sysfs - only release trace id when resetting */
	etm4_release_trace_id(drvdata);

	cscfg_csdev_reset_feats(to_coresight_device(dev));

	return size;
}
static DEVICE_ATTR_WO(reset);
279
280static ssize_t mode_show(struct device *dev,
281 struct device_attribute *attr,
282 char *buf)
283{
284 unsigned long val;
285 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
286 struct etmv4_config *config = &drvdata->config;
287
288 val = config->mode;
289 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
290}
291
/*
 * mode_store - accept a user supplied ETMv4_MODE_* bitmap and translate
 * it into the cached TRCCONFIGR, TRCEVENTCTL1R, TRCSTALLCTLR and
 * TRCVICTLR register images, honouring only the features this trace
 * unit advertises in drvdata (e.g. trcbb, trccci, q_support).
 * The hardware itself is programmed later, at enable time.
 */
static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* only keep bits the driver defines */
	config->mode = val & ETMv4_MODE_ALL;

	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		config->cfg &= ~TRCCONFIGR_INSTP0_LOAD_STORE;
		if (config->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			config->cfg |= TRCCONFIGR_INSTP0_LOAD;
		if (config->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			config->cfg |= TRCCONFIGR_INSTP0_STORE;
		if (config->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			config->cfg |= TRCCONFIGR_INSTP0_LOAD_STORE;
	}

	/* bit[3], Branch broadcast mode */
	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		config->cfg |= TRCCONFIGR_BB;
	else
		config->cfg &= ~TRCCONFIGR_BB;

	/* bit[4], Cycle counting instruction trace bit */
	if ((config->mode & ETMv4_MODE_CYCACC) &&
		(drvdata->trccci == true))
		config->cfg |= TRCCONFIGR_CCI;
	else
		config->cfg &= ~TRCCONFIGR_CCI;

	/* bit[6], Context ID tracing bit */
	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		config->cfg |= TRCCONFIGR_CID;
	else
		config->cfg &= ~TRCCONFIGR_CID;

	/* bit[7], Virtual context identifier tracing bit */
	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		config->cfg |= TRCCONFIGR_VMID;
	else
		config->cfg &= ~TRCCONFIGR_VMID;

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(config->mode);
	if (drvdata->trccond == true) {
		config->cfg &= ~TRCCONFIGR_COND_MASK;
		config->cfg |= mode << __bf_shf(TRCCONFIGR_COND_MASK);
	}

	/* bit[11], Global timestamp tracing bit */
	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		config->cfg |= TRCCONFIGR_TS;
	else
		config->cfg &= ~TRCCONFIGR_TS;

	/* bit[12], Return stack enable bit */
	if ((config->mode & ETM_MODE_RETURNSTACK) &&
					(drvdata->retstack == true))
		config->cfg |= TRCCONFIGR_RS;
	else
		config->cfg &= ~TRCCONFIGR_RS;

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(config->mode);
	/* start by clearing QE bits */
	config->cfg &= ~(TRCCONFIGR_QE_W_COUNTS | TRCCONFIGR_QE_WO_COUNTS);
	/*
	 * if supported, Q elements with instruction counts are enabled.
	 * Always set the low bit for any requested mode. Valid combos are
	 * 0b00, 0b01 and 0b11.
	 */
	if (mode && drvdata->q_support)
		config->cfg |= TRCCONFIGR_QE_W_COUNTS;
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		config->cfg |= TRCCONFIGR_QE_WO_COUNTS;

	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		config->eventctrl1 |= TRCEVENTCTL1R_ATB;
	else
		config->eventctrl1 &= ~TRCEVENTCTL1R_ATB;

	/* bit[12], Low-power state behavior override bit */
	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		config->eventctrl1 |= TRCEVENTCTL1R_LPOVERRIDE;
	else
		config->eventctrl1 &= ~TRCEVENTCTL1R_LPOVERRIDE;

	/* bit[8], Instruction stall bit */
	if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
		config->stall_ctrl |= TRCSTALLCTLR_ISTALL;
	else
		config->stall_ctrl &= ~TRCSTALLCTLR_ISTALL;

	/* bit[10], Prioritize instruction trace bit */
	if (config->mode & ETM_MODE_INSTPRIO)
		config->stall_ctrl |= TRCSTALLCTLR_INSTPRIORITY;
	else
		config->stall_ctrl &= ~TRCSTALLCTLR_INSTPRIORITY;

	/* bit[13], Trace overflow prevention bit */
	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
		(drvdata->nooverflow == true))
		config->stall_ctrl |= TRCSTALLCTLR_NOOVERFLOW;
	else
		config->stall_ctrl &= ~TRCSTALLCTLR_NOOVERFLOW;

	/* bit[9] Start/stop logic control bit */
	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
		config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
	else
		config->vinst_ctrl &= ~TRCVICTLR_SSSTATUS;

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (config->mode & ETM_MODE_TRACE_RESET)
		config->vinst_ctrl |= TRCVICTLR_TRCRESET;
	else
		config->vinst_ctrl &= ~TRCVICTLR_TRCRESET;

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((config->mode & ETM_MODE_TRACE_ERR) &&
		(drvdata->trc_error == true))
		config->vinst_ctrl |= TRCVICTLR_TRCERR;
	else
		config->vinst_ctrl &= ~TRCVICTLR_TRCERR;

	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm4_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(mode);
445
446static ssize_t pe_show(struct device *dev,
447 struct device_attribute *attr,
448 char *buf)
449{
450 unsigned long val;
451 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
452 struct etmv4_config *config = &drvdata->config;
453
454 val = config->pe_sel;
455 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
456}
457
458static ssize_t pe_store(struct device *dev,
459 struct device_attribute *attr,
460 const char *buf, size_t size)
461{
462 unsigned long val;
463 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
464 struct etmv4_config *config = &drvdata->config;
465
466 if (kstrtoul(buf, 16, &val))
467 return -EINVAL;
468
469 spin_lock(&drvdata->spinlock);
470 if (val > drvdata->nr_pe) {
471 spin_unlock(&drvdata->spinlock);
472 return -EINVAL;
473 }
474
475 config->pe_sel = val;
476 spin_unlock(&drvdata->spinlock);
477 return size;
478}
479static DEVICE_ATTR_RW(pe);
480
481static ssize_t event_show(struct device *dev,
482 struct device_attribute *attr,
483 char *buf)
484{
485 unsigned long val;
486 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
487 struct etmv4_config *config = &drvdata->config;
488
489 val = config->eventctrl0;
490 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
491}
492
493static ssize_t event_store(struct device *dev,
494 struct device_attribute *attr,
495 const char *buf, size_t size)
496{
497 unsigned long val;
498 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
499 struct etmv4_config *config = &drvdata->config;
500
501 if (kstrtoul(buf, 16, &val))
502 return -EINVAL;
503
504 spin_lock(&drvdata->spinlock);
505 switch (drvdata->nr_event) {
506 case 0x0:
507 /* EVENT0, bits[7:0] */
508 config->eventctrl0 = val & 0xFF;
509 break;
510 case 0x1:
511 /* EVENT1, bits[15:8] */
512 config->eventctrl0 = val & 0xFFFF;
513 break;
514 case 0x2:
515 /* EVENT2, bits[23:16] */
516 config->eventctrl0 = val & 0xFFFFFF;
517 break;
518 case 0x3:
519 /* EVENT3, bits[31:24] */
520 config->eventctrl0 = val;
521 break;
522 default:
523 break;
524 }
525 spin_unlock(&drvdata->spinlock);
526 return size;
527}
528static DEVICE_ATTR_RW(event);
529
530static ssize_t event_instren_show(struct device *dev,
531 struct device_attribute *attr,
532 char *buf)
533{
534 unsigned long val;
535 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
536 struct etmv4_config *config = &drvdata->config;
537
538 val = FIELD_GET(TRCEVENTCTL1R_INSTEN_MASK, config->eventctrl1);
539 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
540}
541
542static ssize_t event_instren_store(struct device *dev,
543 struct device_attribute *attr,
544 const char *buf, size_t size)
545{
546 unsigned long val;
547 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
548 struct etmv4_config *config = &drvdata->config;
549
550 if (kstrtoul(buf, 16, &val))
551 return -EINVAL;
552
553 spin_lock(&drvdata->spinlock);
554 /* start by clearing all instruction event enable bits */
555 config->eventctrl1 &= ~TRCEVENTCTL1R_INSTEN_MASK;
556 switch (drvdata->nr_event) {
557 case 0x0:
558 /* generate Event element for event 1 */
559 config->eventctrl1 |= val & TRCEVENTCTL1R_INSTEN_1;
560 break;
561 case 0x1:
562 /* generate Event element for event 1 and 2 */
563 config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 | TRCEVENTCTL1R_INSTEN_1);
564 break;
565 case 0x2:
566 /* generate Event element for event 1, 2 and 3 */
567 config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
568 TRCEVENTCTL1R_INSTEN_1 |
569 TRCEVENTCTL1R_INSTEN_2);
570 break;
571 case 0x3:
572 /* generate Event element for all 4 events */
573 config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
574 TRCEVENTCTL1R_INSTEN_1 |
575 TRCEVENTCTL1R_INSTEN_2 |
576 TRCEVENTCTL1R_INSTEN_3);
577 break;
578 default:
579 break;
580 }
581 spin_unlock(&drvdata->spinlock);
582 return size;
583}
584static DEVICE_ATTR_RW(event_instren);
585
586static ssize_t event_ts_show(struct device *dev,
587 struct device_attribute *attr,
588 char *buf)
589{
590 unsigned long val;
591 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
592 struct etmv4_config *config = &drvdata->config;
593
594 val = config->ts_ctrl;
595 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
596}
597
598static ssize_t event_ts_store(struct device *dev,
599 struct device_attribute *attr,
600 const char *buf, size_t size)
601{
602 unsigned long val;
603 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
604 struct etmv4_config *config = &drvdata->config;
605
606 if (kstrtoul(buf, 16, &val))
607 return -EINVAL;
608 if (!drvdata->ts_size)
609 return -EINVAL;
610
611 config->ts_ctrl = val & ETMv4_EVENT_MASK;
612 return size;
613}
614static DEVICE_ATTR_RW(event_ts);
615
616static ssize_t syncfreq_show(struct device *dev,
617 struct device_attribute *attr,
618 char *buf)
619{
620 unsigned long val;
621 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
622 struct etmv4_config *config = &drvdata->config;
623
624 val = config->syncfreq;
625 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
626}
627
628static ssize_t syncfreq_store(struct device *dev,
629 struct device_attribute *attr,
630 const char *buf, size_t size)
631{
632 unsigned long val;
633 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
634 struct etmv4_config *config = &drvdata->config;
635
636 if (kstrtoul(buf, 16, &val))
637 return -EINVAL;
638 if (drvdata->syncpr == true)
639 return -EINVAL;
640
641 config->syncfreq = val & ETMv4_SYNC_MASK;
642 return size;
643}
644static DEVICE_ATTR_RW(syncfreq);
645
646static ssize_t cyc_threshold_show(struct device *dev,
647 struct device_attribute *attr,
648 char *buf)
649{
650 unsigned long val;
651 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
652 struct etmv4_config *config = &drvdata->config;
653
654 val = config->ccctlr;
655 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
656}
657
658static ssize_t cyc_threshold_store(struct device *dev,
659 struct device_attribute *attr,
660 const char *buf, size_t size)
661{
662 unsigned long val;
663 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
664 struct etmv4_config *config = &drvdata->config;
665
666 if (kstrtoul(buf, 16, &val))
667 return -EINVAL;
668
669 /* mask off max threshold before checking min value */
670 val &= ETM_CYC_THRESHOLD_MASK;
671 if (val < drvdata->ccitmin)
672 return -EINVAL;
673
674 config->ccctlr = val;
675 return size;
676}
677static DEVICE_ATTR_RW(cyc_threshold);
678
679static ssize_t bb_ctrl_show(struct device *dev,
680 struct device_attribute *attr,
681 char *buf)
682{
683 unsigned long val;
684 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
685 struct etmv4_config *config = &drvdata->config;
686
687 val = config->bb_ctrl;
688 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
689}
690
691static ssize_t bb_ctrl_store(struct device *dev,
692 struct device_attribute *attr,
693 const char *buf, size_t size)
694{
695 unsigned long val;
696 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
697 struct etmv4_config *config = &drvdata->config;
698
699 if (kstrtoul(buf, 16, &val))
700 return -EINVAL;
701 if (drvdata->trcbb == false)
702 return -EINVAL;
703 if (!drvdata->nr_addr_cmp)
704 return -EINVAL;
705
706 /*
707 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
708 * individual range comparators. If include then at least 1
709 * range must be selected.
710 */
711 if ((val & TRCBBCTLR_MODE) && (FIELD_GET(TRCBBCTLR_RANGE_MASK, val) == 0))
712 return -EINVAL;
713
714 config->bb_ctrl = val & (TRCBBCTLR_MODE | TRCBBCTLR_RANGE_MASK);
715 return size;
716}
717static DEVICE_ATTR_RW(bb_ctrl);
718
719static ssize_t event_vinst_show(struct device *dev,
720 struct device_attribute *attr,
721 char *buf)
722{
723 unsigned long val;
724 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
725 struct etmv4_config *config = &drvdata->config;
726
727 val = FIELD_GET(TRCVICTLR_EVENT_MASK, config->vinst_ctrl);
728 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
729}
730
731static ssize_t event_vinst_store(struct device *dev,
732 struct device_attribute *attr,
733 const char *buf, size_t size)
734{
735 unsigned long val;
736 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
737 struct etmv4_config *config = &drvdata->config;
738
739 if (kstrtoul(buf, 16, &val))
740 return -EINVAL;
741
742 spin_lock(&drvdata->spinlock);
743 val &= TRCVICTLR_EVENT_MASK >> __bf_shf(TRCVICTLR_EVENT_MASK);
744 config->vinst_ctrl &= ~TRCVICTLR_EVENT_MASK;
745 config->vinst_ctrl |= FIELD_PREP(TRCVICTLR_EVENT_MASK, val);
746 spin_unlock(&drvdata->spinlock);
747 return size;
748}
749static DEVICE_ATTR_RW(event_vinst);
750
751static ssize_t s_exlevel_vinst_show(struct device *dev,
752 struct device_attribute *attr,
753 char *buf)
754{
755 unsigned long val;
756 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
757 struct etmv4_config *config = &drvdata->config;
758
759 val = FIELD_GET(TRCVICTLR_EXLEVEL_S_MASK, config->vinst_ctrl);
760 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
761}
762
763static ssize_t s_exlevel_vinst_store(struct device *dev,
764 struct device_attribute *attr,
765 const char *buf, size_t size)
766{
767 unsigned long val;
768 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
769 struct etmv4_config *config = &drvdata->config;
770
771 if (kstrtoul(buf, 16, &val))
772 return -EINVAL;
773
774 spin_lock(&drvdata->spinlock);
775 /* clear all EXLEVEL_S bits */
776 config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_S_MASK;
777 /* enable instruction tracing for corresponding exception level */
778 val &= drvdata->s_ex_level;
779 config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK);
780 spin_unlock(&drvdata->spinlock);
781 return size;
782}
783static DEVICE_ATTR_RW(s_exlevel_vinst);
784
785static ssize_t ns_exlevel_vinst_show(struct device *dev,
786 struct device_attribute *attr,
787 char *buf)
788{
789 unsigned long val;
790 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
791 struct etmv4_config *config = &drvdata->config;
792
793 /* EXLEVEL_NS, bits[23:20] */
794 val = FIELD_GET(TRCVICTLR_EXLEVEL_NS_MASK, config->vinst_ctrl);
795 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
796}
797
798static ssize_t ns_exlevel_vinst_store(struct device *dev,
799 struct device_attribute *attr,
800 const char *buf, size_t size)
801{
802 unsigned long val;
803 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
804 struct etmv4_config *config = &drvdata->config;
805
806 if (kstrtoul(buf, 16, &val))
807 return -EINVAL;
808
809 spin_lock(&drvdata->spinlock);
810 /* clear EXLEVEL_NS bits */
811 config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK;
812 /* enable instruction tracing for corresponding exception level */
813 val &= drvdata->ns_ex_level;
814 config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK);
815 spin_unlock(&drvdata->spinlock);
816 return size;
817}
818static DEVICE_ATTR_RW(ns_exlevel_vinst);
819
820static ssize_t addr_idx_show(struct device *dev,
821 struct device_attribute *attr,
822 char *buf)
823{
824 unsigned long val;
825 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
826 struct etmv4_config *config = &drvdata->config;
827
828 val = config->addr_idx;
829 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
830}
831
832static ssize_t addr_idx_store(struct device *dev,
833 struct device_attribute *attr,
834 const char *buf, size_t size)
835{
836 unsigned long val;
837 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
838 struct etmv4_config *config = &drvdata->config;
839
840 if (kstrtoul(buf, 16, &val))
841 return -EINVAL;
842 if (val >= drvdata->nr_addr_cmp * 2)
843 return -EINVAL;
844
845 /*
846 * Use spinlock to ensure index doesn't change while it gets
847 * dereferenced multiple times within a spinlock block elsewhere.
848 */
849 spin_lock(&drvdata->spinlock);
850 config->addr_idx = val;
851 spin_unlock(&drvdata->spinlock);
852 return size;
853}
854static DEVICE_ATTR_RW(addr_idx);
855
856static ssize_t addr_instdatatype_show(struct device *dev,
857 struct device_attribute *attr,
858 char *buf)
859{
860 ssize_t len;
861 u8 val, idx;
862 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
863 struct etmv4_config *config = &drvdata->config;
864
865 spin_lock(&drvdata->spinlock);
866 idx = config->addr_idx;
867 val = FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]);
868 len = scnprintf(buf, PAGE_SIZE, "%s\n",
869 val == TRCACATRn_TYPE_ADDR ? "instr" :
870 (val == TRCACATRn_TYPE_DATA_LOAD_ADDR ? "data_load" :
871 (val == TRCACATRn_TYPE_DATA_STORE_ADDR ? "data_store" :
872 "data_load_store")));
873 spin_unlock(&drvdata->spinlock);
874 return len;
875}
876
877static ssize_t addr_instdatatype_store(struct device *dev,
878 struct device_attribute *attr,
879 const char *buf, size_t size)
880{
881 u8 idx;
882 char str[20] = "";
883 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
884 struct etmv4_config *config = &drvdata->config;
885
886 if (strlen(buf) >= 20)
887 return -EINVAL;
888 if (sscanf(buf, "%s", str) != 1)
889 return -EINVAL;
890
891 spin_lock(&drvdata->spinlock);
892 idx = config->addr_idx;
893 if (!strcmp(str, "instr"))
894 /* TYPE, bits[1:0] */
895 config->addr_acc[idx] &= ~TRCACATRn_TYPE_MASK;
896
897 spin_unlock(&drvdata->spinlock);
898 return size;
899}
900static DEVICE_ATTR_RW(addr_instdatatype);
901
902static ssize_t addr_single_show(struct device *dev,
903 struct device_attribute *attr,
904 char *buf)
905{
906 u8 idx;
907 unsigned long val;
908 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
909 struct etmv4_config *config = &drvdata->config;
910
911 idx = config->addr_idx;
912 spin_lock(&drvdata->spinlock);
913 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
914 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
915 spin_unlock(&drvdata->spinlock);
916 return -EPERM;
917 }
918 val = (unsigned long)config->addr_val[idx];
919 spin_unlock(&drvdata->spinlock);
920 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
921}
922
923static ssize_t addr_single_store(struct device *dev,
924 struct device_attribute *attr,
925 const char *buf, size_t size)
926{
927 u8 idx;
928 unsigned long val;
929 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
930 struct etmv4_config *config = &drvdata->config;
931
932 if (kstrtoul(buf, 16, &val))
933 return -EINVAL;
934
935 spin_lock(&drvdata->spinlock);
936 idx = config->addr_idx;
937 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
938 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
939 spin_unlock(&drvdata->spinlock);
940 return -EPERM;
941 }
942
943 config->addr_val[idx] = (u64)val;
944 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
945 spin_unlock(&drvdata->spinlock);
946 return size;
947}
948static DEVICE_ATTR_RW(addr_single);
949
950static ssize_t addr_range_show(struct device *dev,
951 struct device_attribute *attr,
952 char *buf)
953{
954 u8 idx;
955 unsigned long val1, val2;
956 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
957 struct etmv4_config *config = &drvdata->config;
958
959 spin_lock(&drvdata->spinlock);
960 idx = config->addr_idx;
961 if (idx % 2 != 0) {
962 spin_unlock(&drvdata->spinlock);
963 return -EPERM;
964 }
965 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
966 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
967 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
968 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
969 spin_unlock(&drvdata->spinlock);
970 return -EPERM;
971 }
972
973 val1 = (unsigned long)config->addr_val[idx];
974 val2 = (unsigned long)config->addr_val[idx + 1];
975 spin_unlock(&drvdata->spinlock);
976 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
977}
978
979static ssize_t addr_range_store(struct device *dev,
980 struct device_attribute *attr,
981 const char *buf, size_t size)
982{
983 u8 idx;
984 unsigned long val1, val2;
985 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
986 struct etmv4_config *config = &drvdata->config;
987 int elements, exclude;
988
989 elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
990
991 /* exclude is optional, but need at least two parameter */
992 if (elements < 2)
993 return -EINVAL;
994 /* lower address comparator cannot have a higher address value */
995 if (val1 > val2)
996 return -EINVAL;
997
998 spin_lock(&drvdata->spinlock);
999 idx = config->addr_idx;
1000 if (idx % 2 != 0) {
1001 spin_unlock(&drvdata->spinlock);
1002 return -EPERM;
1003 }
1004
1005 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1006 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1007 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1008 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1009 spin_unlock(&drvdata->spinlock);
1010 return -EPERM;
1011 }
1012
1013 config->addr_val[idx] = (u64)val1;
1014 config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1015 config->addr_val[idx + 1] = (u64)val2;
1016 config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1017 /*
1018 * Program include or exclude control bits for vinst or vdata
1019 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1020 * use supplied value, or default to bit set in 'mode'
1021 */
1022 if (elements != 3)
1023 exclude = config->mode & ETM_MODE_EXCLUDE;
1024 etm4_set_mode_exclude(drvdata, exclude ? true : false);
1025
1026 spin_unlock(&drvdata->spinlock);
1027 return size;
1028}
1029static DEVICE_ATTR_RW(addr_range);
1030
1031static ssize_t addr_start_show(struct device *dev,
1032 struct device_attribute *attr,
1033 char *buf)
1034{
1035 u8 idx;
1036 unsigned long val;
1037 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1038 struct etmv4_config *config = &drvdata->config;
1039
1040 spin_lock(&drvdata->spinlock);
1041 idx = config->addr_idx;
1042
1043 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1044 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1045 spin_unlock(&drvdata->spinlock);
1046 return -EPERM;
1047 }
1048
1049 val = (unsigned long)config->addr_val[idx];
1050 spin_unlock(&drvdata->spinlock);
1051 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1052}
1053
1054static ssize_t addr_start_store(struct device *dev,
1055 struct device_attribute *attr,
1056 const char *buf, size_t size)
1057{
1058 u8 idx;
1059 unsigned long val;
1060 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1061 struct etmv4_config *config = &drvdata->config;
1062
1063 if (kstrtoul(buf, 16, &val))
1064 return -EINVAL;
1065
1066 spin_lock(&drvdata->spinlock);
1067 idx = config->addr_idx;
1068 if (!drvdata->nr_addr_cmp) {
1069 spin_unlock(&drvdata->spinlock);
1070 return -EINVAL;
1071 }
1072 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1073 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1074 spin_unlock(&drvdata->spinlock);
1075 return -EPERM;
1076 }
1077
1078 config->addr_val[idx] = (u64)val;
1079 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1080 config->vissctlr |= BIT(idx);
1081 spin_unlock(&drvdata->spinlock);
1082 return size;
1083}
1084static DEVICE_ATTR_RW(addr_start);
1085
1086static ssize_t addr_stop_show(struct device *dev,
1087 struct device_attribute *attr,
1088 char *buf)
1089{
1090 u8 idx;
1091 unsigned long val;
1092 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1093 struct etmv4_config *config = &drvdata->config;
1094
1095 spin_lock(&drvdata->spinlock);
1096 idx = config->addr_idx;
1097
1098 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1099 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1100 spin_unlock(&drvdata->spinlock);
1101 return -EPERM;
1102 }
1103
1104 val = (unsigned long)config->addr_val[idx];
1105 spin_unlock(&drvdata->spinlock);
1106 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1107}
1108
1109static ssize_t addr_stop_store(struct device *dev,
1110 struct device_attribute *attr,
1111 const char *buf, size_t size)
1112{
1113 u8 idx;
1114 unsigned long val;
1115 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1116 struct etmv4_config *config = &drvdata->config;
1117
1118 if (kstrtoul(buf, 16, &val))
1119 return -EINVAL;
1120
1121 spin_lock(&drvdata->spinlock);
1122 idx = config->addr_idx;
1123 if (!drvdata->nr_addr_cmp) {
1124 spin_unlock(&drvdata->spinlock);
1125 return -EINVAL;
1126 }
1127 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1128 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1129 spin_unlock(&drvdata->spinlock);
1130 return -EPERM;
1131 }
1132
1133 config->addr_val[idx] = (u64)val;
1134 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1135 config->vissctlr |= BIT(idx + 16);
1136 spin_unlock(&drvdata->spinlock);
1137 return size;
1138}
1139static DEVICE_ATTR_RW(addr_stop);
1140
1141static ssize_t addr_ctxtype_show(struct device *dev,
1142 struct device_attribute *attr,
1143 char *buf)
1144{
1145 ssize_t len;
1146 u8 idx, val;
1147 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1148 struct etmv4_config *config = &drvdata->config;
1149
1150 spin_lock(&drvdata->spinlock);
1151 idx = config->addr_idx;
1152 /* CONTEXTTYPE, bits[3:2] */
1153 val = FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK, config->addr_acc[idx]);
1154 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1155 (val == ETM_CTX_CTXID ? "ctxid" :
1156 (val == ETM_CTX_VMID ? "vmid" : "all")));
1157 spin_unlock(&drvdata->spinlock);
1158 return len;
1159}
1160
/*
 * Select which context comparison (if any) the current address comparator
 * performs: "none", "ctxid", "vmid" or "all".  Requests for a comparison
 * type the hardware does not implement (numcidc/numvmidc == 0), and
 * unrecognised strings, are silently ignored - the write still succeeds.
 */
static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* bound the input so sscanf("%s") below cannot overflow str[] */
	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_MASK;
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID */
		if (drvdata->numcidc) {
			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
			config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_VMID;
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID */
		if (drvdata->numvmidc) {
			config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_CTXID;
			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID
		 */
		if (drvdata->numcidc)
			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
		if (drvdata->numvmidc)
			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);
1206
1207static ssize_t addr_context_show(struct device *dev,
1208 struct device_attribute *attr,
1209 char *buf)
1210{
1211 u8 idx;
1212 unsigned long val;
1213 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1214 struct etmv4_config *config = &drvdata->config;
1215
1216 spin_lock(&drvdata->spinlock);
1217 idx = config->addr_idx;
1218 /* context ID comparator bits[6:4] */
1219 val = FIELD_GET(TRCACATRn_CONTEXT_MASK, config->addr_acc[idx]);
1220 spin_unlock(&drvdata->spinlock);
1221 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1222}
1223
1224static ssize_t addr_context_store(struct device *dev,
1225 struct device_attribute *attr,
1226 const char *buf, size_t size)
1227{
1228 u8 idx;
1229 unsigned long val;
1230 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1231 struct etmv4_config *config = &drvdata->config;
1232
1233 if (kstrtoul(buf, 16, &val))
1234 return -EINVAL;
1235 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1236 return -EINVAL;
1237 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1238 drvdata->numcidc : drvdata->numvmidc))
1239 return -EINVAL;
1240
1241 spin_lock(&drvdata->spinlock);
1242 idx = config->addr_idx;
1243 /* clear context ID comparator bits[6:4] */
1244 config->addr_acc[idx] &= ~TRCACATRn_CONTEXT_MASK;
1245 config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_CONTEXT_MASK);
1246 spin_unlock(&drvdata->spinlock);
1247 return size;
1248}
1249static DEVICE_ATTR_RW(addr_context);
1250
1251static ssize_t addr_exlevel_s_ns_show(struct device *dev,
1252 struct device_attribute *attr,
1253 char *buf)
1254{
1255 u8 idx;
1256 unsigned long val;
1257 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1258 struct etmv4_config *config = &drvdata->config;
1259
1260 spin_lock(&drvdata->spinlock);
1261 idx = config->addr_idx;
1262 val = FIELD_GET(TRCACATRn_EXLEVEL_MASK, config->addr_acc[idx]);
1263 spin_unlock(&drvdata->spinlock);
1264 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1265}
1266
1267static ssize_t addr_exlevel_s_ns_store(struct device *dev,
1268 struct device_attribute *attr,
1269 const char *buf, size_t size)
1270{
1271 u8 idx;
1272 unsigned long val;
1273 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1274 struct etmv4_config *config = &drvdata->config;
1275
1276 if (kstrtoul(buf, 0, &val))
1277 return -EINVAL;
1278
1279 if (val & ~(TRCACATRn_EXLEVEL_MASK >> __bf_shf(TRCACATRn_EXLEVEL_MASK)))
1280 return -EINVAL;
1281
1282 spin_lock(&drvdata->spinlock);
1283 idx = config->addr_idx;
1284 /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1285 config->addr_acc[idx] &= ~TRCACATRn_EXLEVEL_MASK;
1286 config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_EXLEVEL_MASK);
1287 spin_unlock(&drvdata->spinlock);
1288 return size;
1289}
1290static DEVICE_ATTR_RW(addr_exlevel_s_ns);
1291
/*
 * Human readable labels for address comparator types, indexed by the
 * ETM_ADDR_TYPE_* value stored in config->addr_type[] (see the
 * addr_cmp_view_show() lookup).  Order presumably mirrors the enum:
 * NONE, SINGLE, RANGE, START, STOP - confirm against the header.
 */
static const char * const addr_type_names[] = {
	"unused",
	"single",
	"range",
	"start",
	"stop"
};
1299
/*
 * Render a one-line summary of the currently selected address comparator:
 * its index, type, value(s), include/exclude disposition (ranges only) and
 * raw access-control register.  For a range, the pair is normalised so the
 * even (low address) comparator is always reported first.
 */
static ssize_t addr_cmp_view_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u8 idx, addr_type;
	unsigned long addr_v, addr_v2, addr_ctrl;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int size = 0;
	bool exclude = false;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	addr_v = config->addr_val[idx];
	addr_ctrl = config->addr_acc[idx];
	addr_type = config->addr_type[idx];
	if (addr_type == ETM_ADDR_TYPE_RANGE) {
		/* ranges occupy an even/odd pair; report from the even one */
		if (idx & 0x1) {
			idx -= 1;
			addr_v2 = addr_v;
			addr_v = config->addr_val[idx];
		} else {
			addr_v2 = config->addr_val[idx + 1];
		}
		/* exclude bits sit in the upper half of TRCVIIECTLR */
		exclude = config->viiectlr & BIT(idx / 2 + 16);
	}
	spin_unlock(&drvdata->spinlock);
	/* addr_type == 0 (ETM_ADDR_TYPE_NONE) means the slot is unused */
	if (addr_type) {
		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
				 addr_type_names[addr_type], addr_v);
		if (addr_type == ETM_ADDR_TYPE_RANGE) {
			size += scnprintf(buf + size, PAGE_SIZE - size,
					  " %#lx %s", addr_v2,
					  exclude ? "exclude" : "include");
		}
		size += scnprintf(buf + size, PAGE_SIZE - size,
				  " ctrl(%#lx)\n", addr_ctrl);
	} else {
		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
	}
	return size;
}
static DEVICE_ATTR_RO(addr_cmp_view);
1342
1343static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
1344 struct device_attribute *attr,
1345 char *buf)
1346{
1347 unsigned long val;
1348 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1349 struct etmv4_config *config = &drvdata->config;
1350
1351 if (!drvdata->nr_pe_cmp)
1352 return -EINVAL;
1353 val = config->vipcssctlr;
1354 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1355}
1356static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
1357 struct device_attribute *attr,
1358 const char *buf, size_t size)
1359{
1360 unsigned long val;
1361 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1362 struct etmv4_config *config = &drvdata->config;
1363
1364 if (kstrtoul(buf, 16, &val))
1365 return -EINVAL;
1366 if (!drvdata->nr_pe_cmp)
1367 return -EINVAL;
1368
1369 spin_lock(&drvdata->spinlock);
1370 config->vipcssctlr = val;
1371 spin_unlock(&drvdata->spinlock);
1372 return size;
1373}
1374static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
1375
1376static ssize_t seq_idx_show(struct device *dev,
1377 struct device_attribute *attr,
1378 char *buf)
1379{
1380 unsigned long val;
1381 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1382 struct etmv4_config *config = &drvdata->config;
1383
1384 val = config->seq_idx;
1385 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1386}
1387
1388static ssize_t seq_idx_store(struct device *dev,
1389 struct device_attribute *attr,
1390 const char *buf, size_t size)
1391{
1392 unsigned long val;
1393 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1394 struct etmv4_config *config = &drvdata->config;
1395
1396 if (kstrtoul(buf, 16, &val))
1397 return -EINVAL;
1398 if (val >= drvdata->nrseqstate - 1)
1399 return -EINVAL;
1400
1401 /*
1402 * Use spinlock to ensure index doesn't change while it gets
1403 * dereferenced multiple times within a spinlock block elsewhere.
1404 */
1405 spin_lock(&drvdata->spinlock);
1406 config->seq_idx = val;
1407 spin_unlock(&drvdata->spinlock);
1408 return size;
1409}
1410static DEVICE_ATTR_RW(seq_idx);
1411
1412static ssize_t seq_state_show(struct device *dev,
1413 struct device_attribute *attr,
1414 char *buf)
1415{
1416 unsigned long val;
1417 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1418 struct etmv4_config *config = &drvdata->config;
1419
1420 val = config->seq_state;
1421 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1422}
1423
1424static ssize_t seq_state_store(struct device *dev,
1425 struct device_attribute *attr,
1426 const char *buf, size_t size)
1427{
1428 unsigned long val;
1429 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1430 struct etmv4_config *config = &drvdata->config;
1431
1432 if (kstrtoul(buf, 16, &val))
1433 return -EINVAL;
1434 if (val >= drvdata->nrseqstate)
1435 return -EINVAL;
1436
1437 config->seq_state = val;
1438 return size;
1439}
1440static DEVICE_ATTR_RW(seq_state);
1441
1442static ssize_t seq_event_show(struct device *dev,
1443 struct device_attribute *attr,
1444 char *buf)
1445{
1446 u8 idx;
1447 unsigned long val;
1448 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1449 struct etmv4_config *config = &drvdata->config;
1450
1451 spin_lock(&drvdata->spinlock);
1452 idx = config->seq_idx;
1453 val = config->seq_ctrl[idx];
1454 spin_unlock(&drvdata->spinlock);
1455 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1456}
1457
1458static ssize_t seq_event_store(struct device *dev,
1459 struct device_attribute *attr,
1460 const char *buf, size_t size)
1461{
1462 u8 idx;
1463 unsigned long val;
1464 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1465 struct etmv4_config *config = &drvdata->config;
1466
1467 if (kstrtoul(buf, 16, &val))
1468 return -EINVAL;
1469
1470 spin_lock(&drvdata->spinlock);
1471 idx = config->seq_idx;
1472 /* Seq control has two masks B[15:8] F[7:0] */
1473 config->seq_ctrl[idx] = val & 0xFFFF;
1474 spin_unlock(&drvdata->spinlock);
1475 return size;
1476}
1477static DEVICE_ATTR_RW(seq_event);
1478
1479static ssize_t seq_reset_event_show(struct device *dev,
1480 struct device_attribute *attr,
1481 char *buf)
1482{
1483 unsigned long val;
1484 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1485 struct etmv4_config *config = &drvdata->config;
1486
1487 val = config->seq_rst;
1488 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1489}
1490
1491static ssize_t seq_reset_event_store(struct device *dev,
1492 struct device_attribute *attr,
1493 const char *buf, size_t size)
1494{
1495 unsigned long val;
1496 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1497 struct etmv4_config *config = &drvdata->config;
1498
1499 if (kstrtoul(buf, 16, &val))
1500 return -EINVAL;
1501 if (!(drvdata->nrseqstate))
1502 return -EINVAL;
1503
1504 config->seq_rst = val & ETMv4_EVENT_MASK;
1505 return size;
1506}
1507static DEVICE_ATTR_RW(seq_reset_event);
1508
1509static ssize_t cntr_idx_show(struct device *dev,
1510 struct device_attribute *attr,
1511 char *buf)
1512{
1513 unsigned long val;
1514 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1515 struct etmv4_config *config = &drvdata->config;
1516
1517 val = config->cntr_idx;
1518 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1519}
1520
1521static ssize_t cntr_idx_store(struct device *dev,
1522 struct device_attribute *attr,
1523 const char *buf, size_t size)
1524{
1525 unsigned long val;
1526 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1527 struct etmv4_config *config = &drvdata->config;
1528
1529 if (kstrtoul(buf, 16, &val))
1530 return -EINVAL;
1531 if (val >= drvdata->nr_cntr)
1532 return -EINVAL;
1533
1534 /*
1535 * Use spinlock to ensure index doesn't change while it gets
1536 * dereferenced multiple times within a spinlock block elsewhere.
1537 */
1538 spin_lock(&drvdata->spinlock);
1539 config->cntr_idx = val;
1540 spin_unlock(&drvdata->spinlock);
1541 return size;
1542}
1543static DEVICE_ATTR_RW(cntr_idx);
1544
1545static ssize_t cntrldvr_show(struct device *dev,
1546 struct device_attribute *attr,
1547 char *buf)
1548{
1549 u8 idx;
1550 unsigned long val;
1551 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1552 struct etmv4_config *config = &drvdata->config;
1553
1554 spin_lock(&drvdata->spinlock);
1555 idx = config->cntr_idx;
1556 val = config->cntrldvr[idx];
1557 spin_unlock(&drvdata->spinlock);
1558 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1559}
1560
1561static ssize_t cntrldvr_store(struct device *dev,
1562 struct device_attribute *attr,
1563 const char *buf, size_t size)
1564{
1565 u8 idx;
1566 unsigned long val;
1567 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1568 struct etmv4_config *config = &drvdata->config;
1569
1570 if (kstrtoul(buf, 16, &val))
1571 return -EINVAL;
1572 if (val > ETM_CNTR_MAX_VAL)
1573 return -EINVAL;
1574
1575 spin_lock(&drvdata->spinlock);
1576 idx = config->cntr_idx;
1577 config->cntrldvr[idx] = val;
1578 spin_unlock(&drvdata->spinlock);
1579 return size;
1580}
1581static DEVICE_ATTR_RW(cntrldvr);
1582
1583static ssize_t cntr_val_show(struct device *dev,
1584 struct device_attribute *attr,
1585 char *buf)
1586{
1587 u8 idx;
1588 unsigned long val;
1589 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1590 struct etmv4_config *config = &drvdata->config;
1591
1592 spin_lock(&drvdata->spinlock);
1593 idx = config->cntr_idx;
1594 val = config->cntr_val[idx];
1595 spin_unlock(&drvdata->spinlock);
1596 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1597}
1598
1599static ssize_t cntr_val_store(struct device *dev,
1600 struct device_attribute *attr,
1601 const char *buf, size_t size)
1602{
1603 u8 idx;
1604 unsigned long val;
1605 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1606 struct etmv4_config *config = &drvdata->config;
1607
1608 if (kstrtoul(buf, 16, &val))
1609 return -EINVAL;
1610 if (val > ETM_CNTR_MAX_VAL)
1611 return -EINVAL;
1612
1613 spin_lock(&drvdata->spinlock);
1614 idx = config->cntr_idx;
1615 config->cntr_val[idx] = val;
1616 spin_unlock(&drvdata->spinlock);
1617 return size;
1618}
1619static DEVICE_ATTR_RW(cntr_val);
1620
1621static ssize_t cntr_ctrl_show(struct device *dev,
1622 struct device_attribute *attr,
1623 char *buf)
1624{
1625 u8 idx;
1626 unsigned long val;
1627 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1628 struct etmv4_config *config = &drvdata->config;
1629
1630 spin_lock(&drvdata->spinlock);
1631 idx = config->cntr_idx;
1632 val = config->cntr_ctrl[idx];
1633 spin_unlock(&drvdata->spinlock);
1634 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1635}
1636
1637static ssize_t cntr_ctrl_store(struct device *dev,
1638 struct device_attribute *attr,
1639 const char *buf, size_t size)
1640{
1641 u8 idx;
1642 unsigned long val;
1643 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1644 struct etmv4_config *config = &drvdata->config;
1645
1646 if (kstrtoul(buf, 16, &val))
1647 return -EINVAL;
1648
1649 spin_lock(&drvdata->spinlock);
1650 idx = config->cntr_idx;
1651 config->cntr_ctrl[idx] = val;
1652 spin_unlock(&drvdata->spinlock);
1653 return size;
1654}
1655static DEVICE_ATTR_RW(cntr_ctrl);
1656
1657static ssize_t res_idx_show(struct device *dev,
1658 struct device_attribute *attr,
1659 char *buf)
1660{
1661 unsigned long val;
1662 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1663 struct etmv4_config *config = &drvdata->config;
1664
1665 val = config->res_idx;
1666 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1667}
1668
1669static ssize_t res_idx_store(struct device *dev,
1670 struct device_attribute *attr,
1671 const char *buf, size_t size)
1672{
1673 unsigned long val;
1674 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1675 struct etmv4_config *config = &drvdata->config;
1676
1677 if (kstrtoul(buf, 16, &val))
1678 return -EINVAL;
1679 /*
1680 * Resource selector pair 0 is always implemented and reserved,
1681 * namely an idx with 0 and 1 is illegal.
1682 */
1683 if ((val < 2) || (val >= 2 * drvdata->nr_resource))
1684 return -EINVAL;
1685
1686 /*
1687 * Use spinlock to ensure index doesn't change while it gets
1688 * dereferenced multiple times within a spinlock block elsewhere.
1689 */
1690 spin_lock(&drvdata->spinlock);
1691 config->res_idx = val;
1692 spin_unlock(&drvdata->spinlock);
1693 return size;
1694}
1695static DEVICE_ATTR_RW(res_idx);
1696
1697static ssize_t res_ctrl_show(struct device *dev,
1698 struct device_attribute *attr,
1699 char *buf)
1700{
1701 u8 idx;
1702 unsigned long val;
1703 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1704 struct etmv4_config *config = &drvdata->config;
1705
1706 spin_lock(&drvdata->spinlock);
1707 idx = config->res_idx;
1708 val = config->res_ctrl[idx];
1709 spin_unlock(&drvdata->spinlock);
1710 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1711}
1712
/*
 * Program the control register of the currently selected resource
 * selector.  Only the architecturally defined fields (PAIRINV, INV,
 * GROUP, SELECT) are retained from the user value; for the odd member
 * of a selector pair PAIRINV is additionally forced to zero as it is
 * RES0 there.
 */
static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	/* For odd idx pair inversal bit is RES0 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~TRCRSCTLRn_PAIRINV;
	/* keep only the architected fields of TRCRSCTLRn */
	config->res_ctrl[idx] = val & (TRCRSCTLRn_PAIRINV |
				       TRCRSCTLRn_INV |
				       TRCRSCTLRn_GROUP_MASK |
				       TRCRSCTLRn_SELECT_MASK);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);
1739
1740static ssize_t sshot_idx_show(struct device *dev,
1741 struct device_attribute *attr, char *buf)
1742{
1743 unsigned long val;
1744 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1745 struct etmv4_config *config = &drvdata->config;
1746
1747 val = config->ss_idx;
1748 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1749}
1750
1751static ssize_t sshot_idx_store(struct device *dev,
1752 struct device_attribute *attr,
1753 const char *buf, size_t size)
1754{
1755 unsigned long val;
1756 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1757 struct etmv4_config *config = &drvdata->config;
1758
1759 if (kstrtoul(buf, 16, &val))
1760 return -EINVAL;
1761 if (val >= drvdata->nr_ss_cmp)
1762 return -EINVAL;
1763
1764 spin_lock(&drvdata->spinlock);
1765 config->ss_idx = val;
1766 spin_unlock(&drvdata->spinlock);
1767 return size;
1768}
1769static DEVICE_ATTR_RW(sshot_idx);
1770
1771static ssize_t sshot_ctrl_show(struct device *dev,
1772 struct device_attribute *attr,
1773 char *buf)
1774{
1775 unsigned long val;
1776 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1777 struct etmv4_config *config = &drvdata->config;
1778
1779 spin_lock(&drvdata->spinlock);
1780 val = config->ss_ctrl[config->ss_idx];
1781 spin_unlock(&drvdata->spinlock);
1782 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1783}
1784
1785static ssize_t sshot_ctrl_store(struct device *dev,
1786 struct device_attribute *attr,
1787 const char *buf, size_t size)
1788{
1789 u8 idx;
1790 unsigned long val;
1791 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1792 struct etmv4_config *config = &drvdata->config;
1793
1794 if (kstrtoul(buf, 16, &val))
1795 return -EINVAL;
1796
1797 spin_lock(&drvdata->spinlock);
1798 idx = config->ss_idx;
1799 config->ss_ctrl[idx] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK, val);
1800 /* must clear bit 31 in related status register on programming */
1801 config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
1802 spin_unlock(&drvdata->spinlock);
1803 return size;
1804}
1805static DEVICE_ATTR_RW(sshot_ctrl);
1806
1807static ssize_t sshot_status_show(struct device *dev,
1808 struct device_attribute *attr, char *buf)
1809{
1810 unsigned long val;
1811 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1812 struct etmv4_config *config = &drvdata->config;
1813
1814 spin_lock(&drvdata->spinlock);
1815 val = config->ss_status[config->ss_idx];
1816 spin_unlock(&drvdata->spinlock);
1817 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1818}
1819static DEVICE_ATTR_RO(sshot_status);
1820
1821static ssize_t sshot_pe_ctrl_show(struct device *dev,
1822 struct device_attribute *attr,
1823 char *buf)
1824{
1825 unsigned long val;
1826 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1827 struct etmv4_config *config = &drvdata->config;
1828
1829 spin_lock(&drvdata->spinlock);
1830 val = config->ss_pe_cmp[config->ss_idx];
1831 spin_unlock(&drvdata->spinlock);
1832 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1833}
1834
1835static ssize_t sshot_pe_ctrl_store(struct device *dev,
1836 struct device_attribute *attr,
1837 const char *buf, size_t size)
1838{
1839 u8 idx;
1840 unsigned long val;
1841 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1842 struct etmv4_config *config = &drvdata->config;
1843
1844 if (kstrtoul(buf, 16, &val))
1845 return -EINVAL;
1846
1847 spin_lock(&drvdata->spinlock);
1848 idx = config->ss_idx;
1849 config->ss_pe_cmp[idx] = FIELD_PREP(TRCSSPCICRn_PC_MASK, val);
1850 /* must clear bit 31 in related status register on programming */
1851 config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
1852 spin_unlock(&drvdata->spinlock);
1853 return size;
1854}
1855static DEVICE_ATTR_RW(sshot_pe_ctrl);
1856
1857static ssize_t ctxid_idx_show(struct device *dev,
1858 struct device_attribute *attr,
1859 char *buf)
1860{
1861 unsigned long val;
1862 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1863 struct etmv4_config *config = &drvdata->config;
1864
1865 val = config->ctxid_idx;
1866 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1867}
1868
1869static ssize_t ctxid_idx_store(struct device *dev,
1870 struct device_attribute *attr,
1871 const char *buf, size_t size)
1872{
1873 unsigned long val;
1874 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1875 struct etmv4_config *config = &drvdata->config;
1876
1877 if (kstrtoul(buf, 16, &val))
1878 return -EINVAL;
1879 if (val >= drvdata->numcidc)
1880 return -EINVAL;
1881
1882 /*
1883 * Use spinlock to ensure index doesn't change while it gets
1884 * dereferenced multiple times within a spinlock block elsewhere.
1885 */
1886 spin_lock(&drvdata->spinlock);
1887 config->ctxid_idx = val;
1888 spin_unlock(&drvdata->spinlock);
1889 return size;
1890}
1891static DEVICE_ATTR_RW(ctxid_idx);
1892
1893static ssize_t ctxid_pid_show(struct device *dev,
1894 struct device_attribute *attr,
1895 char *buf)
1896{
1897 u8 idx;
1898 unsigned long val;
1899 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1900 struct etmv4_config *config = &drvdata->config;
1901
1902 /*
1903 * Don't use contextID tracing if coming from a PID namespace. See
1904 * comment in ctxid_pid_store().
1905 */
1906 if (task_active_pid_ns(current) != &init_pid_ns)
1907 return -EINVAL;
1908
1909 spin_lock(&drvdata->spinlock);
1910 idx = config->ctxid_idx;
1911 val = (unsigned long)config->ctxid_pid[idx];
1912 spin_unlock(&drvdata->spinlock);
1913 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1914}
1915
1916static ssize_t ctxid_pid_store(struct device *dev,
1917 struct device_attribute *attr,
1918 const char *buf, size_t size)
1919{
1920 u8 idx;
1921 unsigned long pid;
1922 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1923 struct etmv4_config *config = &drvdata->config;
1924
1925 /*
1926 * When contextID tracing is enabled the tracers will insert the
1927 * value found in the contextID register in the trace stream. But if
1928 * a process is in a namespace the PID of that process as seen from the
1929 * namespace won't be what the kernel sees, something that makes the
1930 * feature confusing and can potentially leak kernel only information.
1931 * As such refuse to use the feature if @current is not in the initial
1932 * PID namespace.
1933 */
1934 if (task_active_pid_ns(current) != &init_pid_ns)
1935 return -EINVAL;
1936
1937 /*
1938 * only implemented when ctxid tracing is enabled, i.e. at least one
1939 * ctxid comparator is implemented and ctxid is greater than 0 bits
1940 * in length
1941 */
1942 if (!drvdata->ctxid_size || !drvdata->numcidc)
1943 return -EINVAL;
1944 if (kstrtoul(buf, 16, &pid))
1945 return -EINVAL;
1946
1947 spin_lock(&drvdata->spinlock);
1948 idx = config->ctxid_idx;
1949 config->ctxid_pid[idx] = (u64)pid;
1950 spin_unlock(&drvdata->spinlock);
1951 return size;
1952}
1953static DEVICE_ATTR_RW(ctxid_pid);
1954
1955static ssize_t ctxid_masks_show(struct device *dev,
1956 struct device_attribute *attr,
1957 char *buf)
1958{
1959 unsigned long val1, val2;
1960 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1961 struct etmv4_config *config = &drvdata->config;
1962
1963 /*
1964 * Don't use contextID tracing if coming from a PID namespace. See
1965 * comment in ctxid_pid_store().
1966 */
1967 if (task_active_pid_ns(current) != &init_pid_ns)
1968 return -EINVAL;
1969
1970 spin_lock(&drvdata->spinlock);
1971 val1 = config->ctxid_mask0;
1972 val2 = config->ctxid_mask1;
1973 spin_unlock(&drvdata->spinlock);
1974 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1975}
1976
1977static ssize_t ctxid_masks_store(struct device *dev,
1978 struct device_attribute *attr,
1979 const char *buf, size_t size)
1980{
1981 u8 i, j, maskbyte;
1982 unsigned long val1, val2, mask;
1983 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1984 struct etmv4_config *config = &drvdata->config;
1985 int nr_inputs;
1986
1987 /*
1988 * Don't use contextID tracing if coming from a PID namespace. See
1989 * comment in ctxid_pid_store().
1990 */
1991 if (task_active_pid_ns(current) != &init_pid_ns)
1992 return -EINVAL;
1993
1994 /*
1995 * only implemented when ctxid tracing is enabled, i.e. at least one
1996 * ctxid comparator is implemented and ctxid is greater than 0 bits
1997 * in length
1998 */
1999 if (!drvdata->ctxid_size || !drvdata->numcidc)
2000 return -EINVAL;
2001 /* one mask if <= 4 comparators, two for up to 8 */
2002 nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2003 if ((drvdata->numcidc > 4) && (nr_inputs != 2))
2004 return -EINVAL;
2005
2006 spin_lock(&drvdata->spinlock);
2007 /*
2008 * each byte[0..3] controls mask value applied to ctxid
2009 * comparator[0..3]
2010 */
2011 switch (drvdata->numcidc) {
2012 case 0x1:
2013 /* COMP0, bits[7:0] */
2014 config->ctxid_mask0 = val1 & 0xFF;
2015 break;
2016 case 0x2:
2017 /* COMP1, bits[15:8] */
2018 config->ctxid_mask0 = val1 & 0xFFFF;
2019 break;
2020 case 0x3:
2021 /* COMP2, bits[23:16] */
2022 config->ctxid_mask0 = val1 & 0xFFFFFF;
2023 break;
2024 case 0x4:
2025 /* COMP3, bits[31:24] */
2026 config->ctxid_mask0 = val1;
2027 break;
2028 case 0x5:
2029 /* COMP4, bits[7:0] */
2030 config->ctxid_mask0 = val1;
2031 config->ctxid_mask1 = val2 & 0xFF;
2032 break;
2033 case 0x6:
2034 /* COMP5, bits[15:8] */
2035 config->ctxid_mask0 = val1;
2036 config->ctxid_mask1 = val2 & 0xFFFF;
2037 break;
2038 case 0x7:
2039 /* COMP6, bits[23:16] */
2040 config->ctxid_mask0 = val1;
2041 config->ctxid_mask1 = val2 & 0xFFFFFF;
2042 break;
2043 case 0x8:
2044 /* COMP7, bits[31:24] */
2045 config->ctxid_mask0 = val1;
2046 config->ctxid_mask1 = val2;
2047 break;
2048 default:
2049 break;
2050 }
2051 /*
2052 * If software sets a mask bit to 1, it must program relevant byte
2053 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
2054 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
2055 * of ctxid comparator0 value (corresponding to byte 0) register.
2056 */
2057 mask = config->ctxid_mask0;
2058 for (i = 0; i < drvdata->numcidc; i++) {
2059 /* mask value of corresponding ctxid comparator */
2060 maskbyte = mask & ETMv4_EVENT_MASK;
2061 /*
2062 * each bit corresponds to a byte of respective ctxid comparator
2063 * value register
2064 */
2065 for (j = 0; j < 8; j++) {
2066 if (maskbyte & 1)
2067 config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
2068 maskbyte >>= 1;
2069 }
2070 /* Select the next ctxid comparator mask value */
2071 if (i == 3)
2072 /* ctxid comparators[4-7] */
2073 mask = config->ctxid_mask1;
2074 else
2075 mask >>= 0x8;
2076 }
2077
2078 spin_unlock(&drvdata->spinlock);
2079 return size;
2080}
2081static DEVICE_ATTR_RW(ctxid_masks);
2082
2083static ssize_t vmid_idx_show(struct device *dev,
2084 struct device_attribute *attr,
2085 char *buf)
2086{
2087 unsigned long val;
2088 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2089 struct etmv4_config *config = &drvdata->config;
2090
2091 val = config->vmid_idx;
2092 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2093}
2094
2095static ssize_t vmid_idx_store(struct device *dev,
2096 struct device_attribute *attr,
2097 const char *buf, size_t size)
2098{
2099 unsigned long val;
2100 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2101 struct etmv4_config *config = &drvdata->config;
2102
2103 if (kstrtoul(buf, 16, &val))
2104 return -EINVAL;
2105 if (val >= drvdata->numvmidc)
2106 return -EINVAL;
2107
2108 /*
2109 * Use spinlock to ensure index doesn't change while it gets
2110 * dereferenced multiple times within a spinlock block elsewhere.
2111 */
2112 spin_lock(&drvdata->spinlock);
2113 config->vmid_idx = val;
2114 spin_unlock(&drvdata->spinlock);
2115 return size;
2116}
2117static DEVICE_ATTR_RW(vmid_idx);
2118
2119static ssize_t vmid_val_show(struct device *dev,
2120 struct device_attribute *attr,
2121 char *buf)
2122{
2123 unsigned long val;
2124 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2125 struct etmv4_config *config = &drvdata->config;
2126
2127 /*
2128 * Don't use virtual contextID tracing if coming from a PID namespace.
2129 * See comment in ctxid_pid_store().
2130 */
2131 if (!task_is_in_init_pid_ns(current))
2132 return -EINVAL;
2133
2134 spin_lock(&drvdata->spinlock);
2135 val = (unsigned long)config->vmid_val[config->vmid_idx];
2136 spin_unlock(&drvdata->spinlock);
2137 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2138}
2139
2140static ssize_t vmid_val_store(struct device *dev,
2141 struct device_attribute *attr,
2142 const char *buf, size_t size)
2143{
2144 unsigned long val;
2145 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2146 struct etmv4_config *config = &drvdata->config;
2147
2148 /*
2149 * Don't use virtual contextID tracing if coming from a PID namespace.
2150 * See comment in ctxid_pid_store().
2151 */
2152 if (!task_is_in_init_pid_ns(current))
2153 return -EINVAL;
2154
2155 /*
2156 * only implemented when vmid tracing is enabled, i.e. at least one
2157 * vmid comparator is implemented and at least 8 bit vmid size
2158 */
2159 if (!drvdata->vmid_size || !drvdata->numvmidc)
2160 return -EINVAL;
2161 if (kstrtoul(buf, 16, &val))
2162 return -EINVAL;
2163
2164 spin_lock(&drvdata->spinlock);
2165 config->vmid_val[config->vmid_idx] = (u64)val;
2166 spin_unlock(&drvdata->spinlock);
2167 return size;
2168}
2169static DEVICE_ATTR_RW(vmid_val);
2170
2171static ssize_t vmid_masks_show(struct device *dev,
2172 struct device_attribute *attr, char *buf)
2173{
2174 unsigned long val1, val2;
2175 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2176 struct etmv4_config *config = &drvdata->config;
2177
2178 /*
2179 * Don't use virtual contextID tracing if coming from a PID namespace.
2180 * See comment in ctxid_pid_store().
2181 */
2182 if (!task_is_in_init_pid_ns(current))
2183 return -EINVAL;
2184
2185 spin_lock(&drvdata->spinlock);
2186 val1 = config->vmid_mask0;
2187 val2 = config->vmid_mask1;
2188 spin_unlock(&drvdata->spinlock);
2189 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2190}
2191
/*
 * sysfs write: program the VMID comparator byte masks (TRCVMIDCCTLR0/1).
 * Each byte of the mask registers controls one comparator; one input
 * value covers comparators 0-3, a second covers 4-7.  Any comparator
 * byte whose mask bit is set also has the matching byte of its value
 * register cleared, as the architecture requires.
 */
static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int nr_inputs;

	/*
	 * Don't use virtual contextID tracing if coming from a PID namespace.
	 * See comment in ctxid_pid_store().
	 */
	if (!task_is_in_init_pid_ns(current))
		return -EINVAL;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	/* one mask if <= 4 comparators, two for up to 8 */
	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
	if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);

	/*
	 * each byte[0..3] controls mask value applied to vmid
	 * comparator[0..3]
	 */
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2;
		break;
	default:
		break;
	}

	/*
	 * If software sets a mask bit to 1, it must program relevant byte
	 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
	 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
	 * of vmid comparator0 value (corresponding to byte 0) register.
	 */
	mask = config->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/* mask value of corresponding vmid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective vmid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = config->vmid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_masks);
2297
2298static ssize_t cpu_show(struct device *dev,
2299 struct device_attribute *attr, char *buf)
2300{
2301 int val;
2302 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2303
2304 val = drvdata->cpu;
2305 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2306
2307}
2308static DEVICE_ATTR_RO(cpu);
2309
2310static ssize_t ts_source_show(struct device *dev,
2311 struct device_attribute *attr,
2312 char *buf)
2313{
2314 int val;
2315 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2316
2317 if (!drvdata->trfcr) {
2318 val = -1;
2319 goto out;
2320 }
2321
2322 switch (drvdata->trfcr & TRFCR_ELx_TS_MASK) {
2323 case TRFCR_ELx_TS_VIRTUAL:
2324 case TRFCR_ELx_TS_GUEST_PHYSICAL:
2325 case TRFCR_ELx_TS_PHYSICAL:
2326 val = FIELD_GET(TRFCR_ELx_TS_MASK, drvdata->trfcr);
2327 break;
2328 default:
2329 val = -1;
2330 break;
2331 }
2332
2333out:
2334 return sysfs_emit(buf, "%d\n", val);
2335}
2336static DEVICE_ATTR_RO(ts_source);
2337
/* Configuration attributes exposed in the device's main sysfs directory. */
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_addr_exlevel_s_ns.attr,
	&dev_attr_addr_cmp_view.attr,
	&dev_attr_vinst_pe_cmp_start_stop.attr,
	&dev_attr_sshot_idx.attr,
	&dev_attr_sshot_ctrl.attr,
	&dev_attr_sshot_pe_ctrl.attr,
	&dev_attr_sshot_status.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	&dev_attr_ts_source.attr,
	NULL,
};
2395
2396/*
2397 * Trace ID allocated dynamically on enable - but also allocate on read
2398 * in case sysfs or perf read before enable to ensure consistent metadata
2399 * information for trace decode
2400 */
2401static ssize_t trctraceid_show(struct device *dev,
2402 struct device_attribute *attr,
2403 char *buf)
2404{
2405 int trace_id;
2406 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2407
2408 trace_id = etm4_read_alloc_trace_id(drvdata);
2409 if (trace_id < 0)
2410 return trace_id;
2411
2412 return sysfs_emit(buf, "0x%x\n", trace_id);
2413}
2414
/*
 * Parameter block for a cross-CPU register read: @offset names the
 * register within @csdev's mapping, @data receives the value read on
 * the target CPU.
 */
struct etmv4_reg {
	struct coresight_device *csdev;
	u32 offset;
	u32 data;
};
2420
2421static void do_smp_cross_read(void *data)
2422{
2423 struct etmv4_reg *reg = data;
2424
2425 reg->data = etm4x_relaxed_read32(®->csdev->access, reg->offset);
2426}
2427
2428static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
2429{
2430 struct etmv4_reg reg;
2431
2432 reg.offset = offset;
2433 reg.csdev = drvdata->csdev;
2434
2435 /*
2436 * smp cross call ensures the CPU will be powered up before
2437 * accessing the ETMv4 trace core registers
2438 */
2439 smp_call_function_single(drvdata->cpu, do_smp_cross_read, ®, 1);
2440 return reg.data;
2441}
2442
2443static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
2444{
2445 struct dev_ext_attribute *eattr;
2446
2447 eattr = container_of(attr, struct dev_ext_attribute, attr);
2448 return (u32)(unsigned long)eattr->var;
2449}
2450
2451static ssize_t coresight_etm4x_reg_show(struct device *dev,
2452 struct device_attribute *d_attr,
2453 char *buf)
2454{
2455 u32 val, offset;
2456 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2457
2458 offset = coresight_etm4x_attr_to_offset(d_attr);
2459
2460 pm_runtime_get_sync(dev->parent);
2461 val = etmv4_cross_read(drvdata, offset);
2462 pm_runtime_put_sync(dev->parent);
2463
2464 return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
2465}
2466
/*
 * Decide whether @offset names a register this device actually has,
 * based on the access method (system instructions vs memory mapped)
 * and on whether the device is an ETE or an ETM4x.  The case labels
 * are supplied by list macros covering each register family.
 */
static inline bool
etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
{
	switch (offset) {
	ETM_COMMON_SYSREG_LIST_CASES
		/*
		 * Common registers to ETE & ETM4x accessible via system
		 * instructions are always implemented.
		 */
		return true;

	ETM4x_ONLY_SYSREG_LIST_CASES
		/*
		 * We only support etm4x and ete. So if the device is not
		 * ETE, it must be ETMv4x.
		 */
		return !etm4x_is_ete(drvdata);

	ETM4x_MMAP_LIST_CASES
		/*
		 * Registers accessible only via memory-mapped registers
		 * must not be accessed via system instructions.
		 * We cannot access the drvdata->csdev here, as this
		 * function is called during the device creation, via
		 * coresight_register() and the csdev is not initialized
		 * until that is done. So rely on the drvdata->base to
		 * detect if we have a memory mapped access.
		 * Also ETE doesn't implement memory mapped access, thus
		 * it is sufficient to check that we are using mmio.
		 */
		return !!drvdata->base;

	ETE_ONLY_SYSREG_LIST_CASES
		return etm4x_is_ete(drvdata);
	}

	/* Unknown offset: treat as not implemented. */
	return false;
}
2505
/*
 * Hide the ETM4x registers that may not be available on the
 * hardware.
 * There are certain management registers unavailable via system
 * instructions. Make those sysfs attributes hidden on such
 * systems.
 */
static umode_t
coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
				     struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct device_attribute *d_attr;
	u32 offset;

	d_attr = container_of(attr, struct device_attribute, attr);
	offset = coresight_etm4x_attr_to_offset(d_attr);

	/* Returning 0 hides the attribute; otherwise keep its own mode. */
	if (etm4x_register_implemented(drvdata, offset))
		return attr->mode;
	return 0;
}
2529
/*
 * Macro to set an RO ext attribute with offset and show function.
 * Offset is used in mgmt group to ensure only correct registers for
 * the ETM / ETE variant are visible.
 * The anonymous compound-literal array gives each attribute static
 * storage while keeping the definition to a single expression.
 */
#define coresight_etm4x_reg_showfn(name, offset, showfn) ( \
	&((struct dev_ext_attribute[]) {			   \
	   {							   \
		__ATTR(name, 0444, showfn, NULL),		   \
		(void *)(unsigned long)offset			   \
	   }							   \
	})[0].attr.attr					   \
	)

/* macro using the default coresight_etm4x_reg_show function */
#define coresight_etm4x_reg(name, offset)			\
	coresight_etm4x_reg_showfn(name, offset, coresight_etm4x_reg_show)
2547
/* Management registers exposed under the "mgmt" sysfs group. */
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	coresight_etm4x_reg(trcpdcr, TRCPDCR),
	coresight_etm4x_reg(trcpdsr, TRCPDSR),
	coresight_etm4x_reg(trclsr, TRCLSR),
	coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
	coresight_etm4x_reg(trcdevid, TRCDEVID),
	coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
	coresight_etm4x_reg(trcpidr0, TRCPIDR0),
	coresight_etm4x_reg(trcpidr1, TRCPIDR1),
	coresight_etm4x_reg(trcpidr2, TRCPIDR2),
	coresight_etm4x_reg(trcpidr3, TRCPIDR3),
	coresight_etm4x_reg(trcoslsr, TRCOSLSR),
	coresight_etm4x_reg(trcconfig, TRCCONFIGR),
	/* trctraceid has a custom show to allocate an ID on first read */
	coresight_etm4x_reg_showfn(trctraceid, TRCTRACEIDR, trctraceid_show),
	coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
	NULL,
};
2565
/* ID registers exposed under the "trcidr" sysfs group. */
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	coresight_etm4x_reg(trcidr0, TRCIDR0),
	coresight_etm4x_reg(trcidr1, TRCIDR1),
	coresight_etm4x_reg(trcidr2, TRCIDR2),
	coresight_etm4x_reg(trcidr3, TRCIDR3),
	coresight_etm4x_reg(trcidr4, TRCIDR4),
	coresight_etm4x_reg(trcidr5, TRCIDR5),
	/* trcidr[6,7] are reserved */
	coresight_etm4x_reg(trcidr8, TRCIDR8),
	coresight_etm4x_reg(trcidr9, TRCIDR9),
	coresight_etm4x_reg(trcidr10, TRCIDR10),
	coresight_etm4x_reg(trcidr11, TRCIDR11),
	coresight_etm4x_reg(trcidr12, TRCIDR12),
	coresight_etm4x_reg(trcidr13, TRCIDR13),
	NULL,
};
2582
/* Main (un-named) attribute group: configuration knobs. */
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

/* "mgmt" group; visibility filtered per-register for ETE vs ETM4x. */
static const struct attribute_group coresight_etmv4_mgmt_group = {
	.is_visible = coresight_etm4x_attr_reg_implemented,
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

/* "trcidr" group: read-only ID registers. */
static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

/* NULL-terminated list handed to the coresight core at registration. */
const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5 */
6
7#include <linux/pid_namespace.h>
8#include <linux/pm_runtime.h>
9#include <linux/sysfs.h>
10#include "coresight-etm4x.h"
11#include "coresight-priv.h"
12
/*
 * Switch the ViewInst include/exclude control for the address range
 * comparator pair selected by config->addr_idx.  Only applies when the
 * selected comparator performs an instruction address comparison and
 * the index is the even (first) half of a configured range pair.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
{
	u8 idx;
	struct etmv4_config *config = &drvdata->config;

	idx = config->addr_idx;

	/*
	 * TRCACATRn.TYPE bit[1:0]: type of comparison
	 * the trace unit performs
	 */
	if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
		/* ranges use comparator pairs, so idx must be even */
		if (idx % 2 != 0)
			return -EINVAL;

		/*
		 * We are performing instruction address comparison. Set the
		 * relevant bit of ViewInst Include/Exclude Control register
		 * for corresponding address comparator pair.
		 */
		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
			return -EINVAL;

		if (exclude == true) {
			/*
			 * Set exclude bit and unset the include bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2 + 16);
			config->viiectlr &= ~BIT(idx / 2);
		} else {
			/*
			 * Set include bit and unset exclude bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2);
			config->viiectlr &= ~BIT(idx / 2 + 16);
		}
	}
	return 0;
}
55
56static ssize_t nr_pe_cmp_show(struct device *dev,
57 struct device_attribute *attr,
58 char *buf)
59{
60 unsigned long val;
61 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
62
63 val = drvdata->nr_pe_cmp;
64 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
65}
66static DEVICE_ATTR_RO(nr_pe_cmp);
67
68static ssize_t nr_addr_cmp_show(struct device *dev,
69 struct device_attribute *attr,
70 char *buf)
71{
72 unsigned long val;
73 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
74
75 val = drvdata->nr_addr_cmp;
76 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
77}
78static DEVICE_ATTR_RO(nr_addr_cmp);
79
80static ssize_t nr_cntr_show(struct device *dev,
81 struct device_attribute *attr,
82 char *buf)
83{
84 unsigned long val;
85 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
86
87 val = drvdata->nr_cntr;
88 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
89}
90static DEVICE_ATTR_RO(nr_cntr);
91
92static ssize_t nr_ext_inp_show(struct device *dev,
93 struct device_attribute *attr,
94 char *buf)
95{
96 unsigned long val;
97 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
98
99 val = drvdata->nr_ext_inp;
100 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
101}
102static DEVICE_ATTR_RO(nr_ext_inp);
103
104static ssize_t numcidc_show(struct device *dev,
105 struct device_attribute *attr,
106 char *buf)
107{
108 unsigned long val;
109 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
110
111 val = drvdata->numcidc;
112 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
113}
114static DEVICE_ATTR_RO(numcidc);
115
116static ssize_t numvmidc_show(struct device *dev,
117 struct device_attribute *attr,
118 char *buf)
119{
120 unsigned long val;
121 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
122
123 val = drvdata->numvmidc;
124 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
125}
126static DEVICE_ATTR_RO(numvmidc);
127
128static ssize_t nrseqstate_show(struct device *dev,
129 struct device_attribute *attr,
130 char *buf)
131{
132 unsigned long val;
133 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
134
135 val = drvdata->nrseqstate;
136 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
137}
138static DEVICE_ATTR_RO(nrseqstate);
139
140static ssize_t nr_resource_show(struct device *dev,
141 struct device_attribute *attr,
142 char *buf)
143{
144 unsigned long val;
145 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
146
147 val = drvdata->nr_resource;
148 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
149}
150static DEVICE_ATTR_RO(nr_resource);
151
152static ssize_t nr_ss_cmp_show(struct device *dev,
153 struct device_attribute *attr,
154 char *buf)
155{
156 unsigned long val;
157 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
158
159 val = drvdata->nr_ss_cmp;
160 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
161}
162static DEVICE_ATTR_RO(nr_ss_cmp);
163
/*
 * sysfs write: a non-zero value resets the software configuration to
 * its defaults - no data tracing, no events, ViewInst tracing
 * everything with start-stop logic in the started state.  The hardware
 * itself is reprogrammed on the next enable.
 */
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val)
		config->mode = 0x0;

	/* Disable data tracing: do not trace load and store data transfers */
	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
	config->cfg &= ~(BIT(1) | BIT(2));

	/* Disable data value and data address tracing */
	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
			   ETM_MODE_DATA_TRACE_VAL);
	config->cfg &= ~(BIT(16) | BIT(17));

	/* Disable all events tracing */
	config->eventctrl0 = 0x0;
	config->eventctrl1 = 0x0;

	/* Disable timestamp event */
	config->ts_ctrl = 0x0;

	/* Disable stalling */
	config->stall_ctrl = 0x0;

	/* Reset trace synchronization period to 2^8 = 256 bytes*/
	if (drvdata->syncpr == false)
		config->syncfreq = 0x8;

	/*
	 * Enable ViewInst to trace everything with start-stop logic in
	 * started state. ARM recommends start-stop logic is set before
	 * each trace run.
	 */
	config->vinst_ctrl |= BIT(0);
	/*
	 * NOTE(review): "== true" only matches nr_addr_cmp == 1; presumably
	 * "!= 0" (any comparators present) was intended - confirm.
	 */
	if (drvdata->nr_addr_cmp == true) {
		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
		/* SSSTATUS, bit[9] */
		config->vinst_ctrl |= BIT(9);
	}

	/* No address range filtering for ViewInst */
	config->viiectlr = 0x0;

	/* No start-stop filtering for ViewInst */
	config->vissctlr = 0x0;

	/* Disable seq events */
	for (i = 0; i < drvdata->nrseqstate-1; i++)
		config->seq_ctrl[i] = 0x0;
	config->seq_rst = 0x0;
	config->seq_state = 0x0;

	/* Disable external input events */
	config->ext_inp = 0x0;

	config->cntr_idx = 0x0;
	for (i = 0; i < drvdata->nr_cntr; i++) {
		config->cntrldvr[i] = 0x0;
		config->cntr_ctrl[i] = 0x0;
		config->cntr_val[i] = 0x0;
	}

	config->res_idx = 0x0;
	for (i = 0; i < drvdata->nr_resource; i++)
		config->res_ctrl[i] = 0x0;

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		config->ss_ctrl[i] = 0x0;
		config->ss_pe_cmp[i] = 0x0;
	}

	config->addr_idx = 0x0;
	/* address comparators come in pairs, hence nr_addr_cmp * 2 */
	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		config->addr_val[i] = 0x0;
		config->addr_acc[i] = 0x0;
		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
	}

	config->ctxid_idx = 0x0;
	for (i = 0; i < drvdata->numcidc; i++)
		config->ctxid_pid[i] = 0x0;

	config->ctxid_mask0 = 0x0;
	config->ctxid_mask1 = 0x0;

	config->vmid_idx = 0x0;
	for (i = 0; i < drvdata->numvmidc; i++)
		config->vmid_val[i] = 0x0;
	config->vmid_mask0 = 0x0;
	config->vmid_mask1 = 0x0;

	drvdata->trcid = drvdata->cpu + 1;

	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_WO(reset);
273
274static ssize_t mode_show(struct device *dev,
275 struct device_attribute *attr,
276 char *buf)
277{
278 unsigned long val;
279 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
280 struct etmv4_config *config = &drvdata->config;
281
282 val = config->mode;
283 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
284}
285
/*
 * sysfs write: program the overall trace "mode".  The value is masked
 * with ETMv4_MODE_ALL and then translated, feature by feature, into
 * TRCCONFIGR / TRCEVENTCTL1R / TRCSTALLCTLR / TRCVICTLR register bits,
 * honouring only what the hardware implements (drvdata->* capability
 * flags).
 */
static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->mode = val & ETMv4_MODE_ALL;
	etm4_set_mode_exclude(drvdata,
			      config->mode & ETM_MODE_EXCLUDE ? true : false);

	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		config->cfg  &= ~(BIT(1) | BIT(2));
		if (config->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			config->cfg  |= BIT(1);
		if (config->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			config->cfg  |= BIT(2);
		if (config->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			config->cfg  |= BIT(1) | BIT(2);
	}

	/* bit[3], Branch broadcast mode */
	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		config->cfg |= BIT(3);
	else
		config->cfg &= ~BIT(3);

	/* bit[4], Cycle counting instruction trace bit */
	if ((config->mode & ETMv4_MODE_CYCACC) &&
		(drvdata->trccci == true))
		config->cfg |= BIT(4);
	else
		config->cfg &= ~BIT(4);

	/* bit[6], Context ID tracing bit */
	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		config->cfg |= BIT(6);
	else
		config->cfg &= ~BIT(6);

	/* bit[7], Virtual context identifier tracing bit */
	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		config->cfg |= BIT(7);
	else
		config->cfg &= ~BIT(7);

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(config->mode);
	if (drvdata->trccond == true) {
		config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
		config->cfg |= mode << 8;
	}

	/* bit[11], Global timestamp tracing bit */
	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		config->cfg |= BIT(11);
	else
		config->cfg &= ~BIT(11);

	/* bit[12], Return stack enable bit */
	if ((config->mode & ETM_MODE_RETURNSTACK) &&
					(drvdata->retstack == true))
		config->cfg |= BIT(12);
	else
		config->cfg &= ~BIT(12);

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(config->mode);
	/* start by clearing QE bits */
	config->cfg &= ~(BIT(13) | BIT(14));
	/* if supported, Q elements with instruction counts are enabled */
	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
		config->cfg |= BIT(13);
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		config->cfg |= BIT(14);

	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		config->eventctrl1 |= BIT(11);
	else
		config->eventctrl1 &= ~BIT(11);

	/* bit[12], Low-power state behavior override bit */
	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		config->eventctrl1 |= BIT(12);
	else
		config->eventctrl1 &= ~BIT(12);

	/* bit[8], Instruction stall bit */
	if (config->mode & ETM_MODE_ISTALL_EN)
		config->stall_ctrl |= BIT(8);
	else
		config->stall_ctrl &= ~BIT(8);

	/* bit[10], Prioritize instruction trace bit */
	if (config->mode & ETM_MODE_INSTPRIO)
		config->stall_ctrl |= BIT(10);
	else
		config->stall_ctrl &= ~BIT(10);

	/* bit[13], Trace overflow prevention bit */
	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
		(drvdata->nooverflow == true))
		config->stall_ctrl |= BIT(13);
	else
		config->stall_ctrl &= ~BIT(13);

	/* bit[9] Start/stop logic control bit */
	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
		config->vinst_ctrl |= BIT(9);
	else
		config->vinst_ctrl &= ~BIT(9);

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (config->mode & ETM_MODE_TRACE_RESET)
		config->vinst_ctrl |= BIT(10);
	else
		config->vinst_ctrl &= ~BIT(10);

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((config->mode & ETM_MODE_TRACE_ERR) &&
		(drvdata->trc_error == true))
		config->vinst_ctrl |= BIT(11);
	else
		config->vinst_ctrl &= ~BIT(11);

	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm4_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(mode);
437
438static ssize_t pe_show(struct device *dev,
439 struct device_attribute *attr,
440 char *buf)
441{
442 unsigned long val;
443 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
444 struct etmv4_config *config = &drvdata->config;
445
446 val = config->pe_sel;
447 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
448}
449
/* sysfs write: select the PE this trace unit relates to (TRCPROCSELR). */
static ssize_t pe_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * NOTE(review): the bound is "> nr_pe", i.e. nr_pe itself is an
	 * accepted value - presumably nr_pe holds the maximum selectable
	 * index rather than a count; confirm against the probe code.
	 */
	if (val > drvdata->nr_pe) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	config->pe_sel = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(pe);
472
473static ssize_t event_show(struct device *dev,
474 struct device_attribute *attr,
475 char *buf)
476{
477 unsigned long val;
478 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
479 struct etmv4_config *config = &drvdata->config;
480
481 val = config->eventctrl0;
482 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
483}
484
/*
 * sysfs write: program TRCEVENTCTL0R.  The number of usable event
 * fields (one byte each) depends on how many events the implementation
 * supports (drvdata->nr_event); extra bytes in the input are masked off.
 */
static ssize_t event_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	switch (drvdata->nr_event) {
	case 0x0:
		/* EVENT0, bits[7:0] */
		config->eventctrl0 = val & 0xFF;
		break;
	case 0x1:
		/* EVENT1, bits[15:8] */
		config->eventctrl0 = val & 0xFFFF;
		break;
	case 0x2:
		/* EVENT2, bits[23:16] */
		config->eventctrl0 = val & 0xFFFFFF;
		break;
	case 0x3:
		/* EVENT3, bits[31:24] */
		config->eventctrl0 = val;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event);
521
/* sysfs read: instruction event enable bits, TRCEVENTCTL1R[3:0]. */
static ssize_t event_instren_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = BMVAL(config->eventctrl1, 0, 3);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
533
/*
 * sysfs write: set which events generate an Event element in the
 * instruction trace (TRCEVENTCTL1R[3:0]), limited to the number of
 * events the implementation supports.
 */
static ssize_t event_instren_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* start by clearing all instruction event enable bits */
	config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
	switch (drvdata->nr_event) {
	case 0x0:
		/* generate Event element for event 1 */
		config->eventctrl1 |= val & BIT(1);
		break;
	case 0x1:
		/* generate Event element for event 1 and 2 */
		config->eventctrl1 |= val & (BIT(0) | BIT(1));
		break;
	case 0x2:
		/* generate Event element for event 1, 2 and 3 */
		config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
		break;
	case 0x3:
		/* generate Event element for all 4 events */
		config->eventctrl1 |= val & 0xF;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_instren);
572
573static ssize_t event_ts_show(struct device *dev,
574 struct device_attribute *attr,
575 char *buf)
576{
577 unsigned long val;
578 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
579 struct etmv4_config *config = &drvdata->config;
580
581 val = config->ts_ctrl;
582 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
583}
584
/*
 * sysfs write: set the timestamp trigger event (TRCTSCTLR), only valid
 * when the implementation supports global timestamping (ts_size != 0).
 */
static ssize_t event_ts_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!drvdata->ts_size)
		return -EINVAL;

	/*
	 * NOTE(review): written without taking drvdata->spinlock, unlike
	 * most stores in this file - confirm whether that is intentional.
	 */
	config->ts_ctrl = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(event_ts);
602
603static ssize_t syncfreq_show(struct device *dev,
604 struct device_attribute *attr,
605 char *buf)
606{
607 unsigned long val;
608 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
609 struct etmv4_config *config = &drvdata->config;
610
611 val = config->syncfreq;
612 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
613}
614
615static ssize_t syncfreq_store(struct device *dev,
616 struct device_attribute *attr,
617 const char *buf, size_t size)
618{
619 unsigned long val;
620 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
621 struct etmv4_config *config = &drvdata->config;
622
623 if (kstrtoul(buf, 16, &val))
624 return -EINVAL;
625 if (drvdata->syncpr == true)
626 return -EINVAL;
627
628 config->syncfreq = val & ETMv4_SYNC_MASK;
629 return size;
630}
631static DEVICE_ATTR_RW(syncfreq);
632
633static ssize_t cyc_threshold_show(struct device *dev,
634 struct device_attribute *attr,
635 char *buf)
636{
637 unsigned long val;
638 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
639 struct etmv4_config *config = &drvdata->config;
640
641 val = config->ccctlr;
642 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
643}
644
645static ssize_t cyc_threshold_store(struct device *dev,
646 struct device_attribute *attr,
647 const char *buf, size_t size)
648{
649 unsigned long val;
650 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
651 struct etmv4_config *config = &drvdata->config;
652
653 if (kstrtoul(buf, 16, &val))
654 return -EINVAL;
655 if (val < drvdata->ccitmin)
656 return -EINVAL;
657
658 config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
659 return size;
660}
661static DEVICE_ATTR_RW(cyc_threshold);
662
663static ssize_t bb_ctrl_show(struct device *dev,
664 struct device_attribute *attr,
665 char *buf)
666{
667 unsigned long val;
668 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
669 struct etmv4_config *config = &drvdata->config;
670
671 val = config->bb_ctrl;
672 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
673}
674
675static ssize_t bb_ctrl_store(struct device *dev,
676 struct device_attribute *attr,
677 const char *buf, size_t size)
678{
679 unsigned long val;
680 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
681 struct etmv4_config *config = &drvdata->config;
682
683 if (kstrtoul(buf, 16, &val))
684 return -EINVAL;
685 if (drvdata->trcbb == false)
686 return -EINVAL;
687 if (!drvdata->nr_addr_cmp)
688 return -EINVAL;
689 /*
690 * Bit[7:0] selects which address range comparator is used for
691 * branch broadcast control.
692 */
693 if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
694 return -EINVAL;
695
696 config->bb_ctrl = val;
697 return size;
698}
699static DEVICE_ATTR_RW(bb_ctrl);
700
701static ssize_t event_vinst_show(struct device *dev,
702 struct device_attribute *attr,
703 char *buf)
704{
705 unsigned long val;
706 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
707 struct etmv4_config *config = &drvdata->config;
708
709 val = config->vinst_ctrl & ETMv4_EVENT_MASK;
710 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
711}
712
713static ssize_t event_vinst_store(struct device *dev,
714 struct device_attribute *attr,
715 const char *buf, size_t size)
716{
717 unsigned long val;
718 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
719 struct etmv4_config *config = &drvdata->config;
720
721 if (kstrtoul(buf, 16, &val))
722 return -EINVAL;
723
724 spin_lock(&drvdata->spinlock);
725 val &= ETMv4_EVENT_MASK;
726 config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
727 config->vinst_ctrl |= val;
728 spin_unlock(&drvdata->spinlock);
729 return size;
730}
731static DEVICE_ATTR_RW(event_vinst);
732
733static ssize_t s_exlevel_vinst_show(struct device *dev,
734 struct device_attribute *attr,
735 char *buf)
736{
737 unsigned long val;
738 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
739 struct etmv4_config *config = &drvdata->config;
740
741 val = BMVAL(config->vinst_ctrl, 16, 19);
742 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
743}
744
745static ssize_t s_exlevel_vinst_store(struct device *dev,
746 struct device_attribute *attr,
747 const char *buf, size_t size)
748{
749 unsigned long val;
750 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
751 struct etmv4_config *config = &drvdata->config;
752
753 if (kstrtoul(buf, 16, &val))
754 return -EINVAL;
755
756 spin_lock(&drvdata->spinlock);
757 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
758 config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
759 /* enable instruction tracing for corresponding exception level */
760 val &= drvdata->s_ex_level;
761 config->vinst_ctrl |= (val << 16);
762 spin_unlock(&drvdata->spinlock);
763 return size;
764}
765static DEVICE_ATTR_RW(s_exlevel_vinst);
766
767static ssize_t ns_exlevel_vinst_show(struct device *dev,
768 struct device_attribute *attr,
769 char *buf)
770{
771 unsigned long val;
772 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
773 struct etmv4_config *config = &drvdata->config;
774
775 /* EXLEVEL_NS, bits[23:20] */
776 val = BMVAL(config->vinst_ctrl, 20, 23);
777 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
778}
779
780static ssize_t ns_exlevel_vinst_store(struct device *dev,
781 struct device_attribute *attr,
782 const char *buf, size_t size)
783{
784 unsigned long val;
785 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
786 struct etmv4_config *config = &drvdata->config;
787
788 if (kstrtoul(buf, 16, &val))
789 return -EINVAL;
790
791 spin_lock(&drvdata->spinlock);
792 /* clear EXLEVEL_NS bits (bit[23] is never implemented */
793 config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
794 /* enable instruction tracing for corresponding exception level */
795 val &= drvdata->ns_ex_level;
796 config->vinst_ctrl |= (val << 20);
797 spin_unlock(&drvdata->spinlock);
798 return size;
799}
800static DEVICE_ATTR_RW(ns_exlevel_vinst);
801
802static ssize_t addr_idx_show(struct device *dev,
803 struct device_attribute *attr,
804 char *buf)
805{
806 unsigned long val;
807 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
808 struct etmv4_config *config = &drvdata->config;
809
810 val = config->addr_idx;
811 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
812}
813
814static ssize_t addr_idx_store(struct device *dev,
815 struct device_attribute *attr,
816 const char *buf, size_t size)
817{
818 unsigned long val;
819 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
820 struct etmv4_config *config = &drvdata->config;
821
822 if (kstrtoul(buf, 16, &val))
823 return -EINVAL;
824 if (val >= drvdata->nr_addr_cmp * 2)
825 return -EINVAL;
826
827 /*
828 * Use spinlock to ensure index doesn't change while it gets
829 * dereferenced multiple times within a spinlock block elsewhere.
830 */
831 spin_lock(&drvdata->spinlock);
832 config->addr_idx = val;
833 spin_unlock(&drvdata->spinlock);
834 return size;
835}
836static DEVICE_ATTR_RW(addr_idx);
837
838static ssize_t addr_instdatatype_show(struct device *dev,
839 struct device_attribute *attr,
840 char *buf)
841{
842 ssize_t len;
843 u8 val, idx;
844 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
845 struct etmv4_config *config = &drvdata->config;
846
847 spin_lock(&drvdata->spinlock);
848 idx = config->addr_idx;
849 val = BMVAL(config->addr_acc[idx], 0, 1);
850 len = scnprintf(buf, PAGE_SIZE, "%s\n",
851 val == ETM_INSTR_ADDR ? "instr" :
852 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
853 (val == ETM_DATA_STORE_ADDR ? "data_store" :
854 "data_load_store")));
855 spin_unlock(&drvdata->spinlock);
856 return len;
857}
858
859static ssize_t addr_instdatatype_store(struct device *dev,
860 struct device_attribute *attr,
861 const char *buf, size_t size)
862{
863 u8 idx;
864 char str[20] = "";
865 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
866 struct etmv4_config *config = &drvdata->config;
867
868 if (strlen(buf) >= 20)
869 return -EINVAL;
870 if (sscanf(buf, "%s", str) != 1)
871 return -EINVAL;
872
873 spin_lock(&drvdata->spinlock);
874 idx = config->addr_idx;
875 if (!strcmp(str, "instr"))
876 /* TYPE, bits[1:0] */
877 config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
878
879 spin_unlock(&drvdata->spinlock);
880 return size;
881}
882static DEVICE_ATTR_RW(addr_instdatatype);
883
884static ssize_t addr_single_show(struct device *dev,
885 struct device_attribute *attr,
886 char *buf)
887{
888 u8 idx;
889 unsigned long val;
890 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
891 struct etmv4_config *config = &drvdata->config;
892
893 idx = config->addr_idx;
894 spin_lock(&drvdata->spinlock);
895 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
896 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
897 spin_unlock(&drvdata->spinlock);
898 return -EPERM;
899 }
900 val = (unsigned long)config->addr_val[idx];
901 spin_unlock(&drvdata->spinlock);
902 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
903}
904
905static ssize_t addr_single_store(struct device *dev,
906 struct device_attribute *attr,
907 const char *buf, size_t size)
908{
909 u8 idx;
910 unsigned long val;
911 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
912 struct etmv4_config *config = &drvdata->config;
913
914 if (kstrtoul(buf, 16, &val))
915 return -EINVAL;
916
917 spin_lock(&drvdata->spinlock);
918 idx = config->addr_idx;
919 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
920 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
921 spin_unlock(&drvdata->spinlock);
922 return -EPERM;
923 }
924
925 config->addr_val[idx] = (u64)val;
926 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
927 spin_unlock(&drvdata->spinlock);
928 return size;
929}
930static DEVICE_ATTR_RW(addr_single);
931
932static ssize_t addr_range_show(struct device *dev,
933 struct device_attribute *attr,
934 char *buf)
935{
936 u8 idx;
937 unsigned long val1, val2;
938 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
939 struct etmv4_config *config = &drvdata->config;
940
941 spin_lock(&drvdata->spinlock);
942 idx = config->addr_idx;
943 if (idx % 2 != 0) {
944 spin_unlock(&drvdata->spinlock);
945 return -EPERM;
946 }
947 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
948 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
949 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
950 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
951 spin_unlock(&drvdata->spinlock);
952 return -EPERM;
953 }
954
955 val1 = (unsigned long)config->addr_val[idx];
956 val2 = (unsigned long)config->addr_val[idx + 1];
957 spin_unlock(&drvdata->spinlock);
958 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
959}
960
/*
 * Program an address range into the selected comparator pair.
 * Expects two hex values "start end"; the index must point at the
 * even (lower) half of a pair and both halves must be unused or
 * already of type RANGE. On success the pair is marked RANGE and the
 * ViewInst include/exclude bits are reprogrammed to match the current
 * trace mode.
 */
static ssize_t addr_range_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* range comparators are pairs: only the even index is accepted */
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	/* both halves of the pair must be free or already a range */
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = (u64)val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE.
	 * NOTE(review): the helper's error return is deliberately not
	 * propagated here - the range itself has already been stored.
	 */
	etm4_set_mode_exclude(drvdata,
			      config->mode & ETM_MODE_EXCLUDE ? true : false);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);
1006
1007static ssize_t addr_start_show(struct device *dev,
1008 struct device_attribute *attr,
1009 char *buf)
1010{
1011 u8 idx;
1012 unsigned long val;
1013 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1014 struct etmv4_config *config = &drvdata->config;
1015
1016 spin_lock(&drvdata->spinlock);
1017 idx = config->addr_idx;
1018
1019 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1020 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1021 spin_unlock(&drvdata->spinlock);
1022 return -EPERM;
1023 }
1024
1025 val = (unsigned long)config->addr_val[idx];
1026 spin_unlock(&drvdata->spinlock);
1027 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1028}
1029
1030static ssize_t addr_start_store(struct device *dev,
1031 struct device_attribute *attr,
1032 const char *buf, size_t size)
1033{
1034 u8 idx;
1035 unsigned long val;
1036 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1037 struct etmv4_config *config = &drvdata->config;
1038
1039 if (kstrtoul(buf, 16, &val))
1040 return -EINVAL;
1041
1042 spin_lock(&drvdata->spinlock);
1043 idx = config->addr_idx;
1044 if (!drvdata->nr_addr_cmp) {
1045 spin_unlock(&drvdata->spinlock);
1046 return -EINVAL;
1047 }
1048 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1049 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1050 spin_unlock(&drvdata->spinlock);
1051 return -EPERM;
1052 }
1053
1054 config->addr_val[idx] = (u64)val;
1055 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1056 config->vissctlr |= BIT(idx);
1057 /* SSSTATUS, bit[9] - turn on start/stop logic */
1058 config->vinst_ctrl |= BIT(9);
1059 spin_unlock(&drvdata->spinlock);
1060 return size;
1061}
1062static DEVICE_ATTR_RW(addr_start);
1063
1064static ssize_t addr_stop_show(struct device *dev,
1065 struct device_attribute *attr,
1066 char *buf)
1067{
1068 u8 idx;
1069 unsigned long val;
1070 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1071 struct etmv4_config *config = &drvdata->config;
1072
1073 spin_lock(&drvdata->spinlock);
1074 idx = config->addr_idx;
1075
1076 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1077 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1078 spin_unlock(&drvdata->spinlock);
1079 return -EPERM;
1080 }
1081
1082 val = (unsigned long)config->addr_val[idx];
1083 spin_unlock(&drvdata->spinlock);
1084 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1085}
1086
1087static ssize_t addr_stop_store(struct device *dev,
1088 struct device_attribute *attr,
1089 const char *buf, size_t size)
1090{
1091 u8 idx;
1092 unsigned long val;
1093 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1094 struct etmv4_config *config = &drvdata->config;
1095
1096 if (kstrtoul(buf, 16, &val))
1097 return -EINVAL;
1098
1099 spin_lock(&drvdata->spinlock);
1100 idx = config->addr_idx;
1101 if (!drvdata->nr_addr_cmp) {
1102 spin_unlock(&drvdata->spinlock);
1103 return -EINVAL;
1104 }
1105 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1106 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1107 spin_unlock(&drvdata->spinlock);
1108 return -EPERM;
1109 }
1110
1111 config->addr_val[idx] = (u64)val;
1112 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1113 config->vissctlr |= BIT(idx + 16);
1114 /* SSSTATUS, bit[9] - turn on start/stop logic */
1115 config->vinst_ctrl |= BIT(9);
1116 spin_unlock(&drvdata->spinlock);
1117 return size;
1118}
1119static DEVICE_ATTR_RW(addr_stop);
1120
1121static ssize_t addr_ctxtype_show(struct device *dev,
1122 struct device_attribute *attr,
1123 char *buf)
1124{
1125 ssize_t len;
1126 u8 idx, val;
1127 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1128 struct etmv4_config *config = &drvdata->config;
1129
1130 spin_lock(&drvdata->spinlock);
1131 idx = config->addr_idx;
1132 /* CONTEXTTYPE, bits[3:2] */
1133 val = BMVAL(config->addr_acc[idx], 2, 3);
1134 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1135 (val == ETM_CTX_CTXID ? "ctxid" :
1136 (val == ETM_CTX_VMID ? "vmid" : "all")));
1137 spin_unlock(&drvdata->spinlock);
1138 return len;
1139}
1140
/*
 * Set the context type of the selected address comparator. Accepted
 * tokens: "none", "ctxid", "vmid", "all". The ctxid/vmid requests are
 * silently ignored when the implementation has no context-ID or VMID
 * comparators respectively (numcidc / numvmidc == 0); unrecognised
 * tokens leave the configuration untouched but still return success.
 */
static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* reject input that cannot fit the token buffer */
	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID */
		if (drvdata->numcidc) {
			config->addr_acc[idx] |= BIT(2);
			config->addr_acc[idx] &= ~BIT(3);
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID */
		if (drvdata->numvmidc) {
			config->addr_acc[idx] &= ~BIT(2);
			config->addr_acc[idx] |= BIT(3);
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID
		 */
		if (drvdata->numcidc)
			config->addr_acc[idx] |= BIT(2);
		if (drvdata->numvmidc)
			config->addr_acc[idx] |= BIT(3);
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);
1186
1187static ssize_t addr_context_show(struct device *dev,
1188 struct device_attribute *attr,
1189 char *buf)
1190{
1191 u8 idx;
1192 unsigned long val;
1193 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1194 struct etmv4_config *config = &drvdata->config;
1195
1196 spin_lock(&drvdata->spinlock);
1197 idx = config->addr_idx;
1198 /* context ID comparator bits[6:4] */
1199 val = BMVAL(config->addr_acc[idx], 4, 6);
1200 spin_unlock(&drvdata->spinlock);
1201 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1202}
1203
1204static ssize_t addr_context_store(struct device *dev,
1205 struct device_attribute *attr,
1206 const char *buf, size_t size)
1207{
1208 u8 idx;
1209 unsigned long val;
1210 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1211 struct etmv4_config *config = &drvdata->config;
1212
1213 if (kstrtoul(buf, 16, &val))
1214 return -EINVAL;
1215 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1216 return -EINVAL;
1217 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1218 drvdata->numcidc : drvdata->numvmidc))
1219 return -EINVAL;
1220
1221 spin_lock(&drvdata->spinlock);
1222 idx = config->addr_idx;
1223 /* clear context ID comparator bits[6:4] */
1224 config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1225 config->addr_acc[idx] |= (val << 4);
1226 spin_unlock(&drvdata->spinlock);
1227 return size;
1228}
1229static DEVICE_ATTR_RW(addr_context);
1230
1231static ssize_t seq_idx_show(struct device *dev,
1232 struct device_attribute *attr,
1233 char *buf)
1234{
1235 unsigned long val;
1236 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1237 struct etmv4_config *config = &drvdata->config;
1238
1239 val = config->seq_idx;
1240 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1241}
1242
1243static ssize_t seq_idx_store(struct device *dev,
1244 struct device_attribute *attr,
1245 const char *buf, size_t size)
1246{
1247 unsigned long val;
1248 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1249 struct etmv4_config *config = &drvdata->config;
1250
1251 if (kstrtoul(buf, 16, &val))
1252 return -EINVAL;
1253 if (val >= drvdata->nrseqstate - 1)
1254 return -EINVAL;
1255
1256 /*
1257 * Use spinlock to ensure index doesn't change while it gets
1258 * dereferenced multiple times within a spinlock block elsewhere.
1259 */
1260 spin_lock(&drvdata->spinlock);
1261 config->seq_idx = val;
1262 spin_unlock(&drvdata->spinlock);
1263 return size;
1264}
1265static DEVICE_ATTR_RW(seq_idx);
1266
1267static ssize_t seq_state_show(struct device *dev,
1268 struct device_attribute *attr,
1269 char *buf)
1270{
1271 unsigned long val;
1272 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1273 struct etmv4_config *config = &drvdata->config;
1274
1275 val = config->seq_state;
1276 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1277}
1278
1279static ssize_t seq_state_store(struct device *dev,
1280 struct device_attribute *attr,
1281 const char *buf, size_t size)
1282{
1283 unsigned long val;
1284 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1285 struct etmv4_config *config = &drvdata->config;
1286
1287 if (kstrtoul(buf, 16, &val))
1288 return -EINVAL;
1289 if (val >= drvdata->nrseqstate)
1290 return -EINVAL;
1291
1292 config->seq_state = val;
1293 return size;
1294}
1295static DEVICE_ATTR_RW(seq_state);
1296
1297static ssize_t seq_event_show(struct device *dev,
1298 struct device_attribute *attr,
1299 char *buf)
1300{
1301 u8 idx;
1302 unsigned long val;
1303 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1304 struct etmv4_config *config = &drvdata->config;
1305
1306 spin_lock(&drvdata->spinlock);
1307 idx = config->seq_idx;
1308 val = config->seq_ctrl[idx];
1309 spin_unlock(&drvdata->spinlock);
1310 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1311}
1312
1313static ssize_t seq_event_store(struct device *dev,
1314 struct device_attribute *attr,
1315 const char *buf, size_t size)
1316{
1317 u8 idx;
1318 unsigned long val;
1319 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1320 struct etmv4_config *config = &drvdata->config;
1321
1322 if (kstrtoul(buf, 16, &val))
1323 return -EINVAL;
1324
1325 spin_lock(&drvdata->spinlock);
1326 idx = config->seq_idx;
1327 /* RST, bits[7:0] */
1328 config->seq_ctrl[idx] = val & 0xFF;
1329 spin_unlock(&drvdata->spinlock);
1330 return size;
1331}
1332static DEVICE_ATTR_RW(seq_event);
1333
1334static ssize_t seq_reset_event_show(struct device *dev,
1335 struct device_attribute *attr,
1336 char *buf)
1337{
1338 unsigned long val;
1339 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1340 struct etmv4_config *config = &drvdata->config;
1341
1342 val = config->seq_rst;
1343 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1344}
1345
1346static ssize_t seq_reset_event_store(struct device *dev,
1347 struct device_attribute *attr,
1348 const char *buf, size_t size)
1349{
1350 unsigned long val;
1351 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1352 struct etmv4_config *config = &drvdata->config;
1353
1354 if (kstrtoul(buf, 16, &val))
1355 return -EINVAL;
1356 if (!(drvdata->nrseqstate))
1357 return -EINVAL;
1358
1359 config->seq_rst = val & ETMv4_EVENT_MASK;
1360 return size;
1361}
1362static DEVICE_ATTR_RW(seq_reset_event);
1363
1364static ssize_t cntr_idx_show(struct device *dev,
1365 struct device_attribute *attr,
1366 char *buf)
1367{
1368 unsigned long val;
1369 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1370 struct etmv4_config *config = &drvdata->config;
1371
1372 val = config->cntr_idx;
1373 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1374}
1375
1376static ssize_t cntr_idx_store(struct device *dev,
1377 struct device_attribute *attr,
1378 const char *buf, size_t size)
1379{
1380 unsigned long val;
1381 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1382 struct etmv4_config *config = &drvdata->config;
1383
1384 if (kstrtoul(buf, 16, &val))
1385 return -EINVAL;
1386 if (val >= drvdata->nr_cntr)
1387 return -EINVAL;
1388
1389 /*
1390 * Use spinlock to ensure index doesn't change while it gets
1391 * dereferenced multiple times within a spinlock block elsewhere.
1392 */
1393 spin_lock(&drvdata->spinlock);
1394 config->cntr_idx = val;
1395 spin_unlock(&drvdata->spinlock);
1396 return size;
1397}
1398static DEVICE_ATTR_RW(cntr_idx);
1399
1400static ssize_t cntrldvr_show(struct device *dev,
1401 struct device_attribute *attr,
1402 char *buf)
1403{
1404 u8 idx;
1405 unsigned long val;
1406 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1407 struct etmv4_config *config = &drvdata->config;
1408
1409 spin_lock(&drvdata->spinlock);
1410 idx = config->cntr_idx;
1411 val = config->cntrldvr[idx];
1412 spin_unlock(&drvdata->spinlock);
1413 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1414}
1415
1416static ssize_t cntrldvr_store(struct device *dev,
1417 struct device_attribute *attr,
1418 const char *buf, size_t size)
1419{
1420 u8 idx;
1421 unsigned long val;
1422 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1423 struct etmv4_config *config = &drvdata->config;
1424
1425 if (kstrtoul(buf, 16, &val))
1426 return -EINVAL;
1427 if (val > ETM_CNTR_MAX_VAL)
1428 return -EINVAL;
1429
1430 spin_lock(&drvdata->spinlock);
1431 idx = config->cntr_idx;
1432 config->cntrldvr[idx] = val;
1433 spin_unlock(&drvdata->spinlock);
1434 return size;
1435}
1436static DEVICE_ATTR_RW(cntrldvr);
1437
1438static ssize_t cntr_val_show(struct device *dev,
1439 struct device_attribute *attr,
1440 char *buf)
1441{
1442 u8 idx;
1443 unsigned long val;
1444 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1445 struct etmv4_config *config = &drvdata->config;
1446
1447 spin_lock(&drvdata->spinlock);
1448 idx = config->cntr_idx;
1449 val = config->cntr_val[idx];
1450 spin_unlock(&drvdata->spinlock);
1451 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1452}
1453
1454static ssize_t cntr_val_store(struct device *dev,
1455 struct device_attribute *attr,
1456 const char *buf, size_t size)
1457{
1458 u8 idx;
1459 unsigned long val;
1460 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1461 struct etmv4_config *config = &drvdata->config;
1462
1463 if (kstrtoul(buf, 16, &val))
1464 return -EINVAL;
1465 if (val > ETM_CNTR_MAX_VAL)
1466 return -EINVAL;
1467
1468 spin_lock(&drvdata->spinlock);
1469 idx = config->cntr_idx;
1470 config->cntr_val[idx] = val;
1471 spin_unlock(&drvdata->spinlock);
1472 return size;
1473}
1474static DEVICE_ATTR_RW(cntr_val);
1475
1476static ssize_t cntr_ctrl_show(struct device *dev,
1477 struct device_attribute *attr,
1478 char *buf)
1479{
1480 u8 idx;
1481 unsigned long val;
1482 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1483 struct etmv4_config *config = &drvdata->config;
1484
1485 spin_lock(&drvdata->spinlock);
1486 idx = config->cntr_idx;
1487 val = config->cntr_ctrl[idx];
1488 spin_unlock(&drvdata->spinlock);
1489 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1490}
1491
1492static ssize_t cntr_ctrl_store(struct device *dev,
1493 struct device_attribute *attr,
1494 const char *buf, size_t size)
1495{
1496 u8 idx;
1497 unsigned long val;
1498 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1499 struct etmv4_config *config = &drvdata->config;
1500
1501 if (kstrtoul(buf, 16, &val))
1502 return -EINVAL;
1503
1504 spin_lock(&drvdata->spinlock);
1505 idx = config->cntr_idx;
1506 config->cntr_ctrl[idx] = val;
1507 spin_unlock(&drvdata->spinlock);
1508 return size;
1509}
1510static DEVICE_ATTR_RW(cntr_ctrl);
1511
1512static ssize_t res_idx_show(struct device *dev,
1513 struct device_attribute *attr,
1514 char *buf)
1515{
1516 unsigned long val;
1517 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1518 struct etmv4_config *config = &drvdata->config;
1519
1520 val = config->res_idx;
1521 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1522}
1523
1524static ssize_t res_idx_store(struct device *dev,
1525 struct device_attribute *attr,
1526 const char *buf, size_t size)
1527{
1528 unsigned long val;
1529 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1530 struct etmv4_config *config = &drvdata->config;
1531
1532 if (kstrtoul(buf, 16, &val))
1533 return -EINVAL;
1534 /* Resource selector pair 0 is always implemented and reserved */
1535 if ((val == 0) || (val >= drvdata->nr_resource))
1536 return -EINVAL;
1537
1538 /*
1539 * Use spinlock to ensure index doesn't change while it gets
1540 * dereferenced multiple times within a spinlock block elsewhere.
1541 */
1542 spin_lock(&drvdata->spinlock);
1543 config->res_idx = val;
1544 spin_unlock(&drvdata->spinlock);
1545 return size;
1546}
1547static DEVICE_ATTR_RW(res_idx);
1548
1549static ssize_t res_ctrl_show(struct device *dev,
1550 struct device_attribute *attr,
1551 char *buf)
1552{
1553 u8 idx;
1554 unsigned long val;
1555 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1556 struct etmv4_config *config = &drvdata->config;
1557
1558 spin_lock(&drvdata->spinlock);
1559 idx = config->res_idx;
1560 val = config->res_ctrl[idx];
1561 spin_unlock(&drvdata->spinlock);
1562 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1563}
1564
1565static ssize_t res_ctrl_store(struct device *dev,
1566 struct device_attribute *attr,
1567 const char *buf, size_t size)
1568{
1569 u8 idx;
1570 unsigned long val;
1571 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1572 struct etmv4_config *config = &drvdata->config;
1573
1574 if (kstrtoul(buf, 16, &val))
1575 return -EINVAL;
1576
1577 spin_lock(&drvdata->spinlock);
1578 idx = config->res_idx;
1579 /* For odd idx pair inversal bit is RES0 */
1580 if (idx % 2 != 0)
1581 /* PAIRINV, bit[21] */
1582 val &= ~BIT(21);
1583 config->res_ctrl[idx] = val;
1584 spin_unlock(&drvdata->spinlock);
1585 return size;
1586}
1587static DEVICE_ATTR_RW(res_ctrl);
1588
1589static ssize_t ctxid_idx_show(struct device *dev,
1590 struct device_attribute *attr,
1591 char *buf)
1592{
1593 unsigned long val;
1594 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1595 struct etmv4_config *config = &drvdata->config;
1596
1597 val = config->ctxid_idx;
1598 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1599}
1600
1601static ssize_t ctxid_idx_store(struct device *dev,
1602 struct device_attribute *attr,
1603 const char *buf, size_t size)
1604{
1605 unsigned long val;
1606 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1607 struct etmv4_config *config = &drvdata->config;
1608
1609 if (kstrtoul(buf, 16, &val))
1610 return -EINVAL;
1611 if (val >= drvdata->numcidc)
1612 return -EINVAL;
1613
1614 /*
1615 * Use spinlock to ensure index doesn't change while it gets
1616 * dereferenced multiple times within a spinlock block elsewhere.
1617 */
1618 spin_lock(&drvdata->spinlock);
1619 config->ctxid_idx = val;
1620 spin_unlock(&drvdata->spinlock);
1621 return size;
1622}
1623static DEVICE_ATTR_RW(ctxid_idx);
1624
1625static ssize_t ctxid_pid_show(struct device *dev,
1626 struct device_attribute *attr,
1627 char *buf)
1628{
1629 u8 idx;
1630 unsigned long val;
1631 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1632 struct etmv4_config *config = &drvdata->config;
1633
1634 /*
1635 * Don't use contextID tracing if coming from a PID namespace. See
1636 * comment in ctxid_pid_store().
1637 */
1638 if (task_active_pid_ns(current) != &init_pid_ns)
1639 return -EINVAL;
1640
1641 spin_lock(&drvdata->spinlock);
1642 idx = config->ctxid_idx;
1643 val = (unsigned long)config->ctxid_pid[idx];
1644 spin_unlock(&drvdata->spinlock);
1645 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1646}
1647
/*
 * Program a PID into the selected context ID comparator. Requires the
 * caller to be in the initial PID namespace and the trace unit to
 * implement context ID tracing (non-zero ctxid_size and numcidc).
 */
static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long pid;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * When contextID tracing is enabled the tracers will insert the
	 * value found in the contextID register in the trace stream. But if
	 * a process is in a namespace the PID of that process as seen from the
	 * namespace won't be what the kernel sees, something that makes the
	 * feature confusing and can potentially leak kernel only information.
	 * As such refuse to use the feature if @current is not in the initial
	 * PID namespace.
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &pid))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	config->ctxid_pid[idx] = (u64)pid;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);
1686
1687static ssize_t ctxid_masks_show(struct device *dev,
1688 struct device_attribute *attr,
1689 char *buf)
1690{
1691 unsigned long val1, val2;
1692 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1693 struct etmv4_config *config = &drvdata->config;
1694
1695 /*
1696 * Don't use contextID tracing if coming from a PID namespace. See
1697 * comment in ctxid_pid_store().
1698 */
1699 if (task_active_pid_ns(current) != &init_pid_ns)
1700 return -EINVAL;
1701
1702 spin_lock(&drvdata->spinlock);
1703 val1 = config->ctxid_mask0;
1704 val2 = config->ctxid_mask1;
1705 spin_unlock(&drvdata->spinlock);
1706 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1707}
1708
/*
 * Program the contextID comparator mask registers from a "mask0 mask1"
 * pair, then zero the comparator-value bytes the new masks cover.  Each
 * byte of mask0/mask1 is the per-byte mask for one ctxid comparator.
 */
static ssize_t ctxid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	/* Two hex words required: mask0 (comparators 0-3), mask1 (4-7). */
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * each byte[0..3] controls mask value applied to ctxid
	 * comparator[0..3]; keep only the bytes for the comparators that
	 * actually exist and leave the rest zero.
	 */
	switch (drvdata->numcidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->ctxid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->ctxid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->ctxid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->ctxid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2;
		break;
	default:
		break;
	}
	/*
	 * If software sets a mask bit to 1, it must program relevant byte
	 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
	 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
	 * (byte 3) of the ctxid comparator0 value register.
	 */
	mask = config->ctxid_mask0;
	for (i = 0; i < drvdata->numcidc; i++) {
		/* mask value of corresponding ctxid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective ctxid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next ctxid comparator mask value */
		if (i == 3)
			/* ctxid comparators[4-7] */
			mask = config->ctxid_mask1;
		else
			mask >>= 0x8;
	}

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_masks);
1811
1812static ssize_t vmid_idx_show(struct device *dev,
1813 struct device_attribute *attr,
1814 char *buf)
1815{
1816 unsigned long val;
1817 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1818 struct etmv4_config *config = &drvdata->config;
1819
1820 val = config->vmid_idx;
1821 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1822}
1823
1824static ssize_t vmid_idx_store(struct device *dev,
1825 struct device_attribute *attr,
1826 const char *buf, size_t size)
1827{
1828 unsigned long val;
1829 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1830 struct etmv4_config *config = &drvdata->config;
1831
1832 if (kstrtoul(buf, 16, &val))
1833 return -EINVAL;
1834 if (val >= drvdata->numvmidc)
1835 return -EINVAL;
1836
1837 /*
1838 * Use spinlock to ensure index doesn't change while it gets
1839 * dereferenced multiple times within a spinlock block elsewhere.
1840 */
1841 spin_lock(&drvdata->spinlock);
1842 config->vmid_idx = val;
1843 spin_unlock(&drvdata->spinlock);
1844 return size;
1845}
1846static DEVICE_ATTR_RW(vmid_idx);
1847
1848static ssize_t vmid_val_show(struct device *dev,
1849 struct device_attribute *attr,
1850 char *buf)
1851{
1852 unsigned long val;
1853 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1854 struct etmv4_config *config = &drvdata->config;
1855
1856 val = (unsigned long)config->vmid_val[config->vmid_idx];
1857 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1858}
1859
1860static ssize_t vmid_val_store(struct device *dev,
1861 struct device_attribute *attr,
1862 const char *buf, size_t size)
1863{
1864 unsigned long val;
1865 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1866 struct etmv4_config *config = &drvdata->config;
1867
1868 /*
1869 * only implemented when vmid tracing is enabled, i.e. at least one
1870 * vmid comparator is implemented and at least 8 bit vmid size
1871 */
1872 if (!drvdata->vmid_size || !drvdata->numvmidc)
1873 return -EINVAL;
1874 if (kstrtoul(buf, 16, &val))
1875 return -EINVAL;
1876
1877 spin_lock(&drvdata->spinlock);
1878 config->vmid_val[config->vmid_idx] = (u64)val;
1879 spin_unlock(&drvdata->spinlock);
1880 return size;
1881}
1882static DEVICE_ATTR_RW(vmid_val);
1883
1884static ssize_t vmid_masks_show(struct device *dev,
1885 struct device_attribute *attr, char *buf)
1886{
1887 unsigned long val1, val2;
1888 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1889 struct etmv4_config *config = &drvdata->config;
1890
1891 spin_lock(&drvdata->spinlock);
1892 val1 = config->vmid_mask0;
1893 val2 = config->vmid_mask1;
1894 spin_unlock(&drvdata->spinlock);
1895 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1896}
1897
/*
 * Program the VMID comparator mask registers from a "mask0 mask1" pair,
 * then zero the comparator-value bytes the new masks cover.  Each byte of
 * mask0/mask1 is the per-byte mask for one vmid comparator.
 */
static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	/* Two hex words required: mask0 (comparators 0-3), mask1 (4-7). */
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);

	/*
	 * each byte[0..3] controls mask value applied to vmid
	 * comparator[0..3]; keep only the bytes for the comparators that
	 * actually exist and leave the rest zero.
	 */
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2;
		break;
	default:
		break;
	}

	/*
	 * If software sets a mask bit to 1, it must program relevant byte
	 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
	 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
	 * (byte 3) of the vmid comparator0 value register.
	 */
	mask = config->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/* mask value of corresponding vmid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective vmid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = config->vmid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_masks);
1993
1994static ssize_t cpu_show(struct device *dev,
1995 struct device_attribute *attr, char *buf)
1996{
1997 int val;
1998 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1999
2000 val = drvdata->cpu;
2001 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2002
2003}
2004static DEVICE_ATTR_RO(cpu);
2005
/*
 * Attributes exposed at the root of the ETMv4 sysfs directory: resource
 * counts, trace configuration knobs, and the comparator (address, ctxid,
 * vmid), sequencer, counter and resource selection interfaces.
 */
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};
2055
/*
 * Parameter block for reading a trace unit register on the CPU the
 * tracer is affine to, via an SMP cross call.
 */
struct etmv4_reg {
	void __iomem *addr;	/* register address to read */
	u32 data;		/* value read back by the cross call */
};
2060
2061static void do_smp_cross_read(void *data)
2062{
2063 struct etmv4_reg *reg = data;
2064
2065 reg->data = readl_relaxed(reg->addr);
2066}
2067
2068static u32 etmv4_cross_read(const struct device *dev, u32 offset)
2069{
2070 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
2071 struct etmv4_reg reg;
2072
2073 reg.addr = drvdata->base + offset;
2074 /*
2075 * smp cross call ensures the CPU will be powered up before
2076 * accessing the ETMv4 trace core registers
2077 */
2078 smp_call_function_single(drvdata->cpu, do_smp_cross_read, ®, 1);
2079 return reg.data;
2080}
2081
/* Generate a sysfs accessor that reads the register directly. */
#define coresight_etm4x_reg(name, offset)			\
	coresight_simple_reg32(struct etmv4_drvdata, name, offset)

/*
 * Generate a sysfs accessor that reads the register via an SMP cross
 * call to the tracer's CPU (see etmv4_cross_read()).
 */
#define coresight_etm4x_cross_read(name, offset)			\
	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
			      name, offset)

/* Management register accessors. */
coresight_etm4x_reg(trcpdcr, TRCPDCR);
coresight_etm4x_reg(trcpdsr, TRCPDSR);
coresight_etm4x_reg(trclsr, TRCLSR);
coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_reg(trcdevid, TRCDEVID);
coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
coresight_etm4x_reg(trcpidr0, TRCPIDR0);
coresight_etm4x_reg(trcpidr1, TRCPIDR1);
coresight_etm4x_reg(trcpidr2, TRCPIDR2);
coresight_etm4x_reg(trcpidr3, TRCPIDR3);
/* These require the trace unit's CPU to be powered up to read. */
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
2102
/* Management registers, exposed under the "mgmt" sysfs group. */
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcconfig.attr,
	&dev_attr_trctraceid.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};
2119
/* ID registers, read via cross call so the CPU is powered up. */
coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);
2133
/* ID registers, exposed under the "trcidr" sysfs group. */
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};
2150
/* Root group: configuration and comparator attributes (no subdirectory). */
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

/* "mgmt" subdirectory: management register dumps. */
static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

/* "trcidr" subdirectory: trace unit ID register dumps. */
static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

/* NULL-terminated list handed to the coresight core at registration. */
const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};