1/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
2 *
3 * Description: CoreSight Trace Memory Controller driver
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/types.h>
18#include <linux/device.h>
19#include <linux/io.h>
20#include <linux/err.h>
21#include <linux/fs.h>
22#include <linux/miscdevice.h>
23#include <linux/uaccess.h>
24#include <linux/slab.h>
25#include <linux/dma-mapping.h>
26#include <linux/spinlock.h>
27#include <linux/pm_runtime.h>
28#include <linux/of.h>
29#include <linux/coresight.h>
30#include <linux/amba/bus.h>
31
32#include "coresight-priv.h"
33
34#define TMC_RSZ 0x004
35#define TMC_STS 0x00c
36#define TMC_RRD 0x010
37#define TMC_RRP 0x014
38#define TMC_RWP 0x018
39#define TMC_TRG 0x01c
40#define TMC_CTL 0x020
41#define TMC_RWD 0x024
42#define TMC_MODE 0x028
43#define TMC_LBUFLEVEL 0x02c
44#define TMC_CBUFLEVEL 0x030
45#define TMC_BUFWM 0x034
46#define TMC_RRPHI 0x038
47#define TMC_RWPHI 0x03c
48#define TMC_AXICTL 0x110
49#define TMC_DBALO 0x118
50#define TMC_DBAHI 0x11c
51#define TMC_FFSR 0x300
52#define TMC_FFCR 0x304
53#define TMC_PSCR 0x308
54#define TMC_ITMISCOP0 0xee0
55#define TMC_ITTRFLIN 0xee8
56#define TMC_ITATBDATA0 0xeec
57#define TMC_ITATBCTR2 0xef0
58#define TMC_ITATBCTR1 0xef4
59#define TMC_ITATBCTR0 0xef8
60
61/* register description */
62/* TMC_CTL - 0x020 */
63#define TMC_CTL_CAPT_EN BIT(0)
64/* TMC_STS - 0x00C */
65#define TMC_STS_TRIGGERED BIT(1)
66/* TMC_AXICTL - 0x110 */
67#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
68#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
69#define TMC_AXICTL_SCT_GAT_MODE BIT(7)
70#define TMC_AXICTL_WR_BURST_LEN 0xF00
71/* TMC_FFCR - 0x304 */
72#define TMC_FFCR_EN_FMT BIT(0)
73#define TMC_FFCR_EN_TI BIT(1)
74#define TMC_FFCR_FON_FLIN BIT(4)
75#define TMC_FFCR_FON_TRIG_EVT BIT(5)
76#define TMC_FFCR_FLUSHMAN BIT(6)
77#define TMC_FFCR_TRIGON_TRIGIN BIT(8)
78#define TMC_FFCR_STOP_ON_FLUSH BIT(12)
79
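/*
 * Bit positions polled via coresight_timeout(). Note that, despite its
 * name, TMC_STS_TRIGGERED_BIT (2) is the position of the TMCReady flag
 * in TMC_STS, which tmc_wait_for_ready() waits on.
 */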
80#define TMC_STS_TRIGGERED_BIT 2
81#define TMC_FFCR_FLUSHMAN_BIT 6
82
83enum tmc_config_type {
84 TMC_CONFIG_TYPE_ETB,
85 TMC_CONFIG_TYPE_ETR,
86 TMC_CONFIG_TYPE_ETF,
87};
88
89enum tmc_mode {
90 TMC_MODE_CIRCULAR_BUFFER,
91 TMC_MODE_SOFTWARE_FIFO,
92 TMC_MODE_HARDWARE_FIFO,
93};
94
95enum tmc_mem_intf_width {
96 TMC_MEM_INTF_WIDTH_32BITS = 0x2,
97 TMC_MEM_INTF_WIDTH_64BITS = 0x3,
98 TMC_MEM_INTF_WIDTH_128BITS = 0x4,
99 TMC_MEM_INTF_WIDTH_256BITS = 0x5,
100};
101
/**
 * struct tmc_drvdata - specifics associated to a TMC component
 * @base: memory mapped base address for this component.
 * @dev: the device entity associated to this component.
 * @csdev: component vitals needed by the framework.
 * @miscdev: specifics to handle "/dev/xyz.tmc" entry.
 * @spinlock: serialize accesses to the TMC registers and state.
 * @read_count: manages preparation of buffer for reading.
 * @reading: the buffer is currently being read through the misc device.
 * @buf: area of memory where trace data get written.
 * @paddr: DMA start location in RAM.
 * @vaddr: virtual representation of @paddr.
 * @size: @buf size.
 * @enable: this TMC is being used.
 * @config_type: TMC variant, must be of type @tmc_config_type.
 * @trigger_cntr: amount of words to store after a trigger.
 */
118struct tmc_drvdata {
119 void __iomem *base;
120 struct device *dev;
121 struct coresight_device *csdev;
122 struct miscdevice miscdev;
123 spinlock_t spinlock;
124 int read_count;
125 bool reading;
126 char *buf;
127 dma_addr_t paddr;
128 void *vaddr;
129 u32 size;
130 bool enable;
131 enum tmc_config_type config_type;
132 u32 trigger_cntr;
133};
134
135static void tmc_wait_for_ready(struct tmc_drvdata *drvdata)
136{
137 /* Ensure formatter, unformatter and hardware fifo are empty */
138 if (coresight_timeout(drvdata->base,
139 TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) {
		dev_err(drvdata->dev,
			"timeout while waiting for TMC to be ready\n");
143 }
144}
145
146static void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
147{
148 u32 ffcr;
149
150 ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
151 ffcr |= TMC_FFCR_STOP_ON_FLUSH;
152 writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
153 ffcr |= TMC_FFCR_FLUSHMAN;
154 writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
155 /* Ensure flush completes */
156 if (coresight_timeout(drvdata->base,
157 TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
		dev_err(drvdata->dev,
			"timeout while waiting for completion of manual flush\n");
161 }
162
163 tmc_wait_for_ready(drvdata);
164}
165
166static void tmc_enable_hw(struct tmc_drvdata *drvdata)
167{
168 writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
169}
170
171static void tmc_disable_hw(struct tmc_drvdata *drvdata)
172{
173 writel_relaxed(0x0, drvdata->base + TMC_CTL);
174}
175
176static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
177{
178 /* Zero out the memory to help with debug */
179 memset(drvdata->buf, 0, drvdata->size);
180
181 CS_UNLOCK(drvdata->base);
182
183 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
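	/*
	 * Enable the formatter and the insertion of triggers in the trace
	 * stream, flush on FLUSHIN and on trigger events, and raise a
	 * trigger event when TRIGIN is asserted.
	 */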
184 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
185 TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
186 TMC_FFCR_TRIGON_TRIGIN,
187 drvdata->base + TMC_FFCR);
188
189 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
190 tmc_enable_hw(drvdata);
191
192 CS_LOCK(drvdata->base);
193}
194
195static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
196{
197 u32 axictl;
198
199 /* Zero out the memory to help with debug */
200 memset(drvdata->vaddr, 0, drvdata->size);
201
202 CS_UNLOCK(drvdata->base);
203
204 writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
205 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
206
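	/*
	 * Program the AXI controller: use the maximum write burst length,
	 * write to a single contiguous buffer (scatter/gather disabled)
	 * and issue non-secure AXI transactions.
	 */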
207 axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
208 axictl |= TMC_AXICTL_WR_BURST_LEN;
209 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
210 axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
211 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
212 axictl = (axictl &
213 ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
214 TMC_AXICTL_PROT_CTL_B1;
215 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
216
217 writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
218 writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
219 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
220 TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
221 TMC_FFCR_TRIGON_TRIGIN,
222 drvdata->base + TMC_FFCR);
223 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
224 tmc_enable_hw(drvdata);
225
226 CS_LOCK(drvdata->base);
227}
228
229static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
230{
231 CS_UNLOCK(drvdata->base);
232
233 writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
234 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
235 drvdata->base + TMC_FFCR);
236 writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
237 tmc_enable_hw(drvdata);
238
239 CS_LOCK(drvdata->base);
240}
241
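/*
 * A TMC cannot be enabled as a sink or link while its buffer is being
 * read through the misc device; tmc_enable() returns -EBUSY in that case.
 */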
242static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
243{
244 unsigned long flags;
245
246 spin_lock_irqsave(&drvdata->spinlock, flags);
247 if (drvdata->reading) {
248 spin_unlock_irqrestore(&drvdata->spinlock, flags);
249 return -EBUSY;
250 }
251
252 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
253 tmc_etb_enable_hw(drvdata);
254 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
255 tmc_etr_enable_hw(drvdata);
256 } else {
257 if (mode == TMC_MODE_CIRCULAR_BUFFER)
258 tmc_etb_enable_hw(drvdata);
259 else
260 tmc_etf_enable_hw(drvdata);
261 }
262 drvdata->enable = true;
263 spin_unlock_irqrestore(&drvdata->spinlock, flags);
264
265 dev_info(drvdata->dev, "TMC enabled\n");
266 return 0;
267}
268
269static int tmc_enable_sink(struct coresight_device *csdev, u32 mode)
270{
271 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
272
273 return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
274}
275
276static int tmc_enable_link(struct coresight_device *csdev, int inport,
277 int outport)
278{
279 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
280
281 return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO);
282}
283
284static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
285{
286 enum tmc_mem_intf_width memwidth;
287 u8 memwords;
288 char *bufp;
289 u32 read_data;
290 int i;
291
292 memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10);
293 if (memwidth == TMC_MEM_INTF_WIDTH_32BITS)
294 memwords = 1;
295 else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS)
296 memwords = 2;
297 else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS)
298 memwords = 4;
299 else
300 memwords = 8;
301
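	/*
	 * Drain the internal RAM one memory-width access at a time; RRD
	 * returns 0xFFFFFFFF once all the trace data has been read out.
	 */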
302 bufp = drvdata->buf;
303 while (1) {
304 for (i = 0; i < memwords; i++) {
305 read_data = readl_relaxed(drvdata->base + TMC_RRD);
306 if (read_data == 0xFFFFFFFF)
307 return;
308 memcpy(bufp, &read_data, 4);
309 bufp += 4;
310 }
311 }
312}
313
314static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
315{
316 CS_UNLOCK(drvdata->base);
317
318 tmc_flush_and_stop(drvdata);
319 tmc_etb_dump_hw(drvdata);
320 tmc_disable_hw(drvdata);
321
322 CS_LOCK(drvdata->base);
323}
324
325static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
326{
327 u32 rwp, val;
328
329 rwp = readl_relaxed(drvdata->base + TMC_RWP);
330 val = readl_relaxed(drvdata->base + TMC_STS);
331
	/*
	 * If the buffer has wrapped (STS Full bit set), the oldest data
	 * starts at the current write pointer; otherwise it starts at the
	 * base of the buffer.
	 */
	if (val & BIT(0))
		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
	else
		drvdata->buf = drvdata->vaddr;
337}
338
339static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
340{
341 CS_UNLOCK(drvdata->base);
342
343 tmc_flush_and_stop(drvdata);
344 tmc_etr_dump_hw(drvdata);
345 tmc_disable_hw(drvdata);
346
347 CS_LOCK(drvdata->base);
348}
349
350static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
351{
352 CS_UNLOCK(drvdata->base);
353
354 tmc_flush_and_stop(drvdata);
355 tmc_disable_hw(drvdata);
356
357 CS_LOCK(drvdata->base);
358}
359
360static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
361{
362 unsigned long flags;
363
364 spin_lock_irqsave(&drvdata->spinlock, flags);
365 if (drvdata->reading)
366 goto out;
367
368 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
369 tmc_etb_disable_hw(drvdata);
370 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
371 tmc_etr_disable_hw(drvdata);
372 } else {
373 if (mode == TMC_MODE_CIRCULAR_BUFFER)
374 tmc_etb_disable_hw(drvdata);
375 else
376 tmc_etf_disable_hw(drvdata);
377 }
378out:
379 drvdata->enable = false;
380 spin_unlock_irqrestore(&drvdata->spinlock, flags);
381
382 dev_info(drvdata->dev, "TMC disabled\n");
383}
384
385static void tmc_disable_sink(struct coresight_device *csdev)
386{
387 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
388
389 tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
390}
391
392static void tmc_disable_link(struct coresight_device *csdev, int inport,
393 int outport)
394{
395 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
396
397 tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO);
398}
399
400static const struct coresight_ops_sink tmc_sink_ops = {
401 .enable = tmc_enable_sink,
402 .disable = tmc_disable_sink,
403};
404
405static const struct coresight_ops_link tmc_link_ops = {
406 .enable = tmc_enable_link,
407 .disable = tmc_disable_link,
408};
409
410static const struct coresight_ops tmc_etb_cs_ops = {
411 .sink_ops = &tmc_sink_ops,
412};
413
414static const struct coresight_ops tmc_etr_cs_ops = {
415 .sink_ops = &tmc_sink_ops,
416};
417
418static const struct coresight_ops tmc_etf_cs_ops = {
419 .sink_ops = &tmc_sink_ops,
420 .link_ops = &tmc_link_ops,
421};
422
423static int tmc_read_prepare(struct tmc_drvdata *drvdata)
424{
425 int ret;
426 unsigned long flags;
427 enum tmc_mode mode;
428
429 spin_lock_irqsave(&drvdata->spinlock, flags);
430 if (!drvdata->enable)
431 goto out;
432
433 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
434 tmc_etb_disable_hw(drvdata);
435 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
436 tmc_etr_disable_hw(drvdata);
437 } else {
438 mode = readl_relaxed(drvdata->base + TMC_MODE);
439 if (mode == TMC_MODE_CIRCULAR_BUFFER) {
440 tmc_etb_disable_hw(drvdata);
441 } else {
442 ret = -ENODEV;
443 goto err;
444 }
445 }
446out:
447 drvdata->reading = true;
448 spin_unlock_irqrestore(&drvdata->spinlock, flags);
449
450 dev_info(drvdata->dev, "TMC read start\n");
451 return 0;
452err:
453 spin_unlock_irqrestore(&drvdata->spinlock, flags);
454 return ret;
455}
456
457static void tmc_read_unprepare(struct tmc_drvdata *drvdata)
458{
459 unsigned long flags;
460 enum tmc_mode mode;
461
462 spin_lock_irqsave(&drvdata->spinlock, flags);
463 if (!drvdata->enable)
464 goto out;
465
466 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
467 tmc_etb_enable_hw(drvdata);
468 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
469 tmc_etr_enable_hw(drvdata);
470 } else {
471 mode = readl_relaxed(drvdata->base + TMC_MODE);
472 if (mode == TMC_MODE_CIRCULAR_BUFFER)
473 tmc_etb_enable_hw(drvdata);
474 }
475out:
476 drvdata->reading = false;
477 spin_unlock_irqrestore(&drvdata->spinlock, flags);
478
479 dev_info(drvdata->dev, "TMC read end\n");
480}
481
482static int tmc_open(struct inode *inode, struct file *file)
483{
484 struct tmc_drvdata *drvdata = container_of(file->private_data,
485 struct tmc_drvdata, miscdev);
486 int ret = 0;
487
488 if (drvdata->read_count++)
489 goto out;
490
491 ret = tmc_read_prepare(drvdata);
492 if (ret)
493 return ret;
494out:
495 nonseekable_open(inode, file);
496
497 dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
498 return 0;
499}
500
501static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
502 loff_t *ppos)
503{
504 struct tmc_drvdata *drvdata = container_of(file->private_data,
505 struct tmc_drvdata, miscdev);
506 char *bufp = drvdata->buf + *ppos;
507
508 if (*ppos + len > drvdata->size)
509 len = drvdata->size - *ppos;
510
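	/*
	 * The ETR buffer is circular and drvdata->buf points at the oldest
	 * data, so a read that runs past the end of the DMA area must wrap
	 * back to its start and be truncated at the buffer boundary.
	 */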
511 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
512 if (bufp == (char *)(drvdata->vaddr + drvdata->size))
513 bufp = drvdata->vaddr;
514 else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
515 bufp -= drvdata->size;
516 if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
517 len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
518 }
519
520 if (copy_to_user(data, bufp, len)) {
521 dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
522 return -EFAULT;
523 }
524
525 *ppos += len;
526
527 dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
528 __func__, len, (int)(drvdata->size - *ppos));
529 return len;
530}
531
532static int tmc_release(struct inode *inode, struct file *file)
533{
534 struct tmc_drvdata *drvdata = container_of(file->private_data,
535 struct tmc_drvdata, miscdev);
536
537 if (--drvdata->read_count) {
538 if (drvdata->read_count < 0) {
539 dev_err(drvdata->dev, "mismatched close\n");
540 drvdata->read_count = 0;
541 }
542 goto out;
543 }
544
545 tmc_read_unprepare(drvdata);
546out:
547 dev_dbg(drvdata->dev, "%s: released\n", __func__);
548 return 0;
549}
550
551static const struct file_operations tmc_fops = {
552 .owner = THIS_MODULE,
553 .open = tmc_open,
554 .read = tmc_read,
555 .release = tmc_release,
556 .llseek = no_llseek,
557};
558
559static ssize_t status_show(struct device *dev,
560 struct device_attribute *attr, char *buf)
561{
562 unsigned long flags;
563 u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg;
564 u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr;
565 u32 devid;
566 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
567
568 pm_runtime_get_sync(drvdata->dev);
569 spin_lock_irqsave(&drvdata->spinlock, flags);
570 CS_UNLOCK(drvdata->base);
571
572 tmc_rsz = readl_relaxed(drvdata->base + TMC_RSZ);
573 tmc_sts = readl_relaxed(drvdata->base + TMC_STS);
574 tmc_rrp = readl_relaxed(drvdata->base + TMC_RRP);
575 tmc_rwp = readl_relaxed(drvdata->base + TMC_RWP);
576 tmc_trg = readl_relaxed(drvdata->base + TMC_TRG);
577 tmc_ctl = readl_relaxed(drvdata->base + TMC_CTL);
578 tmc_ffsr = readl_relaxed(drvdata->base + TMC_FFSR);
579 tmc_ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
580 tmc_mode = readl_relaxed(drvdata->base + TMC_MODE);
581 tmc_pscr = readl_relaxed(drvdata->base + TMC_PSCR);
582 devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
583
584 CS_LOCK(drvdata->base);
585 spin_unlock_irqrestore(&drvdata->spinlock, flags);
586 pm_runtime_put(drvdata->dev);
587
588 return sprintf(buf,
589 "Depth:\t\t0x%x\n"
590 "Status:\t\t0x%x\n"
591 "RAM read ptr:\t0x%x\n"
592 "RAM wrt ptr:\t0x%x\n"
593 "Trigger cnt:\t0x%x\n"
594 "Control:\t0x%x\n"
595 "Flush status:\t0x%x\n"
596 "Flush ctrl:\t0x%x\n"
597 "Mode:\t\t0x%x\n"
598 "PSRC:\t\t0x%x\n"
		       "PSCR:\t\t0x%x\n"
600 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg,
601 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid);
602
605static DEVICE_ATTR_RO(status);
606
607static ssize_t trigger_cntr_show(struct device *dev,
608 struct device_attribute *attr, char *buf)
609{
610 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
611 unsigned long val = drvdata->trigger_cntr;
612
613 return sprintf(buf, "%#lx\n", val);
614}
615
616static ssize_t trigger_cntr_store(struct device *dev,
617 struct device_attribute *attr,
618 const char *buf, size_t size)
619{
620 int ret;
621 unsigned long val;
622 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
623
624 ret = kstrtoul(buf, 16, &val);
625 if (ret)
626 return ret;
627
628 drvdata->trigger_cntr = val;
629 return size;
630}
631static DEVICE_ATTR_RW(trigger_cntr);
632
633static struct attribute *coresight_etb_attrs[] = {
634 &dev_attr_trigger_cntr.attr,
635 &dev_attr_status.attr,
636 NULL,
637};
638ATTRIBUTE_GROUPS(coresight_etb);
639
640static struct attribute *coresight_etr_attrs[] = {
641 &dev_attr_trigger_cntr.attr,
642 &dev_attr_status.attr,
643 NULL,
644};
645ATTRIBUTE_GROUPS(coresight_etr);
646
647static struct attribute *coresight_etf_attrs[] = {
648 &dev_attr_trigger_cntr.attr,
649 &dev_attr_status.attr,
650 NULL,
651};
652ATTRIBUTE_GROUPS(coresight_etf);
653
654static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
655{
656 int ret = 0;
657 u32 devid;
658 void __iomem *base;
659 struct device *dev = &adev->dev;
660 struct coresight_platform_data *pdata = NULL;
661 struct tmc_drvdata *drvdata;
662 struct resource *res = &adev->res;
663 struct coresight_desc *desc;
664 struct device_node *np = adev->dev.of_node;
665
666 if (np) {
667 pdata = of_get_coresight_platform_data(dev, np);
668 if (IS_ERR(pdata))
669 return PTR_ERR(pdata);
670 adev->dev.platform_data = pdata;
671 }
672
673 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
674 if (!drvdata)
675 return -ENOMEM;
676
677 drvdata->dev = &adev->dev;
678 dev_set_drvdata(dev, drvdata);
679
680 /* Validity for the resource is already checked by the AMBA core */
681 base = devm_ioremap_resource(dev, res);
682 if (IS_ERR(base))
683 return PTR_ERR(base);
684
685 drvdata->base = base;
686
687 spin_lock_init(&drvdata->spinlock);
688
689 devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
690 drvdata->config_type = BMVAL(devid, 6, 7);
691
	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		/* Default to 1MB unless the DT provides "arm,buffer-size" */
		drvdata->size = SZ_1M;
		if (np)
			of_property_read_u32(np, "arm,buffer-size",
					     &drvdata->size);
	} else {
		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
	}
702
703 pm_runtime_put(&adev->dev);
704
705 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
706 drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
707 &drvdata->paddr, GFP_KERNEL);
708 if (!drvdata->vaddr)
709 return -ENOMEM;
710
711 memset(drvdata->vaddr, 0, drvdata->size);
712 drvdata->buf = drvdata->vaddr;
713 } else {
714 drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL);
715 if (!drvdata->buf)
716 return -ENOMEM;
717 }
718
719 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
720 if (!desc) {
721 ret = -ENOMEM;
722 goto err_devm_kzalloc;
723 }
724
725 desc->pdata = pdata;
726 desc->dev = dev;
727 desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
728
729 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
730 desc->type = CORESIGHT_DEV_TYPE_SINK;
731 desc->ops = &tmc_etb_cs_ops;
732 desc->groups = coresight_etb_groups;
733 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
734 desc->type = CORESIGHT_DEV_TYPE_SINK;
735 desc->ops = &tmc_etr_cs_ops;
736 desc->groups = coresight_etr_groups;
737 } else {
738 desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
739 desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
740 desc->ops = &tmc_etf_cs_ops;
741 desc->groups = coresight_etf_groups;
742 }
743
744 drvdata->csdev = coresight_register(desc);
745 if (IS_ERR(drvdata->csdev)) {
746 ret = PTR_ERR(drvdata->csdev);
747 goto err_devm_kzalloc;
748 }
749
750 drvdata->miscdev.name = pdata->name;
751 drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
752 drvdata->miscdev.fops = &tmc_fops;
753 ret = misc_register(&drvdata->miscdev);
754 if (ret)
755 goto err_misc_register;
756
757 dev_info(dev, "TMC initialized\n");
758 return 0;
759
760err_misc_register:
761 coresight_unregister(drvdata->csdev);
762err_devm_kzalloc:
763 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
764 dma_free_coherent(dev, drvdata->size,
765 drvdata->vaddr, drvdata->paddr);
766 return ret;
767}
768
static const struct amba_id tmc_ids[] = {
770 {
771 .id = 0x0003b961,
772 .mask = 0x0003ffff,
773 },
774 { 0, 0},
775};
776
777static struct amba_driver tmc_driver = {
778 .drv = {
779 .name = "coresight-tmc",
780 .owner = THIS_MODULE,
781 .suppress_bind_attrs = true,
782 },
783 .probe = tmc_probe,
784 .id_table = tmc_ids,
785};
786builtin_amba_driver(tmc_driver);
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
3 *
4 * Description: CoreSight Trace Memory Controller driver
5 */
6
7#include <linux/kernel.h>
8#include <linux/init.h>
9#include <linux/types.h>
10#include <linux/device.h>
11#include <linux/idr.h>
12#include <linux/io.h>
13#include <linux/err.h>
14#include <linux/fs.h>
15#include <linux/miscdevice.h>
16#include <linux/mutex.h>
17#include <linux/property.h>
18#include <linux/uaccess.h>
19#include <linux/slab.h>
20#include <linux/dma-mapping.h>
21#include <linux/spinlock.h>
22#include <linux/pm_runtime.h>
23#include <linux/of.h>
24#include <linux/coresight.h>
25#include <linux/amba/bus.h>
26
27#include "coresight-priv.h"
28#include "coresight-tmc.h"
29
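/* Per-configuration name pools, used to generate "tmc_etb0", "tmc_etf0", ... */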
30DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
31DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
32DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");
33
34void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
35{
36 /* Ensure formatter, unformatter and hardware fifo are empty */
37 if (coresight_timeout(drvdata->base,
38 TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
39 dev_err(&drvdata->csdev->dev,
40 "timeout while waiting for TMC to be Ready\n");
41 }
42}
43
44void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
45{
46 u32 ffcr;
47
48 ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
49 ffcr |= TMC_FFCR_STOP_ON_FLUSH;
50 writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
51 ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
52 writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
53 /* Ensure flush completes */
54 if (coresight_timeout(drvdata->base,
55 TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
56 dev_err(&drvdata->csdev->dev,
57 "timeout while waiting for completion of Manual Flush\n");
58 }
59
60 tmc_wait_for_tmcready(drvdata);
61}
62
63void tmc_enable_hw(struct tmc_drvdata *drvdata)
64{
65 writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
66}
67
68void tmc_disable_hw(struct tmc_drvdata *drvdata)
69{
70 writel_relaxed(0x0, drvdata->base + TMC_CTL);
71}
72
73u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
74{
75 u32 mask = 0;
76
77 /*
78 * When moving RRP or an offset address forward, the new values must
79 * be byte-address aligned to the width of the trace memory databus
80 * _and_ to a frame boundary (16 byte), whichever is the biggest. For
81 * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
82 * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
83 * be 0s.
84 */
85 switch (drvdata->memwidth) {
86 case TMC_MEM_INTF_WIDTH_32BITS:
87 /* fallthrough */
88 case TMC_MEM_INTF_WIDTH_64BITS:
89 /* fallthrough */
90 case TMC_MEM_INTF_WIDTH_128BITS:
91 mask = GENMASK(31, 4);
92 break;
93 case TMC_MEM_INTF_WIDTH_256BITS:
94 mask = GENMASK(31, 5);
95 break;
96 }
97
98 return mask;
99}
100
101static int tmc_read_prepare(struct tmc_drvdata *drvdata)
102{
103 int ret = 0;
104
105 switch (drvdata->config_type) {
106 case TMC_CONFIG_TYPE_ETB:
107 case TMC_CONFIG_TYPE_ETF:
108 ret = tmc_read_prepare_etb(drvdata);
109 break;
110 case TMC_CONFIG_TYPE_ETR:
111 ret = tmc_read_prepare_etr(drvdata);
112 break;
113 default:
114 ret = -EINVAL;
115 }
116
117 if (!ret)
118 dev_dbg(&drvdata->csdev->dev, "TMC read start\n");
119
120 return ret;
121}
122
123static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
124{
125 int ret = 0;
126
127 switch (drvdata->config_type) {
128 case TMC_CONFIG_TYPE_ETB:
129 case TMC_CONFIG_TYPE_ETF:
130 ret = tmc_read_unprepare_etb(drvdata);
131 break;
132 case TMC_CONFIG_TYPE_ETR:
133 ret = tmc_read_unprepare_etr(drvdata);
134 break;
135 default:
136 ret = -EINVAL;
137 }
138
139 if (!ret)
140 dev_dbg(&drvdata->csdev->dev, "TMC read end\n");
141
142 return ret;
143}
144
145static int tmc_open(struct inode *inode, struct file *file)
146{
147 int ret;
148 struct tmc_drvdata *drvdata = container_of(file->private_data,
149 struct tmc_drvdata, miscdev);
150
151 ret = tmc_read_prepare(drvdata);
152 if (ret)
153 return ret;
154
155 nonseekable_open(inode, file);
156
157 dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
158 return 0;
159}
160
161static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
162 loff_t pos, size_t len, char **bufpp)
163{
164 switch (drvdata->config_type) {
165 case TMC_CONFIG_TYPE_ETB:
166 case TMC_CONFIG_TYPE_ETF:
167 return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
168 case TMC_CONFIG_TYPE_ETR:
169 return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
170 }
171
172 return -EINVAL;
173}
174
175static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
176 loff_t *ppos)
177{
178 char *bufp;
179 ssize_t actual;
180 struct tmc_drvdata *drvdata = container_of(file->private_data,
181 struct tmc_drvdata, miscdev);
182 actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
183 if (actual <= 0)
184 return 0;
185
186 if (copy_to_user(data, bufp, actual)) {
187 dev_dbg(&drvdata->csdev->dev,
188 "%s: copy_to_user failed\n", __func__);
189 return -EFAULT;
190 }
191
192 *ppos += actual;
193 dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);
194
195 return actual;
196}
197
198static int tmc_release(struct inode *inode, struct file *file)
199{
200 int ret;
201 struct tmc_drvdata *drvdata = container_of(file->private_data,
202 struct tmc_drvdata, miscdev);
203
204 ret = tmc_read_unprepare(drvdata);
205 if (ret)
206 return ret;
207
208 dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
209 return 0;
210}
211
212static const struct file_operations tmc_fops = {
213 .owner = THIS_MODULE,
214 .open = tmc_open,
215 .read = tmc_read,
216 .release = tmc_release,
217 .llseek = no_llseek,
218};
219
220static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
221{
222 enum tmc_mem_intf_width memwidth;
223
224 /*
225 * Excerpt from the TRM:
226 *
227 * DEVID::MEMWIDTH[10:8]
228 * 0x2 Memory interface databus is 32 bits wide.
229 * 0x3 Memory interface databus is 64 bits wide.
230 * 0x4 Memory interface databus is 128 bits wide.
231 * 0x5 Memory interface databus is 256 bits wide.
232 */
233 switch (BMVAL(devid, 8, 10)) {
234 case 0x2:
235 memwidth = TMC_MEM_INTF_WIDTH_32BITS;
236 break;
237 case 0x3:
238 memwidth = TMC_MEM_INTF_WIDTH_64BITS;
239 break;
240 case 0x4:
241 memwidth = TMC_MEM_INTF_WIDTH_128BITS;
242 break;
243 case 0x5:
244 memwidth = TMC_MEM_INTF_WIDTH_256BITS;
245 break;
246 default:
247 memwidth = 0;
248 }
249
250 return memwidth;
251}
252
253#define coresight_tmc_reg(name, offset) \
254 coresight_simple_reg32(struct tmc_drvdata, name, offset)
255#define coresight_tmc_reg64(name, lo_off, hi_off) \
256 coresight_simple_reg64(struct tmc_drvdata, name, lo_off, hi_off)
257
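/* Read-only sysfs attributes exposing the TMC management registers. */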
258coresight_tmc_reg(rsz, TMC_RSZ);
259coresight_tmc_reg(sts, TMC_STS);
260coresight_tmc_reg(trg, TMC_TRG);
261coresight_tmc_reg(ctl, TMC_CTL);
262coresight_tmc_reg(ffsr, TMC_FFSR);
263coresight_tmc_reg(ffcr, TMC_FFCR);
264coresight_tmc_reg(mode, TMC_MODE);
265coresight_tmc_reg(pscr, TMC_PSCR);
266coresight_tmc_reg(axictl, TMC_AXICTL);
267coresight_tmc_reg(authstatus, TMC_AUTHSTATUS);
268coresight_tmc_reg(devid, CORESIGHT_DEVID);
269coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
270coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
271coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI);
272
273static struct attribute *coresight_tmc_mgmt_attrs[] = {
274 &dev_attr_rsz.attr,
275 &dev_attr_sts.attr,
276 &dev_attr_rrp.attr,
277 &dev_attr_rwp.attr,
278 &dev_attr_trg.attr,
279 &dev_attr_ctl.attr,
280 &dev_attr_ffsr.attr,
281 &dev_attr_ffcr.attr,
282 &dev_attr_mode.attr,
283 &dev_attr_pscr.attr,
284 &dev_attr_devid.attr,
285 &dev_attr_dba.attr,
286 &dev_attr_axictl.attr,
287 &dev_attr_authstatus.attr,
288 NULL,
289};
290
291static ssize_t trigger_cntr_show(struct device *dev,
292 struct device_attribute *attr, char *buf)
293{
294 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
295 unsigned long val = drvdata->trigger_cntr;
296
297 return sprintf(buf, "%#lx\n", val);
298}
299
300static ssize_t trigger_cntr_store(struct device *dev,
301 struct device_attribute *attr,
302 const char *buf, size_t size)
303{
304 int ret;
305 unsigned long val;
306 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
307
308 ret = kstrtoul(buf, 16, &val);
309 if (ret)
310 return ret;
311
312 drvdata->trigger_cntr = val;
313 return size;
314}
315static DEVICE_ATTR_RW(trigger_cntr);
316
317static ssize_t buffer_size_show(struct device *dev,
318 struct device_attribute *attr, char *buf)
319{
320 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
321
322 return sprintf(buf, "%#x\n", drvdata->size);
323}
324
325static ssize_t buffer_size_store(struct device *dev,
326 struct device_attribute *attr,
327 const char *buf, size_t size)
328{
329 int ret;
330 unsigned long val;
331 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
332
333 /* Only permitted for TMC-ETRs */
334 if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
335 return -EPERM;
336
337 ret = kstrtoul(buf, 0, &val);
338 if (ret)
339 return ret;
340 /* The buffer size should be page aligned */
341 if (val & (PAGE_SIZE - 1))
342 return -EINVAL;
343 drvdata->size = val;
344 return size;
345}
346
347static DEVICE_ATTR_RW(buffer_size);
348
349static struct attribute *coresight_tmc_attrs[] = {
350 &dev_attr_trigger_cntr.attr,
351 &dev_attr_buffer_size.attr,
352 NULL,
353};
354
355static const struct attribute_group coresight_tmc_group = {
356 .attrs = coresight_tmc_attrs,
357};
358
359static const struct attribute_group coresight_tmc_mgmt_group = {
360 .attrs = coresight_tmc_mgmt_attrs,
361 .name = "mgmt",
362};
363
364const struct attribute_group *coresight_tmc_groups[] = {
365 &coresight_tmc_group,
366 &coresight_tmc_mgmt_group,
367 NULL,
368};
369
370static inline bool tmc_etr_can_use_sg(struct device *dev)
371{
372 return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
373}
374
375static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
376{
377 u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);
378
379 return (auth & TMC_AUTH_NSID_MASK) == 0x3;
380}
381
382/* Detect and initialise the capabilities of a TMC ETR */
383static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
384{
385 int rc;
386 u32 dma_mask = 0;
387 struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
388
389 if (!tmc_etr_has_non_secure_access(drvdata))
390 return -EACCES;
391
392 /* Set the unadvertised capabilities */
393 tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
394
395 if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
396 tmc_etr_set_cap(drvdata, TMC_ETR_SG);
397
398 /* Check if the AXI address width is available */
399 if (devid & TMC_DEVID_AXIAW_VALID)
400 dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
401 TMC_DEVID_AXIAW_MASK);
402
403 /*
404 * Unless specified in the device configuration, ETR uses a 40-bit
405 * AXI master in place of the embedded SRAM of ETB/ETF.
406 */
407 switch (dma_mask) {
408 case 32:
409 case 40:
410 case 44:
411 case 48:
412 case 52:
413 dev_info(parent, "Detected dma mask %dbits\n", dma_mask);
414 break;
415 default:
416 dma_mask = 40;
417 }
418
419 rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
420 if (rc)
421 dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
422 return rc;
423}
424
425static u32 tmc_etr_get_default_buffer_size(struct device *dev)
426{
427 u32 size;
428
429 if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
430 size = SZ_1M;
431 return size;
432}
433
434static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
435{
436 int ret = 0;
437 u32 devid;
438 void __iomem *base;
439 struct device *dev = &adev->dev;
440 struct coresight_platform_data *pdata = NULL;
441 struct tmc_drvdata *drvdata;
442 struct resource *res = &adev->res;
443 struct coresight_desc desc = { 0 };
444 struct coresight_dev_list *dev_list = NULL;
445
446 ret = -ENOMEM;
447 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
448 if (!drvdata)
449 goto out;
450
451 dev_set_drvdata(dev, drvdata);
452
453 /* Validity for the resource is already checked by the AMBA core */
454 base = devm_ioremap_resource(dev, res);
455 if (IS_ERR(base)) {
456 ret = PTR_ERR(base);
457 goto out;
458 }
459
460 drvdata->base = base;
461
462 spin_lock_init(&drvdata->spinlock);
463
464 devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
465 drvdata->config_type = BMVAL(devid, 6, 7);
466 drvdata->memwidth = tmc_get_memwidth(devid);
467 /* This device is not associated with a session */
468 drvdata->pid = -1;
469
470 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
471 drvdata->size = tmc_etr_get_default_buffer_size(dev);
472 else
473 drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
474
475 desc.dev = dev;
476 desc.groups = coresight_tmc_groups;
477
478 switch (drvdata->config_type) {
479 case TMC_CONFIG_TYPE_ETB:
480 desc.type = CORESIGHT_DEV_TYPE_SINK;
481 desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
482 desc.ops = &tmc_etb_cs_ops;
483 dev_list = &etb_devs;
484 break;
485 case TMC_CONFIG_TYPE_ETR:
486 desc.type = CORESIGHT_DEV_TYPE_SINK;
487 desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
488 desc.ops = &tmc_etr_cs_ops;
489 ret = tmc_etr_setup_caps(dev, devid,
490 coresight_get_uci_data(id));
491 if (ret)
492 goto out;
493 idr_init(&drvdata->idr);
494 mutex_init(&drvdata->idr_mutex);
495 dev_list = &etr_devs;
496 break;
497 case TMC_CONFIG_TYPE_ETF:
498 desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
499 desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
500 desc.ops = &tmc_etf_cs_ops;
501 dev_list = &etf_devs;
502 break;
503 default:
		dev_err(dev, "Unsupported TMC config\n");
505 ret = -EINVAL;
506 goto out;
507 }
508
509 desc.name = coresight_alloc_device_name(dev_list, dev);
510 if (!desc.name) {
511 ret = -ENOMEM;
512 goto out;
513 }
514
515 pdata = coresight_get_platform_data(dev);
516 if (IS_ERR(pdata)) {
517 ret = PTR_ERR(pdata);
518 goto out;
519 }
520 adev->dev.platform_data = pdata;
521 desc.pdata = pdata;
522
523 drvdata->csdev = coresight_register(&desc);
524 if (IS_ERR(drvdata->csdev)) {
525 ret = PTR_ERR(drvdata->csdev);
526 goto out;
527 }
528
529 drvdata->miscdev.name = desc.name;
530 drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
531 drvdata->miscdev.fops = &tmc_fops;
532 ret = misc_register(&drvdata->miscdev);
533 if (ret)
534 coresight_unregister(drvdata->csdev);
535 else
536 pm_runtime_put(&adev->dev);
537out:
538 return ret;
539}
540
541static const struct amba_id tmc_ids[] = {
542 CS_AMBA_ID(0x000bb961),
543 /* Coresight SoC 600 TMC-ETR/ETS */
544 CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
545 /* Coresight SoC 600 TMC-ETB */
546 CS_AMBA_ID(0x000bb9e9),
547 /* Coresight SoC 600 TMC-ETF */
548 CS_AMBA_ID(0x000bb9ea),
549 { 0, 0},
550};
551
552static struct amba_driver tmc_driver = {
553 .drv = {
554 .name = "coresight-tmc",
555 .owner = THIS_MODULE,
556 .suppress_bind_attrs = true,
557 },
558 .probe = tmc_probe,
559 .id_table = tmc_ids,
560};
561builtin_amba_driver(tmc_driver);