Linux Audio

Check our new training course

Loading...
v4.6
  1/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  2 *
  3 * Description: CoreSight Trace Memory Controller driver
  4 *
  5 * This program is free software; you can redistribute it and/or modify
  6 * it under the terms of the GNU General Public License version 2 and
  7 * only version 2 as published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it will be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 */
 14
 15#include <linux/kernel.h>
 16#include <linux/init.h>
 17#include <linux/types.h>
 18#include <linux/device.h>
 19#include <linux/io.h>
 20#include <linux/err.h>
 21#include <linux/fs.h>
 22#include <linux/miscdevice.h>
 23#include <linux/uaccess.h>
 24#include <linux/slab.h>
 25#include <linux/dma-mapping.h>
 26#include <linux/spinlock.h>
 27#include <linux/pm_runtime.h>
 28#include <linux/of.h>
 29#include <linux/coresight.h>
 30#include <linux/amba/bus.h>
 31
 32#include "coresight-priv.h"
 
 33
 34#define TMC_RSZ			0x004
 35#define TMC_STS			0x00c
 36#define TMC_RRD			0x010
 37#define TMC_RRP			0x014
 38#define TMC_RWP			0x018
 39#define TMC_TRG			0x01c
 40#define TMC_CTL			0x020
 41#define TMC_RWD			0x024
 42#define TMC_MODE		0x028
 43#define TMC_LBUFLEVEL		0x02c
 44#define TMC_CBUFLEVEL		0x030
 45#define TMC_BUFWM		0x034
 46#define TMC_RRPHI		0x038
 47#define TMC_RWPHI		0x03c
 48#define TMC_AXICTL		0x110
 49#define TMC_DBALO		0x118
 50#define TMC_DBAHI		0x11c
 51#define TMC_FFSR		0x300
 52#define TMC_FFCR		0x304
 53#define TMC_PSCR		0x308
 54#define TMC_ITMISCOP0		0xee0
 55#define TMC_ITTRFLIN		0xee8
 56#define TMC_ITATBDATA0		0xeec
 57#define TMC_ITATBCTR2		0xef0
 58#define TMC_ITATBCTR1		0xef4
 59#define TMC_ITATBCTR0		0xef8
 60
 61/* register description */
 62/* TMC_CTL - 0x020 */
 63#define TMC_CTL_CAPT_EN		BIT(0)
 64/* TMC_STS - 0x00C */
 65#define TMC_STS_TRIGGERED	BIT(1)
 66/* TMC_AXICTL - 0x110 */
 67#define TMC_AXICTL_PROT_CTL_B0	BIT(0)
 68#define TMC_AXICTL_PROT_CTL_B1	BIT(1)
 69#define TMC_AXICTL_SCT_GAT_MODE	BIT(7)
 70#define TMC_AXICTL_WR_BURST_LEN 0xF00
 71/* TMC_FFCR - 0x304 */
 72#define TMC_FFCR_EN_FMT		BIT(0)
 73#define TMC_FFCR_EN_TI		BIT(1)
 74#define TMC_FFCR_FON_FLIN	BIT(4)
 75#define TMC_FFCR_FON_TRIG_EVT	BIT(5)
 76#define TMC_FFCR_FLUSHMAN	BIT(6)
 77#define TMC_FFCR_TRIGON_TRIGIN	BIT(8)
 78#define TMC_FFCR_STOP_ON_FLUSH	BIT(12)
 79
 80#define TMC_STS_TRIGGERED_BIT	2
 81#define TMC_FFCR_FLUSHMAN_BIT	6
 82
/* TMC hardware configuration variant, read from DEVID[7:6] at probe time. */
enum tmc_config_type {
	TMC_CONFIG_TYPE_ETB,
	TMC_CONFIG_TYPE_ETR,
	TMC_CONFIG_TYPE_ETF,
};

/* Operating modes programmed into (and read back from) TMC_MODE. */
enum tmc_mode {
	TMC_MODE_CIRCULAR_BUFFER,
	TMC_MODE_SOFTWARE_FIFO,
	TMC_MODE_HARDWARE_FIFO,
};

/* Encoding of the memory interface width field, DEVID::MEMWIDTH[10:8]. */
enum tmc_mem_intf_width {
	TMC_MEM_INTF_WIDTH_32BITS	= 0x2,
	TMC_MEM_INTF_WIDTH_64BITS	= 0x3,
	TMC_MEM_INTF_WIDTH_128BITS	= 0x4,
	TMC_MEM_INTF_WIDTH_256BITS	= 0x5,
};
101
/**
 * struct tmc_drvdata - specifics associated to an TMC component
 * @base:	memory mapped base address for this component.
 * @dev:	the device entity associated to this component.
 * @csdev:	component vitals needed by the framework.
 * @miscdev:	specifics to handle "/dev/xyz.tmc" entry.
 * @spinlock:	only one at a time pls.
 * @read_count:	manages preparation of buffer for reading.
 * @reading:	true while the buffer is opened for reading; blocks
 *		enable/disable of the hardware until the read completes.
 * @buf:	area of memory where trace data get sent.
 * @paddr:	DMA start location in RAM.
 * @vaddr:	virtual representation of @paddr.
 * @size:	@buf size.
 * @enable:	this TMC is being used.
 * @config_type: TMC variant, must be of type @tmc_config_type.
 * @trigger_cntr: amount of words to store after a trigger.
 */
struct tmc_drvdata {
	void __iomem		*base;
	struct device		*dev;
	struct coresight_device	*csdev;
	struct miscdevice	miscdev;
	spinlock_t		spinlock;
	int			read_count;
	bool			reading;
	char			*buf;
	dma_addr_t		paddr;
	void			*vaddr;
	u32			size;
	bool			enable;
	enum tmc_config_type	config_type;
	u32			trigger_cntr;
};
134
/*
 * Poll TMC_STS until the unit reports it has drained.
 *
 * NOTE(review): the polled bit is bit 2 (TMC_STS_TRIGGERED_BIT == 2), while
 * TMC_STS_TRIGGERED above is BIT(1) - bit 2 looks like the TMCReady flag,
 * not the Triggered flag; confirm against the TMC TRM before relying on
 * the macro name.
 */
static void tmc_wait_for_ready(struct tmc_drvdata *drvdata)
{
	/* Ensure formatter, unformatter and hardware fifo are empty */
	if (coresight_timeout(drvdata->base,
			      TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) {
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TMC_STS);
	}
}
145
/*
 * Stop capture via a manual flush: set STOP_ON_FLUSH so the flush ends the
 * session, trigger the flush, then wait for it (and the drain) to complete.
 */
static void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
	u32 ffcr;

	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* Generate a manual flush; the FLUSHMAN bit self-clears on completion. */
	ffcr |= TMC_FFCR_FLUSHMAN;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* Ensure flush completes */
	if (coresight_timeout(drvdata->base,
			      TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TMC_FFCR);
	}

	tmc_wait_for_ready(drvdata);
}
165
/* Start trace capture by setting the capture-enable bit in TMC_CTL. */
static void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}

/* Stop trace capture by clearing TMC_CTL entirely. */
static void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(0x0, drvdata->base + TMC_CTL);
}
175
/* Program the TMC as an ETB (internal RAM, circular buffer) and start it. */
static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	/* Zero out the memory to help with debug */
	memset(drvdata->buf, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	/* Formatter on, trigger insertion, flush on FLIn and trigger events. */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	/* Number of words to keep capturing after a trigger event. */
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
194
/* Program the TMC as an ETR (external DMA buffer via AXI) and start it. */
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	/* TMC_RSZ is expressed in 32-bit words; @size is in bytes. */
	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl |= TMC_AXICTL_WR_BURST_LEN;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	/* Disable scatter-gather mode. */
	axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	/* Clear both protection-control bits, then set bit 1 only. */
	axictl = (axictl &
		  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
		  TMC_AXICTL_PROT_CTL_B1;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);

	/* DMA buffer base address; the high half is not used here. */
	writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
	writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
228
/* Program the TMC as an ETF (hardware FIFO link) and start it. */
static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	/* Only formatting and trigger insertion; no flush-on-event bits. */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
241
242static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
243{
244	unsigned long flags;
245
246	spin_lock_irqsave(&drvdata->spinlock, flags);
247	if (drvdata->reading) {
248		spin_unlock_irqrestore(&drvdata->spinlock, flags);
249		return -EBUSY;
250	}
251
252	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
253		tmc_etb_enable_hw(drvdata);
254	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
255		tmc_etr_enable_hw(drvdata);
256	} else {
257		if (mode == TMC_MODE_CIRCULAR_BUFFER)
258			tmc_etb_enable_hw(drvdata);
259		else
260			tmc_etf_enable_hw(drvdata);
261	}
262	drvdata->enable = true;
263	spin_unlock_irqrestore(&drvdata->spinlock, flags);
264
265	dev_info(drvdata->dev, "TMC enabled\n");
266	return 0;
267}
268
269static int tmc_enable_sink(struct coresight_device *csdev, u32 mode)
270{
271	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
272
273	return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
274}
275
276static int tmc_enable_link(struct coresight_device *csdev, int inport,
277			   int outport)
278{
279	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
280
281	return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO);
282}
283
284static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
285{
286	enum tmc_mem_intf_width memwidth;
287	u8 memwords;
288	char *bufp;
289	u32 read_data;
290	int i;
291
292	memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10);
293	if (memwidth == TMC_MEM_INTF_WIDTH_32BITS)
294		memwords = 1;
295	else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS)
296		memwords = 2;
297	else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS)
298		memwords = 4;
299	else
300		memwords = 8;
301
302	bufp = drvdata->buf;
303	while (1) {
304		for (i = 0; i < memwords; i++) {
305			read_data = readl_relaxed(drvdata->base + TMC_RRD);
306			if (read_data == 0xFFFFFFFF)
307				return;
308			memcpy(bufp, &read_data, 4);
309			bufp += 4;
310		}
311	}
312}
313
/* Stop an ETB-configured TMC and snapshot its RAM contents into @buf. */
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Flush/stop first so the dump sees a quiescent buffer. */
	tmc_flush_and_stop(drvdata);
	tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
324
/*
 * Locate the start of valid trace data in the ETR's DMA buffer.
 *
 * NOTE(review): bit 0 of TMC_STS appears to be the "Full" flag - when set,
 * the write pointer has wrapped and the oldest data starts at the current
 * RWP; otherwise data starts at the beginning of the buffer.  Confirm
 * against the TMC TRM.
 */
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
	u32 rwp, val;

	rwp = readl_relaxed(drvdata->base + TMC_RWP);
	val = readl_relaxed(drvdata->base + TMC_STS);

	/* How much memory do we still have */
	if (val & BIT(0))
		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
	else
		drvdata->buf = drvdata->vaddr;
}
338
/* Stop an ETR-configured TMC and record where valid data begins. */
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
349
/* Stop an ETF-configured TMC; a FIFO has no buffer contents to dump. */
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
359
360static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
361{
362	unsigned long flags;
363
364	spin_lock_irqsave(&drvdata->spinlock, flags);
365	if (drvdata->reading)
366		goto out;
367
368	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
369		tmc_etb_disable_hw(drvdata);
370	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
371		tmc_etr_disable_hw(drvdata);
372	} else {
373		if (mode == TMC_MODE_CIRCULAR_BUFFER)
374			tmc_etb_disable_hw(drvdata);
375		else
376			tmc_etf_disable_hw(drvdata);
 
377	}
378out:
379	drvdata->enable = false;
380	spin_unlock_irqrestore(&drvdata->spinlock, flags);
381
382	dev_info(drvdata->dev, "TMC disabled\n");
383}
384
385static void tmc_disable_sink(struct coresight_device *csdev)
386{
387	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
388
389	tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
390}
391
392static void tmc_disable_link(struct coresight_device *csdev, int inport,
393			     int outport)
394{
395	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
396
397	tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO);
398}
399
/* Sink operations, shared by every TMC configuration. */
static const struct coresight_ops_sink tmc_sink_ops = {
	.enable		= tmc_enable_sink,
	.disable	= tmc_disable_sink,
};

/* Link operations, used only by the ETF (link-and-sink) variant. */
static const struct coresight_ops_link tmc_link_ops = {
	.enable		= tmc_enable_link,
	.disable	= tmc_disable_link,
};

static const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_sink_ops,
};

static const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_sink_ops,
};

/* The ETF can act both as a sink and as a link in the trace path. */
static const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_sink_ops,
	.link_ops	= &tmc_link_ops,
};
422
/*
 * Stop the TMC (if currently enabled) and snapshot its contents so user
 * space can read them.  Returns 0 on success, or -ENODEV for an ETF in
 * hardware FIFO mode, which has no software-readable buffer.
 */
static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
	int ret;
	unsigned long flags;
	enum tmc_mode mode;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	/* Nothing to stop if capture was never enabled. */
	if (!drvdata->enable)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		tmc_etb_disable_hw(drvdata);
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		tmc_etr_disable_hw(drvdata);
	} else {
		/* ETF: only circular buffer mode has readable contents. */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode == TMC_MODE_CIRCULAR_BUFFER) {
			tmc_etb_disable_hw(drvdata);
		} else {
			ret = -ENODEV;
			goto err;
		}
	}
out:
	/* Block enable/disable of the hardware until the read completes. */
	drvdata->reading = true;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC read start\n");
	return 0;
err:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return ret;
}
456
/*
 * Re-arm the TMC after user space has finished reading: if it was enabled
 * when the read started, reprogram and restart the capture.
 */
static void tmc_read_unprepare(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	enum tmc_mode mode;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->enable)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		tmc_etb_enable_hw(drvdata);
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		tmc_etr_enable_hw(drvdata);
	} else {
		/* ETF: only restart when it was in circular buffer mode. */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode == TMC_MODE_CIRCULAR_BUFFER)
			tmc_etb_enable_hw(drvdata);
	}
out:
	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC read end\n");
}
481
482static int tmc_open(struct inode *inode, struct file *file)
483{
 
484	struct tmc_drvdata *drvdata = container_of(file->private_data,
485						   struct tmc_drvdata, miscdev);
486	int ret = 0;
487
488	if (drvdata->read_count++)
489		goto out;
490
491	ret = tmc_read_prepare(drvdata);
492	if (ret)
493		return ret;
494out:
495	nonseekable_open(inode, file);
496
497	dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
498	return 0;
499}
500
/*
 * Character device read: copy up to @len bytes of captured trace at file
 * offset *@ppos to user space.
 *
 * NOTE(review): the clamp uses @size (buffer capacity), not the amount of
 * valid data actually captured - confirm whether short traces should be
 * reported as shorter reads.
 */
static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
			loff_t *ppos)
{
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);
	char *bufp = drvdata->buf + *ppos;

	/* Clamp the request to the end of the buffer. */
	if (*ppos + len > drvdata->size)
		len = drvdata->size - *ppos;

	/*
	 * ETR: @buf may point into the middle of the DMA area (set by
	 * tmc_etr_dump_hw), so wrap the read pointer around the ring and
	 * clamp the length at the end of the DMA area.
	 */
	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		if (bufp == (char *)(drvdata->vaddr + drvdata->size))
			bufp = drvdata->vaddr;
		else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
			bufp -= drvdata->size;
		if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
			len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
	}

	if (copy_to_user(data, bufp, len)) {
		dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += len;

	dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
		__func__, len, (int)(drvdata->size - *ppos));
	return len;
}
531
/*
 * Character device release: restart the TMC only when the last reader
 * goes away.
 */
static int tmc_release(struct inode *inode, struct file *file)
{
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	if (--drvdata->read_count) {
		/* More closes than opens: clamp the count and warn. */
		if (drvdata->read_count < 0) {
			dev_err(drvdata->dev, "mismatched close\n");
			drvdata->read_count = 0;
		}
		goto out;
	}

	tmc_read_unprepare(drvdata);
out:
	dev_dbg(drvdata->dev, "%s: released\n", __func__);
	return 0;
}
550
/* File operations backing the "/dev/xyz.tmc" misc device. */
static const struct file_operations tmc_fops = {
	.owner		= THIS_MODULE,
	.open		= tmc_open,
	.read		= tmc_read,
	.release	= tmc_release,
	.llseek		= no_llseek,
};
558
559static ssize_t status_show(struct device *dev,
560			   struct device_attribute *attr, char *buf)
561{
562	unsigned long flags;
563	u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg;
564	u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr;
565	u32 devid;
566	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
567
568	pm_runtime_get_sync(drvdata->dev);
569	spin_lock_irqsave(&drvdata->spinlock, flags);
570	CS_UNLOCK(drvdata->base);
571
572	tmc_rsz = readl_relaxed(drvdata->base + TMC_RSZ);
573	tmc_sts = readl_relaxed(drvdata->base + TMC_STS);
574	tmc_rrp = readl_relaxed(drvdata->base + TMC_RRP);
575	tmc_rwp = readl_relaxed(drvdata->base + TMC_RWP);
576	tmc_trg = readl_relaxed(drvdata->base + TMC_TRG);
577	tmc_ctl = readl_relaxed(drvdata->base + TMC_CTL);
578	tmc_ffsr = readl_relaxed(drvdata->base + TMC_FFSR);
579	tmc_ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
580	tmc_mode = readl_relaxed(drvdata->base + TMC_MODE);
581	tmc_pscr = readl_relaxed(drvdata->base + TMC_PSCR);
582	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
583
584	CS_LOCK(drvdata->base);
585	spin_unlock_irqrestore(&drvdata->spinlock, flags);
586	pm_runtime_put(drvdata->dev);
587
588	return sprintf(buf,
589		       "Depth:\t\t0x%x\n"
590		       "Status:\t\t0x%x\n"
591		       "RAM read ptr:\t0x%x\n"
592		       "RAM wrt ptr:\t0x%x\n"
593		       "Trigger cnt:\t0x%x\n"
594		       "Control:\t0x%x\n"
595		       "Flush status:\t0x%x\n"
596		       "Flush ctrl:\t0x%x\n"
597		       "Mode:\t\t0x%x\n"
598		       "PSRC:\t\t0x%x\n"
599		       "DEVID:\t\t0x%x\n",
600			tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg,
601			tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid);
602
603	return -EINVAL;
604}
605static DEVICE_ATTR_RO(status);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
606
607static ssize_t trigger_cntr_show(struct device *dev,
608			    struct device_attribute *attr, char *buf)
609{
610	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
611	unsigned long val = drvdata->trigger_cntr;
612
613	return sprintf(buf, "%#lx\n", val);
614}
615
616static ssize_t trigger_cntr_store(struct device *dev,
617			     struct device_attribute *attr,
618			     const char *buf, size_t size)
619{
620	int ret;
621	unsigned long val;
622	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
623
624	ret = kstrtoul(buf, 16, &val);
625	if (ret)
626		return ret;
627
628	drvdata->trigger_cntr = val;
629	return size;
630}
631static DEVICE_ATTR_RW(trigger_cntr);
632
/* Per-variant sysfs attribute groups; all three expose the same entries. */
static struct attribute *coresight_etb_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_status.attr,
	NULL,
};
ATTRIBUTE_GROUPS(coresight_etb);

static struct attribute *coresight_etr_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_status.attr,
	NULL,
};
ATTRIBUTE_GROUPS(coresight_etr);

static struct attribute *coresight_etf_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_status.attr,
	NULL,
};
ATTRIBUTE_GROUPS(coresight_etf);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
653
/*
 * Probe an AMBA-discovered TMC: map its registers, determine the variant
 * from DEVID, allocate the capture buffer, and register both a coresight
 * device and a misc character device for trace retrieval.
 */
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;
	u32 devid;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct tmc_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
	}

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	/* The TMC variant (ETB/ETR/ETF) is encoded in DEVID[7:6]. */
	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
	drvdata->config_type = BMVAL(devid, 6, 7);

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		/* ETR buffer size comes from DT, defaulting to 1MB. */
		if (np)
			ret = of_property_read_u32(np,
						   "arm,buffer-size",
						   &drvdata->size);
		if (ret)
			drvdata->size = SZ_1M;
	} else {
		/* ETB/ETF: TMC_RSZ reports the internal RAM size in words. */
		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
	}

	pm_runtime_put(&adev->dev);

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
						&drvdata->paddr, GFP_KERNEL);
		if (!drvdata->vaddr)
			return -ENOMEM;

		memset(drvdata->vaddr, 0, drvdata->size);
		drvdata->buf = drvdata->vaddr;
	} else {
		drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL);
		if (!drvdata->buf)
			return -ENOMEM;
	}

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto err_devm_kzalloc;
	}

	desc->pdata = pdata;
	desc->dev = dev;
	desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		desc->type = CORESIGHT_DEV_TYPE_SINK;
		desc->ops = &tmc_etb_cs_ops;
		desc->groups = coresight_etb_groups;
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		desc->type = CORESIGHT_DEV_TYPE_SINK;
		desc->ops = &tmc_etr_cs_ops;
		desc->groups = coresight_etr_groups;
	} else {
		desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
		desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
		desc->ops = &tmc_etf_cs_ops;
		desc->groups = coresight_etf_groups;
	}

	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_devm_kzalloc;
	}

	/*
	 * NOTE(review): pdata remains NULL when there is no DT node, so
	 * pdata->name would dereference NULL here - confirm that DT is
	 * mandatory for this driver.
	 */
	drvdata->miscdev.name = pdata->name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &tmc_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		goto err_misc_register;

	dev_info(dev, "TMC initialized\n");
	return 0;

err_misc_register:
	coresight_unregister(drvdata->csdev);
err_devm_kzalloc:
	/* The ETR DMA area is not devm-managed and must be freed by hand. */
	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
		dma_free_coherent(dev, drvdata->size,
				drvdata->vaddr, drvdata->paddr);
	return ret;
}
768
/* AMBA Primecell ID table: matches the CoreSight TMC part (0x961). */
static struct amba_id tmc_ids[] = {
	{
		.id     = 0x0003b961,
		.mask   = 0x0003ffff,
	},
	{ 0, 0},
};
776
static struct amba_driver tmc_driver = {
	.drv = {
		.name   = "coresight-tmc",
		.owner  = THIS_MODULE,
		/* Built-in driver: disallow manual unbind through sysfs. */
		.suppress_bind_attrs = true,
	},
	.probe		= tmc_probe,
	.id_table	= tmc_ids,
};
builtin_amba_driver(tmc_driver);
v4.17
  1/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  2 *
  3 * Description: CoreSight Trace Memory Controller driver
  4 *
  5 * This program is free software; you can redistribute it and/or modify
  6 * it under the terms of the GNU General Public License version 2 and
  7 * only version 2 as published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it will be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 */
 14
 15#include <linux/kernel.h>
 16#include <linux/init.h>
 17#include <linux/types.h>
 18#include <linux/device.h>
 19#include <linux/io.h>
 20#include <linux/err.h>
 21#include <linux/fs.h>
 22#include <linux/miscdevice.h>
 23#include <linux/uaccess.h>
 24#include <linux/slab.h>
 25#include <linux/dma-mapping.h>
 26#include <linux/spinlock.h>
 27#include <linux/pm_runtime.h>
 28#include <linux/of.h>
 29#include <linux/coresight.h>
 30#include <linux/amba/bus.h>
 31
 32#include "coresight-priv.h"
 33#include "coresight-tmc.h"
 34
 35void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 36{
 37	/* Ensure formatter, unformatter and hardware fifo are empty */
 38	if (coresight_timeout(drvdata->base,
 39			      TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
 40		dev_err(drvdata->dev,
 41			"timeout while waiting for TMC to be Ready\n");
 
 42	}
 43}
 44
/*
 * Stop capture via a manual flush: set STOP_ON_FLUSH so the flush ends the
 * session, trigger the flush, then wait for it (and the drain) to complete.
 */
void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
	u32 ffcr;

	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* Kick off a manual flush; the bit clears when the flush is done. */
	ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* Ensure flush completes */
	if (coresight_timeout(drvdata->base,
			      TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
		dev_err(drvdata->dev,
		"timeout while waiting for completion of Manual Flush\n");
	}

	tmc_wait_for_tmcready(drvdata);
}
 63
/* Start trace capture by setting the capture-enable bit in TMC_CTL. */
void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}

/* Stop trace capture by clearing TMC_CTL entirely. */
void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(0x0, drvdata->base + TMC_CTL);
}
 73
 74static int tmc_read_prepare(struct tmc_drvdata *drvdata)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 75{
 76	int ret = 0;
 
 
 
 77
 78	switch (drvdata->config_type) {
 79	case TMC_CONFIG_TYPE_ETB:
 80	case TMC_CONFIG_TYPE_ETF:
 81		ret = tmc_read_prepare_etb(drvdata);
 82		break;
 83	case TMC_CONFIG_TYPE_ETR:
 84		ret = tmc_read_prepare_etr(drvdata);
 85		break;
 86	default:
 87		ret = -EINVAL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 88	}
 
 89
 90	if (!ret)
 91		dev_info(drvdata->dev, "TMC read start\n");
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 92
 93	return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 94}
 95
 96static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
 97{
 98	int ret = 0;
 
 
 
 
 99
100	switch (drvdata->config_type) {
101	case TMC_CONFIG_TYPE_ETB:
102	case TMC_CONFIG_TYPE_ETF:
103		ret = tmc_read_unprepare_etb(drvdata);
104		break;
105	case TMC_CONFIG_TYPE_ETR:
106		ret = tmc_read_unprepare_etr(drvdata);
107		break;
108	default:
109		ret = -EINVAL;
110	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
112	if (!ret)
113		dev_info(drvdata->dev, "TMC read end\n");
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115	return ret;
116}
117
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
118static int tmc_open(struct inode *inode, struct file *file)
119{
120	int ret;
121	struct tmc_drvdata *drvdata = container_of(file->private_data,
122						   struct tmc_drvdata, miscdev);
 
 
 
 
123
124	ret = tmc_read_prepare(drvdata);
125	if (ret)
126		return ret;
127
128	nonseekable_open(inode, file);
129
130	dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
131	return 0;
132}
133
/*
 * Character device read: copy up to @len bytes of captured trace at file
 * offset *@ppos to user space.  @len (the drvdata field) holds the amount
 * of valid data captured, while @size is the buffer capacity.
 */
static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
			loff_t *ppos)
{
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);
	char *bufp = drvdata->buf + *ppos;

	/* Clamp the request to the amount of valid data. */
	if (*ppos + len > drvdata->len)
		len = drvdata->len - *ppos;

	/*
	 * ETR: @buf may point into the middle of the DMA area, so wrap the
	 * read pointer around the ring and clamp the length at the end of
	 * the DMA area.
	 */
	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		if (bufp == (char *)(drvdata->vaddr + drvdata->size))
			bufp = drvdata->vaddr;
		else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
			bufp -= drvdata->size;
		if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
			len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
	}

	if (copy_to_user(data, bufp, len)) {
		dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += len;

	dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
		__func__, len, (int)(drvdata->len - *ppos));
	return len;
}
164
165static int tmc_release(struct inode *inode, struct file *file)
166{
167	int ret;
168	struct tmc_drvdata *drvdata = container_of(file->private_data,
169						   struct tmc_drvdata, miscdev);
170
171	ret = tmc_read_unprepare(drvdata);
172	if (ret)
173		return ret;
 
 
 
 
174
 
 
175	dev_dbg(drvdata->dev, "%s: released\n", __func__);
176	return 0;
177}
178
/* File operations backing the TMC misc character device. */
static const struct file_operations tmc_fops = {
	.owner		= THIS_MODULE,
	.open		= tmc_open,
	.read		= tmc_read,
	.release	= tmc_release,
	.llseek		= no_llseek,
};
186
187static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
 
188{
189	enum tmc_mem_intf_width memwidth;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
190
191	/*
192	 * Excerpt from the TRM:
193	 *
194	 * DEVID::MEMWIDTH[10:8]
195	 * 0x2 Memory interface databus is 32 bits wide.
196	 * 0x3 Memory interface databus is 64 bits wide.
197	 * 0x4 Memory interface databus is 128 bits wide.
198	 * 0x5 Memory interface databus is 256 bits wide.
199	 */
200	switch (BMVAL(devid, 8, 10)) {
201	case 0x2:
202		memwidth = TMC_MEM_INTF_WIDTH_32BITS;
203		break;
204	case 0x3:
205		memwidth = TMC_MEM_INTF_WIDTH_64BITS;
206		break;
207	case 0x4:
208		memwidth = TMC_MEM_INTF_WIDTH_128BITS;
209		break;
210	case 0x5:
211		memwidth = TMC_MEM_INTF_WIDTH_256BITS;
212		break;
213	default:
214		memwidth = 0;
215	}
216
217	return memwidth;
218}
219
/* Shorthands declaring sysfs show routines for 32/64-bit TMC registers. */
#define coresight_tmc_reg(name, offset)			\
	coresight_simple_reg32(struct tmc_drvdata, name, offset)
#define coresight_tmc_reg64(name, lo_off, hi_off)	\
	coresight_simple_reg64(struct tmc_drvdata, name, lo_off, hi_off)

/* One read-only attribute per management register. */
coresight_tmc_reg(rsz, TMC_RSZ);
coresight_tmc_reg(sts, TMC_STS);
coresight_tmc_reg(trg, TMC_TRG);
coresight_tmc_reg(ctl, TMC_CTL);
coresight_tmc_reg(ffsr, TMC_FFSR);
coresight_tmc_reg(ffcr, TMC_FFCR);
coresight_tmc_reg(mode, TMC_MODE);
coresight_tmc_reg(pscr, TMC_PSCR);
coresight_tmc_reg(axictl, TMC_AXICTL);
coresight_tmc_reg(devid, CORESIGHT_DEVID);
/* 64-bit pointers are split across a low/high register pair. */
coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI);
238
/* Attributes exposed in the "mgmt" sysfs group (see group below). */
static struct attribute *coresight_tmc_mgmt_attrs[] = {
	&dev_attr_rsz.attr,
	&dev_attr_sts.attr,
	&dev_attr_rrp.attr,
	&dev_attr_rwp.attr,
	&dev_attr_trg.attr,
	&dev_attr_ctl.attr,
	&dev_attr_ffsr.attr,
	&dev_attr_ffcr.attr,
	&dev_attr_mode.attr,
	&dev_attr_pscr.attr,
	&dev_attr_devid.attr,
	&dev_attr_dba.attr,
	&dev_attr_axictl.attr,
	NULL,
};
255
256static ssize_t trigger_cntr_show(struct device *dev,
257				 struct device_attribute *attr, char *buf)
258{
259	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
260	unsigned long val = drvdata->trigger_cntr;
261
262	return sprintf(buf, "%#lx\n", val);
263}
264
265static ssize_t trigger_cntr_store(struct device *dev,
266			     struct device_attribute *attr,
267			     const char *buf, size_t size)
268{
269	int ret;
270	unsigned long val;
271	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
272
273	ret = kstrtoul(buf, 16, &val);
274	if (ret)
275		return ret;
276
277	drvdata->trigger_cntr = val;
278	return size;
279}
280static DEVICE_ATTR_RW(trigger_cntr);
281
static struct attribute *coresight_tmc_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	NULL,
};

static const struct attribute_group coresight_tmc_group = {
	.attrs = coresight_tmc_attrs,
};

/* Management register dumps live in a "mgmt" sysfs sub-directory. */
static const struct attribute_group coresight_tmc_mgmt_group = {
	.attrs = coresight_tmc_mgmt_attrs,
	.name = "mgmt",
};

/* Combined group list handed to the coresight core at registration. */
const struct attribute_group *coresight_tmc_groups[] = {
	&coresight_tmc_group,
	&coresight_tmc_mgmt_group,
	NULL,
};
301
302/* Detect and initialise the capabilities of a TMC ETR */
303static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
304			     u32 devid, void *dev_caps)
305{
306	u32 dma_mask = 0;
307
308	/* Set the unadvertised capabilities */
309	tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
310
311	if (!(devid & TMC_DEVID_NOSCAT))
312		tmc_etr_set_cap(drvdata, TMC_ETR_SG);
313
314	/* Check if the AXI address width is available */
315	if (devid & TMC_DEVID_AXIAW_VALID)
316		dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
317				TMC_DEVID_AXIAW_MASK);
318
319	/*
320	 * Unless specified in the device configuration, ETR uses a 40-bit
321	 * AXI master in place of the embedded SRAM of ETB/ETF.
322	 */
323	switch (dma_mask) {
324	case 32:
325	case 40:
326	case 44:
327	case 48:
328	case 52:
329		dev_info(drvdata->dev, "Detected dma mask %dbits\n", dma_mask);
330		break;
331	default:
332		dma_mask = 40;
333	}
334
335	return dma_set_mask_and_coherent(drvdata->dev, DMA_BIT_MASK(dma_mask));
336}
337
338static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
339{
340	int ret = 0;
341	u32 devid;
342	void __iomem *base;
343	struct device *dev = &adev->dev;
344	struct coresight_platform_data *pdata = NULL;
345	struct tmc_drvdata *drvdata;
346	struct resource *res = &adev->res;
347	struct coresight_desc desc = { 0 };
348	struct device_node *np = adev->dev.of_node;
349
350	if (np) {
351		pdata = of_get_coresight_platform_data(dev, np);
352		if (IS_ERR(pdata)) {
353			ret = PTR_ERR(pdata);
354			goto out;
355		}
356		adev->dev.platform_data = pdata;
357	}
358
359	ret = -ENOMEM;
360	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
361	if (!drvdata)
362		goto out;
363
364	drvdata->dev = &adev->dev;
365	dev_set_drvdata(dev, drvdata);
366
367	/* Validity for the resource is already checked by the AMBA core */
368	base = devm_ioremap_resource(dev, res);
369	if (IS_ERR(base)) {
370		ret = PTR_ERR(base);
371		goto out;
372	}
373
374	drvdata->base = base;
375
376	spin_lock_init(&drvdata->spinlock);
377
378	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
379	drvdata->config_type = BMVAL(devid, 6, 7);
380	drvdata->memwidth = tmc_get_memwidth(devid);
381
382	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
383		if (np)
384			ret = of_property_read_u32(np,
385						   "arm,buffer-size",
386						   &drvdata->size);
387		if (ret)
388			drvdata->size = SZ_1M;
389	} else {
390		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
391	}
392
393	pm_runtime_put(&adev->dev);
394
395	desc.pdata = pdata;
396	desc.dev = dev;
397	desc.groups = coresight_tmc_groups;
398
399	switch (drvdata->config_type) {
400	case TMC_CONFIG_TYPE_ETB:
401		desc.type = CORESIGHT_DEV_TYPE_SINK;
402		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
403		desc.ops = &tmc_etb_cs_ops;
404		break;
405	case TMC_CONFIG_TYPE_ETR:
406		desc.type = CORESIGHT_DEV_TYPE_SINK;
407		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
408		desc.ops = &tmc_etr_cs_ops;
409		ret = tmc_etr_setup_caps(drvdata, devid, id->data);
410		if (ret)
411			goto out;
412		break;
413	case TMC_CONFIG_TYPE_ETF:
414		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
415		desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
416		desc.ops = &tmc_etf_cs_ops;
417		break;
418	default:
419		pr_err("%s: Unsupported TMC config\n", pdata->name);
420		ret = -EINVAL;
421		goto out;
 
 
 
 
 
 
 
 
 
 
422	}
423
424	drvdata->csdev = coresight_register(&desc);
425	if (IS_ERR(drvdata->csdev)) {
426		ret = PTR_ERR(drvdata->csdev);
427		goto out;
428	}
429
430	drvdata->miscdev.name = pdata->name;
431	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
432	drvdata->miscdev.fops = &tmc_fops;
433	ret = misc_register(&drvdata->miscdev);
434	if (ret)
435		coresight_unregister(drvdata->csdev);
436out:
 
 
 
 
 
 
 
 
 
437	return ret;
438}
439
/*
 * AMBA peripheral IDs served by this driver.  The first entry matches
 * the original TMC (configuration — ETB/ETR/ETF — is read from DEVID in
 * tmc_probe()); the SoC-600 entries carry extra capability bits in .data.
 */
static const struct amba_id tmc_ids[] = {
	{
		.id     = 0x000bb961,
		.mask   = 0x000fffff,
	},
	{
		/* Coresight SoC 600 TMC-ETR/ETS */
		.id	= 0x000bb9e8,
		.mask	= 0x000fffff,
		.data	= (void *)(unsigned long)CORESIGHT_SOC_600_ETR_CAPS,
	},
	{
		/* Coresight SoC 600 TMC-ETB */
		.id	= 0x000bb9e9,
		.mask	= 0x000fffff,
	},
	{
		/* Coresight SoC 600 TMC-ETF */
		.id	= 0x000bb9ea,
		.mask	= 0x000fffff,
	},
	{ 0, 0},
};
463
/*
 * Built-in only (no module_exit/remove); suppress_bind_attrs prevents
 * manual unbinding through sysfs since the driver cannot be detached.
 */
static struct amba_driver tmc_driver = {
	.drv = {
		.name   = "coresight-tmc",
		.owner  = THIS_MODULE,
		.suppress_bind_attrs = true,
	},
	.probe		= tmc_probe,
	.id_table	= tmc_ids,
};
builtin_amba_driver(tmc_driver);