v4.6
  1/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  2 *
  3 * Description: CoreSight Trace Memory Controller driver
  4 *
  5 * This program is free software; you can redistribute it and/or modify
  6 * it under the terms of the GNU General Public License version 2 and
  7 * only version 2 as published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it will be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 */
 14
 15#include <linux/kernel.h>
 16#include <linux/init.h>
 17#include <linux/types.h>
 18#include <linux/device.h>
 19#include <linux/io.h>
 20#include <linux/err.h>
 21#include <linux/fs.h>
 22#include <linux/miscdevice.h>
 23#include <linux/uaccess.h>
 24#include <linux/slab.h>
 25#include <linux/dma-mapping.h>
 26#include <linux/spinlock.h>
 27#include <linux/pm_runtime.h>
 28#include <linux/of.h>
 29#include <linux/coresight.h>
 30#include <linux/amba/bus.h>
 31
 32#include "coresight-priv.h"
 33
 34#define TMC_RSZ			0x004
 35#define TMC_STS			0x00c
 36#define TMC_RRD			0x010
 37#define TMC_RRP			0x014
 38#define TMC_RWP			0x018
 39#define TMC_TRG			0x01c
 40#define TMC_CTL			0x020
 41#define TMC_RWD			0x024
 42#define TMC_MODE		0x028
 43#define TMC_LBUFLEVEL		0x02c
 44#define TMC_CBUFLEVEL		0x030
 45#define TMC_BUFWM		0x034
 46#define TMC_RRPHI		0x038
 47#define TMC_RWPHI		0x03c
 48#define TMC_AXICTL		0x110
 49#define TMC_DBALO		0x118
 50#define TMC_DBAHI		0x11c
 51#define TMC_FFSR		0x300
 52#define TMC_FFCR		0x304
 53#define TMC_PSCR		0x308
 54#define TMC_ITMISCOP0		0xee0
 55#define TMC_ITTRFLIN		0xee8
 56#define TMC_ITATBDATA0		0xeec
 57#define TMC_ITATBCTR2		0xef0
 58#define TMC_ITATBCTR1		0xef4
 59#define TMC_ITATBCTR0		0xef8
 60
 61/* register description */
 62/* TMC_CTL - 0x020 */
 63#define TMC_CTL_CAPT_EN		BIT(0)
 64/* TMC_STS - 0x00C */
 65#define TMC_STS_TRIGGERED	BIT(1)
 66/* TMC_AXICTL - 0x110 */
 67#define TMC_AXICTL_PROT_CTL_B0	BIT(0)
 68#define TMC_AXICTL_PROT_CTL_B1	BIT(1)
 69#define TMC_AXICTL_SCT_GAT_MODE	BIT(7)
 70#define TMC_AXICTL_WR_BURST_LEN 0xF00
 71/* TMC_FFCR - 0x304 */
 72#define TMC_FFCR_EN_FMT		BIT(0)
 73#define TMC_FFCR_EN_TI		BIT(1)
 74#define TMC_FFCR_FON_FLIN	BIT(4)
 75#define TMC_FFCR_FON_TRIG_EVT	BIT(5)
 76#define TMC_FFCR_FLUSHMAN	BIT(6)
 77#define TMC_FFCR_TRIGON_TRIGIN	BIT(8)
 78#define TMC_FFCR_STOP_ON_FLUSH	BIT(12)
 79
 80#define TMC_STS_TRIGGERED_BIT	2
 81#define TMC_FFCR_FLUSHMAN_BIT	6
 82
 83enum tmc_config_type {
 84	TMC_CONFIG_TYPE_ETB,
 85	TMC_CONFIG_TYPE_ETR,
 86	TMC_CONFIG_TYPE_ETF,
 87};
 88
 89enum tmc_mode {
 90	TMC_MODE_CIRCULAR_BUFFER,
 91	TMC_MODE_SOFTWARE_FIFO,
 92	TMC_MODE_HARDWARE_FIFO,
 93};
 94
 95enum tmc_mem_intf_width {
 96	TMC_MEM_INTF_WIDTH_32BITS	= 0x2,
 97	TMC_MEM_INTF_WIDTH_64BITS	= 0x3,
 98	TMC_MEM_INTF_WIDTH_128BITS	= 0x4,
 99	TMC_MEM_INTF_WIDTH_256BITS	= 0x5,
100};
101
102/**
103 * struct tmc_drvdata - specifics associated to a TMC component
104 * @base:	memory mapped base address for this component.
105 * @dev:	the device entity associated to this component.
106 * @csdev:	component vitals needed by the framework.
107 * @miscdev:	specifics to handle "/dev/xyz.tmc" entry.
108 * @spinlock:	only one at a time pls.
109 * @read_count:	manages preparation of buffer for reading.
 * @reading:	true while the buffer is being read via @miscdev.
110 * @buf:	area of memory where trace data get sent.
111 * @paddr:	DMA start location in RAM.
112 * @vaddr:	virtual representation of @paddr.
113 * @size:	@buf size.
114 * @enable:	this TMC is being used.
115 * @config_type: TMC variant, must be of type @tmc_config_type.
116 * @trigger_cntr: number of words to store after a trigger.
117 */
118struct tmc_drvdata {
119	void __iomem		*base;
120	struct device		*dev;
121	struct coresight_device	*csdev;
122	struct miscdevice	miscdev;
123	spinlock_t		spinlock;
124	int			read_count;
125	bool			reading;
126	char			*buf;
127	dma_addr_t		paddr;
128	void			*vaddr;
129	u32			size;
130	bool			enable;
131	enum tmc_config_type	config_type;
132	u32			trigger_cntr;
133};
134
135static void tmc_wait_for_ready(struct tmc_drvdata *drvdata)
136{
137	/* Ensure formatter, unformatter and hardware fifo are empty */
138	if (coresight_timeout(drvdata->base,
139			      TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) {
140		dev_err(drvdata->dev,
141			"timeout observed when probing at offset %#x\n",
142			TMC_STS);
143	}
144}
145
146static void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
147{
148	u32 ffcr;
149
150	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
151	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
152	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
153	ffcr |= TMC_FFCR_FLUSHMAN;
154	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
155	/* Ensure flush completes */
156	if (coresight_timeout(drvdata->base,
157			      TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
158		dev_err(drvdata->dev,
159			"timeout observed when probing at offset %#x\n",
160			TMC_FFCR);
161	}
162
163	tmc_wait_for_ready(drvdata);
164}
165
166static void tmc_enable_hw(struct tmc_drvdata *drvdata)
167{
168	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
169}
170
171static void tmc_disable_hw(struct tmc_drvdata *drvdata)
172{
173	writel_relaxed(0x0, drvdata->base + TMC_CTL);
174}
175
176static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
177{
178	/* Zero out the memory to help with debug */
179	memset(drvdata->buf, 0, drvdata->size);
180
181	CS_UNLOCK(drvdata->base);
182
183	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
184	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
185		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
186		       TMC_FFCR_TRIGON_TRIGIN,
187		       drvdata->base + TMC_FFCR);
188
189	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
190	tmc_enable_hw(drvdata);
191
192	CS_LOCK(drvdata->base);
193}
194
195static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
196{
197	u32 axictl;
198
199	/* Zero out the memory to help with debug */
200	memset(drvdata->vaddr, 0, drvdata->size);
201
202	CS_UNLOCK(drvdata->base);
203
204	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
205	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
206
207	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
208	axictl |= TMC_AXICTL_WR_BURST_LEN;
209	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
210	axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
211	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
212	axictl = (axictl &
213		  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
214		  TMC_AXICTL_PROT_CTL_B1;
215	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
216
217	writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
218	writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
219	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
220		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
221		       TMC_FFCR_TRIGON_TRIGIN,
222		       drvdata->base + TMC_FFCR);
223	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
224	tmc_enable_hw(drvdata);
225
226	CS_LOCK(drvdata->base);
227}
228
229static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
230{
231	CS_UNLOCK(drvdata->base);
232
233	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
234	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
235		       drvdata->base + TMC_FFCR);
236	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
237	tmc_enable_hw(drvdata);
238
239	CS_LOCK(drvdata->base);
240}
241
242static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
243{
244	unsigned long flags;
245
246	spin_lock_irqsave(&drvdata->spinlock, flags);
247	if (drvdata->reading) {
248		spin_unlock_irqrestore(&drvdata->spinlock, flags);
249		return -EBUSY;
250	}
251
252	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
253		tmc_etb_enable_hw(drvdata);
254	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
255		tmc_etr_enable_hw(drvdata);
256	} else {
257		if (mode == TMC_MODE_CIRCULAR_BUFFER)
258			tmc_etb_enable_hw(drvdata);
259		else
260			tmc_etf_enable_hw(drvdata);
261	}
262	drvdata->enable = true;
263	spin_unlock_irqrestore(&drvdata->spinlock, flags);
264
265	dev_info(drvdata->dev, "TMC enabled\n");
266	return 0;
267}
268
269static int tmc_enable_sink(struct coresight_device *csdev, u32 mode)
270{
271	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
272
273	return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
274}
275
276static int tmc_enable_link(struct coresight_device *csdev, int inport,
277			   int outport)
278{
279	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
280
281	return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO);
282}
283
284static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
285{
286	enum tmc_mem_intf_width memwidth;
287	u8 memwords;
288	char *bufp;
289	u32 read_data;
290	int i;
291
292	memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10);
293	if (memwidth == TMC_MEM_INTF_WIDTH_32BITS)
294		memwords = 1;
295	else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS)
296		memwords = 2;
297	else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS)
298		memwords = 4;
299	else
300		memwords = 8;
301
302	bufp = drvdata->buf;
303	while (1) {
304		for (i = 0; i < memwords; i++) {
305			read_data = readl_relaxed(drvdata->base + TMC_RRD);
306			if (read_data == 0xFFFFFFFF)
307				return;
308			memcpy(bufp, &read_data, 4);
309			bufp += 4;
310		}
311	}
312}
313
314static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
315{
316	CS_UNLOCK(drvdata->base);
317
318	tmc_flush_and_stop(drvdata);
319	tmc_etb_dump_hw(drvdata);
320	tmc_disable_hw(drvdata);
321
322	CS_LOCK(drvdata->base);
323}
324
325static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
326{
327	u32 rwp, val;
328
329	rwp = readl_relaxed(drvdata->base + TMC_RWP);
330	val = readl_relaxed(drvdata->base + TMC_STS);
331
332	/* How much memory do we still have */
333	if (val & BIT(0))
334		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
335	else
336		drvdata->buf = drvdata->vaddr;
337}
338
339static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
340{
341	CS_UNLOCK(drvdata->base);
342
343	tmc_flush_and_stop(drvdata);
344	tmc_etr_dump_hw(drvdata);
345	tmc_disable_hw(drvdata);
346
347	CS_LOCK(drvdata->base);
348}
349
350static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
351{
352	CS_UNLOCK(drvdata->base);
353
354	tmc_flush_and_stop(drvdata);
355	tmc_disable_hw(drvdata);
356
357	CS_LOCK(drvdata->base);
358}
359
360static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
361{
362	unsigned long flags;
363
364	spin_lock_irqsave(&drvdata->spinlock, flags);
365	if (drvdata->reading)
366		goto out;
367
368	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
369		tmc_etb_disable_hw(drvdata);
370	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
371		tmc_etr_disable_hw(drvdata);
372	} else {
373		if (mode == TMC_MODE_CIRCULAR_BUFFER)
374			tmc_etb_disable_hw(drvdata);
375		else
376			tmc_etf_disable_hw(drvdata);
377	}
378out:
379	drvdata->enable = false;
380	spin_unlock_irqrestore(&drvdata->spinlock, flags);
381
382	dev_info(drvdata->dev, "TMC disabled\n");
383}
384
385static void tmc_disable_sink(struct coresight_device *csdev)
386{
387	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
388
389	tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
390}
391
392static void tmc_disable_link(struct coresight_device *csdev, int inport,
393			     int outport)
394{
395	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
396
397	tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO);
398}
399
400static const struct coresight_ops_sink tmc_sink_ops = {
401	.enable		= tmc_enable_sink,
402	.disable	= tmc_disable_sink,
403};
404
405static const struct coresight_ops_link tmc_link_ops = {
406	.enable		= tmc_enable_link,
407	.disable	= tmc_disable_link,
408};
409
410static const struct coresight_ops tmc_etb_cs_ops = {
411	.sink_ops	= &tmc_sink_ops,
412};
413
414static const struct coresight_ops tmc_etr_cs_ops = {
415	.sink_ops	= &tmc_sink_ops,
416};
417
418static const struct coresight_ops tmc_etf_cs_ops = {
419	.sink_ops	= &tmc_sink_ops,
420	.link_ops	= &tmc_link_ops,
421};
422
423static int tmc_read_prepare(struct tmc_drvdata *drvdata)
424{
425	int ret;
426	unsigned long flags;
427	enum tmc_mode mode;
428
429	spin_lock_irqsave(&drvdata->spinlock, flags);
430	if (!drvdata->enable)
431		goto out;
432
433	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
434		tmc_etb_disable_hw(drvdata);
435	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
436		tmc_etr_disable_hw(drvdata);
437	} else {
438		mode = readl_relaxed(drvdata->base + TMC_MODE);
439		if (mode == TMC_MODE_CIRCULAR_BUFFER) {
440			tmc_etb_disable_hw(drvdata);
441		} else {
442			ret = -ENODEV;
443			goto err;
444		}
445	}
446out:
447	drvdata->reading = true;
448	spin_unlock_irqrestore(&drvdata->spinlock, flags);
449
450	dev_info(drvdata->dev, "TMC read start\n");
451	return 0;
452err:
453	spin_unlock_irqrestore(&drvdata->spinlock, flags);
454	return ret;
455}
456
457static void tmc_read_unprepare(struct tmc_drvdata *drvdata)
458{
459	unsigned long flags;
460	enum tmc_mode mode;
461
462	spin_lock_irqsave(&drvdata->spinlock, flags);
463	if (!drvdata->enable)
464		goto out;
465
466	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
467		tmc_etb_enable_hw(drvdata);
468	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
469		tmc_etr_enable_hw(drvdata);
470	} else {
471		mode = readl_relaxed(drvdata->base + TMC_MODE);
472		if (mode == TMC_MODE_CIRCULAR_BUFFER)
473			tmc_etb_enable_hw(drvdata);
474	}
475out:
476	drvdata->reading = false;
477	spin_unlock_irqrestore(&drvdata->spinlock, flags);
478
479	dev_info(drvdata->dev, "TMC read end\n");
480}
481
482static int tmc_open(struct inode *inode, struct file *file)
483{
484	struct tmc_drvdata *drvdata = container_of(file->private_data,
485						   struct tmc_drvdata, miscdev);
486	int ret = 0;
487
488	if (drvdata->read_count++)
489		goto out;
490
491	ret = tmc_read_prepare(drvdata);
492	if (ret)
493		return ret;
494out:
495	nonseekable_open(inode, file);
496
497	dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
498	return 0;
499}
500
501static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
502			loff_t *ppos)
503{
504	struct tmc_drvdata *drvdata = container_of(file->private_data,
505						   struct tmc_drvdata, miscdev);
506	char *bufp = drvdata->buf + *ppos;
507
508	if (*ppos + len > drvdata->size)
509		len = drvdata->size - *ppos;
510
511	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
512		if (bufp == (char *)(drvdata->vaddr + drvdata->size))
513			bufp = drvdata->vaddr;
514		else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
515			bufp -= drvdata->size;
516		if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
517			len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
518	}
519
520	if (copy_to_user(data, bufp, len)) {
521		dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
522		return -EFAULT;
523	}
524
525	*ppos += len;
526
527	dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
528		__func__, len, (int)(drvdata->size - *ppos));
529	return len;
530}
531
532static int tmc_release(struct inode *inode, struct file *file)
533{
534	struct tmc_drvdata *drvdata = container_of(file->private_data,
535						   struct tmc_drvdata, miscdev);
536
537	if (--drvdata->read_count) {
538		if (drvdata->read_count < 0) {
539			dev_err(drvdata->dev, "mismatched close\n");
540			drvdata->read_count = 0;
541		}
542		goto out;
543	}
544
545	tmc_read_unprepare(drvdata);
546out:
547	dev_dbg(drvdata->dev, "%s: released\n", __func__);
548	return 0;
549}
550
551static const struct file_operations tmc_fops = {
552	.owner		= THIS_MODULE,
553	.open		= tmc_open,
554	.read		= tmc_read,
555	.release	= tmc_release,
556	.llseek		= no_llseek,
557};
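
The misc device registered at the end of tmc_probe() exposes these file operations, so the captured trace can be drained from user space with plain open()/read() calls. Below is a minimal user-space sketch (not part of the driver); the device node name is an assumption, since it comes from the platform data name (typically the DT node name, e.g. "20010000.etf"), so adjust it for the target system.

/*
 * Hypothetical user-space reader for the TMC misc device.  open() ends up
 * in tmc_open()/tmc_read_prepare(), which stops the capture (if running)
 * and snapshots the buffer; the final close() reaches tmc_release()/
 * tmc_read_unprepare(), which re-arms the TMC.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/20010000.etf", O_RDONLY);	/* assumed node name */
	FILE *out = fopen("trace.bin", "wb");

	if (fd < 0 || !out) {
		perror("tmc");
		return 1;
	}

	while ((n = read(fd, buf, sizeof(buf))) > 0)	/* tmc_read() copies from drvdata->buf */
		fwrite(buf, 1, n, out);

	fclose(out);
	close(fd);
	return 0;
}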
558
559static ssize_t status_show(struct device *dev,
560			   struct device_attribute *attr, char *buf)
561{
562	unsigned long flags;
563	u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg;
564	u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr;
565	u32 devid;
566	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
567
568	pm_runtime_get_sync(drvdata->dev);
569	spin_lock_irqsave(&drvdata->spinlock, flags);
570	CS_UNLOCK(drvdata->base);
571
572	tmc_rsz = readl_relaxed(drvdata->base + TMC_RSZ);
573	tmc_sts = readl_relaxed(drvdata->base + TMC_STS);
574	tmc_rrp = readl_relaxed(drvdata->base + TMC_RRP);
575	tmc_rwp = readl_relaxed(drvdata->base + TMC_RWP);
576	tmc_trg = readl_relaxed(drvdata->base + TMC_TRG);
577	tmc_ctl = readl_relaxed(drvdata->base + TMC_CTL);
578	tmc_ffsr = readl_relaxed(drvdata->base + TMC_FFSR);
579	tmc_ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
580	tmc_mode = readl_relaxed(drvdata->base + TMC_MODE);
581	tmc_pscr = readl_relaxed(drvdata->base + TMC_PSCR);
582	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
583
584	CS_LOCK(drvdata->base);
585	spin_unlock_irqrestore(&drvdata->spinlock, flags);
586	pm_runtime_put(drvdata->dev);
587
588	return sprintf(buf,
589		       "Depth:\t\t0x%x\n"
590		       "Status:\t\t0x%x\n"
591		       "RAM read ptr:\t0x%x\n"
592		       "RAM wrt ptr:\t0x%x\n"
593		       "Trigger cnt:\t0x%x\n"
594		       "Control:\t0x%x\n"
595		       "Flush status:\t0x%x\n"
596		       "Flush ctrl:\t0x%x\n"
597		       "Mode:\t\t0x%x\n"
598		       "PSRC:\t\t0x%x\n"
599		       "DEVID:\t\t0x%x\n",
600			tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg,
601			tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid);
602
603	return -EINVAL;
604}
605static DEVICE_ATTR_RO(status);
606
607static ssize_t trigger_cntr_show(struct device *dev,
608			    struct device_attribute *attr, char *buf)
609{
610	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
611	unsigned long val = drvdata->trigger_cntr;
612
613	return sprintf(buf, "%#lx\n", val);
614}
615
616static ssize_t trigger_cntr_store(struct device *dev,
617			     struct device_attribute *attr,
618			     const char *buf, size_t size)
619{
620	int ret;
621	unsigned long val;
622	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
623
624	ret = kstrtoul(buf, 16, &val);
625	if (ret)
626		return ret;
627
628	drvdata->trigger_cntr = val;
629	return size;
630}
631static DEVICE_ATTR_RW(trigger_cntr);
632
633static struct attribute *coresight_etb_attrs[] = {
634	&dev_attr_trigger_cntr.attr,
635	&dev_attr_status.attr,
636	NULL,
637};
638ATTRIBUTE_GROUPS(coresight_etb);
639
640static struct attribute *coresight_etr_attrs[] = {
641	&dev_attr_trigger_cntr.attr,
642	&dev_attr_status.attr,
643	NULL,
644};
645ATTRIBUTE_GROUPS(coresight_etr);
646
647static struct attribute *coresight_etf_attrs[] = {
648	&dev_attr_trigger_cntr.attr,
649	&dev_attr_status.attr,
650	NULL,
651};
652ATTRIBUTE_GROUPS(coresight_etf);
653
654static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
655{
656	int ret = 0;
657	u32 devid;
658	void __iomem *base;
659	struct device *dev = &adev->dev;
660	struct coresight_platform_data *pdata = NULL;
661	struct tmc_drvdata *drvdata;
662	struct resource *res = &adev->res;
663	struct coresight_desc *desc;
664	struct device_node *np = adev->dev.of_node;
665
666	if (np) {
667		pdata = of_get_coresight_platform_data(dev, np);
668		if (IS_ERR(pdata))
669			return PTR_ERR(pdata);
670		adev->dev.platform_data = pdata;
671	}
672
673	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
674	if (!drvdata)
675		return -ENOMEM;
676
677	drvdata->dev = &adev->dev;
678	dev_set_drvdata(dev, drvdata);
679
680	/* Validity for the resource is already checked by the AMBA core */
681	base = devm_ioremap_resource(dev, res);
682	if (IS_ERR(base))
683		return PTR_ERR(base);
684
685	drvdata->base = base;
686
687	spin_lock_init(&drvdata->spinlock);
688
689	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
690	drvdata->config_type = BMVAL(devid, 6, 7);
691
692	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
693		if (np)
694			ret = of_property_read_u32(np,
695						   "arm,buffer-size",
696						   &drvdata->size);
697		if (ret)
698			drvdata->size = SZ_1M;
699	} else {
700		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
701	}
702
703	pm_runtime_put(&adev->dev);
704
705	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
706		drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
707						&drvdata->paddr, GFP_KERNEL);
708		if (!drvdata->vaddr)
709			return -ENOMEM;
710
711		memset(drvdata->vaddr, 0, drvdata->size);
712		drvdata->buf = drvdata->vaddr;
713	} else {
714		drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL);
715		if (!drvdata->buf)
716			return -ENOMEM;
717	}
718
719	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
720	if (!desc) {
721		ret = -ENOMEM;
722		goto err_devm_kzalloc;
723	}
724
725	desc->pdata = pdata;
726	desc->dev = dev;
727	desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
728
729	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
730		desc->type = CORESIGHT_DEV_TYPE_SINK;
731		desc->ops = &tmc_etb_cs_ops;
732		desc->groups = coresight_etb_groups;
733	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
734		desc->type = CORESIGHT_DEV_TYPE_SINK;
735		desc->ops = &tmc_etr_cs_ops;
736		desc->groups = coresight_etr_groups;
737	} else {
738		desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
739		desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
740		desc->ops = &tmc_etf_cs_ops;
741		desc->groups = coresight_etf_groups;
742	}
743
744	drvdata->csdev = coresight_register(desc);
745	if (IS_ERR(drvdata->csdev)) {
746		ret = PTR_ERR(drvdata->csdev);
747		goto err_devm_kzalloc;
748	}
749
750	drvdata->miscdev.name = pdata->name;
751	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
752	drvdata->miscdev.fops = &tmc_fops;
753	ret = misc_register(&drvdata->miscdev);
754	if (ret)
755		goto err_misc_register;
756
757	dev_info(dev, "TMC initialized\n");
758	return 0;
759
760err_misc_register:
761	coresight_unregister(drvdata->csdev);
762err_devm_kzalloc:
763	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
764		dma_free_coherent(dev, drvdata->size,
765				drvdata->vaddr, drvdata->paddr);
766	return ret;
767}
768
769static struct amba_id tmc_ids[] = {
770	{
771		.id     = 0x0003b961,
772		.mask   = 0x0003ffff,
773	},
774	{ 0, 0},
775};
776
777static struct amba_driver tmc_driver = {
778	.drv = {
779		.name   = "coresight-tmc",
780		.owner  = THIS_MODULE,
781		.suppress_bind_attrs = true,
782	},
783	.probe		= tmc_probe,
784	.id_table	= tmc_ids,
785};
786builtin_amba_driver(tmc_driver);
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  3 *
  4 * Description: CoreSight Trace Memory Controller driver
  5 */
  6
  7#include <linux/kernel.h>
  8#include <linux/init.h>
  9#include <linux/types.h>
 10#include <linux/device.h>
 11#include <linux/idr.h>
 12#include <linux/io.h>
 13#include <linux/err.h>
 14#include <linux/fs.h>
 15#include <linux/miscdevice.h>
 16#include <linux/mutex.h>
 17#include <linux/property.h>
 18#include <linux/uaccess.h>
 19#include <linux/slab.h>
 20#include <linux/dma-mapping.h>
 21#include <linux/spinlock.h>
 22#include <linux/pm_runtime.h>
 23#include <linux/of.h>
 24#include <linux/coresight.h>
 25#include <linux/amba/bus.h>
 26
 27#include "coresight-priv.h"
 28#include "coresight-tmc.h"
 29
 30DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
 31DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
 32DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");
 33
 34void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
 35{
 36	/* Ensure formatter, unformatter and hardware fifo are empty */
 37	if (coresight_timeout(drvdata->base,
 38			      TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
 39		dev_err(&drvdata->csdev->dev,
 40			"timeout while waiting for TMC to be Ready\n");
 41	}
 42}
 43
 44void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
 45{
 46	u32 ffcr;
 47
 48	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
 49	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
 50	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
 51	ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
 52	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
 53	/* Ensure flush completes */
 54	if (coresight_timeout(drvdata->base,
 55			      TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
 56		dev_err(&drvdata->csdev->dev,
 57		"timeout while waiting for completion of Manual Flush\n");
 58	}
 59
 60	tmc_wait_for_tmcready(drvdata);
 61}
 62
 63void tmc_enable_hw(struct tmc_drvdata *drvdata)
 64{
 65	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
 66}
 67
 68void tmc_disable_hw(struct tmc_drvdata *drvdata)
 69{
 70	writel_relaxed(0x0, drvdata->base + TMC_CTL);
 71}
 72
 73u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
 74{
 75	u32 mask = 0;
 76
 77	/*
 78	 * When moving RRP or an offset address forward, the new values must
 79	 * be byte-address aligned to the width of the trace memory databus
 80	 * _and_ to a frame boundary (16 bytes), whichever is bigger. For
 81	 * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
 82	 * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
 83	 * be 0s.
 84	 */
 85	switch (drvdata->memwidth) {
 86	case TMC_MEM_INTF_WIDTH_32BITS:
 87	case TMC_MEM_INTF_WIDTH_64BITS:
 88	case TMC_MEM_INTF_WIDTH_128BITS:
 89		mask = GENMASK(31, 4);
 90		break;
 91	case TMC_MEM_INTF_WIDTH_256BITS:
 92		mask = GENMASK(31, 5);
 93		break;
 94	}
 95
 96	return mask;
 97}
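
As a worked illustration of the alignment rule in the comment above (a standalone sketch, not driver code): ANDing a byte offset with the mask returned by tmc_get_memwidth_mask() rounds it down to the required frame boundary, e.g. 0x1007 becomes 0x1000 for a 64-bit wide memory interface.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* GENMASK(31, 4), i.e. what the function returns for 32/64/128-bit buses */
	uint32_t mask = 0xfffffff0u;
	uint32_t rrp  = 0x1007;

	/* 0x1007 & 0xfffffff0 == 0x1000: aligned to the 16-byte frame */
	printf("aligned RRP: %#x\n", rrp & mask);
	return 0;
}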
 98
 99static int tmc_read_prepare(struct tmc_drvdata *drvdata)
100{
101	int ret = 0;
102
103	switch (drvdata->config_type) {
104	case TMC_CONFIG_TYPE_ETB:
105	case TMC_CONFIG_TYPE_ETF:
106		ret = tmc_read_prepare_etb(drvdata);
107		break;
108	case TMC_CONFIG_TYPE_ETR:
109		ret = tmc_read_prepare_etr(drvdata);
110		break;
111	default:
112		ret = -EINVAL;
113	}
114
115	if (!ret)
116		dev_dbg(&drvdata->csdev->dev, "TMC read start\n");
117
118	return ret;
119}
120
121static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
122{
123	int ret = 0;
124
125	switch (drvdata->config_type) {
126	case TMC_CONFIG_TYPE_ETB:
127	case TMC_CONFIG_TYPE_ETF:
128		ret = tmc_read_unprepare_etb(drvdata);
129		break;
130	case TMC_CONFIG_TYPE_ETR:
131		ret = tmc_read_unprepare_etr(drvdata);
132		break;
133	default:
134		ret = -EINVAL;
135	}
136
137	if (!ret)
138		dev_dbg(&drvdata->csdev->dev, "TMC read end\n");
139
140	return ret;
141}
142
143static int tmc_open(struct inode *inode, struct file *file)
144{
145	int ret;
146	struct tmc_drvdata *drvdata = container_of(file->private_data,
147						   struct tmc_drvdata, miscdev);
148
149	ret = tmc_read_prepare(drvdata);
150	if (ret)
151		return ret;
152
153	nonseekable_open(inode, file);
154
155	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
156	return 0;
157}
158
159static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
160					  loff_t pos, size_t len, char **bufpp)
161{
162	switch (drvdata->config_type) {
163	case TMC_CONFIG_TYPE_ETB:
164	case TMC_CONFIG_TYPE_ETF:
165		return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
166	case TMC_CONFIG_TYPE_ETR:
167		return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
168	}
169
170	return -EINVAL;
171}
172
173static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
174			loff_t *ppos)
175{
176	char *bufp;
177	ssize_t actual;
178	struct tmc_drvdata *drvdata = container_of(file->private_data,
179						   struct tmc_drvdata, miscdev);
180	actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
181	if (actual <= 0)
182		return 0;
183
184	if (copy_to_user(data, bufp, actual)) {
185		dev_dbg(&drvdata->csdev->dev,
186			"%s: copy_to_user failed\n", __func__);
187		return -EFAULT;
188	}
189
190	*ppos += actual;
191	dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);
192
193	return actual;
194}
195
196static int tmc_release(struct inode *inode, struct file *file)
197{
198	int ret;
199	struct tmc_drvdata *drvdata = container_of(file->private_data,
200						   struct tmc_drvdata, miscdev);
201
202	ret = tmc_read_unprepare(drvdata);
203	if (ret)
204		return ret;
205
206	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
207	return 0;
208}
209
210static const struct file_operations tmc_fops = {
211	.owner		= THIS_MODULE,
212	.open		= tmc_open,
213	.read		= tmc_read,
214	.release	= tmc_release,
215	.llseek		= no_llseek,
216};
217
218static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
219{
220	enum tmc_mem_intf_width memwidth;
221
222	/*
223	 * Excerpt from the TRM:
224	 *
225	 * DEVID::MEMWIDTH[10:8]
226	 * 0x2 Memory interface databus is 32 bits wide.
227	 * 0x3 Memory interface databus is 64 bits wide.
228	 * 0x4 Memory interface databus is 128 bits wide.
229	 * 0x5 Memory interface databus is 256 bits wide.
230	 */
231	switch (BMVAL(devid, 8, 10)) {
232	case 0x2:
233		memwidth = TMC_MEM_INTF_WIDTH_32BITS;
234		break;
235	case 0x3:
236		memwidth = TMC_MEM_INTF_WIDTH_64BITS;
237		break;
238	case 0x4:
239		memwidth = TMC_MEM_INTF_WIDTH_128BITS;
240		break;
241	case 0x5:
242		memwidth = TMC_MEM_INTF_WIDTH_256BITS;
243		break;
244	default:
245		memwidth = 0;
246	}
247
248	return memwidth;
249}
250
251#define coresight_tmc_reg(name, offset)			\
252	coresight_simple_reg32(struct tmc_drvdata, name, offset)
253#define coresight_tmc_reg64(name, lo_off, hi_off)	\
254	coresight_simple_reg64(struct tmc_drvdata, name, lo_off, hi_off)
255
256coresight_tmc_reg(rsz, TMC_RSZ);
257coresight_tmc_reg(sts, TMC_STS);
258coresight_tmc_reg(trg, TMC_TRG);
259coresight_tmc_reg(ctl, TMC_CTL);
260coresight_tmc_reg(ffsr, TMC_FFSR);
261coresight_tmc_reg(ffcr, TMC_FFCR);
262coresight_tmc_reg(mode, TMC_MODE);
263coresight_tmc_reg(pscr, TMC_PSCR);
264coresight_tmc_reg(axictl, TMC_AXICTL);
265coresight_tmc_reg(authstatus, TMC_AUTHSTATUS);
266coresight_tmc_reg(devid, CORESIGHT_DEVID);
267coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
268coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
269coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI);
270
271static struct attribute *coresight_tmc_mgmt_attrs[] = {
272	&dev_attr_rsz.attr,
273	&dev_attr_sts.attr,
274	&dev_attr_rrp.attr,
275	&dev_attr_rwp.attr,
276	&dev_attr_trg.attr,
277	&dev_attr_ctl.attr,
278	&dev_attr_ffsr.attr,
279	&dev_attr_ffcr.attr,
280	&dev_attr_mode.attr,
281	&dev_attr_pscr.attr,
282	&dev_attr_devid.attr,
283	&dev_attr_dba.attr,
284	&dev_attr_axictl.attr,
285	&dev_attr_authstatus.attr,
286	NULL,
287};
288
289static ssize_t trigger_cntr_show(struct device *dev,
290				 struct device_attribute *attr, char *buf)
291{
292	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
293	unsigned long val = drvdata->trigger_cntr;
294
295	return sprintf(buf, "%#lx\n", val);
296}
297
298static ssize_t trigger_cntr_store(struct device *dev,
299			     struct device_attribute *attr,
300			     const char *buf, size_t size)
301{
302	int ret;
303	unsigned long val;
304	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
305
306	ret = kstrtoul(buf, 16, &val);
307	if (ret)
308		return ret;
309
310	drvdata->trigger_cntr = val;
311	return size;
312}
313static DEVICE_ATTR_RW(trigger_cntr);
314
315static ssize_t buffer_size_show(struct device *dev,
316				struct device_attribute *attr, char *buf)
317{
318	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
319
320	return sprintf(buf, "%#x\n", drvdata->size);
321}
322
323static ssize_t buffer_size_store(struct device *dev,
324				 struct device_attribute *attr,
325				 const char *buf, size_t size)
326{
327	int ret;
328	unsigned long val;
329	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
330
331	/* Only permitted for TMC-ETRs */
332	if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
333		return -EPERM;
334
335	ret = kstrtoul(buf, 0, &val);
336	if (ret)
337		return ret;
338	/* The buffer size should be page aligned */
339	if (val & (PAGE_SIZE - 1))
340		return -EINVAL;
341	drvdata->size = val;
342	return size;
343}
344
345static DEVICE_ATTR_RW(buffer_size);
346
347static struct attribute *coresight_tmc_attrs[] = {
348	&dev_attr_trigger_cntr.attr,
349	&dev_attr_buffer_size.attr,
350	NULL,
351};
352
353static const struct attribute_group coresight_tmc_group = {
354	.attrs = coresight_tmc_attrs,
355};
356
357static const struct attribute_group coresight_tmc_mgmt_group = {
358	.attrs = coresight_tmc_mgmt_attrs,
359	.name = "mgmt",
360};
361
362static const struct attribute_group *coresight_tmc_groups[] = {
363	&coresight_tmc_group,
364	&coresight_tmc_mgmt_group,
365	NULL,
366};
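
The attribute groups above surface trigger_cntr, buffer_size and the "mgmt" register dump under the device's sysfs directory. A hedged user-space sketch follows (the sysfs paths are assumptions; the actual device name is allocated from the devlists declared at the top of the file, e.g. "tmc_etr0"): it programs a page-aligned buffer size and a trigger count before a capture is started.

#include <stdio.h>

/* Write a value string to a sysfs attribute file; returns 0 on success. */
static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* buffer_size: accepted only for ETRs and must be page aligned */
	write_attr("/sys/bus/coresight/devices/tmc_etr0/buffer_size",
		   "0x400000");
	/* trigger_cntr: words to capture after the trigger event (parsed as hex) */
	write_attr("/sys/bus/coresight/devices/tmc_etr0/trigger_cntr",
		   "0x100");
	return 0;
}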
367
368static inline bool tmc_etr_can_use_sg(struct device *dev)
369{
370	return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
371}
372
373static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
374{
375	u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);
376
377	return (auth & TMC_AUTH_NSID_MASK) == 0x3;
378}
379
380/* Detect and initialise the capabilities of a TMC ETR */
381static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
382{
383	int rc;
384	u32 dma_mask = 0;
385	struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
386
387	if (!tmc_etr_has_non_secure_access(drvdata))
388		return -EACCES;
389
390	/* Set the unadvertised capabilities */
391	tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
392
393	if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
394		tmc_etr_set_cap(drvdata, TMC_ETR_SG);
395
396	/* Check if the AXI address width is available */
397	if (devid & TMC_DEVID_AXIAW_VALID)
398		dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
399				TMC_DEVID_AXIAW_MASK);
400
401	/*
402	 * Unless specified in the device configuration, ETR uses a 40-bit
403	 * AXI master in place of the embedded SRAM of ETB/ETF.
404	 */
405	switch (dma_mask) {
406	case 32:
407	case 40:
408	case 44:
409	case 48:
410	case 52:
411		dev_info(parent, "Detected dma mask %dbits\n", dma_mask);
412		break;
413	default:
414		dma_mask = 40;
415	}
416
417	rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
418	if (rc)
419		dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
420	return rc;
421}
422
423static u32 tmc_etr_get_default_buffer_size(struct device *dev)
424{
425	u32 size;
426
427	if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
428		size = SZ_1M;
429	return size;
430}
431
432static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
433{
434	int ret = 0;
435	u32 devid;
436	void __iomem *base;
437	struct device *dev = &adev->dev;
438	struct coresight_platform_data *pdata = NULL;
439	struct tmc_drvdata *drvdata;
440	struct resource *res = &adev->res;
441	struct coresight_desc desc = { 0 };
442	struct coresight_dev_list *dev_list = NULL;
443
444	ret = -ENOMEM;
445	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
446	if (!drvdata)
447		goto out;
448
449	dev_set_drvdata(dev, drvdata);
450
451	/* Validity for the resource is already checked by the AMBA core */
452	base = devm_ioremap_resource(dev, res);
453	if (IS_ERR(base)) {
454		ret = PTR_ERR(base);
455		goto out;
456	}
457
458	drvdata->base = base;
459
460	spin_lock_init(&drvdata->spinlock);
461
462	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
463	drvdata->config_type = BMVAL(devid, 6, 7);
464	drvdata->memwidth = tmc_get_memwidth(devid);
465	/* This device is not associated with a session */
466	drvdata->pid = -1;
467
468	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
469		drvdata->size = tmc_etr_get_default_buffer_size(dev);
470	else
471		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
472
473	desc.dev = dev;
474	desc.groups = coresight_tmc_groups;
475
476	switch (drvdata->config_type) {
477	case TMC_CONFIG_TYPE_ETB:
478		desc.type = CORESIGHT_DEV_TYPE_SINK;
479		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
480		desc.ops = &tmc_etb_cs_ops;
481		dev_list = &etb_devs;
482		break;
483	case TMC_CONFIG_TYPE_ETR:
484		desc.type = CORESIGHT_DEV_TYPE_SINK;
485		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
486		desc.ops = &tmc_etr_cs_ops;
487		ret = tmc_etr_setup_caps(dev, devid,
488					 coresight_get_uci_data(id));
489		if (ret)
490			goto out;
491		idr_init(&drvdata->idr);
492		mutex_init(&drvdata->idr_mutex);
493		dev_list = &etr_devs;
494		break;
495	case TMC_CONFIG_TYPE_ETF:
496		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
497		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
498		desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
499		desc.ops = &tmc_etf_cs_ops;
500		dev_list = &etf_devs;
501		break;
502	default:
503		pr_err("%s: Unsupported TMC config\n", desc.name);
504		ret = -EINVAL;
505		goto out;
506	}
507
508	desc.name = coresight_alloc_device_name(dev_list, dev);
509	if (!desc.name) {
510		ret = -ENOMEM;
511		goto out;
512	}
513
514	pdata = coresight_get_platform_data(dev);
515	if (IS_ERR(pdata)) {
516		ret = PTR_ERR(pdata);
517		goto out;
518	}
519	adev->dev.platform_data = pdata;
520	desc.pdata = pdata;
521
522	drvdata->csdev = coresight_register(&desc);
523	if (IS_ERR(drvdata->csdev)) {
524		ret = PTR_ERR(drvdata->csdev);
525		goto out;
526	}
527
528	drvdata->miscdev.name = desc.name;
529	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
530	drvdata->miscdev.fops = &tmc_fops;
531	ret = misc_register(&drvdata->miscdev);
532	if (ret)
533		coresight_unregister(drvdata->csdev);
534	else
535		pm_runtime_put(&adev->dev);
536out:
537	return ret;
538}
539
540static void tmc_shutdown(struct amba_device *adev)
541{
542	unsigned long flags;
543	struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
544
545	spin_lock_irqsave(&drvdata->spinlock, flags);
546
547	if (drvdata->mode == CS_MODE_DISABLED)
548		goto out;
549
550	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
551		tmc_etr_disable_hw(drvdata);
552
553	/*
554	 * We do not care about coresight unregister here unlike remove
555	 * callback which is required for making coresight modular since
556	 * the system is going down after this.
557	 */
558out:
559	spin_unlock_irqrestore(&drvdata->spinlock, flags);
560}
561
562static const struct amba_id tmc_ids[] = {
563	CS_AMBA_ID(0x000bb961),
564	/* Coresight SoC 600 TMC-ETR/ETS */
565	CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
566	/* Coresight SoC 600 TMC-ETB */
567	CS_AMBA_ID(0x000bb9e9),
568	/* Coresight SoC 600 TMC-ETF */
569	CS_AMBA_ID(0x000bb9ea),
570	{ 0, 0},
571};
572
573static struct amba_driver tmc_driver = {
574	.drv = {
575		.name   = "coresight-tmc",
576		.owner  = THIS_MODULE,
577		.suppress_bind_attrs = true,
578	},
579	.probe		= tmc_probe,
580	.shutdown	= tmc_shutdown,
581	.id_table	= tmc_ids,
582};
583builtin_amba_driver(tmc_driver);