drivers/hwtracing/coresight/coresight-tmc-core.c (Linux v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  3 *
  4 * Description: CoreSight Trace Memory Controller driver
  5 */
  6
  7#include <linux/acpi.h>
  8#include <linux/kernel.h>
  9#include <linux/init.h>
 10#include <linux/types.h>
 11#include <linux/device.h>
 12#include <linux/idr.h>
 13#include <linux/io.h>
 14#include <linux/iommu.h>
 15#include <linux/err.h>
 16#include <linux/fs.h>
 17#include <linux/miscdevice.h>
 18#include <linux/mutex.h>
 19#include <linux/property.h>
 20#include <linux/uaccess.h>
 21#include <linux/slab.h>
 22#include <linux/dma-mapping.h>
 23#include <linux/spinlock.h>
 24#include <linux/pm_runtime.h>
 25#include <linux/of.h>
 26#include <linux/coresight.h>
 27#include <linux/amba/bus.h>
 28#include <linux/platform_device.h>
 29
 30#include "coresight-priv.h"
 31#include "coresight-tmc.h"
 32
 33DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
 34DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
 35DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");
 36
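/*
 * Poll TMC_STS until the TMCReady bit is set, i.e. the formatter,
 * unformatter and hardware FIFO have drained. Returns 0 on success,
 * -EBUSY if the TMC never reports ready.
 */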
 37int tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
 38{
 39	struct coresight_device *csdev = drvdata->csdev;
 40	struct csdev_access *csa = &csdev->access;
 41
 42	/* Ensure formatter, unformatter and hardware fifo are empty */
 43	if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
 44		dev_err(&csdev->dev,
 45			"timeout while waiting for TMC to be Ready\n");
 46		return -EBUSY;
 47	}
 48	return 0;
 49}
 50
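/*
 * Set StopOnFl and request a manual flush via TMC_FFCR, then wait for
 * the flush to complete and for the TMC to report ready again.
 */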
 51void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
 52{
 53	struct coresight_device *csdev = drvdata->csdev;
 54	struct csdev_access *csa = &csdev->access;
 55	u32 ffcr;
 56
 57	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
 58	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
 59	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
 60	ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
 61	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
 62	/* Ensure flush completes */
 63	if (coresight_timeout(csa, TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
 64		dev_err(&csdev->dev,
 65		"timeout while waiting for completion of Manual Flush\n");
 66	}
 67
 68	tmc_wait_for_tmcready(drvdata);
 69}
 70
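/* Start trace capture by setting TraceCaptEn in TMC_CTL */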
 71void tmc_enable_hw(struct tmc_drvdata *drvdata)
 72{
 73	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
 74}
 75
 76void tmc_disable_hw(struct tmc_drvdata *drvdata)
 77{
 78	writel_relaxed(0x0, drvdata->base + TMC_CTL);
 79}
 80
 81u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
 82{
 83	u32 mask = 0;
 84
 85	/*
 86	 * When moving RRP or an offset address forward, the new values must
 87	 * be byte-address aligned to the width of the trace memory databus
 88	 * _and_ to a frame boundary (16 byte), whichever is the biggest. For
 89	 * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
 90	 * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
 91	 * be 0s.
 92	 */
 93	switch (drvdata->memwidth) {
 94	case TMC_MEM_INTF_WIDTH_32BITS:
 95	case TMC_MEM_INTF_WIDTH_64BITS:
 96	case TMC_MEM_INTF_WIDTH_128BITS:
 97		mask = GENMASK(31, 4);
 98		break;
 99	case TMC_MEM_INTF_WIDTH_256BITS:
100		mask = GENMASK(31, 5);
101		break;
102	}
103
104	return mask;
105}
106
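/*
 * Prepare the TMC for reading back its trace buffer, dispatching on the
 * configuration type (ETB/ETF use the internal SRAM path, ETR the system
 * memory path).
 */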
107static int tmc_read_prepare(struct tmc_drvdata *drvdata)
108{
109	int ret = 0;
110
111	switch (drvdata->config_type) {
112	case TMC_CONFIG_TYPE_ETB:
113	case TMC_CONFIG_TYPE_ETF:
114		ret = tmc_read_prepare_etb(drvdata);
115		break;
116	case TMC_CONFIG_TYPE_ETR:
117		ret = tmc_read_prepare_etr(drvdata);
118		break;
119	default:
120		ret = -EINVAL;
121	}
122
123	if (!ret)
124		dev_dbg(&drvdata->csdev->dev, "TMC read start\n");
125
126	return ret;
127}
128
129static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
130{
131	int ret = 0;
132
133	switch (drvdata->config_type) {
134	case TMC_CONFIG_TYPE_ETB:
135	case TMC_CONFIG_TYPE_ETF:
136		ret = tmc_read_unprepare_etb(drvdata);
137		break;
138	case TMC_CONFIG_TYPE_ETR:
139		ret = tmc_read_unprepare_etr(drvdata);
140		break;
141	default:
142		ret = -EINVAL;
143	}
144
145	if (!ret)
146		dev_dbg(&drvdata->csdev->dev, "TMC read end\n");
147
148	return ret;
149}
150
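/*
 * Character device interface (one /dev/tmc_* node per TMC): open prepares
 * the trace buffer for reading, read copies it to user space and release
 * undoes the read preparation.
 */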
151static int tmc_open(struct inode *inode, struct file *file)
152{
153	int ret;
154	struct tmc_drvdata *drvdata = container_of(file->private_data,
155						   struct tmc_drvdata, miscdev);
156
157	ret = tmc_read_prepare(drvdata);
158	if (ret)
159		return ret;
160
161	nonseekable_open(inode, file);
162
163	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
164	return 0;
165}
166
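/*
 * Return the number of trace bytes available at @pos and point *bufpp at
 * them, based on the configuration type.
 */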
167static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
168					  loff_t pos, size_t len, char **bufpp)
169{
170	switch (drvdata->config_type) {
171	case TMC_CONFIG_TYPE_ETB:
172	case TMC_CONFIG_TYPE_ETF:
173		return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
174	case TMC_CONFIG_TYPE_ETR:
175		return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
176	}
177
178	return -EINVAL;
179}
180
181static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
182			loff_t *ppos)
183{
184	char *bufp;
185	ssize_t actual;
186	struct tmc_drvdata *drvdata = container_of(file->private_data,
187						   struct tmc_drvdata, miscdev);
188	actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
189	if (actual <= 0)
190		return 0;
191
192	if (copy_to_user(data, bufp, actual)) {
193		dev_dbg(&drvdata->csdev->dev,
194			"%s: copy_to_user failed\n", __func__);
195		return -EFAULT;
196	}
197
198	*ppos += actual;
199	dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);
200
201	return actual;
202}
203
204static int tmc_release(struct inode *inode, struct file *file)
205{
206	int ret;
207	struct tmc_drvdata *drvdata = container_of(file->private_data,
208						   struct tmc_drvdata, miscdev);
209
210	ret = tmc_read_unprepare(drvdata);
211	if (ret)
212		return ret;
213
214	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
215	return 0;
216}
217
218static const struct file_operations tmc_fops = {
219	.owner		= THIS_MODULE,
220	.open		= tmc_open,
221	.read		= tmc_read,
222	.release	= tmc_release,
223};
224
225static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
226{
227	enum tmc_mem_intf_width memwidth;
228
229	/*
230	 * Excerpt from the TRM:
231	 *
232	 * DEVID::MEMWIDTH[10:8]
233	 * 0x2 Memory interface databus is 32 bits wide.
234	 * 0x3 Memory interface databus is 64 bits wide.
235	 * 0x4 Memory interface databus is 128 bits wide.
236	 * 0x5 Memory interface databus is 256 bits wide.
237	 */
238	switch (BMVAL(devid, 8, 10)) {
239	case 0x2:
240		memwidth = TMC_MEM_INTF_WIDTH_32BITS;
241		break;
242	case 0x3:
243		memwidth = TMC_MEM_INTF_WIDTH_64BITS;
244		break;
245	case 0x4:
246		memwidth = TMC_MEM_INTF_WIDTH_128BITS;
247		break;
248	case 0x5:
249		memwidth = TMC_MEM_INTF_WIDTH_256BITS;
250		break;
251	default:
252		memwidth = 0;
253	}
254
255	return memwidth;
256}
257
258static struct attribute *coresight_tmc_mgmt_attrs[] = {
259	coresight_simple_reg32(rsz, TMC_RSZ),
260	coresight_simple_reg32(sts, TMC_STS),
261	coresight_simple_reg64(rrp, TMC_RRP, TMC_RRPHI),
262	coresight_simple_reg64(rwp, TMC_RWP, TMC_RWPHI),
263	coresight_simple_reg32(trg, TMC_TRG),
264	coresight_simple_reg32(ctl, TMC_CTL),
265	coresight_simple_reg32(ffsr, TMC_FFSR),
266	coresight_simple_reg32(ffcr, TMC_FFCR),
267	coresight_simple_reg32(mode, TMC_MODE),
268	coresight_simple_reg32(pscr, TMC_PSCR),
269	coresight_simple_reg32(devid, CORESIGHT_DEVID),
270	coresight_simple_reg64(dba, TMC_DBALO, TMC_DBAHI),
271	coresight_simple_reg32(axictl, TMC_AXICTL),
272	coresight_simple_reg32(authstatus, TMC_AUTHSTATUS),
273	NULL,
274};
275
276static ssize_t trigger_cntr_show(struct device *dev,
277				 struct device_attribute *attr, char *buf)
278{
279	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
280	unsigned long val = drvdata->trigger_cntr;
281
282	return sprintf(buf, "%#lx\n", val);
283}
284
285static ssize_t trigger_cntr_store(struct device *dev,
286			     struct device_attribute *attr,
287			     const char *buf, size_t size)
288{
289	int ret;
290	unsigned long val;
291	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
292
293	ret = kstrtoul(buf, 16, &val);
294	if (ret)
295		return ret;
296
297	drvdata->trigger_cntr = val;
298	return size;
299}
300static DEVICE_ATTR_RW(trigger_cntr);
301
302static ssize_t buffer_size_show(struct device *dev,
303				struct device_attribute *attr, char *buf)
304{
305	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
306
307	return sprintf(buf, "%#x\n", drvdata->size);
308}
309
310static ssize_t buffer_size_store(struct device *dev,
311				 struct device_attribute *attr,
312				 const char *buf, size_t size)
313{
314	int ret;
315	unsigned long val;
316	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
317
318	/* Only permitted for TMC-ETRs */
319	if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
320		return -EPERM;
321
322	ret = kstrtoul(buf, 0, &val);
323	if (ret)
324		return ret;
325	/* The buffer size should be page aligned */
326	if (val & (PAGE_SIZE - 1))
327		return -EINVAL;
328	drvdata->size = val;
329	return size;
330}
331
332static DEVICE_ATTR_RW(buffer_size);
333
334static struct attribute *coresight_tmc_attrs[] = {
335	&dev_attr_trigger_cntr.attr,
336	&dev_attr_buffer_size.attr,
337	NULL,
338};
339
340static const struct attribute_group coresight_tmc_group = {
341	.attrs = coresight_tmc_attrs,
342};
343
344static const struct attribute_group coresight_tmc_mgmt_group = {
345	.attrs = coresight_tmc_mgmt_attrs,
346	.name = "mgmt",
347};
348
349static const struct attribute_group *coresight_etf_groups[] = {
350	&coresight_tmc_group,
351	&coresight_tmc_mgmt_group,
352	NULL,
353};
354
355static const struct attribute_group *coresight_etr_groups[] = {
356	&coresight_etr_group,
357	&coresight_tmc_group,
358	&coresight_tmc_mgmt_group,
359	NULL,
360};
361
362static inline bool tmc_etr_can_use_sg(struct device *dev)
363{
364	int ret;
365	u8 val_u8;
366
367	/*
368	 * Presence of the property 'arm,scatter-gather' is checked
369	 * on the platform for the feature support, rather than its
370	 * value.
371	 */
372	if (is_of_node(dev->fwnode)) {
373		return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
374	} else if (is_acpi_device_node(dev->fwnode)) {
375		/*
376		 * TMC_DEVID_NOSCAT test in tmc_etr_setup_caps(), has already ensured
377		 * this property is only checked for Coresight SoC 400 TMC configured
378		 * as ETR.
379		 */
380		ret = fwnode_property_read_u8(dev->fwnode, "arm-armhc97c-sg-enable", &val_u8);
381		if (!ret)
382			return !!val_u8;
383
384		if (fwnode_property_present(dev->fwnode, "arm,scatter-gather")) {
385			pr_warn_once("Deprecated ACPI property - arm,scatter-gather\n");
386			return true;
387		}
388	}
389	return false;
390}
391
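/*
 * Check that non-secure (invasive) debug is allowed: the NSID field of
 * TMC_AUTHSTATUS must read 0b11.
 */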
392static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
393{
394	u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);
395
396	return (auth & TMC_AUTH_NSID_MASK) == 0x3;
397}
398
399static const struct amba_id tmc_ids[];
400
401/* Detect and initialise the capabilities of a TMC ETR */
402static int tmc_etr_setup_caps(struct device *parent, u32 devid,
403			      struct csdev_access *access)
404{
405	int rc;
406	u32 tmc_pid, dma_mask = 0;
407	struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
408	void *dev_caps;
409
410	if (!tmc_etr_has_non_secure_access(drvdata))
411		return -EACCES;
412
413	tmc_pid = coresight_get_pid(access);
414	dev_caps = coresight_get_uci_data_from_amba(tmc_ids, tmc_pid);
415
416	/* Set the unadvertised capabilities */
417	tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
418
419	if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
420		tmc_etr_set_cap(drvdata, TMC_ETR_SG);
421
422	/* Check if the AXI address width is available */
423	if (devid & TMC_DEVID_AXIAW_VALID)
424		dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
425				TMC_DEVID_AXIAW_MASK);
426
427	/*
428	 * Unless specified in the device configuration, ETR uses a 40-bit
429	 * AXI master in place of the embedded SRAM of ETB/ETF.
430	 */
431	switch (dma_mask) {
432	case 32:
433	case 40:
434	case 44:
435	case 48:
436	case 52:
437		dev_info(parent, "Detected dma mask %dbits\n", dma_mask);
438		break;
439	default:
440		dma_mask = 40;
441	}
442
443	rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
444	if (rc)
445		dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
446	return rc;
447}
448
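/* Default ETR buffer size: the "arm,buffer-size" property, or 1MiB */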
449static u32 tmc_etr_get_default_buffer_size(struct device *dev)
450{
451	u32 size;
452
453	if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
454		size = SZ_1M;
455	return size;
456}
457
458static u32 tmc_etr_get_max_burst_size(struct device *dev)
459{
460	u32 burst_size;
461
462	if (fwnode_property_read_u32(dev->fwnode, "arm,max-burst-size",
463				     &burst_size))
464		return TMC_AXICTL_WR_BURST_16;
465
466	/* Only permissible values are 0 to 15 */
467	if (burst_size > 0xF)
468		burst_size = TMC_AXICTL_WR_BURST_16;
469
470	return burst_size;
471}
472
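/*
 * Common probe path shared by the AMBA and platform/ACPI front-ends: map
 * the registers, identify the configuration type (ETB/ETF/ETR) from DEVID
 * and register both the coresight device and the read-back misc device.
 */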
473static int __tmc_probe(struct device *dev, struct resource *res)
474{
475	int ret = 0;
476	u32 devid;
477	void __iomem *base;
478	struct coresight_platform_data *pdata = NULL;
479	struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
480	struct coresight_desc desc = { 0 };
481	struct coresight_dev_list *dev_list = NULL;
482
483	ret = -ENOMEM;
484
485	/* Validity for the resource is already checked by the AMBA core */
486	base = devm_ioremap_resource(dev, res);
487	if (IS_ERR(base)) {
488		ret = PTR_ERR(base);
489		goto out;
490	}
491
492	drvdata->base = base;
493	desc.access = CSDEV_ACCESS_IOMEM(base);
494
495	spin_lock_init(&drvdata->spinlock);
496
497	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
498	drvdata->config_type = BMVAL(devid, 6, 7);
499	drvdata->memwidth = tmc_get_memwidth(devid);
500	/* This device is not associated with a session */
501	drvdata->pid = -1;
502	drvdata->etr_mode = ETR_MODE_AUTO;
503
504	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
505		drvdata->size = tmc_etr_get_default_buffer_size(dev);
506		drvdata->max_burst_size = tmc_etr_get_max_burst_size(dev);
507	} else {
508		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
509	}
510
511	desc.dev = dev;
512
513	switch (drvdata->config_type) {
514	case TMC_CONFIG_TYPE_ETB:
515		desc.groups = coresight_etf_groups;
516		desc.type = CORESIGHT_DEV_TYPE_SINK;
517		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
518		desc.ops = &tmc_etb_cs_ops;
519		dev_list = &etb_devs;
520		break;
521	case TMC_CONFIG_TYPE_ETR:
522		desc.groups = coresight_etr_groups;
523		desc.type = CORESIGHT_DEV_TYPE_SINK;
524		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
525		desc.ops = &tmc_etr_cs_ops;
526		ret = tmc_etr_setup_caps(dev, devid, &desc.access);
527		if (ret)
528			goto out;
529		idr_init(&drvdata->idr);
530		mutex_init(&drvdata->idr_mutex);
531		dev_list = &etr_devs;
532		break;
533	case TMC_CONFIG_TYPE_ETF:
534		desc.groups = coresight_etf_groups;
535		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
536		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
537		desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
538		desc.ops = &tmc_etf_cs_ops;
539		dev_list = &etf_devs;
540		break;
541	default:
542		pr_err("%s: Unsupported TMC config\n", desc.name);
543		ret = -EINVAL;
544		goto out;
545	}
546
547	desc.name = coresight_alloc_device_name(dev_list, dev);
548	if (!desc.name) {
549		ret = -ENOMEM;
550		goto out;
551	}
552
553	pdata = coresight_get_platform_data(dev);
554	if (IS_ERR(pdata)) {
555		ret = PTR_ERR(pdata);
556		goto out;
557	}
558	dev->platform_data = pdata;
559	desc.pdata = pdata;
560
561	drvdata->csdev = coresight_register(&desc);
562	if (IS_ERR(drvdata->csdev)) {
563		ret = PTR_ERR(drvdata->csdev);
564		goto out;
565	}
566
567	drvdata->miscdev.name = desc.name;
568	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
569	drvdata->miscdev.fops = &tmc_fops;
570	ret = misc_register(&drvdata->miscdev);
571	if (ret)
572		coresight_unregister(drvdata->csdev);
573out:
574	return ret;
575}
576
577static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
578{
579	struct tmc_drvdata *drvdata;
580	int ret;
581
582	drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL);
583	if (!drvdata)
584		return -ENOMEM;
585
586	amba_set_drvdata(adev, drvdata);
587	ret = __tmc_probe(&adev->dev, &adev->res);
588	if (!ret)
589		pm_runtime_put(&adev->dev);
590
591	return ret;
592}
593
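/* Stop an active ETR at shutdown so no trace DMA is left running */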
594static void tmc_shutdown(struct amba_device *adev)
595{
596	unsigned long flags;
597	struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
598
599	spin_lock_irqsave(&drvdata->spinlock, flags);
600
601	if (coresight_get_mode(drvdata->csdev) == CS_MODE_DISABLED)
602		goto out;
603
604	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
605		tmc_etr_disable_hw(drvdata);
606
607	/*
608	 * We do not care about coresight unregister here unlike remove
609	 * callback which is required for making coresight modular since
610	 * the system is going down after this.
611	 */
612out:
613	spin_unlock_irqrestore(&drvdata->spinlock, flags);
614}
615
616static void __tmc_remove(struct device *dev)
617{
618	struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
619
620	/*
621	 * Since misc_open() holds a refcount on the f_ops, which is
622	 * etb fops in this case, device is there until last file
623	 * handler to this device is closed.
624	 */
625	misc_deregister(&drvdata->miscdev);
626	coresight_unregister(drvdata->csdev);
627}
628
629static void tmc_remove(struct amba_device *adev)
630{
631	__tmc_remove(&adev->dev);
632}
633
634static const struct amba_id tmc_ids[] = {
635	CS_AMBA_ID(0x000bb961),
636	/* Coresight SoC 600 TMC-ETR/ETS */
637	CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
638	/* Coresight SoC 600 TMC-ETB */
639	CS_AMBA_ID(0x000bb9e9),
640	/* Coresight SoC 600 TMC-ETF */
641	CS_AMBA_ID(0x000bb9ea),
642	{ 0, 0, NULL },
643};
644
645MODULE_DEVICE_TABLE(amba, tmc_ids);
646
647static struct amba_driver tmc_driver = {
648	.drv = {
649		.name   = "coresight-tmc",
650		.suppress_bind_attrs = true,
651	},
652	.probe		= tmc_probe,
653	.shutdown	= tmc_shutdown,
654	.remove		= tmc_remove,
655	.id_table	= tmc_ids,
656};
657
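/* Probe path for TMCs described as ACPI/platform devices rather than AMBA */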
658static int tmc_platform_probe(struct platform_device *pdev)
659{
660	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
661	struct tmc_drvdata *drvdata;
662	int ret = 0;
663
664	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
665	if (!drvdata)
666		return -ENOMEM;
667
668	drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
669	if (IS_ERR(drvdata->pclk))
670		return -ENODEV;
671
672	dev_set_drvdata(&pdev->dev, drvdata);
673	pm_runtime_get_noresume(&pdev->dev);
674	pm_runtime_set_active(&pdev->dev);
675	pm_runtime_enable(&pdev->dev);
676
677	ret = __tmc_probe(&pdev->dev, res);
678	pm_runtime_put(&pdev->dev);
679	if (ret)
680		pm_runtime_disable(&pdev->dev);
681
682	return ret;
683}
684
685static void tmc_platform_remove(struct platform_device *pdev)
686{
687	struct tmc_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
688
689	if (WARN_ON(!drvdata))
690		return;
691
692	__tmc_remove(&pdev->dev);
693	pm_runtime_disable(&pdev->dev);
694	if (!IS_ERR_OR_NULL(drvdata->pclk))
695		clk_put(drvdata->pclk);
696}
697
698#ifdef CONFIG_PM
699static int tmc_runtime_suspend(struct device *dev)
700{
701	struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
702
703	if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
704		clk_disable_unprepare(drvdata->pclk);
705	return 0;
706}
707
708static int tmc_runtime_resume(struct device *dev)
709{
710	struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
711
712	if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
713		clk_prepare_enable(drvdata->pclk);
714	return 0;
715}
716#endif
717
718static const struct dev_pm_ops tmc_dev_pm_ops = {
719	SET_RUNTIME_PM_OPS(tmc_runtime_suspend, tmc_runtime_resume, NULL)
720};
721
722#ifdef CONFIG_ACPI
723static const struct acpi_device_id tmc_acpi_ids[] = {
724	{"ARMHC501", 0, 0, 0}, /* ARM CoreSight ETR */
725	{"ARMHC97C", 0, 0, 0}, /* ARM CoreSight SoC-400 TMC, SoC-600 ETF/ETB */
726	{},
727};
728MODULE_DEVICE_TABLE(acpi, tmc_acpi_ids);
729#endif
730
731static struct platform_driver tmc_platform_driver = {
732	.probe	= tmc_platform_probe,
733	.remove = tmc_platform_remove,
734	.driver	= {
735		.name			= "coresight-tmc-platform",
736		.acpi_match_table	= ACPI_PTR(tmc_acpi_ids),
737		.suppress_bind_attrs	= true,
738		.pm			= &tmc_dev_pm_ops,
739	},
740};
741
742static int __init tmc_init(void)
743{
744	return coresight_init_driver("tmc", &tmc_driver, &tmc_platform_driver);
745}
746
747static void __exit tmc_exit(void)
748{
749	coresight_remove_driver(&tmc_driver, &tmc_platform_driver);
750}
751module_init(tmc_init);
752module_exit(tmc_exit);
753
754MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
755MODULE_DESCRIPTION("Arm CoreSight Trace Memory Controller driver");
756MODULE_LICENSE("GPL v2");