// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/atomic.h>
#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include "coresight-catu.h"
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"

struct etr_flat_buf {
	struct device	*dev;
	dma_addr_t	daddr;
	void		*vaddr;
	size_t		size;
};

/*
 * etr_perf_buffer - Perf buffer used for ETR
 * @drvdata		- The ETR drvdata this buffer has been allocated for.
 * @etr_buf		- Actual buffer used by the ETR
 * @pid			- The PID this etr_perf_buffer belongs to.
 * @snapshot		- Perf session mode
 * @nr_pages		- Number of pages in the ring buffer.
 * @pages		- Array of Pages in the ring buffer.
 */
struct etr_perf_buffer {
	struct tmc_drvdata	*drvdata;
	struct etr_buf		*etr_buf;
	pid_t			pid;
	bool			snapshot;
	int			nr_pages;
	void			**pages;
};

/* Convert the perf index to an offset within the ETR buffer */
#define PERF_IDX2OFF(idx, buf)	((idx) % ((buf)->nr_pages << PAGE_SHIFT))
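
/*
 * Editorial example (illustrative, not part of the original source): with
 * nr_pages = 4 and a 4K PAGE_SIZE the ring buffer spans 0x4000 bytes, so
 * PERF_IDX2OFF(0x5010, buf) yields 0x1010 -- the monotonically growing
 * perf index simply wraps modulo the buffer size.
 */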

/* Lower limit for ETR hardware buffer */
#define TMC_ETR_PERF_MIN_BUF_SIZE	SZ_1M

/*
 * The TMC ETR SG has a page size of 4K. The SG table contains pointers
 * to 4KB buffers. However, the OS may use a PAGE_SIZE different from
 * 4K (i.e., 16KB or 64KB). This implies that a single OS page could
 * contain more than one SG buffer or table.
 *
 * A table entry has the following format:
 *
 * ---Bit31------------Bit4-------Bit1-----Bit0--
 * |     Address[39:12]    | SBZ |  Entry Type  |
 * ----------------------------------------------
 *
 * Address: Bits [39:12] of a physical page address. Bits [11:0] are
 *	    always zero.
 *
 * Entry type:
 *	b00 - Reserved.
 *	b01 - Last entry in the tables, points to 4K page buffer.
 *	b10 - Normal entry, points to 4K page buffer.
 *	b11 - Link. The address points to the base of next table.
 */

typedef u32 sgte_t;

#define ETR_SG_PAGE_SHIFT		12
#define ETR_SG_PAGE_SIZE		(1UL << ETR_SG_PAGE_SHIFT)
#define ETR_SG_PAGES_PER_SYSPAGE	(PAGE_SIZE / ETR_SG_PAGE_SIZE)
#define ETR_SG_PTRS_PER_PAGE		(ETR_SG_PAGE_SIZE / sizeof(sgte_t))
#define ETR_SG_PTRS_PER_SYSPAGE		(PAGE_SIZE / sizeof(sgte_t))

#define ETR_SG_ET_MASK			0x3
#define ETR_SG_ET_LAST			0x1
#define ETR_SG_ET_NORMAL		0x2
#define ETR_SG_ET_LINK			0x3

#define ETR_SG_ADDR_SHIFT		4

#define ETR_SG_ENTRY(addr, type) \
	(sgte_t)((((addr) >> ETR_SG_PAGE_SHIFT) << ETR_SG_ADDR_SHIFT) | \
		 (type & ETR_SG_ET_MASK))

#define ETR_SG_ADDR(entry) \
	(((dma_addr_t)(entry) >> ETR_SG_ADDR_SHIFT) << ETR_SG_PAGE_SHIFT)
#define ETR_SG_ET(entry)		((entry) & ETR_SG_ET_MASK)
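
/*
 * Worked example (illustrative, not part of the original source): for a
 * data page at physical address 0x80012000,
 * ETR_SG_ENTRY(0x80012000, ETR_SG_ET_NORMAL) drops the low 12 bits and
 * places bits [39:12] at bit position 4, producing the 32-bit entry
 * 0x00800122; ETR_SG_ADDR(0x00800122) reverses the shifts and recovers
 * 0x80012000.
 */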

/*
 * struct etr_sg_table : ETR SG Table
 * @sg_table:		Generic SG Table holding the data/table pages.
 * @hwaddr:		Hardware address used by the TMC, which is the base
 *			address of the table.
 */
struct etr_sg_table {
	struct tmc_sg_table	*sg_table;
	dma_addr_t		hwaddr;
};

/*
 * tmc_etr_sg_table_entries: Total number of table entries required to map
 * @nr_pages system pages.
 *
 * We need to map @nr_pages * ETR_SG_PAGES_PER_SYSPAGE data pages.
 * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
 * with the last entry pointing to another page of table entries.
 * If we spill over to a new page for mapping 1 entry, we could as
 * well replace the link entry of the previous page with the last entry.
 */
static inline unsigned long __attribute_const__
tmc_etr_sg_table_entries(int nr_pages)
{
	unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
	unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1);
	/*
	 * If we spill over to a new page for 1 entry, we could as well
	 * make it the LAST entry in the previous page, skipping the Link
	 * address.
	 */
	if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2))
		nr_sglinks--;
	return nr_sgpages + nr_sglinks;
}
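
/*
 * Numeric example (illustrative, assuming a 4K PAGE_SIZE so that
 * ETR_SG_PAGES_PER_SYSPAGE == 1 and ETR_SG_PTRS_PER_PAGE == 1024):
 * mapping nr_pages = 2048 needs 2048 data entries. Each table page holds
 * 1023 data pointers plus one link, so nr_sglinks = 2048 / 1023 = 2 and,
 * since 2048 % 1023 = 2 is not below 2, no link is dropped: 2050 entries
 * in total, spread over three table pages.
 */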

/*
 * tmc_pages_get_offset:  Go through all the pages in the tmc_pages
 * and map the device address @addr to an offset within the virtual
 * contiguous buffer.
 */
static long
tmc_pages_get_offset(struct tmc_pages *tmc_pages, dma_addr_t addr)
{
	int i;
	dma_addr_t page_start;

	for (i = 0; i < tmc_pages->nr_pages; i++) {
		page_start = tmc_pages->daddrs[i];
		if (addr >= page_start && addr < (page_start + PAGE_SIZE))
			return i * PAGE_SIZE + (addr - page_start);
	}

	return -EINVAL;
}

/*
 * tmc_pages_free : Unmap and free the pages used by tmc_pages.
 * If the pages were not allocated in tmc_pages_alloc(), we would
 * simply drop the refcount.
 */
static void tmc_pages_free(struct tmc_pages *tmc_pages,
			   struct device *dev, enum dma_data_direction dir)
{
	int i;
	struct device *real_dev = dev->parent;

	for (i = 0; i < tmc_pages->nr_pages; i++) {
		if (tmc_pages->daddrs && tmc_pages->daddrs[i])
			dma_unmap_page(real_dev, tmc_pages->daddrs[i],
					 PAGE_SIZE, dir);
		if (tmc_pages->pages && tmc_pages->pages[i])
			__free_page(tmc_pages->pages[i]);
	}

	kfree(tmc_pages->pages);
	kfree(tmc_pages->daddrs);
	tmc_pages->pages = NULL;
	tmc_pages->daddrs = NULL;
	tmc_pages->nr_pages = 0;
}

/*
 * tmc_pages_alloc : Allocate and map pages for a given @tmc_pages.
 * If @pages is not NULL, the list of page virtual addresses is
 * used as the data pages. The pages are then dma_map'ed for @dev
 * with dma_direction @dir.
 *
 * Returns 0 upon success, else the error number.
 */
static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
			   struct device *dev, int node,
			   enum dma_data_direction dir, void **pages)
{
	int i, nr_pages;
	dma_addr_t paddr;
	struct page *page;
	struct device *real_dev = dev->parent;

	nr_pages = tmc_pages->nr_pages;
	tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
					 GFP_KERNEL);
	if (!tmc_pages->daddrs)
		return -ENOMEM;
	tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),
					 GFP_KERNEL);
	if (!tmc_pages->pages) {
		kfree(tmc_pages->daddrs);
		tmc_pages->daddrs = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_pages; i++) {
		if (pages && pages[i]) {
			page = virt_to_page(pages[i]);
			/* Hold a refcount on the page */
			get_page(page);
		} else {
			page = alloc_pages_node(node,
						GFP_KERNEL | __GFP_ZERO, 0);
			if (!page)
				goto err;
		}
		paddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(real_dev, paddr))
			goto err;
		tmc_pages->daddrs[i] = paddr;
		tmc_pages->pages[i] = page;
	}
	return 0;
err:
	tmc_pages_free(tmc_pages, dev, dir);
	return -ENOMEM;
}

static inline long
tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
{
	return tmc_pages_get_offset(&sg_table->data_pages, addr);
}

static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
{
	if (sg_table->table_vaddr)
		vunmap(sg_table->table_vaddr);
	tmc_pages_free(&sg_table->table_pages, sg_table->dev, DMA_TO_DEVICE);
}

static void tmc_free_data_pages(struct tmc_sg_table *sg_table)
{
	if (sg_table->data_vaddr)
		vunmap(sg_table->data_vaddr);
	tmc_pages_free(&sg_table->data_pages, sg_table->dev, DMA_FROM_DEVICE);
}

void tmc_free_sg_table(struct tmc_sg_table *sg_table)
{
	tmc_free_table_pages(sg_table);
	tmc_free_data_pages(sg_table);
}
EXPORT_SYMBOL_GPL(tmc_free_sg_table);

/*
 * Alloc pages for the table. Since this will be used by the device,
 * allocate the pages closer to the device (i.e., dev_to_node(dev)
 * rather than the CPU node).
 */
static int tmc_alloc_table_pages(struct tmc_sg_table *sg_table)
{
	int rc;
	struct tmc_pages *table_pages = &sg_table->table_pages;

	rc = tmc_pages_alloc(table_pages, sg_table->dev,
			     dev_to_node(sg_table->dev),
			     DMA_TO_DEVICE, NULL);
	if (rc)
		return rc;
	sg_table->table_vaddr = vmap(table_pages->pages,
				     table_pages->nr_pages,
				     VM_MAP,
				     PAGE_KERNEL);
	if (!sg_table->table_vaddr)
		rc = -ENOMEM;
	else
		sg_table->table_daddr = table_pages->daddrs[0];
	return rc;
}

static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages)
{
	int rc;

	/* Allocate data pages on the node requested by the caller */
	rc = tmc_pages_alloc(&sg_table->data_pages,
			     sg_table->dev, sg_table->node,
			     DMA_FROM_DEVICE, pages);
	if (!rc) {
		sg_table->data_vaddr = vmap(sg_table->data_pages.pages,
					    sg_table->data_pages.nr_pages,
					    VM_MAP,
					    PAGE_KERNEL);
		if (!sg_table->data_vaddr)
			rc = -ENOMEM;
	}
	return rc;
}

/*
 * tmc_alloc_sg_table: Allocate and setup dma pages for the TMC SG table
 * and data buffers. TMC writes to the data buffers and reads from the SG
 * Table pages.
 *
 * @dev		- Coresight device to which the pages should be DMA mapped.
 * @node	- NUMA node for memory allocations.
 * @nr_tpages	- Number of pages for the table entries.
 * @nr_dpages	- Number of pages for the data buffer.
 * @pages	- Optional list of virtual addresses of pages.
 */
struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
					int node,
					int nr_tpages,
					int nr_dpages,
					void **pages)
{
	long rc;
	struct tmc_sg_table *sg_table;

	sg_table = kzalloc(sizeof(*sg_table), GFP_KERNEL);
	if (!sg_table)
		return ERR_PTR(-ENOMEM);
	sg_table->data_pages.nr_pages = nr_dpages;
	sg_table->table_pages.nr_pages = nr_tpages;
	sg_table->node = node;
	sg_table->dev = dev;

	rc = tmc_alloc_data_pages(sg_table, pages);
	if (!rc)
		rc = tmc_alloc_table_pages(sg_table);
	if (rc) {
		tmc_free_sg_table(sg_table);
		kfree(sg_table);
		return ERR_PTR(rc);
	}

	return sg_table;
}
EXPORT_SYMBOL_GPL(tmc_alloc_sg_table);
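
/*
 * Usage sketch (illustrative only, mirroring the error path above; the
 * page counts are hypothetical): a caller wanting a 1MB buffer on a
 * 4K-page system passes 256 data pages and a table page count derived
 * from tmc_etr_sg_table_entries():
 *
 *	sg_table = tmc_alloc_sg_table(dev, NUMA_NO_NODE, nr_tpages,
 *				      256, NULL);
 *	if (IS_ERR(sg_table))
 *		return PTR_ERR(sg_table);
 *	...
 *	tmc_free_sg_table(sg_table);
 *	kfree(sg_table);
 */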

/*
 * tmc_sg_table_sync_data_range: Sync the data buffer written
 * by the device from @offset up to @size bytes.
 */
void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
				  u64 offset, u64 size)
{
	int i, index, start;
	int npages = DIV_ROUND_UP(size, PAGE_SIZE);
	struct device *real_dev = table->dev->parent;
	struct tmc_pages *data = &table->data_pages;

	start = offset >> PAGE_SHIFT;
	for (i = start; i < (start + npages); i++) {
		index = i % data->nr_pages;
		dma_sync_single_for_cpu(real_dev, data->daddrs[index],
					PAGE_SIZE, DMA_FROM_DEVICE);
	}
}
EXPORT_SYMBOL_GPL(tmc_sg_table_sync_data_range);

/* tmc_sg_table_sync_table: Sync the page table */
void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
{
	int i;
	struct device *real_dev = sg_table->dev->parent;
	struct tmc_pages *table_pages = &sg_table->table_pages;

	for (i = 0; i < table_pages->nr_pages; i++)
		dma_sync_single_for_device(real_dev, table_pages->daddrs[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(tmc_sg_table_sync_table);

/*
 * tmc_sg_table_get_data: Get the buffer pointer for data at @offset
 * in the SG buffer. The @bufpp is updated to point to the buffer.
 * Returns :
 *	the length of linear data available at @offset.
 *	or
 *	<= 0 if no data is available.
 */
ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
			      u64 offset, size_t len, char **bufpp)
{
	size_t size;
	int pg_idx = offset >> PAGE_SHIFT;
	int pg_offset = offset & (PAGE_SIZE - 1);
	struct tmc_pages *data_pages = &sg_table->data_pages;

	size = tmc_sg_table_buf_size(sg_table);
	if (offset >= size)
		return -EINVAL;

	/* Make sure we don't go beyond the end */
	len = (len < (size - offset)) ? len : size - offset;
	/* Respect the page boundaries */
	len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset);
	if (len > 0)
		*bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
	return len;
}
EXPORT_SYMBOL_GPL(tmc_sg_table_get_data);
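
/*
 * Example (illustrative, not part of the original source): with 4K
 * pages, a request for 6K of data at offset 0x1800 is clipped to the
 * 0x800 bytes left in the containing page; the caller is expected to
 * issue a follow-up call at offset 0x2000 for the remainder.
 */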

#ifdef ETR_SG_DEBUG
/* Map a dma address to virtual address */
static unsigned long
tmc_sg_daddr_to_vaddr(struct tmc_sg_table *sg_table,
		      dma_addr_t addr, bool table)
{
	long offset;
	unsigned long base;
	struct tmc_pages *tmc_pages;

	if (table) {
		tmc_pages = &sg_table->table_pages;
		base = (unsigned long)sg_table->table_vaddr;
	} else {
		tmc_pages = &sg_table->data_pages;
		base = (unsigned long)sg_table->data_vaddr;
	}

	offset = tmc_pages_get_offset(tmc_pages, addr);
	if (offset < 0)
		return 0;
	return base + offset;
}

/* Dump the given sg_table */
static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
{
	sgte_t *ptr;
	int i = 0;
	dma_addr_t addr;
	struct tmc_sg_table *sg_table = etr_table->sg_table;

	ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
					      etr_table->hwaddr, true);
	while (ptr) {
		addr = ETR_SG_ADDR(*ptr);
		switch (ETR_SG_ET(*ptr)) {
		case ETR_SG_ET_NORMAL:
			dev_dbg(sg_table->dev,
				"%05d: %p\t:[N] 0x%llx\n", i, ptr, addr);
			ptr++;
			break;
		case ETR_SG_ET_LINK:
			dev_dbg(sg_table->dev,
				"%05d: *** %p\t:{L} 0x%llx ***\n",
				 i, ptr, addr);
			ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
							      addr, true);
			break;
		case ETR_SG_ET_LAST:
			dev_dbg(sg_table->dev,
				"%05d: ### %p\t:[L] 0x%llx ###\n",
				 i, ptr, addr);
			return;
		default:
			dev_dbg(sg_table->dev,
				"%05d: xxx %p\t:[INVALID] 0x%llx xxx\n",
				 i, ptr, addr);
			return;
		}
		i++;
	}
	dev_dbg(sg_table->dev, "******* End of Table *****\n");
}
#else
static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
#endif

/*
 * Populate the SG Table page table entries from the table/data pages
 * allocated. Each data page holds ETR_SG_PAGES_PER_SYSPAGE SG pages, and
 * so does a table page, so we keep track of the indices of the tables
 * in each system page and move the pointers accordingly.
 */
#define INC_IDX_ROUND(idx, size) ((idx) = ((idx) + 1) % (size))
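/*
 * Editorial note: INC_IDX_ROUND() evaluates to the post-increment value,
 * so !INC_IDX_ROUND(idx, size) is true exactly when @idx wraps back to
 * zero, which the loop below uses to carry into the next system page
 * index.
 */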
static void tmc_etr_sg_table_populate(struct etr_sg_table *etr_table)
{
	dma_addr_t paddr;
	int i, type, nr_entries;
	int tpidx = 0; /* index to the current system table_page */
	int sgtidx = 0;	/* index to the sg_table within the current syspage */
	int sgtentry = 0; /* the entry within the sg_table */
	int dpidx = 0; /* index to the current system data_page */
	int spidx = 0; /* index to the SG page within the current data page */
	sgte_t *ptr; /* pointer to the table entry to fill */
	struct tmc_sg_table *sg_table = etr_table->sg_table;
	dma_addr_t *table_daddrs = sg_table->table_pages.daddrs;
	dma_addr_t *data_daddrs = sg_table->data_pages.daddrs;

	nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages);
	/*
	 * Use the contiguous virtual address of the table to update entries.
	 */
	ptr = sg_table->table_vaddr;
	/*
	 * Fill all the entries, except the last entry to avoid special
	 * checks within the loop.
	 */
	for (i = 0; i < nr_entries - 1; i++) {
		if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) {
			/*
			 * Last entry in a sg_table page is a link address to
			 * the next table page. If this sg_table is the last
			 * one in the system page, it links to the first
			 * sg_table in the next system page. Otherwise, it
			 * links to the next sg_table page within the system
			 * page.
			 */
			if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) {
				paddr = table_daddrs[tpidx + 1];
			} else {
				paddr = table_daddrs[tpidx] +
					(ETR_SG_PAGE_SIZE * (sgtidx + 1));
			}
			type = ETR_SG_ET_LINK;
		} else {
			/*
			 * Update the indices to the data_pages to point to the
			 * next sg_page in the data buffer.
			 */
			type = ETR_SG_ET_NORMAL;
			paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
			if (!INC_IDX_ROUND(spidx, ETR_SG_PAGES_PER_SYSPAGE))
				dpidx++;
		}
		*ptr++ = ETR_SG_ENTRY(paddr, type);
		/*
		 * Move to the next table pointer, moving the table page index
		 * if necessary
		 */
		if (!INC_IDX_ROUND(sgtentry, ETR_SG_PTRS_PER_PAGE)) {
			if (!INC_IDX_ROUND(sgtidx, ETR_SG_PAGES_PER_SYSPAGE))
				tpidx++;
		}
	}

	/* Set up the last entry, which is always a data pointer */
	paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
	*ptr++ = ETR_SG_ENTRY(paddr, ETR_SG_ET_LAST);
}
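
/*
 * Illustrative layout (assuming a 4K PAGE_SIZE, not part of the original
 * source): with two data pages the populated table is just two entries in
 * one table page, with no link entries:
 *
 *	ptr[0] = ETR_SG_ENTRY(data_daddrs[0], ETR_SG_ET_NORMAL);
 *	ptr[1] = ETR_SG_ENTRY(data_daddrs[1], ETR_SG_ET_LAST);
 */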

/*
 * tmc_init_etr_sg_table: Allocate a TMC ETR SG table, data buffer of @size and
 * populate the table.
 *
 * @dev		- Device pointer for the TMC
 * @node	- NUMA node where the memory should be allocated
 * @size	- Total size of the data buffer
 * @pages	- Optional list of page virtual addresses
 */
static struct etr_sg_table *
tmc_init_etr_sg_table(struct device *dev, int node,
		      unsigned long size, void **pages)
{
	int nr_entries, nr_tpages;
	int nr_dpages = size >> PAGE_SHIFT;
	struct tmc_sg_table *sg_table;
	struct etr_sg_table *etr_table;

	etr_table = kzalloc(sizeof(*etr_table), GFP_KERNEL);
	if (!etr_table)
		return ERR_PTR(-ENOMEM);
	nr_entries = tmc_etr_sg_table_entries(nr_dpages);
	nr_tpages = DIV_ROUND_UP(nr_entries, ETR_SG_PTRS_PER_SYSPAGE);

	sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
	if (IS_ERR(sg_table)) {
		kfree(etr_table);
		return ERR_CAST(sg_table);
	}

	etr_table->sg_table = sg_table;
	/* TMC should use table base address for DBA */
	etr_table->hwaddr = sg_table->table_daddr;
	tmc_etr_sg_table_populate(etr_table);
	/* Sync the table pages for the HW */
	tmc_sg_table_sync_table(sg_table);
	tmc_etr_sg_table_dump(etr_table);

	return etr_table;
}

/*
 * tmc_etr_alloc_flat_buf: Allocate a contiguous DMA buffer.
 */
static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
				  struct etr_buf *etr_buf, int node,
				  void **pages)
{
	struct etr_flat_buf *flat_buf;
	struct device *real_dev = drvdata->csdev->dev.parent;

	/* We cannot reuse existing pages for flat buf */
	if (pages)
		return -EINVAL;

	flat_buf = kzalloc(sizeof(*flat_buf), GFP_KERNEL);
	if (!flat_buf)
		return -ENOMEM;

	flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size,
						&flat_buf->daddr,
						DMA_FROM_DEVICE, GFP_KERNEL);
	if (!flat_buf->vaddr) {
		kfree(flat_buf);
		return -ENOMEM;
	}

	flat_buf->size = etr_buf->size;
	flat_buf->dev = &drvdata->csdev->dev;
	etr_buf->hwaddr = flat_buf->daddr;
	etr_buf->mode = ETR_MODE_FLAT;
	etr_buf->private = flat_buf;
	return 0;
}

static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
{
	struct etr_flat_buf *flat_buf = etr_buf->private;

	if (flat_buf && flat_buf->daddr) {
		struct device *real_dev = flat_buf->dev->parent;

		dma_free_noncoherent(real_dev, etr_buf->size,
				     flat_buf->vaddr, flat_buf->daddr,
				     DMA_FROM_DEVICE);
	}
	kfree(flat_buf);
}

static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
	struct etr_flat_buf *flat_buf = etr_buf->private;
	struct device *real_dev = flat_buf->dev->parent;

	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
	 */
	etr_buf->offset = rrp - etr_buf->hwaddr;
	if (etr_buf->full)
		etr_buf->len = etr_buf->size;
	else
		etr_buf->len = rwp - rrp;

	/*
	 * The driver always starts tracing at the beginning of the buffer,
	 * so the only reason we would get a wrap around is when the buffer
	 * is full.  Sync the entire buffer in one go for this case.
	 */
	if (etr_buf->offset + etr_buf->len > etr_buf->size)
		dma_sync_single_for_cpu(real_dev, flat_buf->daddr,
					etr_buf->size, DMA_FROM_DEVICE);
	else
		dma_sync_single_for_cpu(real_dev,
					flat_buf->daddr + etr_buf->offset,
					etr_buf->len, DMA_FROM_DEVICE);
}

static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
					 u64 offset, size_t len, char **bufpp)
{
	struct etr_flat_buf *flat_buf = etr_buf->private;

	*bufpp = (char *)flat_buf->vaddr + offset;
	/*
	 * tmc_etr_buf_get_data already adjusts the length to handle
	 * the buffer wrapping around.
	 */
	return len;
}

static const struct etr_buf_operations etr_flat_buf_ops = {
	.alloc = tmc_etr_alloc_flat_buf,
	.free = tmc_etr_free_flat_buf,
	.sync = tmc_etr_sync_flat_buf,
	.get_data = tmc_etr_get_data_flat_buf,
};

/*
 * tmc_etr_alloc_sg_buf: Allocate an SG buf @etr_buf. Setup the parameters
 * appropriately.
 */
static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata,
				struct etr_buf *etr_buf, int node,
				void **pages)
{
	struct etr_sg_table *etr_table;
	struct device *dev = &drvdata->csdev->dev;

	etr_table = tmc_init_etr_sg_table(dev, node,
					  etr_buf->size, pages);
	if (IS_ERR(etr_table))
		return -ENOMEM;
	etr_buf->hwaddr = etr_table->hwaddr;
	etr_buf->mode = ETR_MODE_ETR_SG;
	etr_buf->private = etr_table;
	return 0;
}

static void tmc_etr_free_sg_buf(struct etr_buf *etr_buf)
{
	struct etr_sg_table *etr_table = etr_buf->private;

	if (etr_table) {
		tmc_free_sg_table(etr_table->sg_table);
		kfree(etr_table);
	}
}

static ssize_t tmc_etr_get_data_sg_buf(struct etr_buf *etr_buf, u64 offset,
				       size_t len, char **bufpp)
{
	struct etr_sg_table *etr_table = etr_buf->private;

	return tmc_sg_table_get_data(etr_table->sg_table, offset, len, bufpp);
}

static void tmc_etr_sync_sg_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
	long r_offset, w_offset;
	struct etr_sg_table *etr_table = etr_buf->private;
	struct tmc_sg_table *table = etr_table->sg_table;

	/* Convert hw address to offset in the buffer */
	r_offset = tmc_sg_get_data_page_offset(table, rrp);
	if (r_offset < 0) {
		dev_warn(table->dev,
			 "Unable to map RRP %llx to offset\n", rrp);
		etr_buf->len = 0;
		return;
	}

	w_offset = tmc_sg_get_data_page_offset(table, rwp);
	if (w_offset < 0) {
		dev_warn(table->dev,
			 "Unable to map RWP %llx to offset\n", rwp);
		etr_buf->len = 0;
		return;
	}

	etr_buf->offset = r_offset;
	if (etr_buf->full)
		etr_buf->len = etr_buf->size;
	else
		etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) +
				w_offset - r_offset;
	tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
}
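
/*
 * Wrap-around example (illustrative, not part of the original source):
 * with a 16K buffer, r_offset = 0x3000 and w_offset = 0x1000 give
 * len = (0x1000 < 0x3000 ? 0x4000 : 0) + 0x1000 - 0x3000 = 0x2000,
 * i.e. the 4K up to the end of the buffer plus the 4K that wrapped.
 */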

static const struct etr_buf_operations etr_sg_buf_ops = {
	.alloc = tmc_etr_alloc_sg_buf,
	.free = tmc_etr_free_sg_buf,
	.sync = tmc_etr_sync_sg_buf,
	.get_data = tmc_etr_get_data_sg_buf,
};

/*
 * TMC ETR could be connected to a CATU device, which can provide address
 * translation service. This is represented by the Output port of the TMC
 * (ETR) connected to the input port of the CATU.
 *
 * Returns	: coresight_device ptr for the CATU device if a CATU is found.
 *		: NULL otherwise.
 */
struct coresight_device *
tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
{
	int i;
	struct coresight_device *tmp, *etr = drvdata->csdev;

	if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
		return NULL;

	for (i = 0; i < etr->pdata->nr_outport; i++) {
		tmp = etr->pdata->conns[i].child_dev;
		if (tmp && coresight_is_catu_device(tmp))
			return tmp;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(tmc_etr_get_catu_device);

static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata,
				      struct etr_buf *etr_buf)
{
	struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);

	if (catu && helper_ops(catu)->enable)
		return helper_ops(catu)->enable(catu, etr_buf);
	return 0;
}

static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
{
	struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);

	if (catu && helper_ops(catu)->disable)
		helper_ops(catu)->disable(catu, drvdata->etr_buf);
}

static const struct etr_buf_operations *etr_buf_ops[] = {
	[ETR_MODE_FLAT] = &etr_flat_buf_ops,
	[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
	[ETR_MODE_CATU] = NULL,
};

void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu)
{
	etr_buf_ops[ETR_MODE_CATU] = catu;
}
EXPORT_SYMBOL_GPL(tmc_etr_set_catu_ops);

void tmc_etr_remove_catu_ops(void)
{
	etr_buf_ops[ETR_MODE_CATU] = NULL;
}
EXPORT_SYMBOL_GPL(tmc_etr_remove_catu_ops);

static inline int tmc_etr_mode_alloc_buf(int mode,
					 struct tmc_drvdata *drvdata,
					 struct etr_buf *etr_buf, int node,
					 void **pages)
{
	int rc = -EINVAL;

	switch (mode) {
	case ETR_MODE_FLAT:
	case ETR_MODE_ETR_SG:
	case ETR_MODE_CATU:
		if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
			rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
						      node, pages);
		if (!rc)
			etr_buf->ops = etr_buf_ops[mode];
		return rc;
	default:
		return -EINVAL;
	}
}

/*
 * tmc_alloc_etr_buf: Allocate a buffer used by the ETR.
 * @drvdata	: ETR device details.
 * @size	: size of the requested buffer.
 * @flags	: Required properties for the buffer.
 * @node	: Node for memory allocations.
 * @pages	: An optional list of pages.
 */
static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
					 ssize_t size, int flags,
					 int node, void **pages)
{
	int rc = -ENOMEM;
	bool has_etr_sg, has_iommu;
	bool has_sg, has_catu;
	struct etr_buf *etr_buf;
	struct device *dev = &drvdata->csdev->dev;

	has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
	has_iommu = iommu_get_domain_for_dev(dev->parent);
	has_catu = !!tmc_etr_get_catu_device(drvdata);

	has_sg = has_catu || has_etr_sg;

	etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
	if (!etr_buf)
		return ERR_PTR(-ENOMEM);

	etr_buf->size = size;

	/*
	 * If we have to use an existing list of pages, we cannot reliably
	 * use contiguous DMA memory (even if we have an IOMMU). Otherwise,
	 * we use contiguous DMA memory if at least one of the following
	 * conditions is true:
	 *  a) The ETR cannot use Scatter-Gather.
	 *  b) We have a backing IOMMU.
	 *  c) The requested memory size is smaller (< 1M).
	 *
	 * Fall back to the available mechanisms otherwise.
	 */
	if (!pages &&
	    (!has_sg || has_iommu || size < SZ_1M))
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
					    etr_buf, node, pages);
	if (rc && has_etr_sg)
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
					    etr_buf, node, pages);
	if (rc && has_catu)
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
					    etr_buf, node, pages);
	if (rc) {
		kfree(etr_buf);
		return ERR_PTR(rc);
	}

	refcount_set(&etr_buf->refcount, 1);
	dev_dbg(dev, "allocated buffer of size %ldKB in mode %d\n",
		(unsigned long)size >> 10, etr_buf->mode);
	return etr_buf;
}
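
/*
 * Example walk-through (illustrative, not part of the original source):
 * a 2MB request with no pre-allocated pages and no IOMMU skips the FLAT
 * attempt whenever the ETR has SG support or a CATU (has_sg is true and
 * size >= 1M), landing in ETR_MODE_ETR_SG or ETR_MODE_CATU respectively;
 * a sub-1M request tries ETR_MODE_FLAT first and only falls back to the
 * scatter-gather modes if the contiguous allocation fails.
 */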

static void tmc_free_etr_buf(struct etr_buf *etr_buf)
{
	WARN_ON(!etr_buf->ops || !etr_buf->ops->free);
	etr_buf->ops->free(etr_buf);
	kfree(etr_buf);
}

/*
 * tmc_etr_buf_get_data: Get the pointer to the trace data at @offset,
 * with a maximum of @len bytes.
 * Returns: The size of the linear data available at @offset, with *bufpp
 * updated to point to the buffer.
 */
static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
				    u64 offset, size_t len, char **bufpp)
{
	/* Adjust the length to limit this transaction to end of buffer */
	len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset;

	return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
}

static inline s64
tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
{
	ssize_t len;
	char *bufp;

	len = tmc_etr_buf_get_data(etr_buf, offset,
				   CORESIGHT_BARRIER_PKT_SIZE, &bufp);
	if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
		return -EINVAL;
	coresight_insert_barrier_packet(bufp);
	return offset + CORESIGHT_BARRIER_PKT_SIZE;
}

/*
 * tmc_sync_etr_buf: Sync the trace buffer availability with drvdata.
 * Makes sure the trace data is synced to the memory for consumption.
 * @etr_buf->offset will hold the offset to the beginning of the trace data
 * within the buffer, with @etr_buf->len bytes to consume.
 */
static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
{
	struct etr_buf *etr_buf = drvdata->etr_buf;
	u64 rrp, rwp;
	u32 status;

	rrp = tmc_read_rrp(drvdata);
	rwp = tmc_read_rwp(drvdata);
	status = readl_relaxed(drvdata->base + TMC_STS);

	/*
	 * If there were memory errors in the session, truncate the
	 * buffer.
	 */
	if (WARN_ON_ONCE(status & TMC_STS_MEMERR)) {
		dev_dbg(&drvdata->csdev->dev,
			"tmc memory error detected, truncating buffer\n");
		etr_buf->len = 0;
		etr_buf->full = false;
		return;
	}

	etr_buf->full = !!(status & TMC_STS_FULL);

	WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);

	etr_buf->ops->sync(etr_buf, rrp, rwp);
}

static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl, sts;
	struct etr_buf *etr_buf = drvdata->etr_buf;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_CLEAR_MASK;
	axictl |= TMC_AXICTL_PROT_CTL_B1;
	axictl |= TMC_AXICTL_WR_BURST(drvdata->max_burst_size);
	axictl |= TMC_AXICTL_AXCACHE_OS;

	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
		axictl |= TMC_AXICTL_ARCACHE_OS;
	}

	if (etr_buf->mode == ETR_MODE_ETR_SG)
		axictl |= TMC_AXICTL_SCT_GAT_MODE;

	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	tmc_write_dba(drvdata, etr_buf->hwaddr);
	/*
	 * If the TMC pointers must be programmed before the session,
	 * we have to set them properly (i.e., RRP/RWP to base address and
	 * STS to "not full").
	 */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
		tmc_write_rrp(drvdata, etr_buf->hwaddr);
		tmc_write_rwp(drvdata, etr_buf->hwaddr);
		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
		writel_relaxed(sts, drvdata->base + TMC_STS);
	}

	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
			     struct etr_buf *etr_buf)
{
	int rc;

	/* Callers should provide an appropriate buffer for use */
	if (WARN_ON(!etr_buf))
		return -EINVAL;

	if ((etr_buf->mode == ETR_MODE_ETR_SG) &&
	    WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
		return -EINVAL;

	if (WARN_ON(drvdata->etr_buf))
		return -EBUSY;

	/*
	 * If this ETR is connected to a CATU, enable it before we turn
	 * this on.
	 */
	rc = tmc_etr_enable_catu(drvdata, etr_buf);
	if (rc)
		return rc;
	rc = coresight_claim_device(drvdata->csdev);
	if (!rc) {
		drvdata->etr_buf = etr_buf;
		__tmc_etr_enable_hw(drvdata);
	}

	return rc;
}

/*
 * Return the available trace data in the buffer (starts at etr_buf->offset,
 * limited by etr_buf->len) from @pos, with a maximum limit of @len,
 * also updating the @bufpp on where to find it. Since the trace data
 * can start anywhere in the buffer, depending on the RRP, we adjust the
 * @len returned to handle buffer wrapping around.
 *
 * We are protected here by drvdata->reading != 0, which ensures the
 * sysfs_buf stays alive.
 */
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	s64 offset;
	ssize_t actual = len;
	struct etr_buf *etr_buf = drvdata->sysfs_buf;

	if (pos + actual > etr_buf->len)
		actual = etr_buf->len - pos;
	if (actual <= 0)
		return actual;

	/* Compute the offset from which we read the data */
	offset = etr_buf->offset + pos;
	if (offset >= etr_buf->size)
		offset -= etr_buf->size;
	return tmc_etr_buf_get_data(etr_buf, offset, actual, bufpp);
}

static struct etr_buf *
tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
{
	return tmc_alloc_etr_buf(drvdata, drvdata->size,
				 0, cpu_to_node(0), NULL);
}

static void
tmc_etr_free_sysfs_buf(struct etr_buf *buf)
{
	if (buf)
		tmc_free_etr_buf(buf);
}

static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
{
	struct etr_buf *etr_buf = drvdata->etr_buf;

	if (WARN_ON(drvdata->sysfs_buf != etr_buf)) {
		tmc_etr_free_sysfs_buf(drvdata->sysfs_buf);
		drvdata->sysfs_buf = NULL;
	} else {
		tmc_sync_etr_buf(drvdata);
		/*
		 * Insert barrier packets at the beginning, if there was
		 * an overflow.
		 */
		if (etr_buf->full)
			tmc_etr_buf_insert_barrier_packet(etr_buf,
							  etr_buf->offset);
	}
}

static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_sync_sysfs_buf(drvdata);

	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etr_disable_hw(drvdata);
	/* Disable CATU device if this ETR is connected to one */
	tmc_etr_disable_catu(drvdata);
	coresight_disclaim_device(drvdata->csdev);
	/* Reset the ETR buf used by hardware */
	drvdata->etr_buf = NULL;
}

static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etr_buf *sysfs_buf = NULL, *new_buf = NULL, *free_buf = NULL;

	/*
	 * If we are enabling the ETR from disabled state, we need to make
	 * sure we have a buffer with the right size. The etr_buf is not reset
	 * immediately after we stop the tracing in SYSFS mode as we wait for
	 * the user to collect the data. We may be able to reuse the existing
	 * buffer, provided the size matches. Any allocation has to be done
	 * with the lock released.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
	if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocate memory with the locks released */
		free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
		if (IS_ERR(new_buf))
			return PTR_ERR(new_buf);

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched, even if the buffer size has changed.
	 */
	if (drvdata->mode == CS_MODE_SYSFS) {
		atomic_inc(csdev->refcnt);
		goto out;
	}

	/*
	 * If we don't have a buffer or it doesn't match the requested size,
	 * use the buffer allocated above. Otherwise reuse the existing buffer.
	 */
	sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
	if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) {
		free_buf = sysfs_buf;
		drvdata->sysfs_buf = new_buf;
	}

	ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
	if (!ret) {
		drvdata->mode = CS_MODE_SYSFS;
		atomic_inc(csdev->refcnt);
	}
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (free_buf)
		tmc_etr_free_sysfs_buf(free_buf);

	if (!ret)
		dev_dbg(&csdev->dev, "TMC-ETR enabled\n");

	return ret;
}

/*
 * alloc_etr_buf: Allocate ETR buffer for use by perf.
 * The size of the hardware buffer is dependent on the size configured
 * via sysfs and the perf ring buffer size. We prefer to allocate the
 * largest possible size, scaling down the size by half until it
 * reaches a minimum limit (1M), beyond which we give up.
 */
static struct etr_buf *
alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
	      int nr_pages, void **pages, bool snapshot)
{
	int node;
	struct etr_buf *etr_buf;
	unsigned long size;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
	/*
	 * Try to match the perf ring buffer size if it is larger
	 * than the size requested via sysfs.
	 */
	if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
		etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT),
					    0, node, NULL);
		if (!IS_ERR(etr_buf))
			goto done;
	}

	/*
	 * Else switch to configured size for this ETR
	 * and scale down until we hit the minimum limit.
	 */
	size = drvdata->size;
	do {
		etr_buf = tmc_alloc_etr_buf(drvdata, size, 0, node, NULL);
		if (!IS_ERR(etr_buf))
			goto done;
		size /= 2;
	} while (size >= TMC_ETR_PERF_MIN_BUF_SIZE);

	return ERR_PTR(-ENOMEM);

done:
	return etr_buf;
}
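
/*
 * Sizing example (illustrative, not part of the original source): with
 * drvdata->size = 64MB configured via sysfs and a 128MB perf ring buffer,
 * the 128MB allocation is attempted first; if it fails, the configured
 * 64MB is tried, then 32MB, 16MB, ... down to TMC_ETR_PERF_MIN_BUF_SIZE
 * (1MB) before giving up.
 */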

static struct etr_buf *
get_perf_etr_buf_cpu_wide(struct tmc_drvdata *drvdata,
			  struct perf_event *event, int nr_pages,
			  void **pages, bool snapshot)
{
	int ret;
	pid_t pid = task_pid_nr(event->owner);
	struct etr_buf *etr_buf;

retry:
	/*
	 * An etr_perf_buffer is associated with an event and holds a reference
	 * to the AUX ring buffer that was created for that event.  In CPU-wide
	 * N:1 mode multiple events (one per CPU), each with its own AUX ring
	 * buffer, share a sink.  As such an etr_perf_buffer is created for each
	 * event but a single etr_buf associated with the ETR is shared between
	 * them.  The last event in a trace session will copy the content of the
	 * etr_buf to its AUX ring buffer.  Ring buffers associated with other
	 * events are simply not used and freed as events are destroyed.  We
	 * still need to allocate a ring buffer for each event since we don't
	 * know which event will be last.
	 */

	/*
	 * The first thing to do here is check if an etr_buf has already been
	 * allocated for this session.  If so it is shared with this event,
	 * otherwise it is created.
	 */
	mutex_lock(&drvdata->idr_mutex);
	etr_buf = idr_find(&drvdata->idr, pid);
	if (etr_buf) {
		refcount_inc(&etr_buf->refcount);
		mutex_unlock(&drvdata->idr_mutex);
		return etr_buf;
	}

	/* If we made it here no buffer has been allocated, do so now. */
	mutex_unlock(&drvdata->idr_mutex);

	etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
	if (IS_ERR(etr_buf))
		return etr_buf;

	/* Now that we have a buffer, add it to the IDR. */
	mutex_lock(&drvdata->idr_mutex);
	ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL);
	mutex_unlock(&drvdata->idr_mutex);

	/* Another event with this session ID has allocated this buffer. */
	if (ret == -ENOSPC) {
		tmc_free_etr_buf(etr_buf);
		goto retry;
	}

	/* The IDR can't allocate room for a new session, abandon ship. */
	if (ret == -ENOMEM) {
		tmc_free_etr_buf(etr_buf);
		return ERR_PTR(ret);
	}

	return etr_buf;
}

static struct etr_buf *
get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata,
			    struct perf_event *event, int nr_pages,
			    void **pages, bool snapshot)
{
	/*
	 * In per-thread mode the etr_buf isn't shared, so just go ahead
	 * with memory allocation.
	 */
	return alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
}

static struct etr_buf *
get_perf_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
		 int nr_pages, void **pages, bool snapshot)
{
	if (event->cpu == -1)
		return get_perf_etr_buf_per_thread(drvdata, event, nr_pages,
						   pages, snapshot);

	return get_perf_etr_buf_cpu_wide(drvdata, event, nr_pages,
					 pages, snapshot);
}

static struct etr_perf_buffer *
tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
		       int nr_pages, void **pages, bool snapshot)
{
	int node;
	struct etr_buf *etr_buf;
	struct etr_perf_buffer *etr_perf;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
	if (!etr_perf)
		return ERR_PTR(-ENOMEM);

	etr_buf = get_perf_etr_buf(drvdata, event, nr_pages, pages, snapshot);
	if (!IS_ERR(etr_buf))
		goto done;

	kfree(etr_perf);
	return ERR_PTR(-ENOMEM);

done:
	/*
	 * Keep a reference to the ETR this buffer has been allocated for
	 * in order to have access to the IDR in tmc_free_etr_buffer().
	 */
	etr_perf->drvdata = drvdata;
	etr_perf->etr_buf = etr_buf;

	return etr_perf;
}

static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
				  struct perf_event *event, void **pages,
				  int nr_pages, bool snapshot)
{
	struct etr_perf_buffer *etr_perf;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	etr_perf = tmc_etr_setup_perf_buf(drvdata, event,
					  nr_pages, pages, snapshot);
	if (IS_ERR(etr_perf)) {
		dev_dbg(&csdev->dev, "Unable to allocate ETR buffer\n");
		return NULL;
	}

	etr_perf->pid = task_pid_nr(event->owner);
	etr_perf->snapshot = snapshot;
	etr_perf->nr_pages = nr_pages;
	etr_perf->pages = pages;

	return etr_perf;
}

static void tmc_free_etr_buffer(void *config)
{
	struct etr_perf_buffer *etr_perf = config;
	struct tmc_drvdata *drvdata = etr_perf->drvdata;
	struct etr_buf *buf, *etr_buf = etr_perf->etr_buf;

	if (!etr_buf)
		goto free_etr_perf_buffer;

	mutex_lock(&drvdata->idr_mutex);
	/* If we are not the last one to use the buffer, don't touch it. */
	if (!refcount_dec_and_test(&etr_buf->refcount)) {
		mutex_unlock(&drvdata->idr_mutex);
		goto free_etr_perf_buffer;
	}

	/* We are the last one, remove from the IDR and free the buffer. */
	buf = idr_remove(&drvdata->idr, etr_perf->pid);
	mutex_unlock(&drvdata->idr_mutex);

	/*
	 * Something went very wrong if the buffer associated with this ID
	 * is not the same in the IDR.  Leak to avoid use after free.
	 */
	if (buf && WARN_ON(buf != etr_buf))
		goto free_etr_perf_buffer;

	tmc_free_etr_buf(etr_perf->etr_buf);

free_etr_perf_buffer:
	kfree(etr_perf);
}

/*
 * tmc_etr_sync_perf_buffer: Copy the actual trace data from the hardware
 * buffer to the perf ring buffer.
 */
static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf,
				     unsigned long head,
				     unsigned long src_offset,
				     unsigned long to_copy)
{
	long bytes;
	long pg_idx, pg_offset;
	char **dst_pages, *src_buf;
	struct etr_buf *etr_buf = etr_perf->etr_buf;

	head = PERF_IDX2OFF(head, etr_perf);
	pg_idx = head >> PAGE_SHIFT;
	pg_offset = head & (PAGE_SIZE - 1);
	dst_pages = (char **)etr_perf->pages;

	while (to_copy > 0) {
		/*
		 * In one iteration we can copy the minimum of:
		 *  1) what is available in the source buffer,
		 *  2) what is available in the source buffer before it
		 *     wraps around, and
		 *  3) what is available in the destination page.
		 */
		if (src_offset >= etr_buf->size)
			src_offset -= etr_buf->size;
		bytes = tmc_etr_buf_get_data(etr_buf, src_offset, to_copy,
					     &src_buf);
		if (WARN_ON_ONCE(bytes <= 0))
			break;
		bytes = min(bytes, (long)(PAGE_SIZE - pg_offset));

		memcpy(dst_pages[pg_idx] + pg_offset, src_buf, bytes);

		to_copy -= bytes;

		/* Move destination pointers */
		pg_offset += bytes;
		if (pg_offset == PAGE_SIZE) {
			pg_offset = 0;
			if (++pg_idx == etr_perf->nr_pages)
				pg_idx = 0;
		}

		/* Move source pointers */
		src_offset += bytes;
	}
}

/*
 * tmc_update_etr_buffer : Update the perf ring buffer with the
 * available trace data. We use software double buffering at the moment.
 *
 * TODO: Add support for reusing the perf ring buffer.
 */
static unsigned long
tmc_update_etr_buffer(struct coresight_device *csdev,
		      struct perf_output_handle *handle,
		      void *config)
{
	bool lost = false;
	unsigned long flags, offset, size = 0;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etr_perf_buffer *etr_perf = config;
	struct etr_buf *etr_buf = etr_perf->etr_buf;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (atomic_read(csdev->refcnt) != 1) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		goto out;
	}

	if (WARN_ON(drvdata->perf_buf != etr_buf)) {
		lost = true;
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		goto out;
	}

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_sync_etr_buf(drvdata);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	lost = etr_buf->full;
	offset = etr_buf->offset;
	size = etr_buf->len;

	/*
	 * The ETR buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the offset so that we
	 * get the latest trace data.  In snapshot mode none of that matters
	 * since we are expected to clobber stale data in favour of the latest
	 * traces.
	 */
	if (!etr_perf->snapshot && size > handle->size) {
		u32 mask = tmc_get_memwidth_mask(drvdata);

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained in function tmc_get_memwidth_mask().
		 */
		size = handle->size & mask;
		offset = etr_buf->offset + etr_buf->len - size;

		if (offset >= etr_buf->size)
			offset -= etr_buf->size;
		lost = true;
	}

	/* Insert barrier packets at the beginning, if there was an overflow */
	if (lost)
		tmc_etr_buf_insert_barrier_packet(etr_buf, offset);
	tmc_etr_sync_perf_buffer(etr_perf, handle->head, offset, size);

	/*
	 * In snapshot mode we simply increment the head by the number of bytes
	 * that were written.  User space will figure out how many bytes to get
	 * from the AUX buffer based on the position of the head.
	 */
	if (etr_perf->snapshot)
		handle->head += size;

	/*
	 * Ensure that the AUX trace data is visible before the aux_head
	 * is updated via perf_aux_output_end(), as expected by the
	 * perf ring buffer.
	 */
	smp_wmb();

out:
	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!etr_perf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	return size;
}

static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
{
	int rc = 0;
	pid_t pid;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	/* Don't use this sink if it is already claimed by sysFS */
	if (drvdata->mode == CS_MODE_SYSFS) {
		rc = -EBUSY;
		goto unlock_out;
	}

	if (WARN_ON(!etr_perf || !etr_perf->etr_buf)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	/* Get a handle on the pid of the process to monitor */
	pid = etr_perf->pid;

	/* Do not proceed if this device is associated with another session */
	if (drvdata->pid != -1 && drvdata->pid != pid) {
		rc = -EBUSY;
		goto unlock_out;
	}

	/*
	 * No HW configuration is needed if the sink is already in
	 * use for this session.
	 */
	if (drvdata->pid == pid) {
		atomic_inc(csdev->refcnt);
		goto unlock_out;
	}

	rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf);
	if (!rc) {
		/* Associate with monitored process. */
		drvdata->pid = pid;
		drvdata->mode = CS_MODE_PERF;
		drvdata->perf_buf = etr_perf->etr_buf;
		atomic_inc(csdev->refcnt);
	}

unlock_out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return rc;
}

static int tmc_enable_etr_sink(struct coresight_device *csdev,
			       u32 mode, void *data)
{
	switch (mode) {
	case CS_MODE_SYSFS:
		return tmc_enable_etr_sink_sysfs(csdev);
	case CS_MODE_PERF:
		return tmc_enable_etr_sink_perf(csdev, data);
	}

	/* We shouldn't be here */
	return -EINVAL;
}

static int tmc_disable_etr_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (atomic_dec_return(csdev->refcnt)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
	tmc_etr_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;
	/* Reset perf specific data */
	drvdata->perf_buf = NULL;

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETR disabled\n");
	return 0;
}

static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
	.alloc_buffer	= tmc_alloc_etr_buffer,
	.update_buffer	= tmc_update_etr_buffer,
	.free_buffer	= tmc_free_etr_buffer,
};

const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};

int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
	int ret = 0;
	unsigned long flags;

	/* Config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * We can safely allow reads even if the ETR is operating in PERF mode,
	 * since the sysfs session is captured in mode specific data.
	 * If drvdata::sysfs_buf is NULL the trace data has been read already.
	 */
	if (!drvdata->sysfs_buf) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if we are trying to read from a running session. */
	if (drvdata->mode == CS_MODE_SYSFS)
		__tmc_etr_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	struct etr_buf *sysfs_buf = NULL;

	/* Config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. Since the tracer is still enabled drvdata::sysfs_buf
		 * can't be NULL.
		 */
		__tmc_etr_enable_hw(drvdata);
	} else {
		/*
		 * The ETR is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		sysfs_buf = drvdata->sysfs_buf;
		drvdata->sysfs_buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free the allocated memory outside of the spinlock */
	if (sysfs_buf)
		tmc_etr_free_sysfs_buf(sysfs_buf);

	return 0;
}
v4.17
 
  1/*
  2 * Copyright(C) 2016 Linaro Limited. All rights reserved.
  3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
  4 *
  5 * This program is free software; you can redistribute it and/or modify it
  6 * under the terms of the GNU General Public License version 2 as published by
  7 * the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it will be useful, but WITHOUT
 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 12 * more details.
 13 *
 14 * You should have received a copy of the GNU General Public License along with
 15 * this program.  If not, see <http://www.gnu.org/licenses/>.
 16 */
 17
 18#include <linux/coresight.h>
 19#include <linux/dma-mapping.h>
 20#include "coresight-priv.h"
 21#include "coresight-tmc.h"
 22
 23static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
 24{
 25	u32 axictl, sts;
 26
 27	/* Zero out the memory to help with debug */
 28	memset(drvdata->vaddr, 0, drvdata->size);
 29
 30	CS_UNLOCK(drvdata->base);
 31
 32	/* Wait for TMCSReady bit to be set */
 33	tmc_wait_for_tmcready(drvdata);
 34
 35	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
 36	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
 37
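	/*
	 * Set up the AXI transactions used for trace writes: clear the
	 * stale attributes, then request non-secure accesses, 16-beat
	 * write bursts and cacheable mappings (as the bit names suggest).
	 */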
 38	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
 39	axictl &= ~TMC_AXICTL_CLEAR_MASK;
 40	axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
 41	axictl |= TMC_AXICTL_AXCACHE_OS;
 42
 43	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
 44		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
 45		axictl |= TMC_AXICTL_ARCACHE_OS;
 46	}
 47
 48	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
 49	tmc_write_dba(drvdata, drvdata->paddr);
 50	/*
 51	 * If the TMC pointers must be programmed before the session,
 52	 * we have to set them properly (i.e., RRP/RWP to the base address and
 53	 * STS to "not full").
 54	 */
 55	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
 56		tmc_write_rrp(drvdata, drvdata->paddr);
 57		tmc_write_rwp(drvdata, drvdata->paddr);
 58		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
 59		writel_relaxed(sts, drvdata->base + TMC_STS);
 60	}
 61
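	/*
	 * Enable the formatter and the trigger/flush events so a session
	 * can be drained and stopped cleanly (per the FFCR flag names).
	 */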
 62	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
 63		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
 64		       TMC_FFCR_TRIGON_TRIGIN,
 65		       drvdata->base + TMC_FFCR);
 66	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
 67	tmc_enable_hw(drvdata);
 68
 69	CS_LOCK(drvdata->base);
 70}
 71
 72static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
 73{
 74	const u32 *barrier;
 75	u32 val;
 76	u32 *temp;
 77	u64 rwp;
 78
 79	rwp = tmc_read_rwp(drvdata);
 80	val = readl_relaxed(drvdata->base + TMC_STS);
 81
 82	/*
 83	 * Adjust the buffer to point to the beginning of the trace data
 84	 * and update the available trace data.
 85	 */
 86	if (val & TMC_STS_FULL) {
 87		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
 88		drvdata->len = drvdata->size;
 89
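		/*
		 * The buffer wrapped, so the oldest data sits at the write
		 * pointer; stamp barrier packets there so a decoder can
		 * resynchronise across the discontinuity.
		 */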
 90		barrier = barrier_pkt;
 91		temp = (u32 *)drvdata->buf;
 92
 93		while (*barrier) {
 94			*temp = *barrier;
 95			temp++;
 96			barrier++;
 97		}
 98
 99	} else {
100		drvdata->buf = drvdata->vaddr;
101		drvdata->len = rwp - drvdata->paddr;
102	}
103}
104
105static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
106{
107	CS_UNLOCK(drvdata->base);
108
109	tmc_flush_and_stop(drvdata);
110	/*
111	 * When operating in sysFS mode the content of the buffer needs to be
112	 * read before the TMC is disabled.
113	 */
114	if (drvdata->mode == CS_MODE_SYSFS)
115		tmc_etr_dump_hw(drvdata);
116	tmc_disable_hw(drvdata);
117
118	CS_LOCK(drvdata->base);
119}
120
121static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
122{
123	int ret = 0;
124	bool used = false;
125	unsigned long flags;
126	void __iomem *vaddr = NULL;
127	dma_addr_t paddr;
128	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
129
130
131	/*
132	 * If we don't have a buffer, release the lock and allocate memory.
133	 * Otherwise keep the lock and move along.
134	 */
135	spin_lock_irqsave(&drvdata->spinlock, flags);
136	if (!drvdata->vaddr) {
137		spin_unlock_irqrestore(&drvdata->spinlock, flags);
138
139		/*
140	 * Contiguous memory can't be allocated while a spinlock is
141		 * held.  As such allocate memory here and free it if a buffer
142		 * has already been allocated (from a previous session).
143		 */
144		vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
145					   &paddr, GFP_KERNEL);
146		if (!vaddr)
147			return -ENOMEM;
148
149		/* Let's try again */
150		spin_lock_irqsave(&drvdata->spinlock, flags);
151	}
152
153	if (drvdata->reading) {
154		ret = -EBUSY;
155		goto out;
156	}
157
158	/*
159	 * In sysFS mode we can have multiple writers per sink.  Since this
160	 * sink is already enabled no memory is needed and the HW need not be
161	 * touched.
162	 */
163	if (drvdata->mode == CS_MODE_SYSFS)
164		goto out;
165
166	/*
167	 * If drvdata::buf == NULL, use the memory allocated above.
168	 * Otherwise a buffer still exists from a previous session, so
169	 * simply use that.
170	 */
171	if (drvdata->buf == NULL) {
172		used = true;
173		drvdata->vaddr = vaddr;
174		drvdata->paddr = paddr;
175		drvdata->buf = drvdata->vaddr;
176	}
177
178	drvdata->mode = CS_MODE_SYSFS;
179	tmc_etr_enable_hw(drvdata);
180out:
181	spin_unlock_irqrestore(&drvdata->spinlock, flags);
182
183	/* Free memory outside the spinlock if need be */
184	if (!used && vaddr)
185		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
186
187	if (!ret)
188		dev_info(drvdata->dev, "TMC-ETR enabled\n");
189
190	return ret;
191}
192
193static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
194{
195	int ret = 0;
196	unsigned long flags;
197	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
198
199	spin_lock_irqsave(&drvdata->spinlock, flags);
200	if (drvdata->reading) {
201		ret = -EINVAL;
202		goto out;
203	}
204
205	/*
206	 * In Perf mode there can be only one writer per sink.  There
207	 * is also no need to continue if the ETR is already operated
208	 * from sysFS.
209	 */
210	if (drvdata->mode != CS_MODE_DISABLED) {
211		ret = -EINVAL;
212		goto out;
213	}
214
215	drvdata->mode = CS_MODE_PERF;
216	tmc_etr_enable_hw(drvdata);
217out:
218	spin_unlock_irqrestore(&drvdata->spinlock, flags);
219
220	return ret;
221}
222
223static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
224{
225	switch (mode) {
226	case CS_MODE_SYSFS:
227		return tmc_enable_etr_sink_sysfs(csdev);
228	case CS_MODE_PERF:
229		return tmc_enable_etr_sink_perf(csdev);
230	}
231
232	/* We shouldn't be here */
233	return -EINVAL;
234}
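/*
 * A minimal sketch of how each mode is reached from user space (the
 * sysfs path and perf event syntax follow the usual coresight
 * conventions; the device name is platform specific):
 *
 *	# sysFS mode:
 *	echo 1 > /sys/bus/coresight/devices/<tmc-etr>/enable_sink
 *
 *	# perf mode:
 *	perf record -e cs_etm/@<tmc-etr>/ -- <cmd>
 */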
235
236static void tmc_disable_etr_sink(struct coresight_device *csdev)
237{
238	unsigned long flags;
239	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
240
241	spin_lock_irqsave(&drvdata->spinlock, flags);
242	if (drvdata->reading) {
243		spin_unlock_irqrestore(&drvdata->spinlock, flags);
244		return;
245	}
246
247	/* Disable the TMC only if need be */
248	if (drvdata->mode != CS_MODE_DISABLED) {
249		tmc_etr_disable_hw(drvdata);
250		drvdata->mode = CS_MODE_DISABLED;
251	}
252
253	spin_unlock_irqrestore(&drvdata->spinlock, flags);
254
255	dev_info(drvdata->dev, "TMC-ETR disabled\n");
256}
257
258static const struct coresight_ops_sink tmc_etr_sink_ops = {
259	.enable		= tmc_enable_etr_sink,
260	.disable	= tmc_disable_etr_sink,
261};
262
263const struct coresight_ops tmc_etr_cs_ops = {
264	.sink_ops	= &tmc_etr_sink_ops,
265};
266
267int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
268{
269	int ret = 0;
270	unsigned long flags;
271
272	/* config types are set at boot time and never change */
273	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
274		return -EINVAL;
275
276	spin_lock_irqsave(&drvdata->spinlock, flags);
277	if (drvdata->reading) {
278		ret = -EBUSY;
279		goto out;
280	}
281
282	/* Don't interfere if operated from Perf */
283	if (drvdata->mode == CS_MODE_PERF) {
284		ret = -EINVAL;
285		goto out;
286	}
287
288	/* If drvdata::buf is NULL the trace data has been read already */
289	if (drvdata->buf == NULL) {
290		ret = -EINVAL;
291		goto out;
292	}
293
294	/* Disable the TMC if need be */
295	if (drvdata->mode == CS_MODE_SYSFS)
296		tmc_etr_disable_hw(drvdata);
297
298	drvdata->reading = true;
299out:
300	spin_unlock_irqrestore(&drvdata->spinlock, flags);
301
302	return ret;
303}
304
305int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
306{
307	unsigned long flags;
308	dma_addr_t paddr;
309	void __iomem *vaddr = NULL;
310
311	/* config types are set at boot time and never change */
312	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
313		return -EINVAL;
314
315	spin_lock_irqsave(&drvdata->spinlock, flags);
316
317	/* Re-enable the TMC if need be */
318	if (drvdata->mode == CS_MODE_SYSFS) {
319		/*
320		 * The trace run will continue with the same allocated trace
321		 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
322		 * so we don't have to explicitly clear it. Also, since the
323		 * tracer is still enabled drvdata::buf can't be NULL.
324		 */
325		tmc_etr_enable_hw(drvdata);
326	} else {
327		/*
328		 * The ETR is not tracing and the buffer was just read.
329		 * As such prepare to free the trace buffer.
330		 */
331		vaddr = drvdata->vaddr;
332		paddr = drvdata->paddr;
333		drvdata->buf = drvdata->vaddr = NULL;
334	}
335
336	drvdata->reading = false;
337	spin_unlock_irqrestore(&drvdata->spinlock, flags);
338
339	/* Free the allocated memory outside of the spinlock */
340	if (vaddr)
341		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
342
343	return 0;
344}