// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"

static int tmc_set_etf_buffer(struct coresight_device *csdev,
                              struct perf_output_handle *handle);

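/*
 * Program the TMC for ETB usage: circular-buffer mode, formatter and
 * trigger configuration, then enable the hardware. Callers must have
 * claimed the device beforehand.
 */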
static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
                       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
                       TMC_FFCR_TRIGON_TRIGIN,
                       drvdata->base + TMC_FFCR);

        writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
        int rc = coresight_claim_device(drvdata->csdev);

        if (rc)
                return rc;

        __tmc_etb_enable_hw(drvdata);
        return 0;
}

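/*
 * Drain the TMC's internal RAM into drvdata->buf by reading the RRD
 * register one 32-bit word at a time until the controller returns
 * 0xFFFFFFFF, which marks the end of valid trace data.
 */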
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
        char *bufp;
        u32 read_data, lost;

        /* Check if the buffer wrapped around. */
        lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
        bufp = drvdata->buf;
        drvdata->len = 0;
        while (1) {
                read_data = readl_relaxed(drvdata->base + TMC_RRD);
                if (read_data == 0xFFFFFFFF)
                        break;
                memcpy(bufp, &read_data, 4);
                bufp += 4;
                drvdata->len += 4;
        }

        if (lost)
                coresight_insert_barrier_packet(drvdata->buf);
        return;
}

static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        /*
         * When operating in sysFS mode the content of the buffer needs to be
         * read before the TMC is disabled.
         */
        if (drvdata->mode == CS_MODE_SYSFS)
                tmc_etb_dump_hw(drvdata);
        tmc_disable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
        __tmc_etb_disable_hw(drvdata);
        coresight_disclaim_device(drvdata->csdev);
}

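/*
 * Program the TMC for ETF usage: hardware FIFO mode with the formatter
 * enabled, so that trace flows through to the next component in the
 * path instead of being captured in the local RAM.
 */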
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
                       drvdata->base + TMC_FFCR);
        writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
        int rc = coresight_claim_device(drvdata->csdev);

        if (rc)
                return rc;

        __tmc_etf_enable_hw(drvdata);
        return 0;
}

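/*
 * Flush and stop the ETF, disable the hardware and release the claim
 * tag. The unlocked disclaim variant is used because the device
 * registers are already unlocked (CS_UNLOCK) at the call site.
 */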
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
        struct coresight_device *csdev = drvdata->csdev;

        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        tmc_disable_hw(drvdata);
        coresight_disclaim_device_unlocked(csdev);
        CS_LOCK(drvdata->base);
}

/*
 * Return the available trace data in the buffer from @pos, with
 * a maximum limit of @len, updating @bufpp to point at where the
 * data can be found.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
                                loff_t pos, size_t len, char **bufpp)
{
        ssize_t actual = len;

        /* Adjust the len to the size available at @pos */
        if (pos + actual > drvdata->len)
                actual = drvdata->len - pos;
        if (actual > 0)
                *bufpp = drvdata->buf + pos;
        return actual;
}

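/*
 * Enable the ETB/ETF as a sysFS-driven sink. The backing buffer is
 * allocated outside of the spinlock; if the sink is already enabled
 * in sysFS mode only the reference count is bumped.
 */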
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
        int ret = 0;
        bool used = false;
        char *buf = NULL;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /*
         * If we don't have a buffer, release the lock and allocate memory.
         * Otherwise keep the lock and move along.
         */
        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (!drvdata->buf) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);

                /* Allocate the memory here while outside of the spinlock */
                buf = kzalloc(drvdata->size, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;

                /* Let's try again */
                spin_lock_irqsave(&drvdata->spinlock, flags);
        }

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /*
         * In sysFS mode we can have multiple writers per sink. Since this
         * sink is already enabled no memory is needed and the HW need not be
         * touched.
         */
        if (drvdata->mode == CS_MODE_SYSFS) {
                atomic_inc(csdev->refcnt);
                goto out;
        }

        /*
         * If drvdata::buf isn't NULL, memory was allocated for a previous
         * trace run but wasn't read. If so simply zero-out the memory.
         * Otherwise use the memory allocated above.
         *
         * The memory is freed when users read the buffer using the
         * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etf() for
         * details.
         */
        if (drvdata->buf) {
                memset(drvdata->buf, 0, drvdata->size);
        } else {
                used = true;
                drvdata->buf = buf;
        }

        ret = tmc_etb_enable_hw(drvdata);
        if (!ret) {
                drvdata->mode = CS_MODE_SYSFS;
                atomic_inc(csdev->refcnt);
        } else {
                /* Free up the buffer if we failed to enable */
                used = false;
        }
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Free memory outside the spinlock if need be */
        if (!used)
                kfree(buf);

        return ret;
}

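/*
 * Enable the ETB/ETF as a perf-driven sink. The sink can only serve
 * one process at a time: refuse if it is busy in sysFS mode or already
 * associated with a different pid.
 */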
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
        int ret = 0;
        pid_t pid;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        struct perf_output_handle *handle = data;
        struct cs_buffers *buf = etm_perf_sink_config(handle);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        do {
                ret = -EINVAL;
                if (drvdata->reading)
                        break;
                /*
                 * No need to continue if the ETB/ETF is already being
                 * operated from sysFS.
                 */
                if (drvdata->mode == CS_MODE_SYSFS) {
                        ret = -EBUSY;
                        break;
                }

                /* Get a handle on the pid of the process to monitor */
                pid = buf->pid;

                if (drvdata->pid != -1 && drvdata->pid != pid) {
                        ret = -EBUSY;
                        break;
                }

                ret = tmc_set_etf_buffer(csdev, handle);
                if (ret)
                        break;

                /*
                 * No HW configuration is needed if the sink is already in
                 * use for this session.
                 */
                if (drvdata->pid == pid) {
                        atomic_inc(csdev->refcnt);
                        break;
                }

                ret = tmc_etb_enable_hw(drvdata);
                if (!ret) {
                        /* Associate with monitored process. */
                        drvdata->pid = pid;
                        drvdata->mode = CS_MODE_PERF;
                        atomic_inc(csdev->refcnt);
                }
        } while (0);
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

static int tmc_enable_etf_sink(struct coresight_device *csdev,
                               u32 mode, void *data)
{
        int ret;

        switch (mode) {
        case CS_MODE_SYSFS:
                ret = tmc_enable_etf_sink_sysfs(csdev);
                break;
        case CS_MODE_PERF:
                ret = tmc_enable_etf_sink_perf(csdev, data);
                break;
        /* We shouldn't be here */
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                return ret;

        dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
        return 0;
}

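/*
 * Disable the ETB/ETF sink. The hardware is only torn down when the
 * last user goes away, i.e. when the reference count drops to zero.
 */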
static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);

        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EBUSY;
        }

        if (atomic_dec_return(csdev->refcnt)) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EBUSY;
        }

        /* Complain if we (somehow) got out of sync */
        WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
        tmc_etb_disable_hw(drvdata);
        /* Dissociate from monitored process. */
        drvdata->pid = -1;
        drvdata->mode = CS_MODE_DISABLED;

        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
        return 0;
}

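/*
 * Enable the ETF as a link between @inport and @outport. The hardware
 * is programmed on the first enable only; subsequent calls just take
 * an extra reference.
 */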
static int tmc_enable_etf_link(struct coresight_device *csdev,
                               int inport, int outport)
{
        int ret = 0;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        bool first_enable = false;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EBUSY;
        }

        if (atomic_read(&csdev->refcnt[0]) == 0) {
                ret = tmc_etf_enable_hw(drvdata);
                if (!ret) {
                        drvdata->mode = CS_MODE_SYSFS;
                        first_enable = true;
                }
        }
        if (!ret)
                atomic_inc(&csdev->refcnt[0]);
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        if (first_enable)
                dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
        return ret;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
                                 int inport, int outport)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        bool last_disable = false;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return;
        }

        if (atomic_dec_return(&csdev->refcnt[0]) == 0) {
                tmc_etf_disable_hw(drvdata);
                drvdata->mode = CS_MODE_DISABLED;
                last_disable = true;
        }
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        if (last_disable)
                dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}

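/*
 * Allocate the cs_buffers structure used to track the perf AUX pages
 * handed to this sink. Allocation is done on the node of the CPU the
 * event runs on, presumably to keep accesses local.
 */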
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
                                  struct perf_event *event, void **pages,
                                  int nr_pages, bool overwrite)
{
        int node;
        struct cs_buffers *buf;

        node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

        /* Allocate memory structure for interaction with Perf */
        buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->pid = task_pid_nr(event->owner);
        buf->snapshot = overwrite;
        buf->nr_pages = nr_pages;
        buf->data_pages = pages;

        return buf;
}

static void tmc_free_etf_buffer(void *config)
{
        struct cs_buffers *buf = config;

        kfree(buf);
}

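/*
 * Derive the current write position (page index and offset within the
 * page) from the perf handle's head so that new trace data lands where
 * the ring buffer expects it.
 */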
static int tmc_set_etf_buffer(struct coresight_device *csdev,
                              struct perf_output_handle *handle)
{
        int ret = 0;
        unsigned long head;
        struct cs_buffers *buf = etm_perf_sink_config(handle);

        if (!buf)
                return -EINVAL;

        /* wrap head around to the amount of space we have */
        head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

        /* find the page to write to */
        buf->cur = head / PAGE_SIZE;

        /* and offset within that page */
        buf->offset = head % PAGE_SIZE;

        local_set(&buf->data_size, 0);

        return ret;
}

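/*
 * Copy whatever the ETB/ETF captured into the perf AUX pages. Returns
 * the number of bytes made available to the ring buffer, advancing the
 * RAM read pointer when more data was captured than the handle can
 * hold.
 */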
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
                                           struct perf_output_handle *handle,
                                           void *sink_config)
{
        bool lost = false;
        int i, cur;
        const u32 *barrier;
        u32 *buf_ptr;
        u64 read_ptr, write_ptr;
        u32 status;
        unsigned long offset, to_read = 0, flags;
        struct cs_buffers *buf = sink_config;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (!buf)
                return 0;

        /* This shouldn't happen */
        if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
                return 0;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* Don't do anything if another tracer is using this sink */
        if (atomic_read(csdev->refcnt) != 1)
                goto out;

        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);

        read_ptr = tmc_read_rrp(drvdata);
        write_ptr = tmc_read_rwp(drvdata);

        /*
         * Get a hold of the status register and see if a wrap around
         * has occurred. If so adjust things accordingly.
         */
        status = readl_relaxed(drvdata->base + TMC_STS);
        if (status & TMC_STS_FULL) {
                lost = true;
                to_read = drvdata->size;
        } else {
                to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
        }

        /*
         * The TMC RAM buffer may be bigger than the space available in the
         * perf ring buffer (handle->size). If so advance the RRP so that we
         * get the latest trace data. In snapshot mode none of that matters
         * since we are expected to clobber stale data in favour of the latest
         * traces.
         */
        if (!buf->snapshot && to_read > handle->size) {
                u32 mask = tmc_get_memwidth_mask(drvdata);

                /*
                 * Make sure the new size is aligned in accordance with the
                 * requirement explained in function tmc_get_memwidth_mask().
                 */
                to_read = handle->size & mask;
                /* Move the RAM read pointer up */
                read_ptr = (write_ptr + drvdata->size) - to_read;
                /* Make sure we are still within our limits */
                if (read_ptr > (drvdata->size - 1))
                        read_ptr -= drvdata->size;
                /* Tell the HW */
                tmc_write_rrp(drvdata, read_ptr);
                lost = true;
        }

        /*
         * Don't set the TRUNCATED flag in snapshot mode because 1) the
         * captured buffer is expected to be truncated and 2) a full buffer
         * prevents the event from being re-enabled by the perf core,
         * resulting in stale data being sent to user space.
         */
        if (!buf->snapshot && lost)
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

        cur = buf->cur;
        offset = buf->offset;
        barrier = coresight_barrier_pkt;

        /* Read the trace data one 32-bit word at a time */
        for (i = 0; i < to_read; i += 4) {
                buf_ptr = buf->data_pages[cur] + offset;
                *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

                if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
                        *buf_ptr = *barrier;
                        barrier++;
                }

                offset += 4;
                if (offset >= PAGE_SIZE) {
                        offset = 0;
                        cur++;
                        /* wrap around at the end of the buffer */
                        cur &= buf->nr_pages - 1;
                }
        }

        /*
         * In snapshot mode we simply increment the head by the number of
         * bytes that were written. User space will figure out how many bytes
         * to get from the AUX buffer based on the position of the head.
         */
        if (buf->snapshot)
                handle->head += to_read;

        /*
         * CS_LOCK() contains mb() so it can ensure visibility of the AUX trace
         * data before the aux_head is updated via perf_aux_output_end(), which
         * is expected by the perf ring buffer.
         */
        CS_LOCK(drvdata->base);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return to_read;
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
        .enable         = tmc_enable_etf_sink,
        .disable        = tmc_disable_etf_sink,
        .alloc_buffer   = tmc_alloc_etf_buffer,
        .free_buffer    = tmc_free_etf_buffer,
        .update_buffer  = tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
        .enable         = tmc_enable_etf_link,
        .disable        = tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
        .sink_ops       = &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
        .sink_ops       = &tmc_etf_sink_ops,
        .link_ops       = &tmc_etf_link_ops,
};

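/*
 * Prepare the ETB/ETF for reading via the character device: make sure
 * a buffer with captured data exists and stop the hardware if it is
 * currently tracing in sysFS mode.
 */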
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
        enum tmc_mode mode;
        int ret = 0;
        unsigned long flags;

        /* config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
                         drvdata->config_type != TMC_CONFIG_TYPE_ETF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /* Don't interfere if operated from Perf */
        if (drvdata->mode == CS_MODE_PERF) {
                ret = -EINVAL;
                goto out;
        }

        /* If drvdata::buf is NULL the trace data has been read already */
        if (drvdata->buf == NULL) {
                ret = -EINVAL;
                goto out;
        }

        /* Disable the TMC if need be */
        if (drvdata->mode == CS_MODE_SYSFS) {
                /* There is no point in reading a TMC in HW FIFO mode */
                mode = readl_relaxed(drvdata->base + TMC_MODE);
                if (mode != TMC_MODE_CIRCULAR_BUFFER) {
                        ret = -EINVAL;
                        goto out;
                }
                __tmc_etb_disable_hw(drvdata);
        }

        drvdata->reading = true;
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

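/*
 * Finish a read session on the ETB/ETF: restart tracing with a cleared
 * buffer if the sink is still enabled from sysFS, otherwise release
 * the trace buffer.
 */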
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
        char *buf = NULL;
        enum tmc_mode mode;
        unsigned long flags;

        /* config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
                         drvdata->config_type != TMC_CONFIG_TYPE_ETF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* Re-enable the TMC if need be */
        if (drvdata->mode == CS_MODE_SYSFS) {
                /* There is no point in reading a TMC in HW FIFO mode */
                mode = readl_relaxed(drvdata->base + TMC_MODE);
                if (mode != TMC_MODE_CIRCULAR_BUFFER) {
                        spin_unlock_irqrestore(&drvdata->spinlock, flags);
                        return -EINVAL;
                }
                /*
                 * The trace run will continue with the same allocated trace
                 * buffer. As such zero-out the buffer so that we don't end
                 * up with stale data.
                 *
                 * Since the tracer is still enabled drvdata::buf
                 * can't be NULL.
                 */
                memset(drvdata->buf, 0, drvdata->size);
                __tmc_etb_enable_hw(drvdata);
        } else {
                /*
                 * The ETB/ETF is not tracing and the buffer was just read.
                 * As such prepare to free the trace buffer.
                 */
                buf = drvdata->buf;
                drvdata->buf = NULL;
        }

        drvdata->reading = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /*
         * Free allocated memory outside of the spinlock. There is no need
         * to assert the validity of 'buf' since calling kfree(NULL) is safe.
         */
        kfree(buf);

        return 0;
}