// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"

static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle);

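/*
 * Program the TMC for use as a circular-buffer sink (ETB mode): select
 * circular-buffer mode, enable formatting and trigger handling in the
 * FFCR, set the trigger counter and start capture. Callers hold
 * drvdata->spinlock.
 */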
static int __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = 0;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCReady bit to be set */
	rc = tmc_wait_for_tmcready(drvdata);
	if (rc) {
		dev_err(&drvdata->csdev->dev,
			"Failed to enable: TMC not ready\n");
		CS_LOCK(drvdata->base);
		return rc;
	}

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
	return rc;
}

static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	rc = __tmc_etb_enable_hw(drvdata);
	if (rc)
		coresight_disclaim_device(drvdata->csdev);
	return rc;
}

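/*
 * Drain the TMC RAM into drvdata->buf by reading the RAM Read Data
 * register until it returns 0xFFFFFFFF, i.e. until the FIFO is empty.
 * If the buffer wrapped around (STS full bit set), insert a barrier
 * packet at the start of the buffer so decoders know the trace data is
 * discontinuous.
 */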
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data, lost;

	/* Check if the buffer wrapped around. */
	lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
	bufp = drvdata->buf;
	drvdata->len = 0;
	while (1) {
		read_data = readl_relaxed(drvdata->base + TMC_RRD);
		if (read_data == 0xFFFFFFFF)
			break;
		memcpy(bufp, &read_data, 4);
		bufp += 4;
		drvdata->len += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);
}

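/*
 * Stop capture: flush and stop the formatter and, when operated from
 * sysFS, dump the RAM contents to drvdata->buf before the TMC is
 * disabled. In perf mode the RAM is drained by tmc_update_etf_buffer()
 * instead.
 */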
static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_disable_hw(drvdata);
	coresight_disclaim_device(drvdata->csdev);
}

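/*
 * Program the TMC for use as a link (ETF mode): hardware FIFO mode with
 * formatting enabled and the buffer-level watermark cleared, so trace
 * flows through to the next device in the path.
 */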
static int __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = 0;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCReady bit to be set */
	rc = tmc_wait_for_tmcready(drvdata);
	if (rc) {
		dev_err(&drvdata->csdev->dev,
			"Failed to enable: TMC is not ready\n");
		CS_LOCK(drvdata->base);
		return rc;
	}

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
	return rc;
}

static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	rc = __tmc_etf_enable_hw(drvdata);
	if (rc)
		coresight_disclaim_device(drvdata->csdev);
	return rc;
}

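/*
 * Flush and stop the FIFO, then disable the TMC and release the claim
 * tag. The registers are already CS_UNLOCKed at the point of the
 * disclaim, hence the _unlocked variant of coresight_disclaim_device().
 */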
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(csdev);
	CS_LOCK(drvdata->base);
}

/*
 * Return the available trace data in the buffer from @pos, with a
 * maximum limit of @len, and update @bufpp to point at it.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	ssize_t actual = len;

	/* Adjust the length to the data available from @pos */
	if (pos + actual > drvdata->len)
		actual = drvdata->len - pos;
	if (actual > 0)
		*bufpp = drvdata->buf + pos;
	return actual;
}

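/*
 * Enable the ETB/ETF as a sysFS-controlled sink. The trace buffer is
 * allocated outside of the spinlock; if the sink is already enabled the
 * reference count is simply bumped and the hardware is left alone.
 */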
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocate the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink. Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
		csdev->refcnt++;
		goto out;
	}

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read. If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etb() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	ret = tmc_etb_enable_hw(drvdata);
	if (!ret) {
		coresight_set_mode(csdev, CS_MODE_SYSFS);
		csdev->refcnt++;
	} else {
		/* Free up the buffer if we failed to enable */
		used = false;
	}
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}

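/*
 * Enable the ETB/ETF as a perf-controlled sink. The sink can only be
 * shared by events belonging to the same process (tracked via
 * drvdata->pid) and cannot be grabbed while it is being read or
 * operated from sysFS.
 */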
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	do {
		ret = -EINVAL;
		if (drvdata->reading)
			break;
		/*
		 * No need to continue if the ETB/ETF is already operated
		 * from sysFS.
		 */
		if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
			ret = -EBUSY;
			break;
		}

		/* Get a handle on the pid of the process to monitor */
		pid = buf->pid;

		if (drvdata->pid != -1 && drvdata->pid != pid) {
			ret = -EBUSY;
			break;
		}

		ret = tmc_set_etf_buffer(csdev, handle);
		if (ret)
			break;

		/*
		 * No HW configuration is needed if the sink is already in
		 * use for this session.
		 */
		if (drvdata->pid == pid) {
			csdev->refcnt++;
			break;
		}

		ret = tmc_etb_enable_hw(drvdata);
		if (!ret) {
			/* Associate with monitored process. */
			drvdata->pid = pid;
			coresight_set_mode(csdev, CS_MODE_PERF);
			csdev->refcnt++;
		}
	} while (0);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

static int tmc_enable_etf_sink(struct coresight_device *csdev,
			       enum cs_mode mode, void *data)
{
	int ret;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = tmc_enable_etf_sink_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = tmc_enable_etf_sink_perf(csdev, data);
		break;
	/* We shouldn't be here */
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
	return 0;
}

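/*
 * Drop one reference to the sink. The hardware is only disabled, and
 * the monitored process dissociated, when the last user goes away.
 */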
static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	csdev->refcnt--;
	if (csdev->refcnt) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
	tmc_etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	coresight_set_mode(csdev, CS_MODE_DISABLED);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
	return 0;
}

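/*
 * Enable the device as a link between connections @in and @out. The
 * FIFO is only programmed on the first enable; subsequent callers just
 * take a reference.
 */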
static int tmc_enable_etf_link(struct coresight_device *csdev,
			       struct coresight_connection *in,
			       struct coresight_connection *out)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool first_enable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (csdev->refcnt == 0) {
		ret = tmc_etf_enable_hw(drvdata);
		if (!ret) {
			coresight_set_mode(csdev, CS_MODE_SYSFS);
			first_enable = true;
		}
	}
	if (!ret)
		csdev->refcnt++;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (first_enable)
		dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
	return ret;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
				 struct coresight_connection *in,
				 struct coresight_connection *out)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool last_disable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	csdev->refcnt--;
	if (csdev->refcnt == 0) {
		tmc_etf_disable_hw(drvdata);
		coresight_set_mode(csdev, CS_MODE_DISABLED);
		last_disable = true;
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (last_disable)
		dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}

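/*
 * Allocate the cs_buffers structure that tracks the perf AUX pages for
 * this event, on the NUMA node of the event's CPU when it has one, or
 * on any node for per-task (cpu == -1) events.
 */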
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
				  struct perf_event *event, void **pages,
				  int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->pid = task_pid_nr(event->owner);
	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void tmc_free_etf_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

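/*
 * Convert the perf handle's head into a page index and an offset within
 * that page, so tmc_update_etf_buffer() knows where to start copying.
 * The masking below relies on nr_pages being a power of two.
 */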
static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	if (!buf)
		return -EINVAL;

	/* wrap head around to the amount of space we have */
	head = handle->head & (((unsigned long)buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

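/*
 * Drain the TMC RAM into the perf AUX pages and return the number of
 * bytes copied. Handles buffer wrap-around, clips the transfer to the
 * space available in the perf ring buffer (except in snapshot mode) and
 * flags truncation to user space when trace data had to be dropped.
 */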
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
					   struct perf_output_handle *handle,
					   void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(coresight_get_mode(csdev) != CS_MODE_PERF))
		return 0;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (csdev->refcnt != 1)
		goto out;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred. If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size). If so advance the RRP so that we
	 * get the latest trace data. In snapshot mode none of that matters
	 * since we are expected to clobber stale data in favour of the latest
	 * traces.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = tmc_get_memwidth_mask(drvdata);

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained in function tmc_get_memwidth_mask().
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = coresight_barrier_pkt;

	/* Read the trace data, one 32-bit word at a time */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode we simply increment the head by the number of bytes
	 * that were written. User space will figure out how many bytes to get
	 * from the AUX buffer based on the position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	/*
	 * CS_LOCK() contains mb() so it can ensure visibility of the AUX trace
	 * data before the aux_head is updated via perf_aux_output_end(), which
	 * is expected by the perf ring buffer.
	 */
	CS_LOCK(drvdata->base);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable = tmc_enable_etf_sink,
	.disable = tmc_disable_etf_sink,
	.alloc_buffer = tmc_alloc_etf_buffer,
	.free_buffer = tmc_free_etf_buffer,
	.update_buffer = tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable = tmc_enable_etf_link,
	.disable = tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
	.link_ops = &tmc_etf_link_ops,
};

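/*
 * Prepare the device so its buffer can be read through the
 * /dev/xyz.{etf|etb} character device: stop the TMC when it is operated
 * from sysFS (dumping its RAM to drvdata->buf in the process) and mark
 * the device as being read.
 */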
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			ret = -EINVAL;
			goto out;
		}
		__tmc_etb_disable_hw(drvdata);
	}

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

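/*
 * Undo tmc_read_prepare_etb(): if a sysFS trace session is still
 * active, zero the buffer and re-arm the TMC so tracing continues;
 * otherwise release the trace buffer, freeing it outside the spinlock.
 */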
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;
	int rc = 0;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			spin_unlock_irqrestore(&drvdata->spinlock, flags);
			return -EINVAL;
		}
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		rc = __tmc_etb_enable_hw(drvdata);
		if (rc) {
			spin_unlock_irqrestore(&drvdata->spinlock, flags);
			return rc;
		}
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock. There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}