// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/bitfield.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/stringhash.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "coresight-config.h"
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-syscfg.h"
#include "coresight-trace-id.h"

static struct pmu etm_pmu;
static bool etm_perf_up;

/*
 * An ETM context for a running event includes the perf aux handle
 * and the aux_data. For ETM, the aux_data (etm_event_data) consists of
 * the trace path and the sink configuration. The event data is accessible
 * via perf_get_aux(handle). However, a sink could "end" a perf output
 * handle via the IRQ handler. And if the "sink" encounters a failure
 * to "begin" another session (e.g. due to lack of space in the buffer),
 * the handle will be cleared. Thus, the event_data may not be accessible
 * from the handle when we get to etm_event_stop(), which is required
 * for stopping the trace path. The event_data is guaranteed to stay alive
 * until "free_aux()", which cannot happen as long as the event is active on
 * the ETM. Thus the event_data for the session must be part of the ETM context
 * to make sure we can disable the trace path.
 */
struct etm_ctxt {
	struct perf_output_handle handle;
	struct etm_event_data *event_data;
};

static DEFINE_PER_CPU(struct etm_ctxt, etm_ctxt);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);

/*
 * The PMU formats were originally for ETMv3.5/PTM's ETMCR 'config';
 * now take them as general formats and apply them to all ETMs.
 */
PMU_FORMAT_ATTR(branch_broadcast, "config:" __stringify(ETM_OPT_BRANCH_BROADCAST));
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
/* contextid1 enables tracing CONTEXTIDR_EL1 for ETMv4 */
PMU_FORMAT_ATTR(contextid1, "config:" __stringify(ETM_OPT_CTXTID));
/* contextid2 enables tracing CONTEXTIDR_EL2 for ETMv4 */
PMU_FORMAT_ATTR(contextid2, "config:" __stringify(ETM_OPT_CTXTID2));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
/* preset - if sink ID is used as a configuration selector */
PMU_FORMAT_ATTR(preset, "config:0-3");
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid, "config2:0-31");
/* config ID - set if a system configuration is selected */
PMU_FORMAT_ATTR(configid, "config2:32-63");
PMU_FORMAT_ATTR(cc_threshold, "config3:0-11");

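/*
 * Usage sketch (device names are illustrative): user space sets these
 * bits via the event's config fields, e.g.
 *
 *   perf record -e cs_etm/cycacc,timestamp/ -- ls
 *   perf record -e cs_etm/@tmc_etr0/ -- ls
 *
 * where the second form selects a sink by name; the perf tool hashes
 * the sink name into the 32-bit "sinkid" field of config2 (see
 * etm_perf_add_symlink_group() below).
 */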

/*
 * contextid always traces the "PID". The PID is in CONTEXTIDR_EL1
 * when the kernel is running at EL1; when the kernel is at EL2,
 * the PID is in CONTEXTIDR_EL2.
 */
static ssize_t format_attr_contextid_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	int pid_fmt = ETM_OPT_CTXTID;

#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
	pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
#endif
	return sprintf(page, "config:%d\n", pid_fmt);
}

static struct device_attribute format_attr_contextid =
	__ATTR(contextid, 0444, format_attr_contextid_show, NULL);

static struct attribute *etm_config_formats_attr[] = {
	&format_attr_cycacc.attr,
	&format_attr_contextid.attr,
	&format_attr_contextid1.attr,
	&format_attr_contextid2.attr,
	&format_attr_timestamp.attr,
	&format_attr_retstack.attr,
	&format_attr_sinkid.attr,
	&format_attr_preset.attr,
	&format_attr_configid.attr,
	&format_attr_branch_broadcast.attr,
	&format_attr_cc_threshold.attr,
	NULL,
};

static const struct attribute_group etm_pmu_format_group = {
	.name = "format",
	.attrs = etm_config_formats_attr,
};

static struct attribute *etm_config_sinks_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_sinks_group = {
	.name = "sinks",
	.attrs = etm_config_sinks_attr,
};

static struct attribute *etm_config_events_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_events_group = {
	.name = "events",
	.attrs = etm_config_events_attr,
};

static const struct attribute_group *etm_pmu_attr_groups[] = {
	&etm_pmu_format_group,
	&etm_pmu_sinks_group,
	&etm_pmu_events_group,
	NULL,
};

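/*
 * event_data->path is a percpu pointer: each CPU in the session has its
 * own source-to-sink path (or NULL/ERR_PTR if none could be built).
 */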
static inline struct list_head **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
	return per_cpu_ptr(data->path, cpu);
}

static inline struct list_head *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
	return *etm_event_cpu_path_ptr(data, cpu);
}

static void etm_event_read(struct perf_event *event) {}

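/*
 * Allocate the HW address filter state for this event. Inherited events
 * start out with a copy of the parent's filters.
 */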
static int etm_addr_filters_alloc(struct perf_event *event)
{
	struct etm_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}

static void etm_event_destroy(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}

static int etm_event_init(struct perf_event *event)
{
	int ret = 0;

	if (event->attr.type != etm_pmu.type) {
		ret = -ENOENT;
		goto out;
	}

	ret = etm_addr_filters_alloc(event);
	if (ret)
		goto out;

	event->destroy = etm_event_destroy;
out:
	return ret;
}

static void free_sink_buffer(struct etm_event_data *event_data)
{
	int cpu;
	cpumask_t *mask = &event_data->mask;
	struct coresight_device *sink;

	if (!event_data->snk_config)
		return;

	if (WARN_ON(cpumask_empty(mask)))
		return;

	cpu = cpumask_first(mask);
	sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
	sink_ops(sink)->free_buffer(event_data->snk_config);
}

static void free_event_data(struct work_struct *work)
{
	int cpu;
	cpumask_t *mask;
	struct etm_event_data *event_data;

	event_data = container_of(work, struct etm_event_data, work);
	mask = &event_data->mask;

	/* Free the sink buffers, if there are any */
	free_sink_buffer(event_data);

	/* clear any configuration we were using */
	if (event_data->cfg_hash)
		cscfg_deactivate_config(event_data->cfg_hash);

	for_each_cpu(cpu, mask) {
		struct list_head **ppath;

		ppath = etm_event_cpu_path_ptr(event_data, cpu);
		if (!(IS_ERR_OR_NULL(*ppath))) {
			struct coresight_device *sink = coresight_get_sink(*ppath);

			/*
			 * Mark the perf event as done for the trace id allocator, but
			 * don't call coresight_trace_id_put_cpu_id_map() on individual
			 * IDs. Perf sessions never free trace IDs, to ensure that the
			 * ID associated with a CPU cannot change while this or any
			 * other concurrent session is running. Instead, a refcount is
			 * used so that the last event to call
			 * coresight_trace_id_perf_stop() frees all IDs.
			 */
			coresight_trace_id_perf_stop(&sink->perf_sink_id_map);

			coresight_release_path(*ppath);
		}
		*ppath = NULL;
	}

	free_percpu(event_data->path);
	kfree(event_data);
}

static void *alloc_event_data(int cpu)
{
	cpumask_t *mask;
	struct etm_event_data *event_data;

	/* First get memory for the session's data */
	event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
	if (!event_data)
		return NULL;

	mask = &event_data->mask;
	if (cpu != -1)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_copy(mask, cpu_present_mask);

	/*
	 * Each CPU has a single path between source and destination. As such
	 * allocate an array using CPU numbers as indexes. That way a path
	 * for any CPU can easily be accessed at any given time. We proceed
	 * the same way for sessions involving a single CPU. The cost of
	 * unused memory when dealing with single CPU trace scenarios is small
	 * compared to the cost of searching through an optimized array.
	 */
	event_data->path = alloc_percpu(struct list_head *);

	if (!event_data->path) {
		kfree(event_data);
		return NULL;
	}

	return event_data;
}

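/*
 * Called by the perf core when the AUX buffer is released. Tearing down
 * the trace paths grabs mutexes and may sleep, so defer the real work
 * to a workqueue instead of doing it in the caller's context.
 */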
static void etm_free_aux(void *data)
{
	struct etm_event_data *event_data = data;

	schedule_work(&event_data->work);
}

/*
 * Check if two given sinks are compatible with each other,
 * so that they can use the same sink buffers, when an event
 * moves around.
 */
static bool sinks_compatible(struct coresight_device *a,
			     struct coresight_device *b)
{
	if (!a || !b)
		return false;
	/*
	 * If the sinks are of the same subtype and driven
	 * by the same driver, we can use the same buffer
	 * on these sinks.
	 */
	return (a->subtype.sink_subtype == b->subtype.sink_subtype) &&
	       (sink_ops(a) == sink_ops(b));
}

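/*
 * Set up an AUX trace session: build a source-to-sink path for every
 * CPU the event can run on and allocate one shared sink buffer mapped
 * onto the perf AUX ring buffer pages.
 */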
static void *etm_setup_aux(struct perf_event *event, void **pages,
			   int nr_pages, bool overwrite)
{
	u32 id, cfg_hash;
	int cpu = event->cpu;
	int trace_id;
	cpumask_t *mask;
	struct coresight_device *sink = NULL;
	struct coresight_device *user_sink = NULL, *last_sink = NULL;
	struct etm_event_data *event_data = NULL;

	event_data = alloc_event_data(cpu);
	if (!event_data)
		return NULL;
	INIT_WORK(&event_data->work, free_event_data);

	/* First get the selected sink from user space. */
	if (event->attr.config2 & GENMASK_ULL(31, 0)) {
		id = (u32)event->attr.config2;
		sink = user_sink = coresight_get_sink_by_id(id);
	}

	/* check if user wants a coresight configuration selected */
	cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
	if (cfg_hash) {
		if (cscfg_activate_config(cfg_hash))
			goto err;
		event_data->cfg_hash = cfg_hash;
	}

	mask = &event_data->mask;

	/*
	 * Setup the path for each CPU in a trace session. We try to build
	 * a trace path for each CPU in the mask. If we don't find an ETM
	 * for the CPU or fail to build a path, we clear the CPU from the
	 * mask and continue with the rest. If ever we try to trace on those
	 * CPUs, we can handle it and fail the session.
	 */
	for_each_cpu(cpu, mask) {
		struct list_head *path;
		struct coresight_device *csdev;

		csdev = per_cpu(csdev_src, cpu);
		/*
		 * If there is no ETM associated with this CPU, clear it from
		 * the mask and continue with the rest. If ever we try to trace
		 * on this CPU, we handle it accordingly.
		 */
		if (!csdev) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/*
		 * No sink provided - look for a default sink for all the ETMs,
		 * where this event can be scheduled.
		 * We allocate the sink specific buffers only once for this
		 * event. If the ETMs have different default sink devices, we
		 * can only use a single "type" of sink as the event can carry
		 * only one sink specific buffer. Thus we have to make sure
		 * that the sinks are of the same type and driven by the same
		 * driver, as the one we allocate the buffer for. As such
		 * we choose the first sink and check if the remaining ETMs
		 * have a compatible default sink. We don't trace on a CPU
		 * if the sink is not compatible.
		 */
		if (!user_sink) {
			/* Find the default sink for this ETM */
			sink = coresight_find_default_sink(csdev);
			if (!sink) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}

			/* Check if this sink is compatible with the last sink */
			if (last_sink && !sinks_compatible(last_sink, sink)) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}
			last_sink = sink;
		}

		/*
		 * Building a path doesn't enable it, it simply builds a
		 * list of devices from source to sink that can be
		 * referenced later when the path is actually needed.
		 */
		path = coresight_build_path(csdev, sink);
		if (IS_ERR(path)) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/* ensure we can allocate a trace ID for this CPU */
		trace_id = coresight_trace_id_get_cpu_id_map(cpu, &sink->perf_sink_id_map);
		if (!IS_VALID_CS_TRACE_ID(trace_id)) {
			cpumask_clear_cpu(cpu, mask);
			coresight_release_path(path);
			continue;
		}

		coresight_trace_id_perf_start(&sink->perf_sink_id_map);
		*etm_event_cpu_path_ptr(event_data, cpu) = path;
	}

	/* no sink found for any CPU - cannot trace */
	if (!sink)
		goto err;

	/* If we don't have any CPUs ready for tracing, abort */
	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids)
		goto err;

	if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
		goto err;

	/*
	 * Allocate the sink buffer for this session. All the sinks
	 * where this event can be scheduled are ensured to be of the
	 * same type. Thus the same sink configuration is used by the
	 * sinks.
	 */
	event_data->snk_config =
			sink_ops(sink)->alloc_buffer(sink, event, pages,
						     nr_pages, overwrite);
	if (!event_data->snk_config)
		goto err;

out:
	return event_data;

err:
	etm_free_aux(event_data);
	event_data = NULL;
	goto out;
}

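/*
 * Start tracing on the current CPU: claim the AUX handle, enable the
 * path from this ETM to its sink, then enable the tracer itself.
 */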
static void etm_event_start(struct perf_event *event, int flags)
{
	int cpu = smp_processor_id();
	struct etm_event_data *event_data;
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct list_head *path;
	u64 hw_id;
	u8 trace_id;

	if (!csdev)
		goto fail;

	/* Have we messed up our tracking? */
	if (WARN_ON(ctxt->event_data))
		goto fail;

	/*
	 * Deal with the ring buffer API and get a handle on the
	 * session's information.
	 */
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data)
		goto fail;

	/*
	 * Check if this ETM is allowed to trace, as decided
	 * at etm_setup_aux(). This could be due to an unreachable
	 * sink from this ETM. We can't do much in this case if
	 * the sink was specified or hinted to the driver. For
	 * now, simply don't record anything on this ETM.
	 *
	 * As such we pretend that everything is fine, and let
	 * it continue without actually tracing. The event could
	 * continue tracing when it moves to a CPU from which a
	 * sink is reachable.
	 */
	if (!cpumask_test_cpu(cpu, &event_data->mask))
		goto out;

	path = etm_event_cpu_path(event_data, cpu);
	/* We need a sink, no need to continue without one */
	sink = coresight_get_sink(path);
	if (WARN_ON_ONCE(!sink))
		goto fail_end_stop;

	/* Nothing will happen without a path */
	if (coresight_enable_path(path, CS_MODE_PERF, handle))
		goto fail_end_stop;

	/* Finally enable the tracer */
	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF,
				      &sink->perf_sink_id_map))
		goto fail_disable_path;

	/*
	 * output cpu / trace ID in perf record, once for the lifetime
	 * of the event.
	 */
	if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) {
		cpumask_set_cpu(cpu, &event_data->aux_hwid_done);

		trace_id = coresight_trace_id_read_cpu_id_map(cpu, &sink->perf_sink_id_map);

		hw_id = FIELD_PREP(CS_AUX_HW_ID_MAJOR_VERSION_MASK,
				   CS_AUX_HW_ID_MAJOR_VERSION);
		hw_id |= FIELD_PREP(CS_AUX_HW_ID_MINOR_VERSION_MASK,
				    CS_AUX_HW_ID_MINOR_VERSION);
		hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, trace_id);
		hw_id |= FIELD_PREP(CS_AUX_HW_ID_SINK_ID_MASK, coresight_get_sink_id(sink));

		perf_report_aux_output_id(event, hw_id);
	}

out:
	/* Tell the perf core the event is alive */
	event->hw.state = 0;
	/* Save the event_data for this ETM */
	ctxt->event_data = event_data;
	return;

fail_disable_path:
	coresight_disable_path(path);
fail_end_stop:
	/*
	 * Check if the handle is still associated with the event,
	 * to handle cases where the sink failed to start the
	 * trace and has already TRUNCATED the handle.
	 */
	if (READ_ONCE(handle->event)) {
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
		perf_aux_output_end(handle, 0);
	}
fail:
	event->hw.state = PERF_HES_STOPPED;
	return;
}

static void etm_event_stop(struct perf_event *event, int mode)
{
	int cpu = smp_processor_id();
	unsigned long size;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct etm_event_data *event_data;
	struct list_head *path;

	/*
	 * If we still have access to the event_data via handle,
	 * confirm that we haven't messed up the tracking.
	 */
	if (handle->event &&
	    WARN_ON(perf_get_aux(handle) != ctxt->event_data))
		return;

	event_data = ctxt->event_data;
	/* Clear the event_data as this ETM is stopping the trace. */
	ctxt->event_data = NULL;

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	/* We must have a valid event_data for a running event */
	if (WARN_ON(!event_data))
		return;

	/*
	 * Check if this ETM was allowed to trace, as decided at
	 * etm_setup_aux(). If it wasn't allowed to trace, then
	 * nothing needs to be torn down other than outputting a
	 * zero sized record.
	 */
	if (handle->event && (mode & PERF_EF_UPDATE) &&
	    !cpumask_test_cpu(cpu, &event_data->mask)) {
		event->hw.state = PERF_HES_STOPPED;
		perf_aux_output_end(handle, 0);
		return;
	}

	if (!csdev)
		return;

	path = etm_event_cpu_path(event_data, cpu);
	if (!path)
		return;

	sink = coresight_get_sink(path);
	if (!sink)
		return;

	/* stop tracer */
	coresight_disable_source(csdev, event);

	/* tell the core */
	event->hw.state = PERF_HES_STOPPED;

	/*
	 * If the handle is not bound to an event anymore
	 * (e.g., the sink driver was unable to restart the
	 * handle due to lack of buffer space), we don't
	 * have to do anything here.
	 */
	if (handle->event && (mode & PERF_EF_UPDATE)) {
		if (WARN_ON_ONCE(handle->event != event))
			return;

		/* update trace information */
		if (!sink_ops(sink)->update_buffer)
			return;

		size = sink_ops(sink)->update_buffer(sink, handle,
						     event_data->snk_config);
		/*
		 * Make sure the handle is still valid as the
		 * sink could have closed it from an IRQ.
		 * The sink driver must handle the race between
		 * update_buffer() and the IRQ. Thus we should
		 * either get a valid handle and a valid size
		 * (which may be 0), or an invalid handle with
		 * a zero size; we should never get a non-zero
		 * size with an invalid handle.
		 */
		if (READ_ONCE(handle->event))
			perf_aux_output_end(handle, size);
		else
			WARN_ON(size);
	}

	/* Disabling the path makes its elements available to other sessions */
	coresight_disable_path(path);
}

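/*
 * perf core add/del callbacks: "add" schedules the event in on this CPU
 * (starting it right away if PERF_EF_START is set), "del" schedules it
 * out and harvests the trace data.
 */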
static int etm_event_add(struct perf_event *event, int mode)
{
	int ret = 0;
	struct hw_perf_event *hwc = &event->hw;

	if (mode & PERF_EF_START) {
		etm_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			ret = -EINVAL;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	return ret;
}

static void etm_event_del(struct perf_event *event, int mode)
{
	etm_event_stop(event, PERF_EF_UPDATE);
}

static int etm_addr_filters_validate(struct list_head *filters)
{
	bool range = false, address = false;
	int index = 0;
	struct perf_addr_filter *filter;

	list_for_each_entry(filter, filters, entry) {
		/*
		 * No need to go further if there's no more
		 * room for filters.
		 */
		if (++index > ETM_ADDR_CMP_MAX)
			return -EOPNOTSUPP;

		/* filter::size == 0 means single address trigger */
		if (filter->size) {
			/*
			 * The existing code relies on START/STOP filters
			 * being address filters.
			 */
			if (filter->action == PERF_ADDR_FILTER_ACTION_START ||
			    filter->action == PERF_ADDR_FILTER_ACTION_STOP)
				return -EOPNOTSUPP;

			range = true;
		} else
			address = true;

		/*
		 * At this time we don't allow range and start/stop filtering
		 * to coexist; they have to be mutually exclusive.
		 */
		if (range && address)
			return -EOPNOTSUPP;
	}

	return 0;
}

static void etm_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long start, stop;
	struct perf_addr_filter_range *fr = event->addr_filter_ranges;
	struct etm_filters *filters = event->hw.addr_filters;
	struct etm_filter *etm_filter;
	struct perf_addr_filter *filter;
	int i = 0;

	list_for_each_entry(filter, &head->list, entry) {
		start = fr[i].start;
		stop = start + fr[i].size;
		etm_filter = &filters->etm_filter[i];

		switch (filter->action) {
		case PERF_ADDR_FILTER_ACTION_FILTER:
			etm_filter->start_addr = start;
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_RANGE;
			break;
		case PERF_ADDR_FILTER_ACTION_START:
			etm_filter->start_addr = start;
			etm_filter->type = ETM_ADDR_TYPE_START;
			break;
		case PERF_ADDR_FILTER_ACTION_STOP:
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_STOP;
			break;
		}
		i++;
	}

	filters->nr_filters = i;
}
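
/*
 * Filter usage sketch (path and symbol are illustrative): user space
 * programs these filters via perf's --filter option, e.g.
 *
 *   perf record -e cs_etm// --filter 'filter main @ /bin/ls' -- ls
 *
 * which arrives here as a PERF_ADDR_FILTER_ACTION_FILTER range.
 */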

int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
	char entry[sizeof("cpu9999999")];
	int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
	struct device *pmu_dev = etm_pmu.dev;
	struct device *cs_dev = &csdev->dev;

	sprintf(entry, "cpu%d", cpu);

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	if (link) {
		ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
		if (ret)
			return ret;
		per_cpu(csdev_src, cpu) = csdev;
	} else {
		sysfs_remove_link(&pmu_dev->kobj, entry);
		per_cpu(csdev_src, cpu) = NULL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(etm_perf_symlink);

static ssize_t etm_perf_sink_name_show(struct device *dev,
				       struct device_attribute *dattr,
				       char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
}

static struct dev_ext_attribute *
etm_perf_add_symlink_group(struct device *dev, const char *name, const char *group_name)
{
	struct dev_ext_attribute *ea;
	unsigned long hash;
	int ret;
	struct device *pmu_dev = etm_pmu.dev;

	if (!etm_perf_up)
		return ERR_PTR(-EPROBE_DEFER);

	ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL);
	if (!ea)
		return ERR_PTR(-ENOMEM);

	/*
	 * If this function is called when adding a sink, then the hash is
	 * used for sink selection - see coresight_get_sink_by_id().
	 * If adding a configuration, then the hash is used for selection in
	 * cscfg_activate_config().
	 */
	hash = hashlen_hash(hashlen_string(NULL, name));

	sysfs_attr_init(&ea->attr.attr);
	ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!ea->attr.attr.name)
		return ERR_PTR(-ENOMEM);

	ea->attr.attr.mode = 0444;
	ea->var = (unsigned long *)hash;

	ret = sysfs_add_file_to_group(&pmu_dev->kobj,
				      &ea->attr.attr, group_name);

	return ret ? ERR_PTR(ret) : ea;
}

int etm_perf_add_symlink_sink(struct coresight_device *csdev)
{
	const char *name;
	struct device *dev = &csdev->dev;
	int err = 0;

	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return -EINVAL;

	if (csdev->ea != NULL)
		return -EINVAL;

	name = dev_name(dev);
	csdev->ea = etm_perf_add_symlink_group(dev, name, "sinks");
	if (IS_ERR(csdev->ea)) {
		err = PTR_ERR(csdev->ea);
		csdev->ea = NULL;
	} else
		csdev->ea->attr.show = etm_perf_sink_name_show;

	return err;
}

static void etm_perf_del_symlink_group(struct dev_ext_attribute *ea, const char *group_name)
{
	struct device *pmu_dev = etm_pmu.dev;

	sysfs_remove_file_from_group(&pmu_dev->kobj,
				     &ea->attr.attr, group_name);
}

void etm_perf_del_symlink_sink(struct coresight_device *csdev)
{
	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return;

	if (!csdev->ea)
		return;

	etm_perf_del_symlink_group(csdev->ea, "sinks");
	csdev->ea = NULL;
}

static ssize_t etm_perf_cscfg_event_show(struct device *dev,
					 struct device_attribute *dattr,
					 char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "configid=0x%lx\n", (unsigned long)(ea->var));
}

int etm_perf_add_symlink_cscfg(struct device *dev, struct cscfg_config_desc *config_desc)
{
	int err = 0;

	if (config_desc->event_ea != NULL)
		return 0;

	config_desc->event_ea = etm_perf_add_symlink_group(dev, config_desc->name, "events");

	/* set the show function to the custom cscfg event */
	if (!IS_ERR(config_desc->event_ea))
		config_desc->event_ea->attr.show = etm_perf_cscfg_event_show;
	else {
		err = PTR_ERR(config_desc->event_ea);
		config_desc->event_ea = NULL;
	}

	return err;
}

void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc)
{
	if (!config_desc->event_ea)
		return;

	etm_perf_del_symlink_group(config_desc->event_ea, "events");
	config_desc->event_ea = NULL;
}

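/*
 * Register the "cs_etm" PMU with the perf core. The individual ETM/ETE
 * drivers hook their per-CPU devices up later via etm_perf_symlink().
 */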
int __init etm_perf_init(void)
{
	int ret;

	etm_pmu.capabilities = (PERF_PMU_CAP_EXCLUSIVE |
				PERF_PMU_CAP_ITRACE);

	etm_pmu.attr_groups = etm_pmu_attr_groups;
	etm_pmu.task_ctx_nr = perf_sw_context;
	etm_pmu.read = etm_event_read;
	etm_pmu.event_init = etm_event_init;
	etm_pmu.setup_aux = etm_setup_aux;
	etm_pmu.free_aux = etm_free_aux;
	etm_pmu.start = etm_event_start;
	etm_pmu.stop = etm_event_stop;
	etm_pmu.add = etm_event_add;
	etm_pmu.del = etm_event_del;
	etm_pmu.addr_filters_sync = etm_addr_filters_sync;
	etm_pmu.addr_filters_validate = etm_addr_filters_validate;
	etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX;
	etm_pmu.module = THIS_MODULE;

	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
	if (ret == 0)
		etm_perf_up = true;

	return ret;
}

void etm_perf_exit(void)
{
	perf_pmu_unregister(&etm_pmu);
}