/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>

#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <linux/list.h>

#include "../perf.h"
#include "util.h"
#include "evlist.h"
#include "cpumap.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"

#include <linux/hash.h>

#include "event.h"
#include "session.h"
#include "debug.h"
#include <subcmd/parse-options.h>

#include "intel-pt.h"
#include "intel-bts.h"

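/*
 * Map the kernel's AUX area ring buffer into this tool.  The AUX area is
 * requested by writing aux_offset and aux_size into the perf_event mmap
 * control page, then mmap()ing the event file descriptor at that offset.
 */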
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

	mm->userpg = userpg;
	mm->mask = mp->mask;
	mm->len = mp->len;
	mm->prev = 0;
	mm->idx = mp->idx;
	mm->tid = mp->tid;
	mm->cpu = mp->cpu;

	if (!mp->len) {
		mm->base = NULL;
		return 0;
	}

#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pr_err("Cannot use AUX area tracing mmaps\n");
	return -1;
#endif

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}

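/*
 * Size the AUX area.  A power-of-2 length lets readers wrap offsets with a
 * cheap bitmask (len - 1); otherwise mask stays 0 and offsets are wrapped
 * with a modulo instead (see __auxtrace_mmap__read()).
 */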
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}

void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct perf_evlist *evlist, int idx,
				   bool per_cpu)
{
	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = evlist->cpus->map[idx];
		if (evlist->threads)
			mp->tid = thread_map__pid(evlist->threads, 0);
		else
			mp->tid = -1;
	} else {
		mp->cpu = -1;
		mp->tid = thread_map__pid(evlist->threads, idx);
	}
}

#define AUXTRACE_INIT_NR_QUEUES	32

static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}

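/*
 * Grow the queue array by doubling until it covers new_nr_queues.  The
 * buffer lists are spliced onto the new queues rather than copied, so the
 * queued buffers themselves never move.
 */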
static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}

static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data_file__fd(session->file);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}

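/*
 * Append a buffer to the queue selected by idx, growing the queue array on
 * demand.  The first buffer queued binds the queue to one cpu/tid pair; a
 * later buffer with a different cpu or tid is rejected as a conflict.
 */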
static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];

	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu;
	} else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
		pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
		       queue->cpu, queue->tid, buffer->cpu, buffer->tid);
		return -EINVAL;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}

/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

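/*
 * A trace buffer bigger than 32MiB cannot be safely mapped in one piece on a
 * 32-bit host, so carve it into 32MiB chunks.  Every chunk after the first
 * is flagged 'consecutive', meaning its data continues the previous chunk
 * with no gap in the trace.
 */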
static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__add_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}

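/*
 * Make the buffer's data reachable: point straight into the file when the
 * whole file is already mapped, copy it out of a pipe, or, on 32-bit with an
 * oversized buffer, split it first and leave it to be mmapped on demand.
 */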
static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
					     struct perf_session *session,
					     unsigned int idx,
					     struct auxtrace_buffer *buffer)
{
	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data_file__is_pipe(session->file)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			return -ENOMEM;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		int err;

		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			return err;
	}

	return auxtrace_queues__add_buffer(queues, idx, buffer);
}

int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer *buffer;
	unsigned int idx;
	int err;

	buffer = zalloc(sizeof(struct auxtrace_buffer));
	if (!buffer)
		return -ENOMEM;

	buffer->pid = -1;
	buffer->tid = event->auxtrace.tid;
	buffer->cpu = event->auxtrace.cpu;
	buffer->data_offset = data_offset;
	buffer->offset = event->auxtrace.offset;
	buffer->reference = event->auxtrace.reference;
	buffer->size = event->auxtrace.size;
	idx = event->auxtrace.idx;

	err = auxtrace_queues__add_event_buffer(queues, session, idx, buffer);
	if (err)
		goto out_err;

	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_err:
	auxtrace_buffer__free(buffer);
	return err;
}

static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct auxtrace_event) ||
		    event->header.size != sz) {
			err = -EINVAL;
			goto out;
		}
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}
out:
	return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}

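/*
 * Sift an entry up a binary min-heap kept in an array: while the parent at
 * (pos - 1) / 2 has a bigger ordinal, pull that parent down and keep
 * climbing, then place the entry in the hole.  The smallest ordinal
 * (typically the next timestamp to process) ends up at index 0.
 */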
static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}
	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}

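/*
 * Remove the heap top: walk the hole at index 0 downwards, promoting the
 * smaller child at each level, then re-insert the last element at the final
 * hole with a sift-up via auxtrace_heapify().
 */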
void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			return;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap_cnt - 1;
	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}

size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct perf_evlist *evlist)
{
	if (itr)
		return itr->info_priv_size(itr, evlist);
	return 0;
}

static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct auxtrace_info_event *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}

void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}

int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}

int auxtrace_record__snapshot_finish(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}

int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}

int auxtrace_record__options(struct auxtrace_record *itr,
			     struct perf_evlist *evlist,
			     struct record_opts *opts)
{
	if (itr)
		return itr->recording_options(itr, evlist, opts);
	return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	if (itr)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}

struct auxtrace_record *__weak
auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}

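/*
 * The auxtrace index records where each PERF_RECORD_AUXTRACE event sits in
 * the perf.data file, so report tools can seek straight to the trace data
 * instead of scanning for it.  Entries accumulate in fixed-size chunks
 * (PERF_AUXTRACE_INDEX_ENTRY_COUNT) strung on a list.
 */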
static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}

void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del(&auxtrace_index->list);
		free(auxtrace_index);
	}
}

static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}

int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}

static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}

int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}

static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}

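/*
 * On-disk layout of the index: a u64 entry count followed by that many
 * entries, each a { file_offset, sz } pair of u64 values.  Entries are
 * byte-swapped on read if the file was written on a machine of the other
 * endianness.
 */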
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -1;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}

static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}

struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}

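/*
 * Lazily mmap a buffer's trace data from the perf.data file.  mmap() wants a
 * page-aligned file offset, so round data_offset down to a page boundary and
 * remember the adjustment so that buffer->data still points at the first
 * byte of trace data.
 */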
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}

void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
		buffer->size = 0;
	}
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}

void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct auxtrace_error_event));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->ip = ip;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

	size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
	       strlen(auxtrace_error->msg) + 1;
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

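/*
 * Build and deliver a PERF_RECORD_AUXTRACE_INFO event: a fixed header plus
 * an opaque private blob sized and filled in by the recording backend
 * (e.g. Intel PT), to be interpreted again by the matching decoder.
 */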
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
	ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct auxtrace_info_event) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}

static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}

int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
				      union perf_event *event,
				      struct perf_session *session)
{
	enum auxtrace_type type = event->auxtrace_info.type;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		return intel_pt_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_INTEL_BTS:
		return intel_bts_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}
}

s64 perf_event__process_auxtrace(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_session *session)
{
	s64 err;

	if (dump_trace)
		fprintf(stdout, " size: %#"PRIx64" offset: %#"PRIx64" ref: %#"PRIx64" idx: %u tid: %d cpu: %d\n",
			event->auxtrace.size, event->auxtrace.offset,
			event->auxtrace.reference, event->auxtrace.idx,
			event->auxtrace.tid, event->auxtrace.cpu);

	if (auxtrace__dont_decode(session))
		return event->auxtrace.size;

	if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	err = session->auxtrace->process_auxtrace_event(session, event, tool);
	if (err < 0)
		return err;

	return event->auxtrace.size;
}

#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
{
	synth_opts->instructions = true;
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->errors = true;
	synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
	synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
}

/*
 * See tools/perf/Documentation/perf-script.txt for a description of the
 * options parsed here.  That documentation was introduced after this
 * changeset, together with 'perf script' support for these options.
 */
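/*
 * Examples of strings the parser below accepts (a sketch; see the
 * documentation referenced above for the authoritative grammar):
 *
 *	(no string)	all the defaults set above (--itrace with no argument)
 *	"i100us"	synthesize an instruction sample every 100 microseconds
 *	"cre"		synthesize call, return and error events
 *	"g16"		synthesize callchains 16 frames deep
 */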
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset)
{
	struct itrace_synth_opts *synth_opts = opt->value;
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'e':
			synth_opts->errors = true;
			break;
		case 'd':
			synth_opts->log = true;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'g':
			synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'l':
			synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}

static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;
	int ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);
	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(struct perf_tool *tool __maybe_unused,
				       union perf_event *event,
				       struct perf_session *session)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}

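/*
 * Read the data the kernel has written to the AUX ring buffer since the last
 * call and emit it as a PERF_RECORD_AUXTRACE event followed by the raw data.
 * 'head' counts bytes written: it is reduced to an in-buffer position with
 * '& mask' for power-of-2 lengths or '% len' otherwise, and data that wraps
 * past the end of the buffer is handed to fn() as two pieces.
 */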
static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;

	if (snapshot) {
		head = auxtrace_mmap__read_snapshot_head(mm);
		if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
						   &head, &old))
			return -1;
	} else {
		head = auxtrace_mmap__read_head(mm);
	}

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & 7;
	if (padding)
		padding = 8 - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		auxtrace_mmap__write_tail(mm, head);
		if (itr->read_finish) {
			int err;

			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}

int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(mm, itr, tool, fn, false, 0);
}

int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(mm, itr, tool, fn, true, snapshot_size);
}

/**
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: limit the number of entries to this maximum, when reached the cache
 *         is dropped and caching begins again with an empty cache
 * @cnt: current number of entries
 * @bits: hashtable size (@sz = 2^@bits)
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	size_t sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}

static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}

void auxtrace_cache__free(struct auxtrace_cache *c)
{
	if (!c)
		return;

	auxtrace_cache__drop(c);
	free(c->hashtable);
	free(c);
}

void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
	return malloc(c->entry_size);
}

void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}

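/*
 * Adding beyond the limit does not evict a single entry: the whole cache is
 * dropped and filling starts again from empty, which keeps hash chains short
 * without tracking any LRU state.  Sketch of typical use ('struct my_entry'
 * is hypothetical; real users embed struct auxtrace_cache_entry as the first
 * member, as the Intel PT decoder does):
 *
 *	struct my_entry {
 *		struct auxtrace_cache_entry	entry;
 *		u64				value;
 *	};
 *	struct my_entry *e = auxtrace_cache__alloc_entry(c);
 *
 *	e->value = 42;
 *	auxtrace_cache__add(c, key, &e->entry);
 *	e = auxtrace_cache__lookup(c, key);
 */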
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry)
{
	if (c->limit && ++c->cnt > c->limit)
		auxtrace_cache__drop(c);

	entry->key = key;
	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);

	return 0;
}

void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry(entry, hlist, hash) {
		if (entry->key == key)
			return entry;
	}

	return NULL;
}
1/*
2 * auxtrace.c: AUX area trace support
3 * Copyright (c) 2013-2015, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#include <inttypes.h>
17#include <sys/types.h>
18#include <sys/mman.h>
19#include <stdbool.h>
20#include <string.h>
21#include <limits.h>
22#include <errno.h>
23
24#include <linux/kernel.h>
25#include <linux/perf_event.h>
26#include <linux/types.h>
27#include <linux/bitops.h>
28#include <linux/log2.h>
29#include <linux/string.h>
30
31#include <sys/param.h>
32#include <stdlib.h>
33#include <stdio.h>
34#include <linux/list.h>
35
36#include "../perf.h"
37#include "util.h"
38#include "evlist.h"
39#include "dso.h"
40#include "map.h"
41#include "pmu.h"
42#include "evsel.h"
43#include "cpumap.h"
44#include "thread_map.h"
45#include "asm/bug.h"
46#include "auxtrace.h"
47
48#include <linux/hash.h>
49
50#include "event.h"
51#include "session.h"
52#include "debug.h"
53#include <subcmd/parse-options.h>
54
55#include "cs-etm.h"
56#include "intel-pt.h"
57#include "intel-bts.h"
58#include "arm-spe.h"
59
60#include "sane_ctype.h"
61#include "symbol/kallsyms.h"
62
63static bool auxtrace__dont_decode(struct perf_session *session)
64{
65 return !session->itrace_synth_opts ||
66 session->itrace_synth_opts->dont_decode;
67}
68
69int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
70 struct auxtrace_mmap_params *mp,
71 void *userpg, int fd)
72{
73 struct perf_event_mmap_page *pc = userpg;
74
75 WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");
76
77 mm->userpg = userpg;
78 mm->mask = mp->mask;
79 mm->len = mp->len;
80 mm->prev = 0;
81 mm->idx = mp->idx;
82 mm->tid = mp->tid;
83 mm->cpu = mp->cpu;
84
85 if (!mp->len) {
86 mm->base = NULL;
87 return 0;
88 }
89
90#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
91 pr_err("Cannot use AUX area tracing mmaps\n");
92 return -1;
93#endif
94
95 pc->aux_offset = mp->offset;
96 pc->aux_size = mp->len;
97
98 mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
99 if (mm->base == MAP_FAILED) {
100 pr_debug2("failed to mmap AUX area\n");
101 mm->base = NULL;
102 return -1;
103 }
104
105 return 0;
106}
107
108void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
109{
110 if (mm->base) {
111 munmap(mm->base, mm->len);
112 mm->base = NULL;
113 }
114}
115
116void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
117 off_t auxtrace_offset,
118 unsigned int auxtrace_pages,
119 bool auxtrace_overwrite)
120{
121 if (auxtrace_pages) {
122 mp->offset = auxtrace_offset;
123 mp->len = auxtrace_pages * (size_t)page_size;
124 mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
125 mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
126 pr_debug2("AUX area mmap length %zu\n", mp->len);
127 } else {
128 mp->len = 0;
129 }
130}
131
132void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
133 struct perf_evlist *evlist, int idx,
134 bool per_cpu)
135{
136 mp->idx = idx;
137
138 if (per_cpu) {
139 mp->cpu = evlist->cpus->map[idx];
140 if (evlist->threads)
141 mp->tid = thread_map__pid(evlist->threads, 0);
142 else
143 mp->tid = -1;
144 } else {
145 mp->cpu = -1;
146 mp->tid = thread_map__pid(evlist->threads, idx);
147 }
148}
149
150#define AUXTRACE_INIT_NR_QUEUES 32
151
152static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
153{
154 struct auxtrace_queue *queue_array;
155 unsigned int max_nr_queues, i;
156
157 max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
158 if (nr_queues > max_nr_queues)
159 return NULL;
160
161 queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
162 if (!queue_array)
163 return NULL;
164
165 for (i = 0; i < nr_queues; i++) {
166 INIT_LIST_HEAD(&queue_array[i].head);
167 queue_array[i].priv = NULL;
168 }
169
170 return queue_array;
171}
172
173int auxtrace_queues__init(struct auxtrace_queues *queues)
174{
175 queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
176 queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
177 if (!queues->queue_array)
178 return -ENOMEM;
179 return 0;
180}
181
182static int auxtrace_queues__grow(struct auxtrace_queues *queues,
183 unsigned int new_nr_queues)
184{
185 unsigned int nr_queues = queues->nr_queues;
186 struct auxtrace_queue *queue_array;
187 unsigned int i;
188
189 if (!nr_queues)
190 nr_queues = AUXTRACE_INIT_NR_QUEUES;
191
192 while (nr_queues && nr_queues < new_nr_queues)
193 nr_queues <<= 1;
194
195 if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
196 return -EINVAL;
197
198 queue_array = auxtrace_alloc_queue_array(nr_queues);
199 if (!queue_array)
200 return -ENOMEM;
201
202 for (i = 0; i < queues->nr_queues; i++) {
203 list_splice_tail(&queues->queue_array[i].head,
204 &queue_array[i].head);
205 queue_array[i].priv = queues->queue_array[i].priv;
206 }
207
208 queues->nr_queues = nr_queues;
209 queues->queue_array = queue_array;
210
211 return 0;
212}
213
214static void *auxtrace_copy_data(u64 size, struct perf_session *session)
215{
216 int fd = perf_data__fd(session->data);
217 void *p;
218 ssize_t ret;
219
220 if (size > SSIZE_MAX)
221 return NULL;
222
223 p = malloc(size);
224 if (!p)
225 return NULL;
226
227 ret = readn(fd, p, size);
228 if (ret != (ssize_t)size) {
229 free(p);
230 return NULL;
231 }
232
233 return p;
234}
235
236static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
237 unsigned int idx,
238 struct auxtrace_buffer *buffer)
239{
240 struct auxtrace_queue *queue;
241 int err;
242
243 if (idx >= queues->nr_queues) {
244 err = auxtrace_queues__grow(queues, idx + 1);
245 if (err)
246 return err;
247 }
248
249 queue = &queues->queue_array[idx];
250
251 if (!queue->set) {
252 queue->set = true;
253 queue->tid = buffer->tid;
254 queue->cpu = buffer->cpu;
255 } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
256 pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
257 queue->cpu, queue->tid, buffer->cpu, buffer->tid);
258 return -EINVAL;
259 }
260
261 buffer->buffer_nr = queues->next_buffer_nr++;
262
263 list_add_tail(&buffer->list, &queue->head);
264
265 queues->new_data = true;
266 queues->populated = true;
267
268 return 0;
269}
270
271/* Limit buffers to 32MiB on 32-bit */
272#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)
273
274static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
275 unsigned int idx,
276 struct auxtrace_buffer *buffer)
277{
278 u64 sz = buffer->size;
279 bool consecutive = false;
280 struct auxtrace_buffer *b;
281 int err;
282
283 while (sz > BUFFER_LIMIT_FOR_32_BIT) {
284 b = memdup(buffer, sizeof(struct auxtrace_buffer));
285 if (!b)
286 return -ENOMEM;
287 b->size = BUFFER_LIMIT_FOR_32_BIT;
288 b->consecutive = consecutive;
289 err = auxtrace_queues__queue_buffer(queues, idx, b);
290 if (err) {
291 auxtrace_buffer__free(b);
292 return err;
293 }
294 buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
295 sz -= BUFFER_LIMIT_FOR_32_BIT;
296 consecutive = true;
297 }
298
299 buffer->size = sz;
300 buffer->consecutive = consecutive;
301
302 return 0;
303}
304
305static bool filter_cpu(struct perf_session *session, int cpu)
306{
307 unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;
308
309 return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
310}
311
312static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
313 struct perf_session *session,
314 unsigned int idx,
315 struct auxtrace_buffer *buffer,
316 struct auxtrace_buffer **buffer_ptr)
317{
318 int err = -ENOMEM;
319
320 if (filter_cpu(session, buffer->cpu))
321 return 0;
322
323 buffer = memdup(buffer, sizeof(*buffer));
324 if (!buffer)
325 return -ENOMEM;
326
327 if (session->one_mmap) {
328 buffer->data = buffer->data_offset - session->one_mmap_offset +
329 session->one_mmap_addr;
330 } else if (perf_data__is_pipe(session->data)) {
331 buffer->data = auxtrace_copy_data(buffer->size, session);
332 if (!buffer->data)
333 goto out_free;
334 buffer->data_needs_freeing = true;
335 } else if (BITS_PER_LONG == 32 &&
336 buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
337 err = auxtrace_queues__split_buffer(queues, idx, buffer);
338 if (err)
339 goto out_free;
340 }
341
342 err = auxtrace_queues__queue_buffer(queues, idx, buffer);
343 if (err)
344 goto out_free;
345
346 /* FIXME: Doesn't work for split buffer */
347 if (buffer_ptr)
348 *buffer_ptr = buffer;
349
350 return 0;
351
352out_free:
353 auxtrace_buffer__free(buffer);
354 return err;
355}
356
357int auxtrace_queues__add_event(struct auxtrace_queues *queues,
358 struct perf_session *session,
359 union perf_event *event, off_t data_offset,
360 struct auxtrace_buffer **buffer_ptr)
361{
362 struct auxtrace_buffer buffer = {
363 .pid = -1,
364 .tid = event->auxtrace.tid,
365 .cpu = event->auxtrace.cpu,
366 .data_offset = data_offset,
367 .offset = event->auxtrace.offset,
368 .reference = event->auxtrace.reference,
369 .size = event->auxtrace.size,
370 };
371 unsigned int idx = event->auxtrace.idx;
372
373 return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
374 buffer_ptr);
375}
376
377static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
378 struct perf_session *session,
379 off_t file_offset, size_t sz)
380{
381 union perf_event *event;
382 int err;
383 char buf[PERF_SAMPLE_MAX_SIZE];
384
385 err = perf_session__peek_event(session, file_offset, buf,
386 PERF_SAMPLE_MAX_SIZE, &event, NULL);
387 if (err)
388 return err;
389
390 if (event->header.type == PERF_RECORD_AUXTRACE) {
391 if (event->header.size < sizeof(struct auxtrace_event) ||
392 event->header.size != sz) {
393 err = -EINVAL;
394 goto out;
395 }
396 file_offset += event->header.size;
397 err = auxtrace_queues__add_event(queues, session, event,
398 file_offset, NULL);
399 }
400out:
401 return err;
402}
403
404void auxtrace_queues__free(struct auxtrace_queues *queues)
405{
406 unsigned int i;
407
408 for (i = 0; i < queues->nr_queues; i++) {
409 while (!list_empty(&queues->queue_array[i].head)) {
410 struct auxtrace_buffer *buffer;
411
412 buffer = list_entry(queues->queue_array[i].head.next,
413 struct auxtrace_buffer, list);
414 list_del(&buffer->list);
415 auxtrace_buffer__free(buffer);
416 }
417 }
418
419 zfree(&queues->queue_array);
420 queues->nr_queues = 0;
421}
422
423static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
424 unsigned int pos, unsigned int queue_nr,
425 u64 ordinal)
426{
427 unsigned int parent;
428
429 while (pos) {
430 parent = (pos - 1) >> 1;
431 if (heap_array[parent].ordinal <= ordinal)
432 break;
433 heap_array[pos] = heap_array[parent];
434 pos = parent;
435 }
436 heap_array[pos].queue_nr = queue_nr;
437 heap_array[pos].ordinal = ordinal;
438}
439
440int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
441 u64 ordinal)
442{
443 struct auxtrace_heap_item *heap_array;
444
445 if (queue_nr >= heap->heap_sz) {
446 unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;
447
448 while (heap_sz <= queue_nr)
449 heap_sz <<= 1;
450 heap_array = realloc(heap->heap_array,
451 heap_sz * sizeof(struct auxtrace_heap_item));
452 if (!heap_array)
453 return -ENOMEM;
454 heap->heap_array = heap_array;
455 heap->heap_sz = heap_sz;
456 }
457
458 auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);
459
460 return 0;
461}
462
463void auxtrace_heap__free(struct auxtrace_heap *heap)
464{
465 zfree(&heap->heap_array);
466 heap->heap_cnt = 0;
467 heap->heap_sz = 0;
468}
469
470void auxtrace_heap__pop(struct auxtrace_heap *heap)
471{
472 unsigned int pos, last, heap_cnt = heap->heap_cnt;
473 struct auxtrace_heap_item *heap_array;
474
475 if (!heap_cnt)
476 return;
477
478 heap->heap_cnt -= 1;
479
480 heap_array = heap->heap_array;
481
482 pos = 0;
483 while (1) {
484 unsigned int left, right;
485
486 left = (pos << 1) + 1;
487 if (left >= heap_cnt)
488 break;
489 right = left + 1;
490 if (right >= heap_cnt) {
491 heap_array[pos] = heap_array[left];
492 return;
493 }
494 if (heap_array[left].ordinal < heap_array[right].ordinal) {
495 heap_array[pos] = heap_array[left];
496 pos = left;
497 } else {
498 heap_array[pos] = heap_array[right];
499 pos = right;
500 }
501 }
502
503 last = heap_cnt - 1;
504 auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
505 heap_array[last].ordinal);
506}
507
508size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
509 struct perf_evlist *evlist)
510{
511 if (itr)
512 return itr->info_priv_size(itr, evlist);
513 return 0;
514}
515
516static int auxtrace_not_supported(void)
517{
518 pr_err("AUX area tracing is not supported on this architecture\n");
519 return -EINVAL;
520}
521
522int auxtrace_record__info_fill(struct auxtrace_record *itr,
523 struct perf_session *session,
524 struct auxtrace_info_event *auxtrace_info,
525 size_t priv_size)
526{
527 if (itr)
528 return itr->info_fill(itr, session, auxtrace_info, priv_size);
529 return auxtrace_not_supported();
530}
531
532void auxtrace_record__free(struct auxtrace_record *itr)
533{
534 if (itr)
535 itr->free(itr);
536}
537
538int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
539{
540 if (itr && itr->snapshot_start)
541 return itr->snapshot_start(itr);
542 return 0;
543}
544
545int auxtrace_record__snapshot_finish(struct auxtrace_record *itr)
546{
547 if (itr && itr->snapshot_finish)
548 return itr->snapshot_finish(itr);
549 return 0;
550}
551
552int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
553 struct auxtrace_mmap *mm,
554 unsigned char *data, u64 *head, u64 *old)
555{
556 if (itr && itr->find_snapshot)
557 return itr->find_snapshot(itr, idx, mm, data, head, old);
558 return 0;
559}
560
561int auxtrace_record__options(struct auxtrace_record *itr,
562 struct perf_evlist *evlist,
563 struct record_opts *opts)
564{
565 if (itr)
566 return itr->recording_options(itr, evlist, opts);
567 return 0;
568}
569
570u64 auxtrace_record__reference(struct auxtrace_record *itr)
571{
572 if (itr)
573 return itr->reference(itr);
574 return 0;
575}
576
577int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
578 struct record_opts *opts, const char *str)
579{
580 if (!str)
581 return 0;
582
583 if (itr)
584 return itr->parse_snapshot_options(itr, opts, str);
585
586 pr_err("No AUX area tracing to snapshot\n");
587 return -EINVAL;
588}
589
590struct auxtrace_record *__weak
591auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, int *err)
592{
593 *err = 0;
594 return NULL;
595}
596
597static int auxtrace_index__alloc(struct list_head *head)
598{
599 struct auxtrace_index *auxtrace_index;
600
601 auxtrace_index = malloc(sizeof(struct auxtrace_index));
602 if (!auxtrace_index)
603 return -ENOMEM;
604
605 auxtrace_index->nr = 0;
606 INIT_LIST_HEAD(&auxtrace_index->list);
607
608 list_add_tail(&auxtrace_index->list, head);
609
610 return 0;
611}
612
613void auxtrace_index__free(struct list_head *head)
614{
615 struct auxtrace_index *auxtrace_index, *n;
616
617 list_for_each_entry_safe(auxtrace_index, n, head, list) {
618 list_del(&auxtrace_index->list);
619 free(auxtrace_index);
620 }
621}
622
623static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
624{
625 struct auxtrace_index *auxtrace_index;
626 int err;
627
628 if (list_empty(head)) {
629 err = auxtrace_index__alloc(head);
630 if (err)
631 return NULL;
632 }
633
634 auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);
635
636 if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
637 err = auxtrace_index__alloc(head);
638 if (err)
639 return NULL;
640 auxtrace_index = list_entry(head->prev, struct auxtrace_index,
641 list);
642 }
643
644 return auxtrace_index;
645}
646
647int auxtrace_index__auxtrace_event(struct list_head *head,
648 union perf_event *event, off_t file_offset)
649{
650 struct auxtrace_index *auxtrace_index;
651 size_t nr;
652
653 auxtrace_index = auxtrace_index__last(head);
654 if (!auxtrace_index)
655 return -ENOMEM;
656
657 nr = auxtrace_index->nr;
658 auxtrace_index->entries[nr].file_offset = file_offset;
659 auxtrace_index->entries[nr].sz = event->header.size;
660 auxtrace_index->nr += 1;
661
662 return 0;
663}
664
665static int auxtrace_index__do_write(int fd,
666 struct auxtrace_index *auxtrace_index)
667{
668 struct auxtrace_index_entry ent;
669 size_t i;
670
671 for (i = 0; i < auxtrace_index->nr; i++) {
672 ent.file_offset = auxtrace_index->entries[i].file_offset;
673 ent.sz = auxtrace_index->entries[i].sz;
674 if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
675 return -errno;
676 }
677 return 0;
678}
679
680int auxtrace_index__write(int fd, struct list_head *head)
681{
682 struct auxtrace_index *auxtrace_index;
683 u64 total = 0;
684 int err;
685
686 list_for_each_entry(auxtrace_index, head, list)
687 total += auxtrace_index->nr;
688
689 if (writen(fd, &total, sizeof(total)) != sizeof(total))
690 return -errno;
691
692 list_for_each_entry(auxtrace_index, head, list) {
693 err = auxtrace_index__do_write(fd, auxtrace_index);
694 if (err)
695 return err;
696 }
697
698 return 0;
699}
700
701static int auxtrace_index__process_entry(int fd, struct list_head *head,
702 bool needs_swap)
703{
704 struct auxtrace_index *auxtrace_index;
705 struct auxtrace_index_entry ent;
706 size_t nr;
707
708 if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
709 return -1;
710
711 auxtrace_index = auxtrace_index__last(head);
712 if (!auxtrace_index)
713 return -1;
714
715 nr = auxtrace_index->nr;
716 if (needs_swap) {
717 auxtrace_index->entries[nr].file_offset =
718 bswap_64(ent.file_offset);
719 auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
720 } else {
721 auxtrace_index->entries[nr].file_offset = ent.file_offset;
722 auxtrace_index->entries[nr].sz = ent.sz;
723 }
724
725 auxtrace_index->nr = nr + 1;
726
727 return 0;
728}
729
730int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
731 bool needs_swap)
732{
733 struct list_head *head = &session->auxtrace_index;
734 u64 nr;
735
736 if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
737 return -1;
738
739 if (needs_swap)
740 nr = bswap_64(nr);
741
742 if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
743 return -1;
744
745 while (nr--) {
746 int err;
747
748 err = auxtrace_index__process_entry(fd, head, needs_swap);
749 if (err)
750 return -1;
751 }
752
753 return 0;
754}
755
756static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
757 struct perf_session *session,
758 struct auxtrace_index_entry *ent)
759{
760 return auxtrace_queues__add_indexed_event(queues, session,
761 ent->file_offset, ent->sz);
762}
763
764int auxtrace_queues__process_index(struct auxtrace_queues *queues,
765 struct perf_session *session)
766{
767 struct auxtrace_index *auxtrace_index;
768 struct auxtrace_index_entry *ent;
769 size_t i;
770 int err;
771
772 if (auxtrace__dont_decode(session))
773 return 0;
774
775 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
776 for (i = 0; i < auxtrace_index->nr; i++) {
777 ent = &auxtrace_index->entries[i];
778 err = auxtrace_queues__process_index_entry(queues,
779 session,
780 ent);
781 if (err)
782 return err;
783 }
784 }
785 return 0;
786}
787
788struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
789 struct auxtrace_buffer *buffer)
790{
791 if (buffer) {
792 if (list_is_last(&buffer->list, &queue->head))
793 return NULL;
794 return list_entry(buffer->list.next, struct auxtrace_buffer,
795 list);
796 } else {
797 if (list_empty(&queue->head))
798 return NULL;
799 return list_entry(queue->head.next, struct auxtrace_buffer,
800 list);
801 }
802}
803
804void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
805{
806 size_t adj = buffer->data_offset & (page_size - 1);
807 size_t size = buffer->size + adj;
808 off_t file_offset = buffer->data_offset - adj;
809 void *addr;
810
811 if (buffer->data)
812 return buffer->data;
813
814 addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
815 if (addr == MAP_FAILED)
816 return NULL;
817
818 buffer->mmap_addr = addr;
819 buffer->mmap_size = size;
820
821 buffer->data = addr + adj;
822
823 return buffer->data;
824}
825
826void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
827{
828 if (!buffer->data || !buffer->mmap_addr)
829 return;
830 munmap(buffer->mmap_addr, buffer->mmap_size);
831 buffer->mmap_addr = NULL;
832 buffer->mmap_size = 0;
833 buffer->data = NULL;
834 buffer->use_data = NULL;
835}
836
837void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
838{
839 auxtrace_buffer__put_data(buffer);
840 if (buffer->data_needs_freeing) {
841 buffer->data_needs_freeing = false;
842 zfree(&buffer->data);
843 buffer->use_data = NULL;
844 buffer->size = 0;
845 }
846}
847
848void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
849{
850 auxtrace_buffer__drop_data(buffer);
851 free(buffer);
852}
853
void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct auxtrace_error_event));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->ip = ip;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

	size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
	       strlen(auxtrace_error->msg) + 1;
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
	ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct auxtrace_info_event) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}

/* Declarations for the ARM decoders dispatched below */
#include "arm-spe.h"
#include "cs-etm.h"

int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
				      union perf_event *event,
				      struct perf_session *session)
{
	enum auxtrace_type type = event->auxtrace_info.type;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		return intel_pt_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_INTEL_BTS:
		return intel_bts_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_ARM_SPE:
		return arm_spe_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_CS_ETM:
		return cs_etm__process_auxtrace_info(event, session);
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}
}

s64 perf_event__process_auxtrace(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_session *session)
{
	s64 err;

	if (dump_trace)
		fprintf(stdout, " size: %#"PRIx64" offset: %#"PRIx64" ref: %#"PRIx64" idx: %u tid: %d cpu: %d\n",
			event->auxtrace.size, event->auxtrace.offset,
			event->auxtrace.reference, event->auxtrace.idx,
			event->auxtrace.tid, event->auxtrace.cpu);

	if (auxtrace__dont_decode(session))
		return event->auxtrace.size;

	if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	err = session->auxtrace->process_auxtrace_event(session, event, tool);
	if (err < 0)
		return err;

	return event->auxtrace.size;
}

#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024
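/*
 * With the defaults above, instruction samples are synthesized every
 * 100000 ns, i.e. one every 100 microseconds.
 */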

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
{
	synth_opts->instructions = true;
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->ptwrites = true;
	synth_opts->pwr_events = true;
	synth_opts->errors = true;
	synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
	synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
	synth_opts->initial_skip = 0;
}

/*
 * See tools/perf/Documentation/perf-script.txt for information about the
 * options parsed here. That documentation was added by a later changeset,
 * when 'perf script' gained support for these options.
 */
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset)
{
	struct itrace_synth_opts *synth_opts = opt->value;
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'w':
			synth_opts->ptwrites = true;
			break;
		case 'p':
			synth_opts->pwr_events = true;
			break;
		case 'e':
			synth_opts->errors = true;
			break;
		case 'd':
			synth_opts->log = true;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'g':
			synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'l':
			synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case 's':
			synth_opts->initial_skip = strtoul(p, &endptr, 10);
			if (p == endptr)
				goto out_err;
			p = endptr;
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}
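
/*
 * For example, per the grammar above: "i100us" synthesizes instruction
 * events every 100 microseconds, "g16" synthesizes callchains 16 entries
 * deep, and "cre" synthesizes branch events on calls and returns plus
 * error events.
 */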

static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;
	int ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);
	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(struct perf_tool *tool __maybe_unused,
				       union perf_event *event,
				       struct perf_session *session)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}

static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;

	if (snapshot) {
		head = auxtrace_mmap__read_snapshot_head(mm);
		if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
						   &head, &old))
			return -1;
	} else {
		head = auxtrace_mmap__read_head(mm);
	}

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

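	/*
	 * The new data may wrap around the end of the ring buffer, in which
	 * case it is passed to fn() as two chunks: the tail of the buffer
	 * (data1) followed by its beginning (data2).
	 */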
	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & 7;
	if (padding)
		padding = 8 - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		auxtrace_mmap__write_tail(mm, head);
		if (itr->read_finish) {
			int err;

			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}

int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(mm, itr, tool, fn, false, 0);
}

int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(mm, itr, tool, fn, true, snapshot_size);
}

/**
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: maximum number of entries; when reached, the cache is dropped and
 *         caching begins again with an empty cache
 * @cnt: current number of entries
 * @bits: hashtable size (@sz = 2^@bits)
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	size_t sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}

static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}

void auxtrace_cache__free(struct auxtrace_cache *c)
{
	if (!c)
		return;

	auxtrace_cache__drop(c);
	free(c->hashtable);
	free(c);
}

void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
	return malloc(c->entry_size);
}

void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}

int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry)
{
	if (c->limit && ++c->cnt > c->limit)
		auxtrace_cache__drop(c);

	entry->key = key;
	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);

	return 0;
}

void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry(entry, hlist, hash) {
		if (entry->key == key)
			return entry;
	}

	return NULL;
}

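/*
 * A minimal usage sketch (hypothetical entry type, not part of the perf
 * API): cached items embed a struct auxtrace_cache_entry as their first
 * member, so a cache created with entry_size equal to
 * sizeof(struct example_cached_item) can store and retrieve them by key.
 */
struct example_cached_item {
	struct auxtrace_cache_entry entry;	/* must be first */
	u64 payload;
};

static int __maybe_unused auxtrace_cache__example(struct auxtrace_cache *c,
						  u32 key, u64 payload)
{
	struct example_cached_item *item;

	item = auxtrace_cache__alloc_entry(c);
	if (!item)
		return -ENOMEM;

	item->payload = payload;
	auxtrace_cache__add(c, key, &item->entry);

	item = auxtrace_cache__lookup(c, key);
	return item && item->payload == payload ? 0 : -EINVAL;
}
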
static void addr_filter__free_str(struct addr_filter *filt)
{
	free(filt->str);
	filt->action = NULL;
	filt->sym_from = NULL;
	filt->sym_to = NULL;
	filt->filename = NULL;
	filt->str = NULL;
}

static struct addr_filter *addr_filter__new(void)
{
	struct addr_filter *filt = zalloc(sizeof(*filt));

	if (filt)
		INIT_LIST_HEAD(&filt->list);

	return filt;
}

static void addr_filter__free(struct addr_filter *filt)
{
	if (filt)
		addr_filter__free_str(filt);
	free(filt);
}

static void addr_filters__add(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_add_tail(&filt->list, &filts->head);
	filts->cnt += 1;
}

static void addr_filters__del(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_del_init(&filt->list);
	filts->cnt -= 1;
}

void addr_filters__init(struct addr_filters *filts)
{
	INIT_LIST_HEAD(&filts->head);
	filts->cnt = 0;
}

void addr_filters__exit(struct addr_filters *filts)
{
	struct addr_filter *filt, *n;

	list_for_each_entry_safe(filt, n, &filts->head, list) {
		addr_filters__del(filts, filt);
		addr_filter__free(filt);
	}
}

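/*
 * Parse, in place, either a number (decimal, octal or hex, per strtoull()
 * with base 0) or a string terminated by one of the characters in
 * str_delim; the string is NUL-terminated within the input buffer and
 * *inp is advanced past it.
 */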
static int parse_num_or_str(char **inp, u64 *num, const char **str,
			    const char *str_delim)
{
	*inp += strspn(*inp, " ");

	if (isdigit(**inp)) {
		char *endptr;

		if (!num)
			return -EINVAL;
		errno = 0;
		*num = strtoull(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp)
			return -EINVAL;
		*inp = endptr;
	} else {
		size_t n;

		if (!str)
			return -EINVAL;
		*inp += strspn(*inp, " ");
		*str = *inp;
		n = strcspn(*inp, str_delim);
		if (!n)
			return -EINVAL;
		*inp += n;
		if (**inp) {
			**inp = '\0';
			*inp += 1;
		}
	}
	return 0;
}

static int parse_action(struct addr_filter *filt)
{
	if (!strcmp(filt->action, "filter")) {
		filt->start = true;
		filt->range = true;
	} else if (!strcmp(filt->action, "start")) {
		filt->start = true;
	} else if (!strcmp(filt->action, "stop")) {
		filt->start = false;
	} else if (!strcmp(filt->action, "tracestop")) {
		filt->start = false;
		filt->range = true;
		filt->action += 5; /* Change 'tracestop' to 'stop' */
	} else {
		return -EINVAL;
	}
	return 0;
}

static int parse_sym_idx(char **inp, int *idx)
{
	*idx = -1;

	*inp += strspn(*inp, " ");

	if (**inp != '#')
		return 0;

	*inp += 1;

	if (**inp == 'g' || **inp == 'G') {
		*inp += 1;
		*idx = 0;
	} else {
		unsigned long num;
		char *endptr;

		errno = 0;
		num = strtoul(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp || num > INT_MAX)
			return -EINVAL;
		*inp = endptr;
		*idx = num;
	}

	return 0;
}

static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
{
	int err = parse_num_or_str(inp, num, str, " ");

	if (!err && *str)
		err = parse_sym_idx(inp, idx);

	return err;
}

static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
{
	char *fstr;
	int err;

	filt->str = fstr = strdup(*filter_inp);
	if (!fstr)
		return -ENOMEM;

	err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
	if (err)
		goto out_err;

	err = parse_action(filt);
	if (err)
		goto out_err;

	err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
			      &filt->sym_from_idx);
	if (err)
		goto out_err;

	fstr += strspn(fstr, " ");

	if (*fstr == '/') {
		fstr += 1;
		err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
				      &filt->sym_to_idx);
		if (err)
			goto out_err;
		filt->range = true;
	}

	fstr += strspn(fstr, " ");

	if (*fstr == '@') {
		fstr += 1;
		err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
		if (err)
			goto out_err;
	}

	fstr += strspn(fstr, " ,");

	*filter_inp += fstr - filt->str;

	return 0;

out_err:
	addr_filter__free_str(filt);

	return err;
}

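/*
 * Example bare filters accepted by the parser above (symbol names and the
 * file path are illustrative): "filter main / 0x100 @ /bin/ls" traces a
 * 0x100-byte range starting at main in /bin/ls, "start func_a , stop
 * func_b" programs a start/stop pair of kernel filters, and "tracestop
 * func_c" stops tracing for the extent of func_c.
 */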
int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter)
{
	struct addr_filter *filt;
	const char *fstr = filter;
	int err;

	while (*fstr) {
		filt = addr_filter__new();
		if (!filt) {
			addr_filters__exit(filts);
			return -ENOMEM;
		}
		err = parse_one_filter(filt, &fstr);
		if (err) {
			addr_filter__free(filt);
			addr_filters__exit(filts);
			return err;
		}
		addr_filters__add(filts, filt);
	}

	return 0;
}

struct sym_args {
	const char	*name;
	u64		start;
	u64		size;
	int		idx;
	int		cnt;
	bool		started;
	bool		global;
	bool		selected;
	bool		duplicate;
	bool		near;
};

static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
	/* A function with the same name, and global or the n'th found or any */
	return symbol_type__is_a(type, MAP__FUNCTION) &&
	       !strcmp(name, args->name) &&
	       ((args->global && isupper(type)) ||
		(args->selected && ++(args->cnt) == args->idx) ||
		(!args->global && !args->selected));
}

static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (args->started) {
		if (!args->size)
			args->size = start - args->start;
		if (args->selected) {
			if (args->size)
				return 1;
		} else if (kern_sym_match(args, name, type)) {
			args->duplicate = true;
			return 1;
		}
	} else if (kern_sym_match(args, name, type)) {
		args->started = true;
		args->start = start;
	}

	return 0;
}

static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (kern_sym_match(args, name, type)) {
		pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
		       ++args->cnt, start, type, name);
		args->near = true;
	} else if (args->near) {
		args->near = false;
		pr_err("\t\twhich is near\t\t%s\n", name);
	}

	return 0;
}

static int sym_not_found_error(const char *sym_name, int idx)
{
	if (idx > 0) {
		pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
		       idx, sym_name);
	} else if (!idx) {
		pr_err("Global symbol '%s' not found.\n", sym_name);
	} else {
		pr_err("Symbol '%s' not found.\n", sym_name);
	}
	pr_err("Note that symbols must be functions.\n");

	return -EINVAL;
}

static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
{
	struct sym_args args = {
		.name = sym_name,
		.idx = idx,
		.global = !idx,
		.selected = idx > 0,
	};
	int err;

	*start = 0;
	*size = 0;

	err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
	if (err < 0) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	if (args.duplicate) {
		pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
		args.cnt = 0;
		kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
		pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
		       sym_name);
		pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
		return -EINVAL;
	}

	if (!args.started) {
		pr_err("Kernel symbol lookup: ");
		return sym_not_found_error(sym_name, idx);
	}

	*start = args.start;
	*size = args.size;

	return 0;
}

static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
			       char type, u64 start)
{
	struct sym_args *args = arg;

	if (!symbol_type__is_a(type, MAP__FUNCTION))
		return 0;

	if (!args->started) {
		args->started = true;
		args->start = start;
	}
	/* Don't know exactly where the kernel ends, so we add a page */
	args->size = round_up(start, page_size) + page_size - args->start;

	return 0;
}

static int addr_filter__entire_kernel(struct addr_filter *filt)
{
	struct sym_args args = { .started = false };
	int err;

	err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
	if (err < 0 || !args.started) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err < 0 ? err : -EINVAL;
	}

	filt->addr = args.start;
	filt->size = args.size;

	return 0;
}

static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
{
	if (start + size >= filt->addr)
		return 0;

	if (filt->sym_from) {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
		       filt->sym_to, start, filt->sym_from, filt->addr);
	} else {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64"\n",
		       filt->sym_to, start, filt->addr);
	}

	return -EINVAL;
}

static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
{
	bool no_size = false;
	u64 start, size;
	int err;

	if (symbol_conf.kptr_restrict) {
		pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*"))
		return addr_filter__entire_kernel(filt);

	if (filt->sym_from) {
		err = find_kern_sym(filt->sym_from, &start, &size,
				    filt->sym_from_idx);
		if (err)
			return err;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to) {
			filt->size = size;
			no_size = !size;
		}
	}

	if (filt->sym_to) {
		err = find_kern_sym(filt->sym_to, &start, &size,
				    filt->sym_to_idx);
		if (err)
			return err;

		err = check_end_after_start(filt, start, size);
		if (err)
			return err;
		filt->size = start + size - filt->addr;
		no_size = !size;
	}

	/* The very last symbol in kallsyms does not imply a particular size */
	if (no_size) {
		pr_err("Cannot determine size of symbol '%s'\n",
		       filt->sym_to ? filt->sym_to : filt->sym_from);
		return -EINVAL;
	}

	return 0;
}

static struct dso *load_dso(const char *name)
{
	struct map *map;
	struct dso *dso;

	map = dso__new_map(name);
	if (!map)
		return NULL;

	map__load(map);

	dso = dso__get(map->dso);

	map__put(map);

	return dso;
}

static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
			  int idx)
{
	/* Same name, and global or the n'th found or any */
	return !arch__compare_symbol_names(name, sym->name) &&
	       ((!idx && sym->binding == STB_GLOBAL) ||
		(idx > 0 && ++*cnt == idx) ||
		idx < 0);
}

static void print_duplicate_syms(struct dso *dso, const char *sym_name)
{
	struct symbol *sym;
	bool near = false;
	int cnt = 0;

	pr_err("Multiple symbols with name '%s'\n", sym_name);

	sym = dso__first_symbol(dso, MAP__FUNCTION);
	while (sym) {
		if (dso_sym_match(sym, sym_name, &cnt, -1)) {
			pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
			       ++cnt, sym->start,
			       sym->binding == STB_GLOBAL ? 'g' :
			       sym->binding == STB_LOCAL ? 'l' : 'w',
			       sym->name);
			near = true;
		} else if (near) {
			near = false;
			pr_err("\t\twhich is near\t\t%s\n", sym->name);
		}
		sym = dso__next_symbol(sym);
	}

	pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
	       sym_name);
	pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
}

static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
			u64 *size, int idx)
{
	struct symbol *sym;
	int cnt = 0;

	*start = 0;
	*size = 0;

	sym = dso__first_symbol(dso, MAP__FUNCTION);
	while (sym) {
		if (*start) {
			if (!*size)
				*size = sym->start - *start;
			if (idx > 0) {
				if (*size)
					return 1;
			} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
				print_duplicate_syms(dso, sym_name);
				return -EINVAL;
			}
		} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
			*start = sym->start;
			*size = sym->end - sym->start;
		}
		sym = dso__next_symbol(sym);
	}

	if (!*start)
		return sym_not_found_error(sym_name, idx);

	return 0;
}

static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
{
	struct symbol *first_sym = dso__first_symbol(dso, MAP__FUNCTION);
	struct symbol *last_sym = dso__last_symbol(dso, MAP__FUNCTION);

	if (!first_sym || !last_sym) {
		pr_err("Failed to determine filter for %s\nNo symbols found.\n",
		       filt->filename);
		return -EINVAL;
	}

	filt->addr = first_sym->start;
	filt->size = last_sym->end - first_sym->start;

	return 0;
}

static int addr_filter__resolve_syms(struct addr_filter *filt)
{
	u64 start, size;
	struct dso *dso;
	int err = 0;

	if (!filt->sym_from && !filt->sym_to)
		return 0;

	if (!filt->filename)
		return addr_filter__resolve_kernel_syms(filt);

	dso = load_dso(filt->filename);
	if (!dso) {
		pr_err("Failed to load symbols from: %s\n", filt->filename);
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
		err = addr_filter__entire_dso(filt, dso);
		goto put_dso;
	}

	if (filt->sym_from) {
		err = find_dso_sym(dso, filt->sym_from, &start, &size,
				   filt->sym_from_idx);
		if (err)
			goto put_dso;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to)
			filt->size = size;
	}

	if (filt->sym_to) {
		err = find_dso_sym(dso, filt->sym_to, &start, &size,
				   filt->sym_to_idx);
		if (err)
			goto put_dso;

		err = check_end_after_start(filt, start, size);
		if (err)
			goto put_dso;

		filt->size = start + size - filt->addr;
	}

put_dso:
	dso__put(dso);

	return err;
}

static char *addr_filter__to_str(struct addr_filter *filt)
{
	char filename_buf[PATH_MAX];
	const char *at = "";
	const char *fn = "";
	char *filter;
	int err;

	if (filt->filename) {
		at = "@";
		fn = realpath(filt->filename, filename_buf);
		if (!fn)
			return NULL;
	}

	if (filt->range) {
		err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
			       filt->action, filt->addr, filt->size, at, fn);
	} else {
		err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
			       filt->action, filt->addr, at, fn);
	}

	return err < 0 ? NULL : filter;
}

static int parse_addr_filter(struct perf_evsel *evsel, const char *filter,
			     int max_nr)
{
	struct addr_filters filts;
	struct addr_filter *filt;
	int err;

	addr_filters__init(&filts);

	err = addr_filters__parse_bare_filter(&filts, filter);
	if (err)
		goto out_exit;

	if (filts.cnt > max_nr) {
		pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
		       filts.cnt, max_nr);
		err = -EINVAL;
		goto out_exit;
	}

	list_for_each_entry(filt, &filts.head, list) {
		char *new_filter;

		err = addr_filter__resolve_syms(filt);
		if (err)
			goto out_exit;

		new_filter = addr_filter__to_str(filt);
		if (!new_filter) {
			err = -ENOMEM;
			goto out_exit;
		}

		if (perf_evsel__append_addr_filter(evsel, new_filter)) {
			err = -ENOMEM;
			goto out_exit;
		}
	}

out_exit:
	addr_filters__exit(&filts);

	if (err) {
		pr_err("Failed to parse address filter: '%s'\n", filter);
		pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
		pr_err("Where multiple filters are separated by space or comma.\n");
	}

	return err;
}

static struct perf_pmu *perf_evsel__find_pmu(struct perf_evsel *evsel)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		if (pmu->type == evsel->attr.type)
			break;
	}

	return pmu;
}

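/*
 * The number of available hardware address filters is advertised by the
 * PMU via a 'nr_addr_filters' file in its sysfs directory, e.g. (path
 * illustrative) /sys/bus/event_source/devices/intel_pt/nr_addr_filters.
 */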
static int perf_evsel__nr_addr_filter(struct perf_evsel *evsel)
{
	struct perf_pmu *pmu = perf_evsel__find_pmu(evsel);
	int nr_addr_filters = 0;

	if (!pmu)
		return 0;

	perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);

	return nr_addr_filters;
}

int auxtrace_parse_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	char *filter;
	int err, max_nr;

	evlist__for_each_entry(evlist, evsel) {
		filter = evsel->filter;
		max_nr = perf_evsel__nr_addr_filter(evsel);
		if (!filter || !max_nr)
			continue;
		evsel->filter = NULL;
		err = parse_addr_filter(evsel, filter, max_nr);
		free(filter);
		if (err)
			return err;
		pr_debug("Address filter: %s\n", evsel->filter);
	}

	return 0;
}