1#include "util.h"
2#include <sys/types.h>
3#include <byteswap.h>
4#include <unistd.h>
5#include <stdio.h>
6#include <stdlib.h>
7#include <linux/list.h>
8#include <linux/kernel.h>
9#include <linux/bitops.h>
10#include <sys/utsname.h>
11
12#include "evlist.h"
13#include "evsel.h"
14#include "header.h"
15#include "../perf.h"
16#include "trace-event.h"
17#include "session.h"
18#include "symbol.h"
19#include "debug.h"
20#include "cpumap.h"
21#include "pmu.h"
22#include "vdso.h"
23#include "strbuf.h"
24#include "build-id.h"
25#include "data.h"
26#include <api/fs/fs.h>
27#include "asm/bug.h"
28
/*
 * magic2 = "PERFILE2"
 * must be a numerical value so that the endianness of the host
 * determines its memory layout. That way we are able to detect
 * endianness when reading the perf.data file back.
 *
 * we also check for the legacy (PERFFILE) format.
 */
38static const char *__perf_magic1 = "PERFFILE";
39static const u64 __perf_magic2 = 0x32454c4946524550ULL;
40static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
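/*
 * A reader can tell a byte-swapped file apart by comparing the on-disk
 * magic against both constants above: a match on __perf_magic2_sw means
 * the file was written on a host of the opposite endianness, so the
 * header fields must be byte-swapped before use.
 */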
41
42#define PERF_MAGIC __perf_magic2
43
44struct perf_file_attr {
45 struct perf_event_attr attr;
46 struct perf_file_section ids;
47};
48
49void perf_header__set_feat(struct perf_header *header, int feat)
50{
51 set_bit(feat, header->adds_features);
52}
53
54void perf_header__clear_feat(struct perf_header *header, int feat)
55{
56 clear_bit(feat, header->adds_features);
57}
58
59bool perf_header__has_feat(const struct perf_header *header, int feat)
60{
61 return test_bit(feat, header->adds_features);
62}
63
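/*
 * Write the full buffer, retrying on short writes; returns 0 on success
 * or -errno on failure.
 */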
64static int do_write(int fd, const void *buf, size_t size)
65{
66 while (size) {
67 int ret = write(fd, buf, size);
68
69 if (ret < 0)
70 return -errno;
71
72 size -= ret;
73 buf += ret;
74 }
75
76 return 0;
77}
78
79int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
80{
81 static const char zero_buf[NAME_ALIGN];
82 int err = do_write(fd, bf, count);
83
84 if (!err)
85 err = do_write(fd, zero_buf, count_aligned - count);
86
87 return err;
88}
89
90#define string_size(str) \
91 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
92
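/*
 * On-disk string format: a u32 length (strlen + 1, rounded up to
 * NAME_ALIGN) followed by the NUL-terminated string padded with zeroes
 * to that length. do_read_string() below is the matching reader.
 */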
93static int do_write_string(int fd, const char *str)
94{
95 u32 len, olen;
96 int ret;
97
98 olen = strlen(str) + 1;
99 len = PERF_ALIGN(olen, NAME_ALIGN);
100
101 /* write len, incl. \0 */
102 ret = do_write(fd, &len, sizeof(len));
103 if (ret < 0)
104 return ret;
105
106 return write_padded(fd, str, olen, len);
107}
108
109static char *do_read_string(int fd, struct perf_header *ph)
110{
111 ssize_t sz, ret;
112 u32 len;
113 char *buf;
114
115 sz = readn(fd, &len, sizeof(len));
116 if (sz < (ssize_t)sizeof(len))
117 return NULL;
118
119 if (ph->needs_swap)
120 len = bswap_32(len);
121
122 buf = malloc(len);
123 if (!buf)
124 return NULL;
125
126 ret = readn(fd, buf, len);
127 if (ret == (ssize_t)len) {
128 /*
129 * strings are padded by zeroes
130 * thus the actual strlen of buf
131 * may be less than len
132 */
133 return buf;
134 }
135
136 free(buf);
137 return NULL;
138}
139
140static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
141 struct perf_evlist *evlist)
142{
143 return read_tracing_data(fd, &evlist->entries);
144}
145
146
147static int write_build_id(int fd, struct perf_header *h,
148 struct perf_evlist *evlist __maybe_unused)
149{
150 struct perf_session *session;
151 int err;
152
153 session = container_of(h, struct perf_session, header);
154
155 if (!perf_session__read_build_ids(session, true))
156 return -1;
157
158 err = perf_session__write_buildid_table(session, fd);
159 if (err < 0) {
160 pr_debug("failed to write buildid table\n");
161 return err;
162 }
163 perf_session__cache_build_ids(session);
164
165 return 0;
166}
167
168static int write_hostname(int fd, struct perf_header *h __maybe_unused,
169 struct perf_evlist *evlist __maybe_unused)
170{
171 struct utsname uts;
172 int ret;
173
174 ret = uname(&uts);
175 if (ret < 0)
176 return -1;
177
178 return do_write_string(fd, uts.nodename);
179}
180
181static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
182 struct perf_evlist *evlist __maybe_unused)
183{
184 struct utsname uts;
185 int ret;
186
187 ret = uname(&uts);
188 if (ret < 0)
189 return -1;
190
191 return do_write_string(fd, uts.release);
192}
193
194static int write_arch(int fd, struct perf_header *h __maybe_unused,
195 struct perf_evlist *evlist __maybe_unused)
196{
197 struct utsname uts;
198 int ret;
199
200 ret = uname(&uts);
201 if (ret < 0)
202 return -1;
203
204 return do_write_string(fd, uts.machine);
205}
206
207static int write_version(int fd, struct perf_header *h __maybe_unused,
208 struct perf_evlist *evlist __maybe_unused)
209{
210 return do_write_string(fd, perf_version_string);
211}
212
213static int __write_cpudesc(int fd, const char *cpuinfo_proc)
214{
215 FILE *file;
216 char *buf = NULL;
217 char *s, *p;
218 const char *search = cpuinfo_proc;
219 size_t len = 0;
220 int ret = -1;
221
222 if (!search)
223 return -1;
224
225 file = fopen("/proc/cpuinfo", "r");
226 if (!file)
227 return -1;
228
229 while (getline(&buf, &len, file) > 0) {
230 ret = strncmp(buf, search, strlen(search));
231 if (!ret)
232 break;
233 }
234
235 if (ret) {
236 ret = -1;
237 goto done;
238 }
239
240 s = buf;
241
242 p = strchr(buf, ':');
243 if (p && *(p+1) == ' ' && *(p+2))
244 s = p + 2;
245 p = strchr(s, '\n');
246 if (p)
247 *p = '\0';
248
249 /* squash extra space characters (branding string) */
250 p = s;
251 while (*p) {
252 if (isspace(*p)) {
253 char *r = p + 1;
254 char *q = r;
255 *p = ' ';
256 while (*q && isspace(*q))
257 q++;
258 if (q != (p+1))
259 while ((*r++ = *q++));
260 }
261 p++;
262 }
263 ret = do_write_string(fd, s);
264done:
265 free(buf);
266 fclose(file);
267 return ret;
268}
269
270static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
271 struct perf_evlist *evlist __maybe_unused)
272{
273#ifndef CPUINFO_PROC
274#define CPUINFO_PROC {"model name", }
275#endif
276 const char *cpuinfo_procs[] = CPUINFO_PROC;
277 unsigned int i;
278
279 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
280 int ret;
281 ret = __write_cpudesc(fd, cpuinfo_procs[i]);
282 if (ret >= 0)
283 return ret;
284 }
285 return -1;
286}
287
288
289static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
290 struct perf_evlist *evlist __maybe_unused)
291{
292 long nr;
293 u32 nrc, nra;
294 int ret;
295
296 nr = sysconf(_SC_NPROCESSORS_CONF);
297 if (nr < 0)
298 return -1;
299
300 nrc = (u32)(nr & UINT_MAX);
301
302 nr = sysconf(_SC_NPROCESSORS_ONLN);
303 if (nr < 0)
304 return -1;
305
306 nra = (u32)(nr & UINT_MAX);
307
308 ret = do_write(fd, &nrc, sizeof(nrc));
309 if (ret < 0)
310 return ret;
311
312 return do_write(fd, &nra, sizeof(nra));
313}
314
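/*
 * HEADER_EVENT_DESC layout as written below: u32 number of events,
 * u32 sizeof(perf_event_attr), then for each event the attr struct,
 * a u32 count of ids, the event name string and the array of u64 ids.
 * read_event_desc() parses this back.
 */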
315static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
316 struct perf_evlist *evlist)
317{
318 struct perf_evsel *evsel;
319 u32 nre, nri, sz;
320 int ret;
321
322 nre = evlist->nr_entries;
323
324 /*
325 * write number of events
326 */
327 ret = do_write(fd, &nre, sizeof(nre));
328 if (ret < 0)
329 return ret;
330
331 /*
332 * size of perf_event_attr struct
333 */
334 sz = (u32)sizeof(evsel->attr);
335 ret = do_write(fd, &sz, sizeof(sz));
336 if (ret < 0)
337 return ret;
338
339 evlist__for_each(evlist, evsel) {
340 ret = do_write(fd, &evsel->attr, sz);
341 if (ret < 0)
342 return ret;
		/*
		 * write the number of unique ids for this event;
		 * there is one id per instance of an event.
		 *
		 * copy into nri to be independent of the type
		 * of evsel->ids.
		 */
350 nri = evsel->ids;
351 ret = do_write(fd, &nri, sizeof(nri));
352 if (ret < 0)
353 return ret;
354
355 /*
356 * write event string as passed on cmdline
357 */
358 ret = do_write_string(fd, perf_evsel__name(evsel));
359 if (ret < 0)
360 return ret;
361 /*
362 * write unique ids for this event
363 */
364 ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
365 if (ret < 0)
366 return ret;
367 }
368 return 0;
369}
370
371static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
372 struct perf_evlist *evlist __maybe_unused)
373{
374 char buf[MAXPATHLEN];
375 char proc[32];
376 u32 n;
377 int i, ret;
378
	/*
	 * actual path to the perf binary
	 */
382 sprintf(proc, "/proc/%d/exe", getpid());
383 ret = readlink(proc, buf, sizeof(buf));
384 if (ret <= 0)
385 return -1;
386
387 /* readlink() does not add null termination */
388 buf[ret] = '\0';
389
390 /* account for binary path */
391 n = perf_env.nr_cmdline + 1;
392
393 ret = do_write(fd, &n, sizeof(n));
394 if (ret < 0)
395 return ret;
396
397 ret = do_write_string(fd, buf);
398 if (ret < 0)
399 return ret;
400
401 for (i = 0 ; i < perf_env.nr_cmdline; i++) {
402 ret = do_write_string(fd, perf_env.cmdline_argv[i]);
403 if (ret < 0)
404 return ret;
405 }
406 return 0;
407}
408
409#define CORE_SIB_FMT \
410 "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
411#define THRD_SIB_FMT \
412 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
413
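/*
 * cpu_topo collects the distinct core_siblings_list and
 * thread_siblings_list strings found under sysfs for all configured CPUs;
 * duplicates are skipped so each unique sibling mask is stored once.
 */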
414struct cpu_topo {
415 u32 cpu_nr;
416 u32 core_sib;
417 u32 thread_sib;
418 char **core_siblings;
419 char **thread_siblings;
420};
421
422static int build_cpu_topo(struct cpu_topo *tp, int cpu)
423{
424 FILE *fp;
425 char filename[MAXPATHLEN];
426 char *buf = NULL, *p;
427 size_t len = 0;
428 ssize_t sret;
429 u32 i = 0;
430 int ret = -1;
431
432 sprintf(filename, CORE_SIB_FMT, cpu);
433 fp = fopen(filename, "r");
434 if (!fp)
435 goto try_threads;
436
437 sret = getline(&buf, &len, fp);
438 fclose(fp);
439 if (sret <= 0)
440 goto try_threads;
441
442 p = strchr(buf, '\n');
443 if (p)
444 *p = '\0';
445
446 for (i = 0; i < tp->core_sib; i++) {
447 if (!strcmp(buf, tp->core_siblings[i]))
448 break;
449 }
450 if (i == tp->core_sib) {
451 tp->core_siblings[i] = buf;
452 tp->core_sib++;
453 buf = NULL;
454 len = 0;
455 }
456 ret = 0;
457
458try_threads:
459 sprintf(filename, THRD_SIB_FMT, cpu);
460 fp = fopen(filename, "r");
461 if (!fp)
462 goto done;
463
464 if (getline(&buf, &len, fp) <= 0)
465 goto done;
466
467 p = strchr(buf, '\n');
468 if (p)
469 *p = '\0';
470
471 for (i = 0; i < tp->thread_sib; i++) {
472 if (!strcmp(buf, tp->thread_siblings[i]))
473 break;
474 }
475 if (i == tp->thread_sib) {
476 tp->thread_siblings[i] = buf;
477 tp->thread_sib++;
478 buf = NULL;
479 }
480 ret = 0;
481done:
	if (fp)
483 fclose(fp);
484 free(buf);
485 return ret;
486}
487
488static void free_cpu_topo(struct cpu_topo *tp)
489{
490 u32 i;
491
492 if (!tp)
493 return;
494
495 for (i = 0 ; i < tp->core_sib; i++)
496 zfree(&tp->core_siblings[i]);
497
498 for (i = 0 ; i < tp->thread_sib; i++)
499 zfree(&tp->thread_siblings[i]);
500
501 free(tp);
502}
503
504static struct cpu_topo *build_cpu_topology(void)
505{
506 struct cpu_topo *tp;
507 void *addr;
508 u32 nr, i;
509 size_t sz;
510 long ncpus;
511 int ret = -1;
512
513 ncpus = sysconf(_SC_NPROCESSORS_CONF);
514 if (ncpus < 0)
515 return NULL;
516
517 nr = (u32)(ncpus & UINT_MAX);
518
519 sz = nr * sizeof(char *);
520
521 addr = calloc(1, sizeof(*tp) + 2 * sz);
522 if (!addr)
523 return NULL;
524
525 tp = addr;
526 tp->cpu_nr = nr;
527 addr += sizeof(*tp);
528 tp->core_siblings = addr;
529 addr += sz;
530 tp->thread_siblings = addr;
531
532 for (i = 0; i < nr; i++) {
533 ret = build_cpu_topo(tp, i);
534 if (ret < 0)
535 break;
536 }
537 if (ret) {
538 free_cpu_topo(tp);
539 tp = NULL;
540 }
541 return tp;
542}
543
544static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
545 struct perf_evlist *evlist __maybe_unused)
546{
547 struct cpu_topo *tp;
548 u32 i;
549 int ret, j;
550
551 tp = build_cpu_topology();
552 if (!tp)
553 return -1;
554
555 ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
556 if (ret < 0)
557 goto done;
558
559 for (i = 0; i < tp->core_sib; i++) {
560 ret = do_write_string(fd, tp->core_siblings[i]);
561 if (ret < 0)
562 goto done;
563 }
564 ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
565 if (ret < 0)
566 goto done;
567
568 for (i = 0; i < tp->thread_sib; i++) {
569 ret = do_write_string(fd, tp->thread_siblings[i]);
570 if (ret < 0)
571 break;
572 }
573
574 ret = perf_env__read_cpu_topology_map(&perf_env);
575 if (ret < 0)
576 goto done;
577
578 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
579 ret = do_write(fd, &perf_env.cpu[j].core_id,
580 sizeof(perf_env.cpu[j].core_id));
581 if (ret < 0)
582 return ret;
583 ret = do_write(fd, &perf_env.cpu[j].socket_id,
584 sizeof(perf_env.cpu[j].socket_id));
585 if (ret < 0)
586 return ret;
587 }
588done:
589 free_cpu_topo(tp);
590 return ret;
591}
592
593
594
595static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
596 struct perf_evlist *evlist __maybe_unused)
597{
598 char *buf = NULL;
599 FILE *fp;
600 size_t len = 0;
601 int ret = -1, n;
602 uint64_t mem;
603
604 fp = fopen("/proc/meminfo", "r");
605 if (!fp)
606 return -1;
607
608 while (getline(&buf, &len, fp) > 0) {
609 ret = strncmp(buf, "MemTotal:", 9);
610 if (!ret)
611 break;
612 }
613 if (!ret) {
614 n = sscanf(buf, "%*s %"PRIu64, &mem);
615 if (n == 1)
616 ret = do_write(fd, &mem, sizeof(mem));
617 } else
618 ret = -1;
619 free(buf);
620 fclose(fp);
621 return ret;
622}
623
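/*
 * For one NUMA node, write MemTotal and MemFree (both u64, in kB as read
 * from the node's meminfo) followed by the node's cpulist string.
 */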
624static int write_topo_node(int fd, int node)
625{
626 char str[MAXPATHLEN];
627 char field[32];
628 char *buf = NULL, *p;
629 size_t len = 0;
630 FILE *fp;
631 u64 mem_total, mem_free, mem;
632 int ret = -1;
633
634 sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
635 fp = fopen(str, "r");
636 if (!fp)
637 return -1;
638
639 while (getline(&buf, &len, fp) > 0) {
640 /* skip over invalid lines */
641 if (!strchr(buf, ':'))
642 continue;
643 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
644 goto done;
645 if (!strcmp(field, "MemTotal:"))
646 mem_total = mem;
647 if (!strcmp(field, "MemFree:"))
648 mem_free = mem;
649 }
650
651 fclose(fp);
652 fp = NULL;
653
654 ret = do_write(fd, &mem_total, sizeof(u64));
655 if (ret)
656 goto done;
657
658 ret = do_write(fd, &mem_free, sizeof(u64));
659 if (ret)
660 goto done;
661
662 ret = -1;
663 sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
664
665 fp = fopen(str, "r");
666 if (!fp)
667 goto done;
668
669 if (getline(&buf, &len, fp) <= 0)
670 goto done;
671
672 p = strchr(buf, '\n');
673 if (p)
674 *p = '\0';
675
676 ret = do_write_string(fd, buf);
677done:
678 free(buf);
679 if (fp)
680 fclose(fp);
681 return ret;
682}
683
684static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
685 struct perf_evlist *evlist __maybe_unused)
686{
687 char *buf = NULL;
688 size_t len = 0;
689 FILE *fp;
690 struct cpu_map *node_map = NULL;
691 char *c;
692 u32 nr, i, j;
693 int ret = -1;
694
695 fp = fopen("/sys/devices/system/node/online", "r");
696 if (!fp)
697 return -1;
698
699 if (getline(&buf, &len, fp) <= 0)
700 goto done;
701
702 c = strchr(buf, '\n');
703 if (c)
704 *c = '\0';
705
706 node_map = cpu_map__new(buf);
707 if (!node_map)
708 goto done;
709
710 nr = (u32)node_map->nr;
711
712 ret = do_write(fd, &nr, sizeof(nr));
713 if (ret < 0)
714 goto done;
715
716 for (i = 0; i < nr; i++) {
717 j = (u32)node_map->map[i];
718 ret = do_write(fd, &j, sizeof(j));
719 if (ret < 0)
720 break;
721
722 ret = write_topo_node(fd, i);
723 if (ret < 0)
724 break;
725 }
726done:
727 free(buf);
728 fclose(fp);
729 cpu_map__put(node_map);
730 return ret;
731}
732
733/*
734 * File format:
735 *
736 * struct pmu_mappings {
737 * u32 pmu_num;
738 * struct pmu_map {
739 * u32 type;
740 * char name[];
741 * }[pmu_num];
742 * };
743 */
744
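/*
 * The writer below does not know pmu_num up front, so it writes a zero
 * placeholder, emits one (type, name) pair per named PMU while scanning,
 * and then patches the real count back in with pwrite().
 */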
745static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
746 struct perf_evlist *evlist __maybe_unused)
747{
748 struct perf_pmu *pmu = NULL;
749 off_t offset = lseek(fd, 0, SEEK_CUR);
750 __u32 pmu_num = 0;
751 int ret;
752
753 /* write real pmu_num later */
754 ret = do_write(fd, &pmu_num, sizeof(pmu_num));
755 if (ret < 0)
756 return ret;
757
758 while ((pmu = perf_pmu__scan(pmu))) {
759 if (!pmu->name)
760 continue;
761 pmu_num++;
762
763 ret = do_write(fd, &pmu->type, sizeof(pmu->type));
764 if (ret < 0)
765 return ret;
766
767 ret = do_write_string(fd, pmu->name);
768 if (ret < 0)
769 return ret;
770 }
771
772 if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
773 /* discard all */
774 lseek(fd, offset, SEEK_SET);
775 return -1;
776 }
777
778 return 0;
779}
780
781/*
782 * File format:
783 *
784 * struct group_descs {
785 * u32 nr_groups;
786 * struct group_desc {
787 * char name[];
788 * u32 leader_idx;
789 * u32 nr_members;
790 * }[nr_groups];
791 * };
792 */
793static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
794 struct perf_evlist *evlist)
795{
796 u32 nr_groups = evlist->nr_groups;
797 struct perf_evsel *evsel;
798 int ret;
799
800 ret = do_write(fd, &nr_groups, sizeof(nr_groups));
801 if (ret < 0)
802 return ret;
803
804 evlist__for_each(evlist, evsel) {
805 if (perf_evsel__is_group_leader(evsel) &&
806 evsel->nr_members > 1) {
807 const char *name = evsel->group_name ?: "{anon_group}";
808 u32 leader_idx = evsel->idx;
809 u32 nr_members = evsel->nr_members;
810
811 ret = do_write_string(fd, name);
812 if (ret < 0)
813 return ret;
814
815 ret = do_write(fd, &leader_idx, sizeof(leader_idx));
816 if (ret < 0)
817 return ret;
818
819 ret = do_write(fd, &nr_members, sizeof(nr_members));
820 if (ret < 0)
821 return ret;
822 }
823 }
824 return 0;
825}
826
827/*
828 * default get_cpuid(): nothing gets recorded
829 * actual implementation must be in arch/$(ARCH)/util/header.c
830 */
831int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused,
832 size_t sz __maybe_unused)
833{
834 return -1;
835}
836
837static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
838 struct perf_evlist *evlist __maybe_unused)
839{
840 char buffer[64];
841 int ret;
842
843 ret = get_cpuid(buffer, sizeof(buffer));
844 if (!ret)
845 goto write_it;
846
847 return -1;
848write_it:
849 return do_write_string(fd, buffer);
850}
851
852static int write_branch_stack(int fd __maybe_unused,
853 struct perf_header *h __maybe_unused,
854 struct perf_evlist *evlist __maybe_unused)
855{
856 return 0;
857}
858
859static int write_auxtrace(int fd, struct perf_header *h,
860 struct perf_evlist *evlist __maybe_unused)
861{
862 struct perf_session *session;
863 int err;
864
865 session = container_of(h, struct perf_session, header);
866
867 err = auxtrace_index__write(fd, &session->auxtrace_index);
868 if (err < 0)
869 pr_err("Failed to write auxtrace index\n");
870 return err;
871}
872
873static int cpu_cache_level__sort(const void *a, const void *b)
874{
875 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
876 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
877
878 return cache_a->level - cache_b->level;
879}
880
881static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
882{
883 if (a->level != b->level)
884 return false;
885
886 if (a->line_size != b->line_size)
887 return false;
888
889 if (a->sets != b->sets)
890 return false;
891
892 if (a->ways != b->ways)
893 return false;
894
895 if (strcmp(a->type, b->type))
896 return false;
897
898 if (strcmp(a->size, b->size))
899 return false;
900
901 if (strcmp(a->map, b->map))
902 return false;
903
904 return true;
905}
906
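/*
 * Read one cache level description from
 * /sys/devices/system/cpu/cpuN/cache/indexL/. Returns 1 if the index
 * directory does not exist (no more levels), -1 on read failure and 0 on
 * success; the type, size and map strings are trimmed of trailing
 * whitespace.
 */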
907static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
908{
909 char path[PATH_MAX], file[PATH_MAX];
910 struct stat st;
911 size_t len;
912
913 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
914 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
915
916 if (stat(file, &st))
917 return 1;
918
919 scnprintf(file, PATH_MAX, "%s/level", path);
920 if (sysfs__read_int(file, (int *) &cache->level))
921 return -1;
922
923 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
924 if (sysfs__read_int(file, (int *) &cache->line_size))
925 return -1;
926
927 scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
928 if (sysfs__read_int(file, (int *) &cache->sets))
929 return -1;
930
931 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
932 if (sysfs__read_int(file, (int *) &cache->ways))
933 return -1;
934
935 scnprintf(file, PATH_MAX, "%s/type", path);
936 if (sysfs__read_str(file, &cache->type, &len))
937 return -1;
938
939 cache->type[len] = 0;
940 cache->type = rtrim(cache->type);
941
942 scnprintf(file, PATH_MAX, "%s/size", path);
943 if (sysfs__read_str(file, &cache->size, &len)) {
944 free(cache->type);
945 return -1;
946 }
947
948 cache->size[len] = 0;
949 cache->size = rtrim(cache->size);
950
951 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
952 if (sysfs__read_str(file, &cache->map, &len)) {
953 free(cache->map);
954 free(cache->type);
955 return -1;
956 }
957
958 cache->map[len] = 0;
959 cache->map = rtrim(cache->map);
960 return 0;
961}
962
963static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
964{
965 fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
966}
967
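/*
 * Probe up to 10 cache index levels for every configured CPU and keep only
 * the unique descriptions, so caches shared between CPUs appear once.
 */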
968static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
969{
970 u32 i, cnt = 0;
971 long ncpus;
972 u32 nr, cpu;
973 u16 level;
974
975 ncpus = sysconf(_SC_NPROCESSORS_CONF);
976 if (ncpus < 0)
977 return -1;
978
979 nr = (u32)(ncpus & UINT_MAX);
980
981 for (cpu = 0; cpu < nr; cpu++) {
982 for (level = 0; level < 10; level++) {
983 struct cpu_cache_level c;
984 int err;
985
986 err = cpu_cache_level__read(&c, cpu, level);
987 if (err < 0)
988 return err;
989
990 if (err == 1)
991 break;
992
993 for (i = 0; i < cnt; i++) {
994 if (cpu_cache_level__cmp(&c, &caches[i]))
995 break;
996 }
997
998 if (i == cnt)
999 caches[cnt++] = c;
1000 else
1001 cpu_cache_level__free(&c);
1002
1003 if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
1004 goto out;
1005 }
1006 }
1007 out:
1008 *cntp = cnt;
1009 return 0;
1010}
1011
1012#define MAX_CACHES 2000
1013
1014static int write_cache(int fd, struct perf_header *h __maybe_unused,
1015 struct perf_evlist *evlist __maybe_unused)
1016{
1017 struct cpu_cache_level caches[MAX_CACHES];
1018 u32 cnt = 0, i, version = 1;
1019 int ret;
1020
1021 ret = build_caches(caches, MAX_CACHES, &cnt);
1022 if (ret)
1023 goto out;
1024
1025 qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1026
1027 ret = do_write(fd, &version, sizeof(u32));
1028 if (ret < 0)
1029 goto out;
1030
1031 ret = do_write(fd, &cnt, sizeof(u32));
1032 if (ret < 0)
1033 goto out;
1034
1035 for (i = 0; i < cnt; i++) {
1036 struct cpu_cache_level *c = &caches[i];
1037
1038 #define _W(v) \
1039 ret = do_write(fd, &c->v, sizeof(u32)); \
1040 if (ret < 0) \
1041 goto out;
1042
1043 _W(level)
1044 _W(line_size)
1045 _W(sets)
1046 _W(ways)
1047 #undef _W
1048
1049 #define _W(v) \
1050 ret = do_write_string(fd, (const char *) c->v); \
1051 if (ret < 0) \
1052 goto out;
1053
1054 _W(type)
1055 _W(size)
1056 _W(map)
1057 #undef _W
1058 }
1059
1060out:
1061 for (i = 0; i < cnt; i++)
1062 cpu_cache_level__free(&caches[i]);
1063 return ret;
1064}
1065
1066static int write_stat(int fd __maybe_unused,
1067 struct perf_header *h __maybe_unused,
1068 struct perf_evlist *evlist __maybe_unused)
1069{
1070 return 0;
1071}
1072
1073static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
1074 FILE *fp)
1075{
1076 fprintf(fp, "# hostname : %s\n", ph->env.hostname);
1077}
1078
1079static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
1080 FILE *fp)
1081{
1082 fprintf(fp, "# os release : %s\n", ph->env.os_release);
1083}
1084
1085static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1086{
1087 fprintf(fp, "# arch : %s\n", ph->env.arch);
1088}
1089
1090static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
1091 FILE *fp)
1092{
1093 fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
1094}
1095
1096static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
1097 FILE *fp)
1098{
1099 fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
1100 fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
1101}
1102
1103static void print_version(struct perf_header *ph, int fd __maybe_unused,
1104 FILE *fp)
1105{
1106 fprintf(fp, "# perf version : %s\n", ph->env.version);
1107}
1108
1109static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
1110 FILE *fp)
1111{
1112 int nr, i;
1113
1114 nr = ph->env.nr_cmdline;
1115
1116 fprintf(fp, "# cmdline : ");
1117
1118 for (i = 0; i < nr; i++)
1119 fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
1120 fputc('\n', fp);
1121}
1122
1123static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
1124 FILE *fp)
1125{
1126 int nr, i;
1127 char *str;
1128 int cpu_nr = ph->env.nr_cpus_online;
1129
1130 nr = ph->env.nr_sibling_cores;
1131 str = ph->env.sibling_cores;
1132
1133 for (i = 0; i < nr; i++) {
1134 fprintf(fp, "# sibling cores : %s\n", str);
1135 str += strlen(str) + 1;
1136 }
1137
1138 nr = ph->env.nr_sibling_threads;
1139 str = ph->env.sibling_threads;
1140
1141 for (i = 0; i < nr; i++) {
1142 fprintf(fp, "# sibling threads : %s\n", str);
1143 str += strlen(str) + 1;
1144 }
1145
1146 if (ph->env.cpu != NULL) {
1147 for (i = 0; i < cpu_nr; i++)
1148 fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
1149 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
1150 } else
1151 fprintf(fp, "# Core ID and Socket ID information is not available\n");
1152}
1153
1154static void free_event_desc(struct perf_evsel *events)
1155{
1156 struct perf_evsel *evsel;
1157
1158 if (!events)
1159 return;
1160
1161 for (evsel = events; evsel->attr.size; evsel++) {
1162 zfree(&evsel->name);
1163 zfree(&evsel->id);
1164 }
1165
1166 free(events);
1167}
1168
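/*
 * Parse the HEADER_EVENT_DESC section written by write_event_desc() above.
 * The on-file attr size may differ from the running tool's
 * sizeof(perf_event_attr), so only min(on-file size, local size) bytes are
 * copied into each evsel; byte swapping is applied when needed.
 */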
1169static struct perf_evsel *
1170read_event_desc(struct perf_header *ph, int fd)
1171{
1172 struct perf_evsel *evsel, *events = NULL;
1173 u64 *id;
1174 void *buf = NULL;
1175 u32 nre, sz, nr, i, j;
1176 ssize_t ret;
1177 size_t msz;
1178
1179 /* number of events */
1180 ret = readn(fd, &nre, sizeof(nre));
1181 if (ret != (ssize_t)sizeof(nre))
1182 goto error;
1183
1184 if (ph->needs_swap)
1185 nre = bswap_32(nre);
1186
1187 ret = readn(fd, &sz, sizeof(sz));
1188 if (ret != (ssize_t)sizeof(sz))
1189 goto error;
1190
1191 if (ph->needs_swap)
1192 sz = bswap_32(sz);
1193
1194 /* buffer to hold on file attr struct */
1195 buf = malloc(sz);
1196 if (!buf)
1197 goto error;
1198
1199 /* the last event terminates with evsel->attr.size == 0: */
1200 events = calloc(nre + 1, sizeof(*events));
1201 if (!events)
1202 goto error;
1203
1204 msz = sizeof(evsel->attr);
1205 if (sz < msz)
1206 msz = sz;
1207
1208 for (i = 0, evsel = events; i < nre; evsel++, i++) {
1209 evsel->idx = i;
1210
1211 /*
1212 * must read entire on-file attr struct to
1213 * sync up with layout.
1214 */
1215 ret = readn(fd, buf, sz);
1216 if (ret != (ssize_t)sz)
1217 goto error;
1218
1219 if (ph->needs_swap)
1220 perf_event__attr_swap(buf);
1221
1222 memcpy(&evsel->attr, buf, msz);
1223
1224 ret = readn(fd, &nr, sizeof(nr));
1225 if (ret != (ssize_t)sizeof(nr))
1226 goto error;
1227
1228 if (ph->needs_swap) {
1229 nr = bswap_32(nr);
1230 evsel->needs_swap = true;
1231 }
1232
1233 evsel->name = do_read_string(fd, ph);
1234
1235 if (!nr)
1236 continue;
1237
1238 id = calloc(nr, sizeof(*id));
1239 if (!id)
1240 goto error;
1241 evsel->ids = nr;
1242 evsel->id = id;
1243
1244 for (j = 0 ; j < nr; j++) {
1245 ret = readn(fd, id, sizeof(*id));
1246 if (ret != (ssize_t)sizeof(*id))
1247 goto error;
1248 if (ph->needs_swap)
1249 *id = bswap_64(*id);
1250 id++;
1251 }
1252 }
1253out:
1254 free(buf);
1255 return events;
1256error:
1257 free_event_desc(events);
1258 events = NULL;
1259 goto out;
1260}
1261
1262static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1263 void *priv __attribute__((unused)))
1264{
1265 return fprintf(fp, ", %s = %s", name, val);
1266}
1267
1268static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1269{
1270 struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
1271 u32 j;
1272 u64 *id;
1273
1274 if (!events) {
1275 fprintf(fp, "# event desc: not available or unable to read\n");
1276 return;
1277 }
1278
1279 for (evsel = events; evsel->attr.size; evsel++) {
		fprintf(fp, "# event : name = %s", evsel->name);
1281
1282 if (evsel->ids) {
1283 fprintf(fp, ", id = {");
1284 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1285 if (j)
1286 fputc(',', fp);
1287 fprintf(fp, " %"PRIu64, *id);
1288 }
1289 fprintf(fp, " }");
1290 }
1291
1292 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1293
1294 fputc('\n', fp);
1295 }
1296
1297 free_event_desc(events);
1298}
1299
1300static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1301 FILE *fp)
1302{
1303 fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1304}
1305
1306static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
1307 FILE *fp)
1308{
1309 u32 nr, c, i;
1310 char *str, *tmp;
1311 uint64_t mem_total, mem_free;
1312
1313 /* nr nodes */
1314 nr = ph->env.nr_numa_nodes;
1315 str = ph->env.numa_nodes;
1316
1317 for (i = 0; i < nr; i++) {
1318 /* node number */
1319 c = strtoul(str, &tmp, 0);
1320 if (*tmp != ':')
1321 goto error;
1322
1323 str = tmp + 1;
1324 mem_total = strtoull(str, &tmp, 0);
1325 if (*tmp != ':')
1326 goto error;
1327
1328 str = tmp + 1;
1329 mem_free = strtoull(str, &tmp, 0);
1330 if (*tmp != ':')
1331 goto error;
1332
1333 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
1334 " free = %"PRIu64" kB\n",
1335 c, mem_total, mem_free);
1336
1337 str = tmp + 1;
1338 fprintf(fp, "# node%u cpu list : %s\n", c, str);
1339
1340 str += strlen(str) + 1;
1341 }
1342 return;
1343error:
1344 fprintf(fp, "# numa topology : not available\n");
1345}
1346
1347static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1348{
1349 fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
1350}
1351
1352static void print_branch_stack(struct perf_header *ph __maybe_unused,
1353 int fd __maybe_unused, FILE *fp)
1354{
1355 fprintf(fp, "# contains samples with branch stack\n");
1356}
1357
1358static void print_auxtrace(struct perf_header *ph __maybe_unused,
1359 int fd __maybe_unused, FILE *fp)
1360{
1361 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1362}
1363
1364static void print_stat(struct perf_header *ph __maybe_unused,
1365 int fd __maybe_unused, FILE *fp)
1366{
1367 fprintf(fp, "# contains stat data\n");
1368}
1369
static void print_cache(struct perf_header *ph, int fd __maybe_unused,
			FILE *fp)
1372{
1373 int i;
1374
1375 fprintf(fp, "# CPU cache info:\n");
1376 for (i = 0; i < ph->env.caches_cnt; i++) {
1377 fprintf(fp, "# ");
1378 cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
1379 }
1380}
1381
1382static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1383 FILE *fp)
1384{
1385 const char *delimiter = "# pmu mappings: ";
1386 char *str, *tmp;
1387 u32 pmu_num;
1388 u32 type;
1389
1390 pmu_num = ph->env.nr_pmu_mappings;
1391 if (!pmu_num) {
1392 fprintf(fp, "# pmu mappings: not available\n");
1393 return;
1394 }
1395
1396 str = ph->env.pmu_mappings;
1397
1398 while (pmu_num) {
1399 type = strtoul(str, &tmp, 0);
1400 if (*tmp != ':')
1401 goto error;
1402
1403 str = tmp + 1;
1404 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1405
1406 delimiter = ", ";
1407 str += strlen(str) + 1;
1408 pmu_num--;
1409 }
1410
1411 fprintf(fp, "\n");
1412
1413 if (!pmu_num)
1414 return;
1415error:
1416 fprintf(fp, "# pmu mappings: unable to read\n");
1417}
1418
1419static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1420 FILE *fp)
1421{
1422 struct perf_session *session;
1423 struct perf_evsel *evsel;
1424 u32 nr = 0;
1425
1426 session = container_of(ph, struct perf_session, header);
1427
1428 evlist__for_each(session->evlist, evsel) {
1429 if (perf_evsel__is_group_leader(evsel) &&
1430 evsel->nr_members > 1) {
1431 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1432 perf_evsel__name(evsel));
1433
1434 nr = evsel->nr_members - 1;
1435 } else if (nr) {
1436 fprintf(fp, ",%s", perf_evsel__name(evsel));
1437
1438 if (--nr == 0)
1439 fprintf(fp, "}\n");
1440 }
1441 }
1442}
1443
1444static int __event_process_build_id(struct build_id_event *bev,
1445 char *filename,
1446 struct perf_session *session)
1447{
1448 int err = -1;
1449 struct machine *machine;
1450 u16 cpumode;
1451 struct dso *dso;
1452 enum dso_kernel_type dso_type;
1453
1454 machine = perf_session__findnew_machine(session, bev->pid);
1455 if (!machine)
1456 goto out;
1457
1458 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1459
1460 switch (cpumode) {
1461 case PERF_RECORD_MISC_KERNEL:
1462 dso_type = DSO_TYPE_KERNEL;
1463 break;
1464 case PERF_RECORD_MISC_GUEST_KERNEL:
1465 dso_type = DSO_TYPE_GUEST_KERNEL;
1466 break;
1467 case PERF_RECORD_MISC_USER:
1468 case PERF_RECORD_MISC_GUEST_USER:
1469 dso_type = DSO_TYPE_USER;
1470 break;
1471 default:
1472 goto out;
1473 }
1474
1475 dso = machine__findnew_dso(machine, filename);
1476 if (dso != NULL) {
1477 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1478
1479 dso__set_build_id(dso, &bev->build_id);
1480
1481 if (!is_kernel_module(filename, cpumode))
1482 dso->kernel = dso_type;
1483
1484 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1485 sbuild_id);
1486 pr_debug("build id event received for %s: %s\n",
1487 dso->long_name, sbuild_id);
1488 dso__put(dso);
1489 }
1490
1491 err = 0;
1492out:
1493 return err;
1494}
1495
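/*
 * Before the a1645ce1 changeset, build_id_event had no pid field. This
 * reader parses that older layout and reconstructs a pid from header.misc
 * so __event_process_build_id() can pick the right machine.
 */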
1496static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1497 int input, u64 offset, u64 size)
1498{
1499 struct perf_session *session = container_of(header, struct perf_session, header);
1500 struct {
1501 struct perf_event_header header;
1502 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1503 char filename[0];
1504 } old_bev;
1505 struct build_id_event bev;
1506 char filename[PATH_MAX];
1507 u64 limit = offset + size;
1508
1509 while (offset < limit) {
1510 ssize_t len;
1511
1512 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1513 return -1;
1514
1515 if (header->needs_swap)
1516 perf_event_header__bswap(&old_bev.header);
1517
1518 len = old_bev.header.size - sizeof(old_bev);
1519 if (readn(input, filename, len) != len)
1520 return -1;
1521
1522 bev.header = old_bev.header;
1523
		/*
		 * As the pid is the missing value, we need to fill
		 * it in properly. The header.misc value gives us a nice hint.
		 */
1528 bev.pid = HOST_KERNEL_ID;
1529 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1530 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1531 bev.pid = DEFAULT_GUEST_KERNEL_ID;
1532
1533 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1534 __event_process_build_id(&bev, filename, session);
1535
1536 offset += bev.header.size;
1537 }
1538
1539 return 0;
1540}
1541
1542static int perf_header__read_build_ids(struct perf_header *header,
1543 int input, u64 offset, u64 size)
1544{
1545 struct perf_session *session = container_of(header, struct perf_session, header);
1546 struct build_id_event bev;
1547 char filename[PATH_MAX];
1548 u64 limit = offset + size, orig_offset = offset;
1549 int err = -1;
1550
1551 while (offset < limit) {
1552 ssize_t len;
1553
1554 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1555 goto out;
1556
1557 if (header->needs_swap)
1558 perf_event_header__bswap(&bev.header);
1559
1560 len = bev.header.size - sizeof(bev);
1561 if (readn(input, filename, len) != len)
1562 goto out;
1563 /*
1564 * The a1645ce1 changeset:
1565 *
1566 * "perf: 'perf kvm' tool for monitoring guest performance from host"
1567 *
1568 * Added a field to struct build_id_event that broke the file
1569 * format.
1570 *
1571 * Since the kernel build-id is the first entry, process the
1572 * table using the old format if the well known
1573 * '[kernel.kallsyms]' string for the kernel build-id has the
1574 * first 4 characters chopped off (where the pid_t sits).
1575 */
1576 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1577 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1578 return -1;
1579 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1580 }
1581
1582 __event_process_build_id(&bev, filename, session);
1583
1584 offset += bev.header.size;
1585 }
1586 err = 0;
1587out:
1588 return err;
1589}
1590
1591static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1592 struct perf_header *ph __maybe_unused,
1593 int fd, void *data)
1594{
1595 ssize_t ret = trace_report(fd, data, false);
1596 return ret < 0 ? -1 : 0;
1597}
1598
1599static int process_build_id(struct perf_file_section *section,
1600 struct perf_header *ph, int fd,
1601 void *data __maybe_unused)
1602{
1603 if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1604 pr_debug("Failed to read buildids, continuing...\n");
1605 return 0;
1606}
1607
1608static int process_hostname(struct perf_file_section *section __maybe_unused,
1609 struct perf_header *ph, int fd,
1610 void *data __maybe_unused)
1611{
1612 ph->env.hostname = do_read_string(fd, ph);
1613 return ph->env.hostname ? 0 : -ENOMEM;
1614}
1615
1616static int process_osrelease(struct perf_file_section *section __maybe_unused,
1617 struct perf_header *ph, int fd,
1618 void *data __maybe_unused)
1619{
1620 ph->env.os_release = do_read_string(fd, ph);
1621 return ph->env.os_release ? 0 : -ENOMEM;
1622}
1623
1624static int process_version(struct perf_file_section *section __maybe_unused,
1625 struct perf_header *ph, int fd,
1626 void *data __maybe_unused)
1627{
1628 ph->env.version = do_read_string(fd, ph);
1629 return ph->env.version ? 0 : -ENOMEM;
1630}
1631
1632static int process_arch(struct perf_file_section *section __maybe_unused,
1633 struct perf_header *ph, int fd,
1634 void *data __maybe_unused)
1635{
1636 ph->env.arch = do_read_string(fd, ph);
1637 return ph->env.arch ? 0 : -ENOMEM;
1638}
1639
1640static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1641 struct perf_header *ph, int fd,
1642 void *data __maybe_unused)
1643{
1644 ssize_t ret;
1645 u32 nr;
1646
1647 ret = readn(fd, &nr, sizeof(nr));
1648 if (ret != sizeof(nr))
1649 return -1;
1650
1651 if (ph->needs_swap)
1652 nr = bswap_32(nr);
1653
1654 ph->env.nr_cpus_avail = nr;
1655
1656 ret = readn(fd, &nr, sizeof(nr));
1657 if (ret != sizeof(nr))
1658 return -1;
1659
1660 if (ph->needs_swap)
1661 nr = bswap_32(nr);
1662
1663 ph->env.nr_cpus_online = nr;
1664 return 0;
1665}
1666
1667static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1668 struct perf_header *ph, int fd,
1669 void *data __maybe_unused)
1670{
1671 ph->env.cpu_desc = do_read_string(fd, ph);
1672 return ph->env.cpu_desc ? 0 : -ENOMEM;
1673}
1674
1675static int process_cpuid(struct perf_file_section *section __maybe_unused,
1676 struct perf_header *ph, int fd,
1677 void *data __maybe_unused)
1678{
1679 ph->env.cpuid = do_read_string(fd, ph);
1680 return ph->env.cpuid ? 0 : -ENOMEM;
1681}
1682
1683static int process_total_mem(struct perf_file_section *section __maybe_unused,
1684 struct perf_header *ph, int fd,
1685 void *data __maybe_unused)
1686{
1687 uint64_t mem;
1688 ssize_t ret;
1689
1690 ret = readn(fd, &mem, sizeof(mem));
1691 if (ret != sizeof(mem))
1692 return -1;
1693
1694 if (ph->needs_swap)
1695 mem = bswap_64(mem);
1696
1697 ph->env.total_mem = mem;
1698 return 0;
1699}
1700
1701static struct perf_evsel *
1702perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1703{
1704 struct perf_evsel *evsel;
1705
1706 evlist__for_each(evlist, evsel) {
1707 if (evsel->idx == idx)
1708 return evsel;
1709 }
1710
1711 return NULL;
1712}
1713
1714static void
1715perf_evlist__set_event_name(struct perf_evlist *evlist,
1716 struct perf_evsel *event)
1717{
1718 struct perf_evsel *evsel;
1719
1720 if (!event->name)
1721 return;
1722
1723 evsel = perf_evlist__find_by_index(evlist, event->idx);
1724 if (!evsel)
1725 return;
1726
1727 if (evsel->name)
1728 return;
1729
1730 evsel->name = strdup(event->name);
1731}
1732
1733static int
1734process_event_desc(struct perf_file_section *section __maybe_unused,
1735 struct perf_header *header, int fd,
1736 void *data __maybe_unused)
1737{
1738 struct perf_session *session;
1739 struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1740
1741 if (!events)
1742 return 0;
1743
1744 session = container_of(header, struct perf_session, header);
1745 for (evsel = events; evsel->attr.size; evsel++)
1746 perf_evlist__set_event_name(session->evlist, evsel);
1747
1748 free_event_desc(events);
1749
1750 return 0;
1751}
1752
1753static int process_cmdline(struct perf_file_section *section,
1754 struct perf_header *ph, int fd,
1755 void *data __maybe_unused)
1756{
1757 ssize_t ret;
1758 char *str, *cmdline = NULL, **argv = NULL;
1759 u32 nr, i, len = 0;
1760
1761 ret = readn(fd, &nr, sizeof(nr));
1762 if (ret != sizeof(nr))
1763 return -1;
1764
1765 if (ph->needs_swap)
1766 nr = bswap_32(nr);
1767
1768 ph->env.nr_cmdline = nr;
1769
1770 cmdline = zalloc(section->size + nr + 1);
1771 if (!cmdline)
1772 return -1;
1773
1774 argv = zalloc(sizeof(char *) * (nr + 1));
1775 if (!argv)
1776 goto error;
1777
1778 for (i = 0; i < nr; i++) {
1779 str = do_read_string(fd, ph);
1780 if (!str)
1781 goto error;
1782
1783 argv[i] = cmdline + len;
1784 memcpy(argv[i], str, strlen(str) + 1);
1785 len += strlen(str) + 1;
1786 free(str);
1787 }
1788 ph->env.cmdline = cmdline;
1789 ph->env.cmdline_argv = (const char **) argv;
1790 return 0;
1791
1792error:
1793 free(argv);
1794 free(cmdline);
1795 return -1;
1796}
1797
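/*
 * Reader for HEADER_CPU_TOPOLOGY: a u32 count of core-sibling strings and
 * the strings themselves, the same for thread siblings, and, when written
 * by a newer perf, a (core_id, socket_id) u32 pair per online CPU.
 */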
1798static int process_cpu_topology(struct perf_file_section *section,
1799 struct perf_header *ph, int fd,
1800 void *data __maybe_unused)
1801{
1802 ssize_t ret;
1803 u32 nr, i;
1804 char *str;
1805 struct strbuf sb;
1806 int cpu_nr = ph->env.nr_cpus_online;
1807 u64 size = 0;
1808
1809 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1810 if (!ph->env.cpu)
1811 return -1;
1812
1813 ret = readn(fd, &nr, sizeof(nr));
1814 if (ret != sizeof(nr))
1815 goto free_cpu;
1816
1817 if (ph->needs_swap)
1818 nr = bswap_32(nr);
1819
1820 ph->env.nr_sibling_cores = nr;
1821 size += sizeof(u32);
1822 strbuf_init(&sb, 128);
1823
1824 for (i = 0; i < nr; i++) {
1825 str = do_read_string(fd, ph);
1826 if (!str)
1827 goto error;
1828
1829 /* include a NULL character at the end */
1830 strbuf_add(&sb, str, strlen(str) + 1);
1831 size += string_size(str);
1832 free(str);
1833 }
1834 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1835
1836 ret = readn(fd, &nr, sizeof(nr));
1837 if (ret != sizeof(nr))
		goto free_cpu;
1839
1840 if (ph->needs_swap)
1841 nr = bswap_32(nr);
1842
1843 ph->env.nr_sibling_threads = nr;
1844 size += sizeof(u32);
1845
1846 for (i = 0; i < nr; i++) {
1847 str = do_read_string(fd, ph);
1848 if (!str)
1849 goto error;
1850
1851 /* include a NULL character at the end */
1852 strbuf_add(&sb, str, strlen(str) + 1);
1853 size += string_size(str);
1854 free(str);
1855 }
1856 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1857
	/*
	 * The header may be from an old perf,
	 * which doesn't include core id and socket id information.
	 */
1862 if (section->size <= size) {
1863 zfree(&ph->env.cpu);
1864 return 0;
1865 }
1866
1867 for (i = 0; i < (u32)cpu_nr; i++) {
1868 ret = readn(fd, &nr, sizeof(nr));
1869 if (ret != sizeof(nr))
1870 goto free_cpu;
1871
1872 if (ph->needs_swap)
1873 nr = bswap_32(nr);
1874
1875 ph->env.cpu[i].core_id = nr;
1876
1877 ret = readn(fd, &nr, sizeof(nr));
1878 if (ret != sizeof(nr))
1879 goto free_cpu;
1880
1881 if (ph->needs_swap)
1882 nr = bswap_32(nr);
1883
1884 if (nr > (u32)cpu_nr) {
			pr_debug("socket_id number is too big. "
				 "You may need to upgrade the perf tool.\n");
1887 goto free_cpu;
1888 }
1889
1890 ph->env.cpu[i].socket_id = nr;
1891 }
1892
1893 return 0;
1894
1895error:
1896 strbuf_release(&sb);
1897free_cpu:
1898 zfree(&ph->env.cpu);
1899 return -1;
1900}
1901
1902static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1903 struct perf_header *ph, int fd,
1904 void *data __maybe_unused)
1905{
1906 ssize_t ret;
1907 u32 nr, node, i;
1908 char *str;
1909 uint64_t mem_total, mem_free;
1910 struct strbuf sb;
1911
1912 /* nr nodes */
1913 ret = readn(fd, &nr, sizeof(nr));
1914 if (ret != sizeof(nr))
1915 goto error;
1916
1917 if (ph->needs_swap)
1918 nr = bswap_32(nr);
1919
1920 ph->env.nr_numa_nodes = nr;
1921 strbuf_init(&sb, 256);
1922
1923 for (i = 0; i < nr; i++) {
1924 /* node number */
1925 ret = readn(fd, &node, sizeof(node));
1926 if (ret != sizeof(node))
1927 goto error;
1928
1929 ret = readn(fd, &mem_total, sizeof(u64));
1930 if (ret != sizeof(u64))
1931 goto error;
1932
1933 ret = readn(fd, &mem_free, sizeof(u64));
1934 if (ret != sizeof(u64))
1935 goto error;
1936
1937 if (ph->needs_swap) {
1938 node = bswap_32(node);
1939 mem_total = bswap_64(mem_total);
1940 mem_free = bswap_64(mem_free);
1941 }
1942
1943 strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
1944 node, mem_total, mem_free);
1945
1946 str = do_read_string(fd, ph);
1947 if (!str)
1948 goto error;
1949
1950 /* include a NULL character at the end */
1951 strbuf_add(&sb, str, strlen(str) + 1);
1952 free(str);
1953 }
1954 ph->env.numa_nodes = strbuf_detach(&sb, NULL);
1955 return 0;
1956
1957error:
1958 strbuf_release(&sb);
1959 return -1;
1960}
1961
1962static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1963 struct perf_header *ph, int fd,
1964 void *data __maybe_unused)
1965{
1966 ssize_t ret;
1967 char *name;
1968 u32 pmu_num;
1969 u32 type;
1970 struct strbuf sb;
1971
1972 ret = readn(fd, &pmu_num, sizeof(pmu_num));
1973 if (ret != sizeof(pmu_num))
1974 return -1;
1975
1976 if (ph->needs_swap)
1977 pmu_num = bswap_32(pmu_num);
1978
1979 if (!pmu_num) {
1980 pr_debug("pmu mappings not available\n");
1981 return 0;
1982 }
1983
1984 ph->env.nr_pmu_mappings = pmu_num;
1985 strbuf_init(&sb, 128);
1986
1987 while (pmu_num) {
1988 if (readn(fd, &type, sizeof(type)) != sizeof(type))
1989 goto error;
1990 if (ph->needs_swap)
1991 type = bswap_32(type);
1992
1993 name = do_read_string(fd, ph);
1994 if (!name)
1995 goto error;
1996
1997 strbuf_addf(&sb, "%u:%s", type, name);
1998 /* include a NULL character at the end */
1999 strbuf_add(&sb, "", 1);
2000
2001 if (!strcmp(name, "msr"))
2002 ph->env.msr_pmu_type = type;
2003
2004 free(name);
2005 pmu_num--;
2006 }
2007 ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2008 return 0;
2009
2010error:
2011 strbuf_release(&sb);
2012 return -1;
2013}
2014
2015static int process_group_desc(struct perf_file_section *section __maybe_unused,
2016 struct perf_header *ph, int fd,
2017 void *data __maybe_unused)
2018{
	int ret = -1;
2020 u32 i, nr, nr_groups;
2021 struct perf_session *session;
2022 struct perf_evsel *evsel, *leader = NULL;
2023 struct group_desc {
2024 char *name;
2025 u32 leader_idx;
2026 u32 nr_members;
2027 } *desc;
2028
2029 if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2030 return -1;
2031
2032 if (ph->needs_swap)
2033 nr_groups = bswap_32(nr_groups);
2034
2035 ph->env.nr_groups = nr_groups;
2036 if (!nr_groups) {
2037 pr_debug("group desc not available\n");
2038 return 0;
2039 }
2040
2041 desc = calloc(nr_groups, sizeof(*desc));
2042 if (!desc)
2043 return -1;
2044
2045 for (i = 0; i < nr_groups; i++) {
2046 desc[i].name = do_read_string(fd, ph);
2047 if (!desc[i].name)
2048 goto out_free;
2049
2050 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2051 goto out_free;
2052
2053 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2054 goto out_free;
2055
2056 if (ph->needs_swap) {
2057 desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2058 desc[i].nr_members = bswap_32(desc[i].nr_members);
2059 }
2060 }
2061
2062 /*
2063 * Rebuild group relationship based on the group_desc
2064 */
2065 session = container_of(ph, struct perf_session, header);
2066 session->evlist->nr_groups = nr_groups;
2067
2068 i = nr = 0;
2069 evlist__for_each(session->evlist, evsel) {
2070 if (evsel->idx == (int) desc[i].leader_idx) {
2071 evsel->leader = evsel;
2072 /* {anon_group} is a dummy name */
2073 if (strcmp(desc[i].name, "{anon_group}")) {
2074 evsel->group_name = desc[i].name;
2075 desc[i].name = NULL;
2076 }
2077 evsel->nr_members = desc[i].nr_members;
2078
2079 if (i >= nr_groups || nr > 0) {
2080 pr_debug("invalid group desc\n");
2081 goto out_free;
2082 }
2083
2084 leader = evsel;
2085 nr = evsel->nr_members - 1;
2086 i++;
2087 } else if (nr) {
2088 /* This is a group member */
2089 evsel->leader = leader;
2090
2091 nr--;
2092 }
2093 }
2094
2095 if (i != nr_groups || nr != 0) {
2096 pr_debug("invalid group desc\n");
2097 goto out_free;
2098 }
2099
2100 ret = 0;
2101out_free:
2102 for (i = 0; i < nr_groups; i++)
2103 zfree(&desc[i].name);
2104 free(desc);
2105
2106 return ret;
2107}
2108
2109static int process_auxtrace(struct perf_file_section *section,
2110 struct perf_header *ph, int fd,
2111 void *data __maybe_unused)
2112{
2113 struct perf_session *session;
2114 int err;
2115
2116 session = container_of(ph, struct perf_session, header);
2117
2118 err = auxtrace_index__process(fd, section->size, session,
2119 ph->needs_swap);
2120 if (err < 0)
2121 pr_err("Failed to process auxtrace index\n");
2122 return err;
2123}
2124
static int process_cache(struct perf_file_section *section __maybe_unused,
			 struct perf_header *ph, int fd,
			 void *data __maybe_unused)
2128{
2129 struct cpu_cache_level *caches;
2130 u32 cnt, i, version;
2131
2132 if (readn(fd, &version, sizeof(version)) != sizeof(version))
2133 return -1;
2134
2135 if (ph->needs_swap)
2136 version = bswap_32(version);
2137
2138 if (version != 1)
2139 return -1;
2140
2141 if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2142 return -1;
2143
2144 if (ph->needs_swap)
2145 cnt = bswap_32(cnt);
2146
2147 caches = zalloc(sizeof(*caches) * cnt);
2148 if (!caches)
2149 return -1;
2150
2151 for (i = 0; i < cnt; i++) {
2152 struct cpu_cache_level c;
2153
2154 #define _R(v) \
2155 if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2156 goto out_free_caches; \
2157 if (ph->needs_swap) \
2158 c.v = bswap_32(c.v); \
2159
2160 _R(level)
2161 _R(line_size)
2162 _R(sets)
2163 _R(ways)
2164 #undef _R
2165
2166 #define _R(v) \
2167 c.v = do_read_string(fd, ph); \
2168 if (!c.v) \
2169 goto out_free_caches;
2170
2171 _R(type)
2172 _R(size)
2173 _R(map)
2174 #undef _R
2175
2176 caches[i] = c;
2177 }
2178
2179 ph->env.caches = caches;
2180 ph->env.caches_cnt = cnt;
2181 return 0;
2182out_free_caches:
2183 free(caches);
2184 return -1;
2185}
2186
2187struct feature_ops {
2188 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
2189 void (*print)(struct perf_header *h, int fd, FILE *fp);
2190 int (*process)(struct perf_file_section *section,
2191 struct perf_header *h, int fd, void *data);
2192 const char *name;
2193 bool full_only;
2194};
2195
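/*
 * FEAT_OPA registers write/print handlers only, FEAT_OPP also wires up a
 * process handler, and FEAT_OPF additionally marks the feature as printed
 * only with the full (-I) header listing.
 */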
2196#define FEAT_OPA(n, func) \
2197 [n] = { .name = #n, .write = write_##func, .print = print_##func }
2198#define FEAT_OPP(n, func) \
2199 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2200 .process = process_##func }
2201#define FEAT_OPF(n, func) \
2202 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2203 .process = process_##func, .full_only = true }
2204
2205/* feature_ops not implemented: */
2206#define print_tracing_data NULL
2207#define print_build_id NULL
2208
2209static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2210 FEAT_OPP(HEADER_TRACING_DATA, tracing_data),
2211 FEAT_OPP(HEADER_BUILD_ID, build_id),
2212 FEAT_OPP(HEADER_HOSTNAME, hostname),
2213 FEAT_OPP(HEADER_OSRELEASE, osrelease),
2214 FEAT_OPP(HEADER_VERSION, version),
2215 FEAT_OPP(HEADER_ARCH, arch),
2216 FEAT_OPP(HEADER_NRCPUS, nrcpus),
2217 FEAT_OPP(HEADER_CPUDESC, cpudesc),
2218 FEAT_OPP(HEADER_CPUID, cpuid),
2219 FEAT_OPP(HEADER_TOTAL_MEM, total_mem),
2220 FEAT_OPP(HEADER_EVENT_DESC, event_desc),
2221 FEAT_OPP(HEADER_CMDLINE, cmdline),
2222 FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
2223 FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
2224 FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
2225 FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
2226 FEAT_OPP(HEADER_GROUP_DESC, group_desc),
2227 FEAT_OPP(HEADER_AUXTRACE, auxtrace),
2228 FEAT_OPA(HEADER_STAT, stat),
2229 FEAT_OPF(HEADER_CACHE, cache),
2230};
2231
2232struct header_print_data {
2233 FILE *fp;
2234 bool full; /* extended list of headers */
2235};
2236
2237static int perf_file_section__fprintf_info(struct perf_file_section *section,
2238 struct perf_header *ph,
2239 int feat, int fd, void *data)
2240{
2241 struct header_print_data *hd = data;
2242
2243 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2244 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2245 "%d, continuing...\n", section->offset, feat);
2246 return 0;
2247 }
2248 if (feat >= HEADER_LAST_FEATURE) {
2249 pr_warning("unknown feature %d\n", feat);
2250 return 0;
2251 }
2252 if (!feat_ops[feat].print)
2253 return 0;
2254
2255 if (!feat_ops[feat].full_only || hd->full)
2256 feat_ops[feat].print(ph, fd, hd->fp);
2257 else
2258 fprintf(hd->fp, "# %s info available, use -I to display\n",
2259 feat_ops[feat].name);
2260
2261 return 0;
2262}
2263
2264int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2265{
2266 struct header_print_data hd;
2267 struct perf_header *header = &session->header;
2268 int fd = perf_data_file__fd(session->file);
2269 hd.fp = fp;
2270 hd.full = full;
2271
2272 perf_header__process_sections(header, fd, &hd,
2273 perf_file_section__fprintf_info);
2274 return 0;
2275}
2276
2277static int do_write_feat(int fd, struct perf_header *h, int type,
2278 struct perf_file_section **p,
2279 struct perf_evlist *evlist)
2280{
2281 int err;
2282 int ret = 0;
2283
2284 if (perf_header__has_feat(h, type)) {
2285 if (!feat_ops[type].write)
2286 return -1;
2287
2288 (*p)->offset = lseek(fd, 0, SEEK_CUR);
2289
2290 err = feat_ops[type].write(fd, h, evlist);
2291 if (err < 0) {
2292 pr_debug("failed to write feature %d\n", type);
2293
2294 /* undo anything written */
2295 lseek(fd, (*p)->offset, SEEK_SET);
2296
2297 return -1;
2298 }
2299 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2300 (*p)++;
2301 }
2302 return ret;
2303}
2304
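/*
 * Feature data is laid out as a table of perf_file_section entries at
 * feat_offset followed by the per-feature payloads. Space for the table is
 * skipped first, each enabled feature is written via do_write_feat(), and
 * the table is then filled in with the recorded offsets and sizes.
 */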
2305static int perf_header__adds_write(struct perf_header *header,
2306 struct perf_evlist *evlist, int fd)
2307{
2308 int nr_sections;
2309 struct perf_file_section *feat_sec, *p;
2310 int sec_size;
2311 u64 sec_start;
2312 int feat;
2313 int err;
2314
2315 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2316 if (!nr_sections)
2317 return 0;
2318
2319 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2320 if (feat_sec == NULL)
2321 return -ENOMEM;
2322
2323 sec_size = sizeof(*feat_sec) * nr_sections;
2324
2325 sec_start = header->feat_offset;
2326 lseek(fd, sec_start + sec_size, SEEK_SET);
2327
2328 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2329 if (do_write_feat(fd, header, feat, &p, evlist))
2330 perf_header__clear_feat(header, feat);
2331 }
2332
2333 lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to a dropped feature, but
	 * this is okay, the reader will skip the missing entries
	 */
2338 err = do_write(fd, feat_sec, sec_size);
2339 if (err < 0)
2340 pr_debug("failed to write feature section\n");
2341 free(feat_sec);
2342 return err;
2343}
2344
2345int perf_header__write_pipe(int fd)
2346{
2347 struct perf_pipe_file_header f_header;
2348 int err;
2349
2350 f_header = (struct perf_pipe_file_header){
2351 .magic = PERF_MAGIC,
2352 .size = sizeof(f_header),
2353 };
2354
2355 err = do_write(fd, &f_header, sizeof(f_header));
2356 if (err < 0) {
2357 pr_debug("failed to write perf pipe header\n");
2358 return err;
2359 }
2360
2361 return 0;
2362}
2363
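/*
 * On-disk layout produced below: the per-evsel sample id arrays, the table
 * of perf_file_attr entries pointing at those ids, the sample data, the
 * optional feature sections (only at exit), and finally the
 * perf_file_header itself rewritten at offset 0.
 */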
2364int perf_session__write_header(struct perf_session *session,
2365 struct perf_evlist *evlist,
2366 int fd, bool at_exit)
2367{
2368 struct perf_file_header f_header;
2369 struct perf_file_attr f_attr;
2370 struct perf_header *header = &session->header;
2371 struct perf_evsel *evsel;
2372 u64 attr_offset;
2373 int err;
2374
2375 lseek(fd, sizeof(f_header), SEEK_SET);
2376
2377 evlist__for_each(session->evlist, evsel) {
2378 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2379 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
2380 if (err < 0) {
2381 pr_debug("failed to write perf header\n");
2382 return err;
2383 }
2384 }
2385
2386 attr_offset = lseek(fd, 0, SEEK_CUR);
2387
2388 evlist__for_each(evlist, evsel) {
2389 f_attr = (struct perf_file_attr){
2390 .attr = evsel->attr,
2391 .ids = {
2392 .offset = evsel->id_offset,
2393 .size = evsel->ids * sizeof(u64),
2394 }
2395 };
2396 err = do_write(fd, &f_attr, sizeof(f_attr));
2397 if (err < 0) {
2398 pr_debug("failed to write perf header attribute\n");
2399 return err;
2400 }
2401 }
2402
2403 if (!header->data_offset)
2404 header->data_offset = lseek(fd, 0, SEEK_CUR);
2405 header->feat_offset = header->data_offset + header->data_size;
2406
2407 if (at_exit) {
2408 err = perf_header__adds_write(header, evlist, fd);
2409 if (err < 0)
2410 return err;
2411 }
2412
2413 f_header = (struct perf_file_header){
2414 .magic = PERF_MAGIC,
2415 .size = sizeof(f_header),
2416 .attr_size = sizeof(f_attr),
2417 .attrs = {
2418 .offset = attr_offset,
2419 .size = evlist->nr_entries * sizeof(f_attr),
2420 },
2421 .data = {
2422 .offset = header->data_offset,
2423 .size = header->data_size,
2424 },
2425 /* event_types is ignored, store zeros */
2426 };
2427
2428 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2429
2430 lseek(fd, 0, SEEK_SET);
2431 err = do_write(fd, &f_header, sizeof(f_header));
2432 if (err < 0) {
2433 pr_debug("failed to write perf header\n");
2434 return err;
2435 }
2436 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
2437
2438 return 0;
2439}
2440
2441static int perf_header__getbuffer64(struct perf_header *header,
2442 int fd, void *buf, size_t size)
2443{
2444 if (readn(fd, buf, size) <= 0)
2445 return -1;
2446
2447 if (header->needs_swap)
2448 mem_bswap_64(buf, size);
2449
2450 return 0;
2451}
2452
2453int perf_header__process_sections(struct perf_header *header, int fd,
2454 void *data,
2455 int (*process)(struct perf_file_section *section,
2456 struct perf_header *ph,
2457 int feat, int fd, void *data))
2458{
2459 struct perf_file_section *feat_sec, *sec;
2460 int nr_sections;
2461 int sec_size;
2462 int feat;
2463 int err;
2464
2465 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2466 if (!nr_sections)
2467 return 0;
2468
2469 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2470 if (!feat_sec)
2471 return -1;
2472
2473 sec_size = sizeof(*feat_sec) * nr_sections;
2474
2475 lseek(fd, header->feat_offset, SEEK_SET);
2476
2477 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2478 if (err < 0)
2479 goto out_free;
2480
2481 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2482 err = process(sec++, header, feat, fd, data);
2483 if (err < 0)
2484 goto out_free;
2485 }
2486 err = 0;
2487out_free:
2488 free(feat_sec);
2489 return err;
2490}
2491
2492static const int attr_file_abi_sizes[] = {
2493 [0] = PERF_ATTR_SIZE_VER0,
2494 [1] = PERF_ATTR_SIZE_VER1,
2495 [2] = PERF_ATTR_SIZE_VER2,
2496 [3] = PERF_ATTR_SIZE_VER3,
2497 [4] = PERF_ATTR_SIZE_VER4,
2498 0,
2499};
2500
2501/*
2502 * In the legacy file format, the magic number is not used to encode endianness;
2503 * hdr_sz was used instead. But given that hdr_sz can vary based on ABI
2504 * revisions, we need to try all known header sizes in both byte orders to
2505 * detect the endianness.
2506 */
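/*
 * Example: an ABI0 file written on a same-endian host has
 * hdr_sz == PERF_ATTR_SIZE_VER0 + sizeof(struct perf_file_section),
 * i.e. 64 + 16 = 80 bytes with the current definitions; if only the
 * byte-swapped value matches one of the reference sizes, needs_swap is
 * set instead.
 */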
2507static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2508{
2509 uint64_t ref_size, attr_size;
2510 int i;
2511
2512 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2513 ref_size = attr_file_abi_sizes[i]
2514 + sizeof(struct perf_file_section);
2515 if (hdr_sz != ref_size) {
2516 attr_size = bswap_64(hdr_sz);
2517 if (attr_size != ref_size)
2518 continue;
2519
2520 ph->needs_swap = true;
2521 }
2522 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2523 i,
2524 ph->needs_swap);
2525 return 0;
2526 }
2527 /* could not determine endianness */
2528 return -1;
2529}
2530
2531#define PERF_PIPE_HDR_VER0 16
2532
2533static const size_t attr_pipe_abi_sizes[] = {
2534 [0] = PERF_PIPE_HDR_VER0,
2535 0,
2536};
2537
2538/*
2539 * In the legacy pipe format, there is an implicit assumption that the
2540 * endianness of the host recording the samples and of the host parsing them
2541 * is the same. This is not always the case, given that the pipe output can
2542 * always be redirected into a file and analyzed on a different machine with
2543 * possibly a different endianness and different perf_event ABI revisions in the perf tool itself.
2544 */
2545static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2546{
2547 u64 attr_size;
2548 int i;
2549
2550 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2551 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2552 attr_size = bswap_64(hdr_sz);
2553 if (attr_size != hdr_sz)
2554 continue;
2555
2556 ph->needs_swap = true;
2557 }
2558 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2559 return 0;
2560 }
2561 return -1;
2562}
2563
2564bool is_perf_magic(u64 magic)
2565{
2566 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2567 || magic == __perf_magic2
2568 || magic == __perf_magic2_sw)
2569 return true;
2570
2571 return false;
2572}
2573
2574static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2575 bool is_pipe, struct perf_header *ph)
2576{
2577 int ret;
2578
2579 /* check for legacy format */
2580 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2581 if (ret == 0) {
2582 ph->version = PERF_HEADER_VERSION_1;
2583 pr_debug("legacy perf.data format\n");
2584 if (is_pipe)
2585 return try_all_pipe_abis(hdr_sz, ph);
2586
2587 return try_all_file_abis(hdr_sz, ph);
2588 }
2589 /*
2590 * the new magic number serves two purposes:
2591 * - unique number to identify actual perf.data files
2592 * - encode endianness of file
2593 */
2594 ph->version = PERF_HEADER_VERSION_2;
2595
2596 /* check magic number with one endianness */
2597 if (magic == __perf_magic2)
2598 return 0;
2599
2600 /* check magic number with opposite endianness */
2601 if (magic != __perf_magic2_sw)
2602 return -1;
2603
2604 ph->needs_swap = true;
2605
2606 return 0;
2607}
2608
2609int perf_file_header__read(struct perf_file_header *header,
2610 struct perf_header *ph, int fd)
2611{
2612 ssize_t ret;
2613
2614 lseek(fd, 0, SEEK_SET);
2615
2616 ret = readn(fd, header, sizeof(*header));
2617 if (ret <= 0)
2618 return -1;
2619
2620 if (check_magic_endian(header->magic,
2621 header->attr_size, false, ph) < 0) {
2622 pr_debug("magic/endian check failed\n");
2623 return -1;
2624 }
2625
2626 if (ph->needs_swap) {
2627 mem_bswap_64(header, offsetof(struct perf_file_header,
2628 adds_features));
2629 }
2630
2631 if (header->size != sizeof(*header)) {
2632 /* Support the previous format */
2633 if (header->size == offsetof(typeof(*header), adds_features))
2634 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2635 else
2636 return -1;
2637 } else if (ph->needs_swap) {
2638 /*
2639 * feature bitmap is declared as an array of unsigned longs --
2640 * not good since its size can differ between the host that
2641 * generated the data file and the host analyzing the file.
2642 *
2643 * We need to handle endianness, but we don't know the size of
2644 * the unsigned long where the file was generated. Take a best
2645	 * guess at determining it: try 64-bit swap first (i.e., file
2646	 * created on a 64-bit host), and check if the hostname feature
2647	 * bit is set (this feature bit is forced on as of fbe96f2).
2648	 * If the bit is not set, undo the 64-bit swap and try a 32-bit
2649	 * swap. If the hostname bit is still not set (e.g., older data
2650	 * file), punt and fall back to the original behavior --
2651 * clearing all feature bits and setting buildid.
2652 */
2653 mem_bswap_64(&header->adds_features,
2654 BITS_TO_U64(HEADER_FEAT_BITS));
2655
2656 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2657 /* unswap as u64 */
2658 mem_bswap_64(&header->adds_features,
2659 BITS_TO_U64(HEADER_FEAT_BITS));
2660
2661 /* unswap as u32 */
2662 mem_bswap_32(&header->adds_features,
2663 BITS_TO_U32(HEADER_FEAT_BITS));
2664 }
2665
2666 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2667 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2668 set_bit(HEADER_BUILD_ID, header->adds_features);
2669 }
2670 }
2671
2672 memcpy(&ph->adds_features, &header->adds_features,
2673 sizeof(ph->adds_features));
2674
2675 ph->data_offset = header->data.offset;
2676 ph->data_size = header->data.size;
2677 ph->feat_offset = header->data.offset + header->data.size;
2678 return 0;
2679}
2680
2681static int perf_file_section__process(struct perf_file_section *section,
2682 struct perf_header *ph,
2683 int feat, int fd, void *data)
2684{
2685 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2686 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2687 "%d, continuing...\n", section->offset, feat);
2688 return 0;
2689 }
2690
2691 if (feat >= HEADER_LAST_FEATURE) {
2692 pr_debug("unknown feature %d, continuing...\n", feat);
2693 return 0;
2694 }
2695
2696 if (!feat_ops[feat].process)
2697 return 0;
2698
2699 return feat_ops[feat].process(section, ph, fd, data);
2700}
2701
2702static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2703 struct perf_header *ph, int fd,
2704 bool repipe)
2705{
2706 ssize_t ret;
2707
2708 ret = readn(fd, header, sizeof(*header));
2709 if (ret <= 0)
2710 return -1;
2711
2712 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2713 pr_debug("endian/magic failed\n");
2714 return -1;
2715 }
2716
2717 if (ph->needs_swap)
2718 header->size = bswap_64(header->size);
2719
2720 if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2721 return -1;
2722
2723 return 0;
2724}
2725
2726static int perf_header__read_pipe(struct perf_session *session)
2727{
2728 struct perf_header *header = &session->header;
2729 struct perf_pipe_file_header f_header;
2730
2731 if (perf_file_header__read_pipe(&f_header, header,
2732 perf_data_file__fd(session->file),
2733 session->repipe) < 0) {
2734 pr_debug("incompatible file format\n");
2735 return -EINVAL;
2736 }
2737
2738 return 0;
2739}
2740
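/*
 * Read one on-file attribute: the first PERF_ATTR_SIZE_VER0 bytes are
 * always present, the on-file attr->size (byte-swapped if needed) tells
 * how much more to read, files carrying an attr larger than ours are
 * rejected, and the trailing perf_file_section locates the ids.
 */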
2741static int read_attr(int fd, struct perf_header *ph,
2742 struct perf_file_attr *f_attr)
2743{
2744 struct perf_event_attr *attr = &f_attr->attr;
2745 size_t sz, left;
2746 size_t our_sz = sizeof(f_attr->attr);
2747 ssize_t ret;
2748
2749 memset(f_attr, 0, sizeof(*f_attr));
2750
2751 /* read minimal guaranteed structure */
2752 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2753 if (ret <= 0) {
2754 pr_debug("cannot read %d bytes of header attr\n",
2755 PERF_ATTR_SIZE_VER0);
2756 return -1;
2757 }
2758
2759 /* on file perf_event_attr size */
2760 sz = attr->size;
2761
2762 if (ph->needs_swap)
2763 sz = bswap_32(sz);
2764
2765 if (sz == 0) {
2766 /* assume ABI0 */
2767 sz = PERF_ATTR_SIZE_VER0;
2768 } else if (sz > our_sz) {
2769 pr_debug("file uses a more recent and unsupported ABI"
2770 " (%zu bytes extra)\n", sz - our_sz);
2771 return -1;
2772 }
2773 /* what we have not yet read and that we know about */
2774 left = sz - PERF_ATTR_SIZE_VER0;
2775 if (left) {
2776 void *ptr = attr;
2777 ptr += PERF_ATTR_SIZE_VER0;
2778
2779 ret = readn(fd, ptr, left);
2780 }
2781 /* read perf_file_section, ids are read in caller */
2782 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2783
2784 return ret <= 0 ? -1 : 0;
2785}
2786
2787static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2788 struct pevent *pevent)
2789{
2790 struct event_format *event;
2791 char bf[128];
2792
2793 /* already prepared */
2794 if (evsel->tp_format)
2795 return 0;
2796
2797 if (pevent == NULL) {
2798 pr_debug("broken or missing trace data\n");
2799 return -1;
2800 }
2801
2802 event = pevent_find_event(pevent, evsel->attr.config);
2803 if (event == NULL)
2804 return -1;
2805
2806 if (!evsel->name) {
2807 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2808 evsel->name = strdup(bf);
2809 if (evsel->name == NULL)
2810 return -1;
2811 }
2812
2813 evsel->tp_format = event;
2814 return 0;
2815}
2816
2817static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2818 struct pevent *pevent)
2819{
2820 struct perf_evsel *pos;
2821
2822 evlist__for_each(evlist, pos) {
2823 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2824 perf_evsel__prepare_tracepoint_event(pos, pevent))
2825 return -1;
2826 }
2827
2828 return 0;
2829}
2830
2831int perf_session__read_header(struct perf_session *session)
2832{
2833 struct perf_data_file *file = session->file;
2834 struct perf_header *header = &session->header;
2835 struct perf_file_header f_header;
2836 struct perf_file_attr f_attr;
2837 u64 f_id;
2838 int nr_attrs, nr_ids, i, j;
2839 int fd = perf_data_file__fd(file);
2840
2841 session->evlist = perf_evlist__new();
2842 if (session->evlist == NULL)
2843 return -ENOMEM;
2844
2845 session->evlist->env = &header->env;
2846 session->machines.host.env = &header->env;
2847 if (perf_data_file__is_pipe(file))
2848 return perf_header__read_pipe(session);
2849
2850 if (perf_file_header__read(&f_header, header, fd) < 0)
2851 return -EINVAL;
2852
2853 /*
2854 * Sanity check that perf.data was written cleanly; data size is
2855 * initialized to 0 and updated only if the on_exit function is run.
2856 * If data size is still 0 then the file contains only partial
2857	 * information. Just warn the user and process as much of it as we can.
2858 */
2859 if (f_header.data.size == 0) {
2860 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2861 "Was the 'perf record' command properly terminated?\n",
2862 file->path);
2863 }
2864
2865 nr_attrs = f_header.attrs.size / f_header.attr_size;
2866 lseek(fd, f_header.attrs.offset, SEEK_SET);
2867
2868 for (i = 0; i < nr_attrs; i++) {
2869 struct perf_evsel *evsel;
2870 off_t tmp;
2871
2872 if (read_attr(fd, header, &f_attr) < 0)
2873 goto out_errno;
2874
2875 if (header->needs_swap) {
2876 f_attr.ids.size = bswap_64(f_attr.ids.size);
2877 f_attr.ids.offset = bswap_64(f_attr.ids.offset);
2878 perf_event__attr_swap(&f_attr.attr);
2879 }
2880
2881 tmp = lseek(fd, 0, SEEK_CUR);
2882 evsel = perf_evsel__new(&f_attr.attr);
2883
2884 if (evsel == NULL)
2885 goto out_delete_evlist;
2886
2887 evsel->needs_swap = header->needs_swap;
2888 /*
2889 * Do it before so that if perf_evsel__alloc_id fails, this
2890 * entry gets purged too at perf_evlist__delete().
2891 */
2892 perf_evlist__add(session->evlist, evsel);
2893
2894 nr_ids = f_attr.ids.size / sizeof(u64);
2895 /*
2896 * We don't have the cpu and thread maps on the header, so
2897 * for allocating the perf_sample_id table we fake 1 cpu and
2898 * hattr->ids threads.
2899 */
2900 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2901 goto out_delete_evlist;
2902
2903 lseek(fd, f_attr.ids.offset, SEEK_SET);
2904
2905 for (j = 0; j < nr_ids; j++) {
2906 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
2907 goto out_errno;
2908
2909 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
2910 }
2911
2912 lseek(fd, tmp, SEEK_SET);
2913 }
2914
2915 symbol_conf.nr_events = nr_attrs;
2916
2917 perf_header__process_sections(header, fd, &session->tevent,
2918 perf_file_section__process);
2919
2920 if (perf_evlist__prepare_tracepoint_events(session->evlist,
2921 session->tevent.pevent))
2922 goto out_delete_evlist;
2923
2924 return 0;
2925out_errno:
2926 return -errno;
2927
2928out_delete_evlist:
2929 perf_evlist__delete(session->evlist);
2930 session->evlist = NULL;
2931 return -ENOMEM;
2932}
2933
2934int perf_event__synthesize_attr(struct perf_tool *tool,
2935 struct perf_event_attr *attr, u32 ids, u64 *id,
2936 perf_event__handler_t process)
2937{
2938 union perf_event *ev;
2939 size_t size;
2940 int err;
2941
2942 size = sizeof(struct perf_event_attr);
2943 size = PERF_ALIGN(size, sizeof(u64));
2944 size += sizeof(struct perf_event_header);
2945 size += ids * sizeof(u64);
2946
2947 ev = malloc(size);
2948
2949 if (ev == NULL)
2950 return -ENOMEM;
2951
2952 ev->attr.attr = *attr;
2953 memcpy(ev->attr.id, id, ids * sizeof(u64));
2954
2955 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2956 ev->attr.header.size = (u16)size;
2957
2958 if (ev->attr.header.size == size)
2959 err = process(tool, ev, NULL, NULL);
2960 else
2961 err = -E2BIG;
2962
2963 free(ev);
2964
2965 return err;
2966}
2967
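/*
 * Allocate a PERF_RECORD_EVENT_UPDATE record with room for 'size' bytes
 * of payload in ev->data, rounded up to a u64 boundary, with the type
 * and id fields already filled in.
 */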
2968static struct event_update_event *
2969event_update_event__new(size_t size, u64 type, u64 id)
2970{
2971 struct event_update_event *ev;
2972
2973 size += sizeof(*ev);
2974 size = PERF_ALIGN(size, sizeof(u64));
2975
2976 ev = zalloc(size);
2977 if (ev) {
2978 ev->header.type = PERF_RECORD_EVENT_UPDATE;
2979 ev->header.size = (u16)size;
2980 ev->type = type;
2981 ev->id = id;
2982 }
2983 return ev;
2984}
2985
2986int
2987perf_event__synthesize_event_update_unit(struct perf_tool *tool,
2988 struct perf_evsel *evsel,
2989 perf_event__handler_t process)
2990{
2991 struct event_update_event *ev;
2992 size_t size = strlen(evsel->unit);
2993 int err;
2994
2995 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
2996 if (ev == NULL)
2997 return -ENOMEM;
2998
2999 strncpy(ev->data, evsel->unit, size);
3000 err = process(tool, (union perf_event *)ev, NULL, NULL);
3001 free(ev);
3002 return err;
3003}
3004
3005int
3006perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3007 struct perf_evsel *evsel,
3008 perf_event__handler_t process)
3009{
3010 struct event_update_event *ev;
3011 struct event_update_event_scale *ev_data;
3012 int err;
3013
3014 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3015 if (ev == NULL)
3016 return -ENOMEM;
3017
3018 ev_data = (struct event_update_event_scale *) ev->data;
3019 ev_data->scale = evsel->scale;
3020 err = process(tool, (union perf_event*) ev, NULL, NULL);
3021 free(ev);
3022 return err;
3023}
3024
3025int
3026perf_event__synthesize_event_update_name(struct perf_tool *tool,
3027 struct perf_evsel *evsel,
3028 perf_event__handler_t process)
3029{
3030 struct event_update_event *ev;
3031 size_t len = strlen(evsel->name);
3032 int err;
3033
3034 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3035 if (ev == NULL)
3036 return -ENOMEM;
3037
3038 strncpy(ev->data, evsel->name, len);
3039 err = process(tool, (union perf_event*) ev, NULL, NULL);
3040 free(ev);
3041 return err;
3042}
3043
3044int
3045perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3046 struct perf_evsel *evsel,
3047 perf_event__handler_t process)
3048{
3049 size_t size = sizeof(struct event_update_event);
3050 struct event_update_event *ev;
3051 int max, err;
3052 u16 type;
3053
3054 if (!evsel->own_cpus)
3055 return 0;
3056
3057 ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3058 if (!ev)
3059 return -ENOMEM;
3060
3061 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3062 ev->header.size = (u16)size;
3063 ev->type = PERF_EVENT_UPDATE__CPUS;
3064 ev->id = evsel->id[0];
3065
3066 cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3067 evsel->own_cpus,
3068 type, max);
3069
3070 err = process(tool, (union perf_event*) ev, NULL, NULL);
3071 free(ev);
3072 return err;
3073}
3074
3075size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3076{
3077 struct event_update_event *ev = &event->event_update;
3078 struct event_update_event_scale *ev_scale;
3079 struct event_update_event_cpus *ev_cpus;
3080 struct cpu_map *map;
3081 size_t ret;
3082
3083 ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id);
3084
3085 switch (ev->type) {
3086 case PERF_EVENT_UPDATE__SCALE:
3087 ev_scale = (struct event_update_event_scale *) ev->data;
3088 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3089 break;
3090 case PERF_EVENT_UPDATE__UNIT:
3091 ret += fprintf(fp, "... unit: %s\n", ev->data);
3092 break;
3093 case PERF_EVENT_UPDATE__NAME:
3094 ret += fprintf(fp, "... name: %s\n", ev->data);
3095 break;
3096 case PERF_EVENT_UPDATE__CPUS:
3097 ev_cpus = (struct event_update_event_cpus *) ev->data;
3098 ret += fprintf(fp, "... ");
3099
3100 map = cpu_map__new_data(&ev_cpus->cpus);
3101 if (map)
3102 ret += cpu_map__fprintf(map, fp);
3103 else
3104 ret += fprintf(fp, "failed to get cpus\n");
3105 break;
3106 default:
3107 ret += fprintf(fp, "... unknown type\n");
3108 break;
3109 }
3110
3111 return ret;
3112}
3113
3114int perf_event__synthesize_attrs(struct perf_tool *tool,
3115 struct perf_session *session,
3116 perf_event__handler_t process)
3117{
3118 struct perf_evsel *evsel;
3119 int err = 0;
3120
3121 evlist__for_each(session->evlist, evsel) {
3122 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3123 evsel->id, process);
3124 if (err) {
3125 pr_debug("failed to create perf header attribute\n");
3126 return err;
3127 }
3128 }
3129
3130 return err;
3131}
3132
3133int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3134 union perf_event *event,
3135 struct perf_evlist **pevlist)
3136{
3137 u32 i, ids, n_ids;
3138 struct perf_evsel *evsel;
3139 struct perf_evlist *evlist = *pevlist;
3140
3141 if (evlist == NULL) {
3142 *pevlist = evlist = perf_evlist__new();
3143 if (evlist == NULL)
3144 return -ENOMEM;
3145 }
3146
3147 evsel = perf_evsel__new(&event->attr.attr);
3148 if (evsel == NULL)
3149 return -ENOMEM;
3150
3151 perf_evlist__add(evlist, evsel);
3152
3153 ids = event->header.size;
3154 ids -= (void *)&event->attr.id - (void *)event;
3155 n_ids = ids / sizeof(u64);
3156 /*
3157 * We don't have the cpu and thread maps on the header, so
3158 * for allocating the perf_sample_id table we fake 1 cpu and
3159 * hattr->ids threads.
3160 */
3161 if (perf_evsel__alloc_id(evsel, 1, n_ids))
3162 return -ENOMEM;
3163
3164 for (i = 0; i < n_ids; i++) {
3165 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
3166 }
3167
3168 symbol_conf.nr_events = evlist->nr_entries;
3169
3170 return 0;
3171}
3172
3173int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3174 union perf_event *event,
3175 struct perf_evlist **pevlist)
3176{
3177 struct event_update_event *ev = &event->event_update;
3178 struct event_update_event_scale *ev_scale;
3179 struct event_update_event_cpus *ev_cpus;
3180 struct perf_evlist *evlist;
3181 struct perf_evsel *evsel;
3182 struct cpu_map *map;
3183
3184 if (!pevlist || *pevlist == NULL)
3185 return -EINVAL;
3186
3187 evlist = *pevlist;
3188
3189 evsel = perf_evlist__id2evsel(evlist, ev->id);
3190 if (evsel == NULL)
3191 return -EINVAL;
3192
3193 switch (ev->type) {
3194 case PERF_EVENT_UPDATE__UNIT:
3195 evsel->unit = strdup(ev->data);
3196 break;
3197 case PERF_EVENT_UPDATE__NAME:
3198 evsel->name = strdup(ev->data);
3199 break;
3200 case PERF_EVENT_UPDATE__SCALE:
3201 ev_scale = (struct event_update_event_scale *) ev->data;
3202		evsel->scale = ev_scale->scale;
		break;
3203 case PERF_EVENT_UPDATE__CPUS:
3204 ev_cpus = (struct event_update_event_cpus *) ev->data;
3205
3206 map = cpu_map__new_data(&ev_cpus->cpus);
3207 if (map)
3208 evsel->own_cpus = map;
3209 else
3210 pr_err("failed to get event_update cpus\n");
3211 default:
3212 break;
3213 }
3214
3215 return 0;
3216}
3217
3218int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
3219 struct perf_evlist *evlist,
3220 perf_event__handler_t process)
3221{
3222 union perf_event ev;
3223 struct tracing_data *tdata;
3224 ssize_t size = 0, aligned_size = 0, padding;
3225 int err __maybe_unused = 0;
3226
3227 /*
3228 * We are going to store the size of the data followed
3229	 * by the data contents. Since the file descriptor is a pipe,
3230 * we cannot seek back to store the size of the data once
3231 * we know it. Instead we:
3232 *
3233 * - write the tracing data to the temp file
3234 * - get/write the data size to pipe
3235 * - write the tracing data from the temp file
3236 * to the pipe
3237 */
3238 tdata = tracing_data_get(&evlist->entries, fd, true);
3239 if (!tdata)
3240 return -1;
3241
3242 memset(&ev, 0, sizeof(ev));
3243
3244 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
3245 size = tdata->size;
3246 aligned_size = PERF_ALIGN(size, sizeof(u64));
3247 padding = aligned_size - size;
3248 ev.tracing_data.header.size = sizeof(ev.tracing_data);
3249 ev.tracing_data.size = aligned_size;
3250
3251 process(tool, &ev, NULL, NULL);
3252
3253 /*
3254 * The put function will copy all the tracing data
3255	 * stored in the temp file to the pipe.
3256 */
3257 tracing_data_put(tdata);
3258
3259 write_padded(fd, NULL, 0, padding);
3260
3261 return aligned_size;
3262}
3263
3264int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
3265 union perf_event *event,
3266 struct perf_session *session)
3267{
3268 ssize_t size_read, padding, size = event->tracing_data.size;
3269 int fd = perf_data_file__fd(session->file);
3270 off_t offset = lseek(fd, 0, SEEK_CUR);
3271 char buf[BUFSIZ];
3272
3273 /* setup for reading amidst mmap */
3274 lseek(fd, offset + sizeof(struct tracing_data_event),
3275 SEEK_SET);
3276
3277 size_read = trace_report(fd, &session->tevent,
3278 session->repipe);
3279 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
3280
3281 if (readn(fd, buf, padding) < 0) {
3282 pr_err("%s: reading input file", __func__);
3283 return -1;
3284 }
3285 if (session->repipe) {
3286 int retw = write(STDOUT_FILENO, buf, padding);
3287 if (retw <= 0 || retw != padding) {
3288 pr_err("%s: repiping tracing data padding", __func__);
3289 return -1;
3290 }
3291 }
3292
3293 if (size_read + padding != size) {
3294 pr_err("%s: tracing data size mismatch", __func__);
3295 return -1;
3296 }
3297
3298 perf_evlist__prepare_tracepoint_events(session->evlist,
3299 session->tevent.pevent);
3300
3301 return size_read + padding;
3302}
3303
3304int perf_event__synthesize_build_id(struct perf_tool *tool,
3305 struct dso *pos, u16 misc,
3306 perf_event__handler_t process,
3307 struct machine *machine)
3308{
3309 union perf_event ev;
3310 size_t len;
3311 int err = 0;
3312
3313 if (!pos->hit)
3314 return err;
3315
3316 memset(&ev, 0, sizeof(ev));
3317
3318 len = pos->long_name_len + 1;
3319 len = PERF_ALIGN(len, NAME_ALIGN);
3320 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3321 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3322 ev.build_id.header.misc = misc;
3323 ev.build_id.pid = machine->pid;
3324 ev.build_id.header.size = sizeof(ev.build_id) + len;
3325 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3326
3327 err = process(tool, &ev, NULL, machine);
3328
3329 return err;
3330}
3331
3332int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
3333 union perf_event *event,
3334 struct perf_session *session)
3335{
3336 __event_process_build_id(&event->build_id,
3337 event->build_id.filename,
3338 session);
3339 return 0;
3340}
1// SPDX-License-Identifier: GPL-2.0
2#include <errno.h>
3#include <inttypes.h>
4#include "string2.h"
5#include <sys/param.h>
6#include <sys/types.h>
7#include <byteswap.h>
8#include <unistd.h>
9#include <stdio.h>
10#include <stdlib.h>
11#include <linux/compiler.h>
12#include <linux/list.h>
13#include <linux/kernel.h>
14#include <linux/bitops.h>
15#include <linux/string.h>
16#include <linux/stringify.h>
17#include <linux/zalloc.h>
18#include <sys/stat.h>
19#include <sys/utsname.h>
20#include <linux/time64.h>
21#include <dirent.h>
22#ifdef HAVE_LIBBPF_SUPPORT
23#include <bpf/libbpf.h>
24#endif
25#include <perf/cpumap.h>
26
27#include "dso.h"
28#include "evlist.h"
29#include "evsel.h"
30#include "util/evsel_fprintf.h"
31#include "header.h"
32#include "memswap.h"
33#include "trace-event.h"
34#include "session.h"
35#include "symbol.h"
36#include "debug.h"
37#include "cpumap.h"
38#include "pmu.h"
39#include "vdso.h"
40#include "strbuf.h"
41#include "build-id.h"
42#include "data.h"
43#include <api/fs/fs.h>
44#include "asm/bug.h"
45#include "tool.h"
46#include "time-utils.h"
47#include "units.h"
48#include "util/util.h" // perf_exe()
49#include "cputopo.h"
50#include "bpf-event.h"
51#include "clockid.h"
52#include "pmu-hybrid.h"
53
54#include <linux/ctype.h>
55#include <internal/lib.h>
56
57/*
58 * magic2 = "PERFILE2"
59 * must be a numerical value to let the endianness
60 * determine the memory layout. That way we are able
61 * to detect endianness when reading the perf.data file
62 * back.
63 *
64 * we check for legacy (PERFFILE) format.
65 */
66static const char *__perf_magic1 = "PERFFILE";
67static const u64 __perf_magic2 = 0x32454c4946524550ULL;
68static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
69
70#define PERF_MAGIC __perf_magic2
71
72const char perf_version_string[] = PERF_VERSION;
73
74struct perf_file_attr {
75 struct perf_event_attr attr;
76 struct perf_file_section ids;
77};
78
79void perf_header__set_feat(struct perf_header *header, int feat)
80{
81 set_bit(feat, header->adds_features);
82}
83
84void perf_header__clear_feat(struct perf_header *header, int feat)
85{
86 clear_bit(feat, header->adds_features);
87}
88
89bool perf_header__has_feat(const struct perf_header *header, int feat)
90{
91 return test_bit(feat, header->adds_features);
92}
93
94static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
95{
96 ssize_t ret = writen(ff->fd, buf, size);
97
98 if (ret != (ssize_t)size)
99 return ret < 0 ? (int)ret : -1;
100 return 0;
101}
102
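/*
 * Buffered variant used when ff->buf is set (pipe mode): data is staged
 * in a buffer that doubles in size as needed, capped at
 * 0xffff - sizeof(struct perf_event_header) because the synthesized
 * feature record's header.size is a u16.
 */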
103static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
104{
105 /* struct perf_event_header::size is u16 */
106 const size_t max_size = 0xffff - sizeof(struct perf_event_header);
107 size_t new_size = ff->size;
108 void *addr;
109
110 if (size + ff->offset > max_size)
111 return -E2BIG;
112
113 while (size > (new_size - ff->offset))
114 new_size <<= 1;
115 new_size = min(max_size, new_size);
116
117 if (ff->size < new_size) {
118 addr = realloc(ff->buf, new_size);
119 if (!addr)
120 return -ENOMEM;
121 ff->buf = addr;
122 ff->size = new_size;
123 }
124
125 memcpy(ff->buf + ff->offset, buf, size);
126 ff->offset += size;
127
128 return 0;
129}
130
131/* Return: 0 if succeeded, -ERR if failed. */
132int do_write(struct feat_fd *ff, const void *buf, size_t size)
133{
134 if (!ff->buf)
135 return __do_write_fd(ff, buf, size);
136 return __do_write_buf(ff, buf, size);
137}
138
139/* Return: 0 if succeeded, -ERR if failed. */
140static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
141{
142 u64 *p = (u64 *) set;
143 int i, ret;
144
145 ret = do_write(ff, &size, sizeof(size));
146 if (ret < 0)
147 return ret;
148
149 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
150 ret = do_write(ff, p + i, sizeof(*p));
151 if (ret < 0)
152 return ret;
153 }
154
155 return 0;
156}
157
158/* Return: 0 if succeeded, -ERR if failed. */
159int write_padded(struct feat_fd *ff, const void *bf,
160 size_t count, size_t count_aligned)
161{
162 static const char zero_buf[NAME_ALIGN];
163 int err = do_write(ff, bf, count);
164
165 if (!err)
166 err = do_write(ff, zero_buf, count_aligned - count);
167
168 return err;
169}
170
171#define string_size(str) \
172 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
173
174/* Return: 0 if succeeded, -ERR if failed. */
175static int do_write_string(struct feat_fd *ff, const char *str)
176{
177 u32 len, olen;
178 int ret;
179
180 olen = strlen(str) + 1;
181 len = PERF_ALIGN(olen, NAME_ALIGN);
182
183 /* write len, incl. \0 */
184 ret = do_write(ff, &len, sizeof(len));
185 if (ret < 0)
186 return ret;
187
188 return write_padded(ff, str, olen, len);
189}
190
191static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
192{
193 ssize_t ret = readn(ff->fd, addr, size);
194
195 if (ret != size)
196 return ret < 0 ? (int)ret : -1;
197 return 0;
198}
199
200static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
201{
202 if (size > (ssize_t)ff->size - ff->offset)
203 return -1;
204
205 memcpy(addr, ff->buf + ff->offset, size);
206 ff->offset += size;
207
208 return 0;
209
210}
211
212static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
213{
214 if (!ff->buf)
215 return __do_read_fd(ff, addr, size);
216 return __do_read_buf(ff, addr, size);
217}
218
219static int do_read_u32(struct feat_fd *ff, u32 *addr)
220{
221 int ret;
222
223 ret = __do_read(ff, addr, sizeof(*addr));
224 if (ret)
225 return ret;
226
227 if (ff->ph->needs_swap)
228 *addr = bswap_32(*addr);
229 return 0;
230}
231
232static int do_read_u64(struct feat_fd *ff, u64 *addr)
233{
234 int ret;
235
236 ret = __do_read(ff, addr, sizeof(*addr));
237 if (ret)
238 return ret;
239
240 if (ff->ph->needs_swap)
241 *addr = bswap_64(*addr);
242 return 0;
243}
244
245static char *do_read_string(struct feat_fd *ff)
246{
247 u32 len;
248 char *buf;
249
250 if (do_read_u32(ff, &len))
251 return NULL;
252
253 buf = malloc(len);
254 if (!buf)
255 return NULL;
256
257 if (!__do_read(ff, buf, len)) {
258 /*
259 * strings are padded by zeroes
260 * thus the actual strlen of buf
261 * may be less than len
262 */
263 return buf;
264 }
265
266 free(buf);
267 return NULL;
268}
269
270/* Return: 0 if succeeded, -ERR if failed. */
271static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
272{
273 unsigned long *set;
274 u64 size, *p;
275 int i, ret;
276
277 ret = do_read_u64(ff, &size);
278 if (ret)
279 return ret;
280
281 set = bitmap_alloc(size);
282 if (!set)
283 return -ENOMEM;
284
285 p = (u64 *) set;
286
287 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
288 ret = do_read_u64(ff, p + i);
289 if (ret < 0) {
290 free(set);
291 return ret;
292 }
293 }
294
295 *pset = set;
296 *psize = size;
297 return 0;
298}
299
300static int write_tracing_data(struct feat_fd *ff,
301 struct evlist *evlist)
302{
303 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
304 return -1;
305
306 return read_tracing_data(ff->fd, &evlist->core.entries);
307}
308
309static int write_build_id(struct feat_fd *ff,
310 struct evlist *evlist __maybe_unused)
311{
312 struct perf_session *session;
313 int err;
314
315 session = container_of(ff->ph, struct perf_session, header);
316
317 if (!perf_session__read_build_ids(session, true))
318 return -1;
319
320 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
321 return -1;
322
323 err = perf_session__write_buildid_table(session, ff);
324 if (err < 0) {
325 pr_debug("failed to write buildid table\n");
326 return err;
327 }
328 perf_session__cache_build_ids(session);
329
330 return 0;
331}
332
333static int write_hostname(struct feat_fd *ff,
334 struct evlist *evlist __maybe_unused)
335{
336 struct utsname uts;
337 int ret;
338
339 ret = uname(&uts);
340 if (ret < 0)
341 return -1;
342
343 return do_write_string(ff, uts.nodename);
344}
345
346static int write_osrelease(struct feat_fd *ff,
347 struct evlist *evlist __maybe_unused)
348{
349 struct utsname uts;
350 int ret;
351
352 ret = uname(&uts);
353 if (ret < 0)
354 return -1;
355
356 return do_write_string(ff, uts.release);
357}
358
359static int write_arch(struct feat_fd *ff,
360 struct evlist *evlist __maybe_unused)
361{
362 struct utsname uts;
363 int ret;
364
365 ret = uname(&uts);
366 if (ret < 0)
367 return -1;
368
369 return do_write_string(ff, uts.machine);
370}
371
372static int write_version(struct feat_fd *ff,
373 struct evlist *evlist __maybe_unused)
374{
375 return do_write_string(ff, perf_version_string);
376}
377
378static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
379{
380 FILE *file;
381 char *buf = NULL;
382 char *s, *p;
383 const char *search = cpuinfo_proc;
384 size_t len = 0;
385 int ret = -1;
386
387 if (!search)
388 return -1;
389
390 file = fopen("/proc/cpuinfo", "r");
391 if (!file)
392 return -1;
393
394 while (getline(&buf, &len, file) > 0) {
395 ret = strncmp(buf, search, strlen(search));
396 if (!ret)
397 break;
398 }
399
400 if (ret) {
401 ret = -1;
402 goto done;
403 }
404
405 s = buf;
406
407 p = strchr(buf, ':');
408 if (p && *(p+1) == ' ' && *(p+2))
409 s = p + 2;
410 p = strchr(s, '\n');
411 if (p)
412 *p = '\0';
413
414 /* squash extra space characters (branding string) */
415 p = s;
416 while (*p) {
417 if (isspace(*p)) {
418 char *r = p + 1;
419 char *q = skip_spaces(r);
420 *p = ' ';
421 if (q != (p+1))
422 while ((*r++ = *q++));
423 }
424 p++;
425 }
426 ret = do_write_string(ff, s);
427done:
428 free(buf);
429 fclose(file);
430 return ret;
431}
432
433static int write_cpudesc(struct feat_fd *ff,
434 struct evlist *evlist __maybe_unused)
435{
436#if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__)
437#define CPUINFO_PROC { "cpu", }
438#elif defined(__s390__)
439#define CPUINFO_PROC { "vendor_id", }
440#elif defined(__sh__)
441#define CPUINFO_PROC { "cpu type", }
442#elif defined(__alpha__) || defined(__mips__)
443#define CPUINFO_PROC { "cpu model", }
444#elif defined(__arm__)
445#define CPUINFO_PROC { "model name", "Processor", }
446#elif defined(__arc__)
447#define CPUINFO_PROC { "Processor", }
448#elif defined(__xtensa__)
449#define CPUINFO_PROC { "core ID", }
450#else
451#define CPUINFO_PROC { "model name", }
452#endif
453 const char *cpuinfo_procs[] = CPUINFO_PROC;
454#undef CPUINFO_PROC
455 unsigned int i;
456
457 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
458 int ret;
459 ret = __write_cpudesc(ff, cpuinfo_procs[i]);
460 if (ret >= 0)
461 return ret;
462 }
463 return -1;
464}
465
466
467static int write_nrcpus(struct feat_fd *ff,
468 struct evlist *evlist __maybe_unused)
469{
470 long nr;
471 u32 nrc, nra;
472 int ret;
473
474 nrc = cpu__max_present_cpu();
475
476 nr = sysconf(_SC_NPROCESSORS_ONLN);
477 if (nr < 0)
478 return -1;
479
480 nra = (u32)(nr & UINT_MAX);
481
482 ret = do_write(ff, &nrc, sizeof(nrc));
483 if (ret < 0)
484 return ret;
485
486 return do_write(ff, &nra, sizeof(nra));
487}
488
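/*
 * Layout written below (a sketch derived from the do_write calls):
 *
 *	u32 nr_events;
 *	u32 attr_size;
 *	then per event: attr (attr_size bytes), u32 nr_ids,
 *	the event name string, u64 ids[nr_ids]
 */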
489static int write_event_desc(struct feat_fd *ff,
490 struct evlist *evlist)
491{
492 struct evsel *evsel;
493 u32 nre, nri, sz;
494 int ret;
495
496 nre = evlist->core.nr_entries;
497
498 /*
499 * write number of events
500 */
501 ret = do_write(ff, &nre, sizeof(nre));
502 if (ret < 0)
503 return ret;
504
505 /*
506 * size of perf_event_attr struct
507 */
508 sz = (u32)sizeof(evsel->core.attr);
509 ret = do_write(ff, &sz, sizeof(sz));
510 if (ret < 0)
511 return ret;
512
513 evlist__for_each_entry(evlist, evsel) {
514 ret = do_write(ff, &evsel->core.attr, sz);
515 if (ret < 0)
516 return ret;
517 /*
518		 * write the number of unique ids per event;
519		 * there is one id per instance of an event
520		 *
521		 * copy into nri to be independent of the
522		 * type of ids
523 */
524 nri = evsel->core.ids;
525 ret = do_write(ff, &nri, sizeof(nri));
526 if (ret < 0)
527 return ret;
528
529 /*
530 * write event string as passed on cmdline
531 */
532 ret = do_write_string(ff, evsel__name(evsel));
533 if (ret < 0)
534 return ret;
535 /*
536 * write unique ids for this event
537 */
538 ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64));
539 if (ret < 0)
540 return ret;
541 }
542 return 0;
543}
544
545static int write_cmdline(struct feat_fd *ff,
546 struct evlist *evlist __maybe_unused)
547{
548 char pbuf[MAXPATHLEN], *buf;
549 int i, ret, n;
550
551 /* actual path to perf binary */
552 buf = perf_exe(pbuf, MAXPATHLEN);
553
554 /* account for binary path */
555 n = perf_env.nr_cmdline + 1;
556
557 ret = do_write(ff, &n, sizeof(n));
558 if (ret < 0)
559 return ret;
560
561 ret = do_write_string(ff, buf);
562 if (ret < 0)
563 return ret;
564
565 for (i = 0 ; i < perf_env.nr_cmdline; i++) {
566 ret = do_write_string(ff, perf_env.cmdline_argv[i]);
567 if (ret < 0)
568 return ret;
569 }
570 return 0;
571}
572
573
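/*
 * Layout written below (a sketch derived from the do_write calls):
 *
 *	u32 core_sib;	followed by core_sib sibling strings
 *	u32 thread_sib;	followed by thread_sib sibling strings
 *	core_id and socket_id for each available CPU
 *	u32 die_sib;	followed by die_sib sibling strings	(if any)
 *	die_id for each available CPU		(only when die_sib is set)
 */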
574static int write_cpu_topology(struct feat_fd *ff,
575 struct evlist *evlist __maybe_unused)
576{
577 struct cpu_topology *tp;
578 u32 i;
579 int ret, j;
580
581 tp = cpu_topology__new();
582 if (!tp)
583 return -1;
584
585 ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
586 if (ret < 0)
587 goto done;
588
589 for (i = 0; i < tp->core_sib; i++) {
590 ret = do_write_string(ff, tp->core_siblings[i]);
591 if (ret < 0)
592 goto done;
593 }
594 ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
595 if (ret < 0)
596 goto done;
597
598 for (i = 0; i < tp->thread_sib; i++) {
599 ret = do_write_string(ff, tp->thread_siblings[i]);
600 if (ret < 0)
601 break;
602 }
603
604 ret = perf_env__read_cpu_topology_map(&perf_env);
605 if (ret < 0)
606 goto done;
607
608 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
609 ret = do_write(ff, &perf_env.cpu[j].core_id,
610 sizeof(perf_env.cpu[j].core_id));
611 if (ret < 0)
612 return ret;
613 ret = do_write(ff, &perf_env.cpu[j].socket_id,
614 sizeof(perf_env.cpu[j].socket_id));
615 if (ret < 0)
616 return ret;
617 }
618
619 if (!tp->die_sib)
620 goto done;
621
622 ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
623 if (ret < 0)
624 goto done;
625
626 for (i = 0; i < tp->die_sib; i++) {
627 ret = do_write_string(ff, tp->die_siblings[i]);
628 if (ret < 0)
629 goto done;
630 }
631
632 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
633 ret = do_write(ff, &perf_env.cpu[j].die_id,
634 sizeof(perf_env.cpu[j].die_id));
635 if (ret < 0)
636 return ret;
637 }
638
639done:
640 cpu_topology__delete(tp);
641 return ret;
642}
643
644
645
646static int write_total_mem(struct feat_fd *ff,
647 struct evlist *evlist __maybe_unused)
648{
649 char *buf = NULL;
650 FILE *fp;
651 size_t len = 0;
652 int ret = -1, n;
653 uint64_t mem;
654
655 fp = fopen("/proc/meminfo", "r");
656 if (!fp)
657 return -1;
658
659 while (getline(&buf, &len, fp) > 0) {
660 ret = strncmp(buf, "MemTotal:", 9);
661 if (!ret)
662 break;
663 }
664 if (!ret) {
665 n = sscanf(buf, "%*s %"PRIu64, &mem);
666 if (n == 1)
667 ret = do_write(ff, &mem, sizeof(mem));
668 } else
669 ret = -1;
670 free(buf);
671 fclose(fp);
672 return ret;
673}
674
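/*
 * Layout written below (sketch): u32 nr_nodes, then per node a u32 node
 * id, u64 mem_total, u64 mem_free and the node's cpu list string.
 */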
675static int write_numa_topology(struct feat_fd *ff,
676 struct evlist *evlist __maybe_unused)
677{
678 struct numa_topology *tp;
679 int ret = -1;
680 u32 i;
681
682 tp = numa_topology__new();
683 if (!tp)
684 return -ENOMEM;
685
686 ret = do_write(ff, &tp->nr, sizeof(u32));
687 if (ret < 0)
688 goto err;
689
690 for (i = 0; i < tp->nr; i++) {
691 struct numa_topology_node *n = &tp->nodes[i];
692
693 ret = do_write(ff, &n->node, sizeof(u32));
694 if (ret < 0)
695 goto err;
696
697 ret = do_write(ff, &n->mem_total, sizeof(u64));
698 if (ret)
699 goto err;
700
701 ret = do_write(ff, &n->mem_free, sizeof(u64));
702 if (ret)
703 goto err;
704
705 ret = do_write_string(ff, n->cpus);
706 if (ret < 0)
707 goto err;
708 }
709
710 ret = 0;
711
712err:
713 numa_topology__delete(tp);
714 return ret;
715}
716
717/*
718 * File format:
719 *
720 * struct pmu_mappings {
721 * u32 pmu_num;
722 * struct pmu_map {
723 * u32 type;
724 * char name[];
725 * }[pmu_num];
726 * };
727 */
728
729static int write_pmu_mappings(struct feat_fd *ff,
730 struct evlist *evlist __maybe_unused)
731{
732 struct perf_pmu *pmu = NULL;
733 u32 pmu_num = 0;
734 int ret;
735
736 /*
737	 * Do a first pass to count the number of PMUs, avoiding lseek so this
738 * works in pipe mode as well.
739 */
740 while ((pmu = perf_pmu__scan(pmu))) {
741 if (!pmu->name)
742 continue;
743 pmu_num++;
744 }
745
746 ret = do_write(ff, &pmu_num, sizeof(pmu_num));
747 if (ret < 0)
748 return ret;
749
750 while ((pmu = perf_pmu__scan(pmu))) {
751 if (!pmu->name)
752 continue;
753
754 ret = do_write(ff, &pmu->type, sizeof(pmu->type));
755 if (ret < 0)
756 return ret;
757
758 ret = do_write_string(ff, pmu->name);
759 if (ret < 0)
760 return ret;
761 }
762
763 return 0;
764}
765
766/*
767 * File format:
768 *
769 * struct group_descs {
770 * u32 nr_groups;
771 * struct group_desc {
772 * char name[];
773 * u32 leader_idx;
774 * u32 nr_members;
775 * }[nr_groups];
776 * };
777 */
778static int write_group_desc(struct feat_fd *ff,
779 struct evlist *evlist)
780{
781 u32 nr_groups = evlist->core.nr_groups;
782 struct evsel *evsel;
783 int ret;
784
785 ret = do_write(ff, &nr_groups, sizeof(nr_groups));
786 if (ret < 0)
787 return ret;
788
789 evlist__for_each_entry(evlist, evsel) {
790 if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
791 const char *name = evsel->group_name ?: "{anon_group}";
792 u32 leader_idx = evsel->core.idx;
793 u32 nr_members = evsel->core.nr_members;
794
795 ret = do_write_string(ff, name);
796 if (ret < 0)
797 return ret;
798
799 ret = do_write(ff, &leader_idx, sizeof(leader_idx));
800 if (ret < 0)
801 return ret;
802
803 ret = do_write(ff, &nr_members, sizeof(nr_members));
804 if (ret < 0)
805 return ret;
806 }
807 }
808 return 0;
809}
810
811/*
812 * Return the CPU id as a raw string.
813 *
814 * Each architecture should provide a more precise id string that
815 * can be use to match the architecture's "mapfile".
816 */
817char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
818{
819 return NULL;
820}
821
822/* Return zero when the cpuid from the mapfile.csv matches the
823 * cpuid string generated on this platform.
824 * Otherwise return non-zero.
825 */
826int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
827{
828 regex_t re;
829 regmatch_t pmatch[1];
830 int match;
831
832 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
833		/* Warn that no match could be generated for this particular string. */
834 pr_info("Invalid regular expression %s\n", mapcpuid);
835 return 1;
836 }
837
838 match = !regexec(&re, cpuid, 1, pmatch, 0);
839 regfree(&re);
840 if (match) {
841 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
842
843 /* Verify the entire string matched. */
844 if (match_len == strlen(cpuid))
845 return 0;
846 }
847 return 1;
848}
849
850/*
851 * default get_cpuid(): nothing gets recorded
852 * actual implementation must be in arch/$(SRCARCH)/util/header.c
853 */
854int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
855{
856 return ENOSYS; /* Not implemented */
857}
858
859static int write_cpuid(struct feat_fd *ff,
860 struct evlist *evlist __maybe_unused)
861{
862 char buffer[64];
863 int ret;
864
865 ret = get_cpuid(buffer, sizeof(buffer));
866 if (ret)
867 return -1;
868
869 return do_write_string(ff, buffer);
870}
871
872static int write_branch_stack(struct feat_fd *ff __maybe_unused,
873 struct evlist *evlist __maybe_unused)
874{
875 return 0;
876}
877
878static int write_auxtrace(struct feat_fd *ff,
879 struct evlist *evlist __maybe_unused)
880{
881 struct perf_session *session;
882 int err;
883
884 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
885 return -1;
886
887 session = container_of(ff->ph, struct perf_session, header);
888
889 err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
890 if (err < 0)
891 pr_err("Failed to write auxtrace index\n");
892 return err;
893}
894
895static int write_clockid(struct feat_fd *ff,
896 struct evlist *evlist __maybe_unused)
897{
898 return do_write(ff, &ff->ph->env.clock.clockid_res_ns,
899 sizeof(ff->ph->env.clock.clockid_res_ns));
900}
901
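/*
 * CLOCK_DATA layout (sketch): u32 version (currently 1), u32 clockid,
 * u64 TOD reference time, u64 clockid reference time.
 */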
902static int write_clock_data(struct feat_fd *ff,
903 struct evlist *evlist __maybe_unused)
904{
905 u64 *data64;
906 u32 data32;
907 int ret;
908
909 /* version */
910 data32 = 1;
911
912 ret = do_write(ff, &data32, sizeof(data32));
913 if (ret < 0)
914 return ret;
915
916 /* clockid */
917 data32 = ff->ph->env.clock.clockid;
918
919 ret = do_write(ff, &data32, sizeof(data32));
920 if (ret < 0)
921 return ret;
922
923 /* TOD ref time */
924 data64 = &ff->ph->env.clock.tod_ns;
925
926 ret = do_write(ff, data64, sizeof(*data64));
927 if (ret < 0)
928 return ret;
929
930 /* clockid ref time */
931 data64 = &ff->ph->env.clock.clockid_ns;
932
933 return do_write(ff, data64, sizeof(*data64));
934}
935
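/*
 * HYBRID_TOPOLOGY layout (sketch): u32 nr, then per node the PMU name
 * and its cpu list, both as padded strings.
 */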
936static int write_hybrid_topology(struct feat_fd *ff,
937 struct evlist *evlist __maybe_unused)
938{
939 struct hybrid_topology *tp;
940 int ret;
941 u32 i;
942
943 tp = hybrid_topology__new();
944 if (!tp)
945 return -ENOENT;
946
947 ret = do_write(ff, &tp->nr, sizeof(u32));
948 if (ret < 0)
949 goto err;
950
951 for (i = 0; i < tp->nr; i++) {
952 struct hybrid_topology_node *n = &tp->nodes[i];
953
954 ret = do_write_string(ff, n->pmu_name);
955 if (ret < 0)
956 goto err;
957
958 ret = do_write_string(ff, n->cpus);
959 if (ret < 0)
960 goto err;
961 }
962
963 ret = 0;
964
965err:
966 hybrid_topology__delete(tp);
967 return ret;
968}
969
970static int write_dir_format(struct feat_fd *ff,
971 struct evlist *evlist __maybe_unused)
972{
973 struct perf_session *session;
974 struct perf_data *data;
975
976 session = container_of(ff->ph, struct perf_session, header);
977 data = session->data;
978
979 if (WARN_ON(!perf_data__is_dir(data)))
980 return -1;
981
982 return do_write(ff, &data->dir.version, sizeof(data->dir.version));
983}
984
985#ifdef HAVE_LIBBPF_SUPPORT
986static int write_bpf_prog_info(struct feat_fd *ff,
987 struct evlist *evlist __maybe_unused)
988{
989 struct perf_env *env = &ff->ph->env;
990 struct rb_root *root;
991 struct rb_node *next;
992 int ret;
993
994 down_read(&env->bpf_progs.lock);
995
996 ret = do_write(ff, &env->bpf_progs.infos_cnt,
997 sizeof(env->bpf_progs.infos_cnt));
998 if (ret < 0)
999 goto out;
1000
1001 root = &env->bpf_progs.infos;
1002 next = rb_first(root);
1003 while (next) {
1004 struct bpf_prog_info_node *node;
1005 size_t len;
1006
1007 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
1008 next = rb_next(&node->rb_node);
1009 len = sizeof(struct bpf_prog_info_linear) +
1010 node->info_linear->data_len;
1011
1012 /* before writing to file, translate address to offset */
1013 bpf_program__bpil_addr_to_offs(node->info_linear);
1014 ret = do_write(ff, node->info_linear, len);
1015 /*
1016 * translate back to address even when do_write() fails,
1017 * so that this function never changes the data.
1018 */
1019 bpf_program__bpil_offs_to_addr(node->info_linear);
1020 if (ret < 0)
1021 goto out;
1022 }
1023out:
1024 up_read(&env->bpf_progs.lock);
1025 return ret;
1026}
1027
1028static int write_bpf_btf(struct feat_fd *ff,
1029 struct evlist *evlist __maybe_unused)
1030{
1031 struct perf_env *env = &ff->ph->env;
1032 struct rb_root *root;
1033 struct rb_node *next;
1034 int ret;
1035
1036 down_read(&env->bpf_progs.lock);
1037
1038 ret = do_write(ff, &env->bpf_progs.btfs_cnt,
1039 sizeof(env->bpf_progs.btfs_cnt));
1040
1041 if (ret < 0)
1042 goto out;
1043
1044 root = &env->bpf_progs.btfs;
1045 next = rb_first(root);
1046 while (next) {
1047 struct btf_node *node;
1048
1049 node = rb_entry(next, struct btf_node, rb_node);
1050 next = rb_next(&node->rb_node);
1051 ret = do_write(ff, &node->id,
1052 sizeof(u32) * 2 + node->data_size);
1053 if (ret < 0)
1054 goto out;
1055 }
1056out:
1057 up_read(&env->bpf_progs.lock);
1058 return ret;
1059}
1060#endif // HAVE_LIBBPF_SUPPORT
1061
1062static int cpu_cache_level__sort(const void *a, const void *b)
1063{
1064 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
1065 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
1066
1067 return cache_a->level - cache_b->level;
1068}
1069
1070static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
1071{
1072 if (a->level != b->level)
1073 return false;
1074
1075 if (a->line_size != b->line_size)
1076 return false;
1077
1078 if (a->sets != b->sets)
1079 return false;
1080
1081 if (a->ways != b->ways)
1082 return false;
1083
1084 if (strcmp(a->type, b->type))
1085 return false;
1086
1087 if (strcmp(a->size, b->size))
1088 return false;
1089
1090 if (strcmp(a->map, b->map))
1091 return false;
1092
1093 return true;
1094}
1095
1096static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
1097{
1098 char path[PATH_MAX], file[PATH_MAX];
1099 struct stat st;
1100 size_t len;
1101
1102 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
1103 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
1104
1105 if (stat(file, &st))
1106 return 1;
1107
1108 scnprintf(file, PATH_MAX, "%s/level", path);
1109 if (sysfs__read_int(file, (int *) &cache->level))
1110 return -1;
1111
1112 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
1113 if (sysfs__read_int(file, (int *) &cache->line_size))
1114 return -1;
1115
1116 scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
1117 if (sysfs__read_int(file, (int *) &cache->sets))
1118 return -1;
1119
1120 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
1121 if (sysfs__read_int(file, (int *) &cache->ways))
1122 return -1;
1123
1124 scnprintf(file, PATH_MAX, "%s/type", path);
1125 if (sysfs__read_str(file, &cache->type, &len))
1126 return -1;
1127
1128 cache->type[len] = 0;
1129 cache->type = strim(cache->type);
1130
1131 scnprintf(file, PATH_MAX, "%s/size", path);
1132 if (sysfs__read_str(file, &cache->size, &len)) {
1133 zfree(&cache->type);
1134 return -1;
1135 }
1136
1137 cache->size[len] = 0;
1138 cache->size = strim(cache->size);
1139
1140 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
1141 if (sysfs__read_str(file, &cache->map, &len)) {
1142 zfree(&cache->size);
1143 zfree(&cache->type);
1144 return -1;
1145 }
1146
1147 cache->map[len] = 0;
1148 cache->map = strim(cache->map);
1149 return 0;
1150}
1151
1152static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
1153{
1154 fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
1155}
1156
1157#define MAX_CACHE_LVL 4
1158
1159static int build_caches(struct cpu_cache_level caches[], u32 *cntp)
1160{
1161 u32 i, cnt = 0;
1162 u32 nr, cpu;
1163 u16 level;
1164
1165 nr = cpu__max_cpu();
1166
1167 for (cpu = 0; cpu < nr; cpu++) {
1168 for (level = 0; level < MAX_CACHE_LVL; level++) {
1169 struct cpu_cache_level c;
1170 int err;
1171
1172 err = cpu_cache_level__read(&c, cpu, level);
1173 if (err < 0)
1174 return err;
1175
1176 if (err == 1)
1177 break;
1178
1179 for (i = 0; i < cnt; i++) {
1180 if (cpu_cache_level__cmp(&c, &caches[i]))
1181 break;
1182 }
1183
1184 if (i == cnt)
1185 caches[cnt++] = c;
1186 else
1187 cpu_cache_level__free(&c);
1188 }
1189 }
1190 *cntp = cnt;
1191 return 0;
1192}
1193
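/*
 * CACHE layout (sketch): u32 version (currently 1), u32 cnt, then per
 * cache four u32s (level, line_size, sets, ways) followed by the type,
 * size and shared_cpu_list strings.
 */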
1194static int write_cache(struct feat_fd *ff,
1195 struct evlist *evlist __maybe_unused)
1196{
1197 u32 max_caches = cpu__max_cpu() * MAX_CACHE_LVL;
1198 struct cpu_cache_level caches[max_caches];
1199 u32 cnt = 0, i, version = 1;
1200 int ret;
1201
1202 ret = build_caches(caches, &cnt);
1203 if (ret)
1204 goto out;
1205
1206 qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1207
1208 ret = do_write(ff, &version, sizeof(u32));
1209 if (ret < 0)
1210 goto out;
1211
1212 ret = do_write(ff, &cnt, sizeof(u32));
1213 if (ret < 0)
1214 goto out;
1215
1216 for (i = 0; i < cnt; i++) {
1217 struct cpu_cache_level *c = &caches[i];
1218
1219 #define _W(v) \
1220 ret = do_write(ff, &c->v, sizeof(u32)); \
1221 if (ret < 0) \
1222 goto out;
1223
1224 _W(level)
1225 _W(line_size)
1226 _W(sets)
1227 _W(ways)
1228 #undef _W
1229
1230 #define _W(v) \
1231 ret = do_write_string(ff, (const char *) c->v); \
1232 if (ret < 0) \
1233 goto out;
1234
1235 _W(type)
1236 _W(size)
1237 _W(map)
1238 #undef _W
1239 }
1240
1241out:
1242 for (i = 0; i < cnt; i++)
1243 cpu_cache_level__free(&caches[i]);
1244 return ret;
1245}
1246
1247static int write_stat(struct feat_fd *ff __maybe_unused,
1248 struct evlist *evlist __maybe_unused)
1249{
1250 return 0;
1251}
1252
1253static int write_sample_time(struct feat_fd *ff,
1254 struct evlist *evlist)
1255{
1256 int ret;
1257
1258 ret = do_write(ff, &evlist->first_sample_time,
1259 sizeof(evlist->first_sample_time));
1260 if (ret < 0)
1261 return ret;
1262
1263 return do_write(ff, &evlist->last_sample_time,
1264 sizeof(evlist->last_sample_time));
1265}
1266
1267
1268static int memory_node__read(struct memory_node *n, unsigned long idx)
1269{
1270 unsigned int phys, size = 0;
1271 char path[PATH_MAX];
1272 struct dirent *ent;
1273 DIR *dir;
1274
1275#define for_each_memory(mem, dir) \
1276 while ((ent = readdir(dir))) \
1277 if (strcmp(ent->d_name, ".") && \
1278 strcmp(ent->d_name, "..") && \
1279 sscanf(ent->d_name, "memory%u", &mem) == 1)
1280
1281 scnprintf(path, PATH_MAX,
1282 "%s/devices/system/node/node%lu",
1283 sysfs__mountpoint(), idx);
1284
1285 dir = opendir(path);
1286 if (!dir) {
1287		pr_warning("failed: can't open memory sysfs data\n");
1288 return -1;
1289 }
1290
1291 for_each_memory(phys, dir) {
1292 size = max(phys, size);
1293 }
1294
1295 size++;
1296
1297 n->set = bitmap_alloc(size);
1298 if (!n->set) {
1299 closedir(dir);
1300 return -ENOMEM;
1301 }
1302
1303 n->node = idx;
1304 n->size = size;
1305
1306 rewinddir(dir);
1307
1308 for_each_memory(phys, dir) {
1309 set_bit(phys, n->set);
1310 }
1311
1312 closedir(dir);
1313 return 0;
1314}
1315
1316static int memory_node__sort(const void *a, const void *b)
1317{
1318 const struct memory_node *na = a;
1319 const struct memory_node *nb = b;
1320
1321 return na->node - nb->node;
1322}
1323
1324static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
1325{
1326 char path[PATH_MAX];
1327 struct dirent *ent;
1328 DIR *dir;
1329 u64 cnt = 0;
1330 int ret = 0;
1331
1332 scnprintf(path, PATH_MAX, "%s/devices/system/node/",
1333 sysfs__mountpoint());
1334
1335 dir = opendir(path);
1336 if (!dir) {
1337		pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
1338 __func__, path);
1339 return -1;
1340 }
1341
1342 while (!ret && (ent = readdir(dir))) {
1343 unsigned int idx;
1344 int r;
1345
1346 if (!strcmp(ent->d_name, ".") ||
1347 !strcmp(ent->d_name, ".."))
1348 continue;
1349
1350 r = sscanf(ent->d_name, "node%u", &idx);
1351 if (r != 1)
1352 continue;
1353
1354 if (WARN_ONCE(cnt >= size,
1355 "failed to write MEM_TOPOLOGY, way too many nodes\n")) {
1356 closedir(dir);
1357 return -1;
1358 }
1359
1360 ret = memory_node__read(&nodes[cnt++], idx);
1361 }
1362
1363 *cntp = cnt;
1364 closedir(dir);
1365
1366 if (!ret)
1367 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
1368
1369 return ret;
1370}
1371
1372#define MAX_MEMORY_NODES 2000
1373
1374/*
1375 * The MEM_TOPOLOGY feature holds the physical memory map for
1376 * every node in the system. The format of the data is as follows:
1377 *
1378 * 0 - version | for future changes
1379 * 8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
1380 * 16 - count | number of nodes
1381 *
1382 * For each node we store a map of the physical memory
1383 * indexes belonging to that node:
1384 *
1385 * 32 - node id | node index
1386 * 40 - size | size of bitmap
1387 * 48 - bitmap | bitmap of memory indexes that belong to the node
1388 */
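/*
 * For example, a hypothetical two-node system with 128MB (0x8000000 bytes)
 * memory blocks could decode as:
 *
 *   version          = 1
 *   block_size_bytes = 0x8000000
 *   count            = 2
 *   node 0: size = 4, bitmap = 0-1   (blocks 0 and 1 belong to node 0)
 *   node 1: size = 4, bitmap = 2-3   (blocks 2 and 3 belong to node 1)
 */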
1389static int write_mem_topology(struct feat_fd *ff __maybe_unused,
1390 struct evlist *evlist __maybe_unused)
1391{
1392 static struct memory_node nodes[MAX_MEMORY_NODES];
1393 u64 bsize, version = 1, i, nr;
1394 int ret;
1395
1396 ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
1397 (unsigned long long *) &bsize);
1398 if (ret)
1399 return ret;
1400
1401 ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
1402 if (ret)
1403 return ret;
1404
1405 ret = do_write(ff, &version, sizeof(version));
1406 if (ret < 0)
1407 goto out;
1408
1409 ret = do_write(ff, &bsize, sizeof(bsize));
1410 if (ret < 0)
1411 goto out;
1412
1413 ret = do_write(ff, &nr, sizeof(nr));
1414 if (ret < 0)
1415 goto out;
1416
1417 for (i = 0; i < nr; i++) {
1418 struct memory_node *n = &nodes[i];
1419
1420 #define _W(v) \
1421 ret = do_write(ff, &n->v, sizeof(n->v)); \
1422 if (ret < 0) \
1423 goto out;
1424
1425 _W(node)
1426 _W(size)
1427
1428 #undef _W
1429
1430 ret = do_write_bitmap(ff, n->set, n->size);
1431 if (ret < 0)
1432 goto out;
1433 }
1434
1435out:
1436 return ret;
1437}
1438
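/*
 * The COMPRESSED feature stores five u32 values taken from the recorded
 * environment: comp_ver, comp_type, comp_level, comp_ratio and comp_mmap_len.
 */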
1439static int write_compressed(struct feat_fd *ff __maybe_unused,
1440 struct evlist *evlist __maybe_unused)
1441{
1442 int ret;
1443
1444 ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
1445 if (ret)
1446 return ret;
1447
1448 ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
1449 if (ret)
1450 return ret;
1451
1452 ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
1453 if (ret)
1454 return ret;
1455
1456 ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
1457 if (ret)
1458 return ret;
1459
1460 return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
1461}
1462
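/*
 * Write the capabilities of one PMU: a u32 number of caps followed by a
 * name/value string pair per capability and, when write_pmu is set (the
 * hybrid case), the PMU name string at the end.
 */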
1463static int write_per_cpu_pmu_caps(struct feat_fd *ff, struct perf_pmu *pmu,
1464 bool write_pmu)
1465{
1466 struct perf_pmu_caps *caps = NULL;
1467 int nr_caps;
1468 int ret;
1469
1470 nr_caps = perf_pmu__caps_parse(pmu);
1471 if (nr_caps < 0)
1472 return nr_caps;
1473
1474 ret = do_write(ff, &nr_caps, sizeof(nr_caps));
1475 if (ret < 0)
1476 return ret;
1477
1478 list_for_each_entry(caps, &pmu->caps, list) {
1479 ret = do_write_string(ff, caps->name);
1480 if (ret < 0)
1481 return ret;
1482
1483 ret = do_write_string(ff, caps->value);
1484 if (ret < 0)
1485 return ret;
1486 }
1487
1488 if (write_pmu) {
1489 ret = do_write_string(ff, pmu->name);
1490 if (ret < 0)
1491 return ret;
1492 }
1493
1494 return ret;
1495}
1496
1497static int write_cpu_pmu_caps(struct feat_fd *ff,
1498 struct evlist *evlist __maybe_unused)
1499{
1500 struct perf_pmu *cpu_pmu = perf_pmu__find("cpu");
1501
1502 if (!cpu_pmu)
1503 return -ENOENT;
1504
1505 return write_per_cpu_pmu_caps(ff, cpu_pmu, false);
1506}
1507
1508static int write_hybrid_cpu_pmu_caps(struct feat_fd *ff,
1509 struct evlist *evlist __maybe_unused)
1510{
1511 struct perf_pmu *pmu;
1512 u32 nr_pmu = perf_pmu__hybrid_pmu_num();
1513 int ret;
1514
1515 if (nr_pmu == 0)
1516 return -ENOENT;
1517
1518 ret = do_write(ff, &nr_pmu, sizeof(nr_pmu));
1519 if (ret < 0)
1520 return ret;
1521
1522 perf_pmu__for_each_hybrid_pmu(pmu) {
1523 ret = write_per_cpu_pmu_caps(ff, pmu, true);
1524 if (ret < 0)
1525 return ret;
1526 }
1527
1528 return 0;
1529}
1530
1531static void print_hostname(struct feat_fd *ff, FILE *fp)
1532{
1533 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
1534}
1535
1536static void print_osrelease(struct feat_fd *ff, FILE *fp)
1537{
1538 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
1539}
1540
1541static void print_arch(struct feat_fd *ff, FILE *fp)
1542{
1543 fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
1544}
1545
1546static void print_cpudesc(struct feat_fd *ff, FILE *fp)
1547{
1548 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
1549}
1550
1551static void print_nrcpus(struct feat_fd *ff, FILE *fp)
1552{
1553 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
1554 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
1555}
1556
1557static void print_version(struct feat_fd *ff, FILE *fp)
1558{
1559 fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
1560}
1561
1562static void print_cmdline(struct feat_fd *ff, FILE *fp)
1563{
1564 int nr, i;
1565
1566 nr = ff->ph->env.nr_cmdline;
1567
1568 fprintf(fp, "# cmdline : ");
1569
1570 for (i = 0; i < nr; i++) {
1571 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
1572 if (!argv_i) {
1573 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1574 } else {
1575 char *mem = argv_i;
1576 do {
1577 char *quote = strchr(argv_i, '\'');
1578 if (!quote)
1579 break;
1580 *quote++ = '\0';
1581 fprintf(fp, "%s\\\'", argv_i);
1582 argv_i = quote;
1583 } while (1);
1584 fprintf(fp, "%s ", argv_i);
1585 free(mem);
1586 }
1587 }
1588 fputc('\n', fp);
1589}
1590
1591static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
1592{
1593 struct perf_header *ph = ff->ph;
1594 int cpu_nr = ph->env.nr_cpus_avail;
1595 int nr, i;
1596 char *str;
1597
1598 nr = ph->env.nr_sibling_cores;
1599 str = ph->env.sibling_cores;
1600
1601 for (i = 0; i < nr; i++) {
1602 fprintf(fp, "# sibling sockets : %s\n", str);
1603 str += strlen(str) + 1;
1604 }
1605
1606 if (ph->env.nr_sibling_dies) {
1607 nr = ph->env.nr_sibling_dies;
1608 str = ph->env.sibling_dies;
1609
1610 for (i = 0; i < nr; i++) {
1611 fprintf(fp, "# sibling dies : %s\n", str);
1612 str += strlen(str) + 1;
1613 }
1614 }
1615
1616 nr = ph->env.nr_sibling_threads;
1617 str = ph->env.sibling_threads;
1618
1619 for (i = 0; i < nr; i++) {
1620 fprintf(fp, "# sibling threads : %s\n", str);
1621 str += strlen(str) + 1;
1622 }
1623
1624 if (ph->env.nr_sibling_dies) {
1625 if (ph->env.cpu != NULL) {
1626 for (i = 0; i < cpu_nr; i++)
1627 fprintf(fp, "# CPU %d: Core ID %d, "
1628 "Die ID %d, Socket ID %d\n",
1629 i, ph->env.cpu[i].core_id,
1630 ph->env.cpu[i].die_id,
1631 ph->env.cpu[i].socket_id);
1632 } else
1633 fprintf(fp, "# Core ID, Die ID and Socket ID "
1634 "information is not available\n");
1635 } else {
1636 if (ph->env.cpu != NULL) {
1637 for (i = 0; i < cpu_nr; i++)
1638 fprintf(fp, "# CPU %d: Core ID %d, "
1639 "Socket ID %d\n",
1640 i, ph->env.cpu[i].core_id,
1641 ph->env.cpu[i].socket_id);
1642 } else
1643 fprintf(fp, "# Core ID and Socket ID "
1644 "information is not available\n");
1645 }
1646}
1647
1648static void print_clockid(struct feat_fd *ff, FILE *fp)
1649{
1650 fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
1651 ff->ph->env.clock.clockid_res_ns * 1000);
1652}
1653
1654static void print_clock_data(struct feat_fd *ff, FILE *fp)
1655{
1656 struct timespec clockid_ns;
1657 char tstr[64], date[64];
1658 struct timeval tod_ns;
1659 clockid_t clockid;
1660 struct tm ltime;
1661 u64 ref;
1662
1663 if (!ff->ph->env.clock.enabled) {
1664 fprintf(fp, "# reference time disabled\n");
1665 return;
1666 }
1667
1668 /* Compute TOD time. */
1669 ref = ff->ph->env.clock.tod_ns;
1670 tod_ns.tv_sec = ref / NSEC_PER_SEC;
1671 ref -= tod_ns.tv_sec * NSEC_PER_SEC;
1672 tod_ns.tv_usec = ref / NSEC_PER_USEC;
1673
1674 /* Compute clockid time. */
1675 ref = ff->ph->env.clock.clockid_ns;
1676 clockid_ns.tv_sec = ref / NSEC_PER_SEC;
1677 ref -= clockid_ns.tv_sec * NSEC_PER_SEC;
1678 clockid_ns.tv_nsec = ref;
1679
1680 clockid = ff->ph->env.clock.clockid;
1681
1682 if (localtime_r(&tod_ns.tv_sec, &ltime) == NULL)
1683 snprintf(tstr, sizeof(tstr), "<error>");
1684 else {
1685 strftime(date, sizeof(date), "%F %T", &ltime);
1686 scnprintf(tstr, sizeof(tstr), "%s.%06d",
1687 date, (int) tod_ns.tv_usec);
1688 }
1689
1690 fprintf(fp, "# clockid: %s (%u)\n", clockid_name(clockid), clockid);
1691 fprintf(fp, "# reference time: %s = %ld.%06d (TOD) = %ld.%09ld (%s)\n",
1692 tstr, (long) tod_ns.tv_sec, (int) tod_ns.tv_usec,
1693 (long) clockid_ns.tv_sec, clockid_ns.tv_nsec,
1694 clockid_name(clockid));
1695}
1696
1697static void print_hybrid_topology(struct feat_fd *ff, FILE *fp)
1698{
1699 int i;
1700 struct hybrid_node *n;
1701
1702 fprintf(fp, "# hybrid cpu system:\n");
1703 for (i = 0; i < ff->ph->env.nr_hybrid_nodes; i++) {
1704 n = &ff->ph->env.hybrid_nodes[i];
1705 fprintf(fp, "# %s cpu list : %s\n", n->pmu_name, n->cpus);
1706 }
1707}
1708
1709static void print_dir_format(struct feat_fd *ff, FILE *fp)
1710{
1711 struct perf_session *session;
1712 struct perf_data *data;
1713
1714 session = container_of(ff->ph, struct perf_session, header);
1715 data = session->data;
1716
1717 fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
1718}
1719
1720#ifdef HAVE_LIBBPF_SUPPORT
1721static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
1722{
1723 struct perf_env *env = &ff->ph->env;
1724 struct rb_root *root;
1725 struct rb_node *next;
1726
1727 down_read(&env->bpf_progs.lock);
1728
1729 root = &env->bpf_progs.infos;
1730 next = rb_first(root);
1731
1732 while (next) {
1733 struct bpf_prog_info_node *node;
1734
1735 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
1736 next = rb_next(&node->rb_node);
1737
1738 bpf_event__print_bpf_prog_info(&node->info_linear->info,
1739 env, fp);
1740 }
1741
1742 up_read(&env->bpf_progs.lock);
1743}
1744
1745static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
1746{
1747 struct perf_env *env = &ff->ph->env;
1748 struct rb_root *root;
1749 struct rb_node *next;
1750
1751 down_read(&env->bpf_progs.lock);
1752
1753 root = &env->bpf_progs.btfs;
1754 next = rb_first(root);
1755
1756 while (next) {
1757 struct btf_node *node;
1758
1759 node = rb_entry(next, struct btf_node, rb_node);
1760 next = rb_next(&node->rb_node);
1761 fprintf(fp, "# btf info of id %u\n", node->id);
1762 }
1763
1764 up_read(&env->bpf_progs.lock);
1765}
1766#endif // HAVE_LIBBPF_SUPPORT
1767
1768static void free_event_desc(struct evsel *events)
1769{
1770 struct evsel *evsel;
1771
1772 if (!events)
1773 return;
1774
1775 for (evsel = events; evsel->core.attr.size; evsel++) {
1776 zfree(&evsel->name);
1777 zfree(&evsel->core.id);
1778 }
1779
1780 free(events);
1781}
1782
1783static bool perf_attr_check(struct perf_event_attr *attr)
1784{
1785 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) {
1786 pr_warning("Reserved bits are set unexpectedly. "
1787 "Please update perf tool.\n");
1788 return false;
1789 }
1790
1791 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) {
1792 pr_warning("Unknown sample type (0x%llx) is detected. "
1793 "Please update perf tool.\n",
1794 attr->sample_type);
1795 return false;
1796 }
1797
1798 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) {
1799 pr_warning("Unknown read format (0x%llx) is detected. "
1800 "Please update perf tool.\n",
1801 attr->read_format);
1802 return false;
1803 }
1804
1805 if ((attr->sample_type & PERF_SAMPLE_BRANCH_STACK) &&
1806 (attr->branch_sample_type & ~(PERF_SAMPLE_BRANCH_MAX-1))) {
1807 pr_warning("Unknown branch sample type (0x%llx) is detected. "
1808 "Please update perf tool.\n",
1809 attr->branch_sample_type);
1810
1811 return false;
1812 }
1813
1814 return true;
1815}
1816
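/*
 * Read the EVENT_DESC section: a u32 number of events and a u32 on-file attr
 * size, then, per event, the attr struct, a u32 number of ids, the event name
 * string and the u64 ids themselves. The returned array is terminated by an
 * entry with core.attr.size == 0.
 */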
1817static struct evsel *read_event_desc(struct feat_fd *ff)
1818{
1819 struct evsel *evsel, *events = NULL;
1820 u64 *id;
1821 void *buf = NULL;
1822 u32 nre, sz, nr, i, j;
1823 size_t msz;
1824
1825 /* number of events */
1826 if (do_read_u32(ff, &nre))
1827 goto error;
1828
1829 if (do_read_u32(ff, &sz))
1830 goto error;
1831
1832 /* buffer to hold the on-file attr struct */
1833 buf = malloc(sz);
1834 if (!buf)
1835 goto error;
1836
1837 /* the last event terminates with evsel->core.attr.size == 0: */
1838 events = calloc(nre + 1, sizeof(*events));
1839 if (!events)
1840 goto error;
1841
1842 msz = sizeof(evsel->core.attr);
1843 if (sz < msz)
1844 msz = sz;
1845
1846 for (i = 0, evsel = events; i < nre; evsel++, i++) {
1847 evsel->core.idx = i;
1848
1849 /*
1850 * must read the entire on-file attr struct to
1851 * sync up with the layout.
1852 */
1853 if (__do_read(ff, buf, sz))
1854 goto error;
1855
1856 if (ff->ph->needs_swap)
1857 perf_event__attr_swap(buf);
1858
1859 memcpy(&evsel->core.attr, buf, msz);
1860
1861 if (!perf_attr_check(&evsel->core.attr))
1862 goto error;
1863
1864 if (do_read_u32(ff, &nr))
1865 goto error;
1866
1867 if (ff->ph->needs_swap)
1868 evsel->needs_swap = true;
1869
1870 evsel->name = do_read_string(ff);
1871 if (!evsel->name)
1872 goto error;
1873
1874 if (!nr)
1875 continue;
1876
1877 id = calloc(nr, sizeof(*id));
1878 if (!id)
1879 goto error;
1880 evsel->core.ids = nr;
1881 evsel->core.id = id;
1882
1883 for (j = 0 ; j < nr; j++) {
1884 if (do_read_u64(ff, id))
1885 goto error;
1886 id++;
1887 }
1888 }
1889out:
1890 free(buf);
1891 return events;
1892error:
1893 free_event_desc(events);
1894 events = NULL;
1895 goto out;
1896}
1897
1898static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1899 void *priv __maybe_unused)
1900{
1901 return fprintf(fp, ", %s = %s", name, val);
1902}
1903
1904static void print_event_desc(struct feat_fd *ff, FILE *fp)
1905{
1906 struct evsel *evsel, *events;
1907 u32 j;
1908 u64 *id;
1909
1910 if (ff->events)
1911 events = ff->events;
1912 else
1913 events = read_event_desc(ff);
1914
1915 if (!events) {
1916 fprintf(fp, "# event desc: not available or unable to read\n");
1917 return;
1918 }
1919
1920 for (evsel = events; evsel->core.attr.size; evsel++) {
1921 fprintf(fp, "# event : name = %s, ", evsel->name);
1922
1923 if (evsel->core.ids) {
1924 fprintf(fp, ", id = {");
1925 for (j = 0, id = evsel->core.id; j < evsel->core.ids; j++, id++) {
1926 if (j)
1927 fputc(',', fp);
1928 fprintf(fp, " %"PRIu64, *id);
1929 }
1930 fprintf(fp, " }");
1931 }
1932
1933 perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);
1934
1935 fputc('\n', fp);
1936 }
1937
1938 free_event_desc(events);
1939 ff->events = NULL;
1940}
1941
1942static void print_total_mem(struct feat_fd *ff, FILE *fp)
1943{
1944 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
1945}
1946
1947static void print_numa_topology(struct feat_fd *ff, FILE *fp)
1948{
1949 int i;
1950 struct numa_node *n;
1951
1952 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
1953 n = &ff->ph->env.numa_nodes[i];
1954
1955 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
1956 " free = %"PRIu64" kB\n",
1957 n->node, n->mem_total, n->mem_free);
1958
1959 fprintf(fp, "# node%u cpu list : ", n->node);
1960 cpu_map__fprintf(n->map, fp);
1961 }
1962}
1963
1964static void print_cpuid(struct feat_fd *ff, FILE *fp)
1965{
1966 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
1967}
1968
1969static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
1970{
1971 fprintf(fp, "# contains samples with branch stack\n");
1972}
1973
1974static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
1975{
1976 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1977}
1978
1979static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
1980{
1981 fprintf(fp, "# contains stat data\n");
1982}
1983
1984static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
1985{
1986 int i;
1987
1988 fprintf(fp, "# CPU cache info:\n");
1989 for (i = 0; i < ff->ph->env.caches_cnt; i++) {
1990 fprintf(fp, "# ");
1991 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
1992 }
1993}
1994
1995static void print_compressed(struct feat_fd *ff, FILE *fp)
1996{
1997 fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
1998 ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
1999 ff->ph->env.comp_level, ff->ph->env.comp_ratio);
2000}
2001
2002static void print_per_cpu_pmu_caps(FILE *fp, int nr_caps, char *cpu_pmu_caps,
2003 char *pmu_name)
2004{
2005 const char *delimiter;
2006 char *str, buf[128];
2007
2008 if (!nr_caps) {
2009 if (!pmu_name)
2010 fprintf(fp, "# cpu pmu capabilities: not available\n");
2011 else
2012 fprintf(fp, "# %s pmu capabilities: not available\n", pmu_name);
2013 return;
2014 }
2015
2016 if (!pmu_name)
2017 scnprintf(buf, sizeof(buf), "# cpu pmu capabilities: ");
2018 else
2019 scnprintf(buf, sizeof(buf), "# %s pmu capabilities: ", pmu_name);
2020
2021 delimiter = buf;
2022
2023 str = cpu_pmu_caps;
2024 while (nr_caps--) {
2025 fprintf(fp, "%s%s", delimiter, str);
2026 delimiter = ", ";
2027 str += strlen(str) + 1;
2028 }
2029
2030 fprintf(fp, "\n");
2031}
2032
2033static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
2034{
2035 print_per_cpu_pmu_caps(fp, ff->ph->env.nr_cpu_pmu_caps,
2036 ff->ph->env.cpu_pmu_caps, NULL);
2037}
2038
2039static void print_hybrid_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
2040{
2041 struct hybrid_cpc_node *n;
2042
2043 for (int i = 0; i < ff->ph->env.nr_hybrid_cpc_nodes; i++) {
2044 n = &ff->ph->env.hybrid_cpc_nodes[i];
2045 print_per_cpu_pmu_caps(fp, n->nr_cpu_pmu_caps,
2046 n->cpu_pmu_caps,
2047 n->pmu_name);
2048 }
2049}
2050
2051static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
2052{
2053 const char *delimiter = "# pmu mappings: ";
2054 char *str, *tmp;
2055 u32 pmu_num;
2056 u32 type;
2057
2058 pmu_num = ff->ph->env.nr_pmu_mappings;
2059 if (!pmu_num) {
2060 fprintf(fp, "# pmu mappings: not available\n");
2061 return;
2062 }
2063
2064 str = ff->ph->env.pmu_mappings;
2065
2066 while (pmu_num) {
2067 type = strtoul(str, &tmp, 0);
2068 if (*tmp != ':')
2069 goto error;
2070
2071 str = tmp + 1;
2072 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
2073
2074 delimiter = ", ";
2075 str += strlen(str) + 1;
2076 pmu_num--;
2077 }
2078
2079 fprintf(fp, "\n");
2080
2081 if (!pmu_num)
2082 return;
2083error:
2084 fprintf(fp, "# pmu mappings: unable to read\n");
2085}
2086
2087static void print_group_desc(struct feat_fd *ff, FILE *fp)
2088{
2089 struct perf_session *session;
2090 struct evsel *evsel;
2091 u32 nr = 0;
2092
2093 session = container_of(ff->ph, struct perf_session, header);
2094
2095 evlist__for_each_entry(session->evlist, evsel) {
2096 if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
2097 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", evsel__name(evsel));
2098
2099 nr = evsel->core.nr_members - 1;
2100 } else if (nr) {
2101 fprintf(fp, ",%s", evsel__name(evsel));
2102
2103 if (--nr == 0)
2104 fprintf(fp, "}\n");
2105 }
2106 }
2107}
2108
2109static void print_sample_time(struct feat_fd *ff, FILE *fp)
2110{
2111 struct perf_session *session;
2112 char time_buf[32];
2113 double d;
2114
2115 session = container_of(ff->ph, struct perf_session, header);
2116
2117 timestamp__scnprintf_usec(session->evlist->first_sample_time,
2118 time_buf, sizeof(time_buf));
2119 fprintf(fp, "# time of first sample : %s\n", time_buf);
2120
2121 timestamp__scnprintf_usec(session->evlist->last_sample_time,
2122 time_buf, sizeof(time_buf));
2123 fprintf(fp, "# time of last sample : %s\n", time_buf);
2124
2125 d = (double)(session->evlist->last_sample_time -
2126 session->evlist->first_sample_time) / NSEC_PER_MSEC;
2127
2128 fprintf(fp, "# sample duration : %10.3f ms\n", d);
2129}
2130
2131static void memory_node__fprintf(struct memory_node *n,
2132 unsigned long long bsize, FILE *fp)
2133{
2134 char buf_map[100], buf_size[50];
2135 unsigned long long size;
2136
2137 size = bsize * bitmap_weight(n->set, n->size);
2138 unit_number__scnprintf(buf_size, 50, size);
2139
2140 bitmap_scnprintf(n->set, n->size, buf_map, 100);
2141 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
2142}
2143
2144static void print_mem_topology(struct feat_fd *ff, FILE *fp)
2145{
2146 struct memory_node *nodes;
2147 int i, nr;
2148
2149 nodes = ff->ph->env.memory_nodes;
2150 nr = ff->ph->env.nr_memory_nodes;
2151
2152 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
2153 nr, ff->ph->env.memory_bsize);
2154
2155 for (i = 0; i < nr; i++) {
2156 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
2157 }
2158}
2159
2160static int __event_process_build_id(struct perf_record_header_build_id *bev,
2161 char *filename,
2162 struct perf_session *session)
2163{
2164 int err = -1;
2165 struct machine *machine;
2166 u16 cpumode;
2167 struct dso *dso;
2168 enum dso_space_type dso_space;
2169
2170 machine = perf_session__findnew_machine(session, bev->pid);
2171 if (!machine)
2172 goto out;
2173
2174 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
2175
2176 switch (cpumode) {
2177 case PERF_RECORD_MISC_KERNEL:
2178 dso_space = DSO_SPACE__KERNEL;
2179 break;
2180 case PERF_RECORD_MISC_GUEST_KERNEL:
2181 dso_space = DSO_SPACE__KERNEL_GUEST;
2182 break;
2183 case PERF_RECORD_MISC_USER:
2184 case PERF_RECORD_MISC_GUEST_USER:
2185 dso_space = DSO_SPACE__USER;
2186 break;
2187 default:
2188 goto out;
2189 }
2190
2191 dso = machine__findnew_dso(machine, filename);
2192 if (dso != NULL) {
2193 char sbuild_id[SBUILD_ID_SIZE];
2194 struct build_id bid;
2195 size_t size = BUILD_ID_SIZE;
2196
2197 if (bev->header.misc & PERF_RECORD_MISC_BUILD_ID_SIZE)
2198 size = bev->size;
2199
2200 build_id__init(&bid, bev->data, size);
2201 dso__set_build_id(dso, &bid);
2202
2203 if (dso_space != DSO_SPACE__USER) {
2204 struct kmod_path m = { .name = NULL, };
2205
2206 if (!kmod_path__parse_name(&m, filename) && m.kmod)
2207 dso__set_module_info(dso, &m, machine);
2208
2209 dso->kernel = dso_space;
2210 free(m.name);
2211 }
2212
2213 build_id__sprintf(&dso->bid, sbuild_id);
2214 pr_debug("build id event received for %s: %s [%zu]\n",
2215 dso->long_name, sbuild_id, size);
2216 dso__put(dso);
2217 }
2218
2219 err = 0;
2220out:
2221 return err;
2222}
2223
2224static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
2225 int input, u64 offset, u64 size)
2226{
2227 struct perf_session *session = container_of(header, struct perf_session, header);
2228 struct {
2229 struct perf_event_header header;
2230 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
2231 char filename[0];
2232 } old_bev;
2233 struct perf_record_header_build_id bev;
2234 char filename[PATH_MAX];
2235 u64 limit = offset + size;
2236
2237 while (offset < limit) {
2238 ssize_t len;
2239
2240 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
2241 return -1;
2242
2243 if (header->needs_swap)
2244 perf_event_header__bswap(&old_bev.header);
2245
2246 len = old_bev.header.size - sizeof(old_bev);
2247 if (readn(input, filename, len) != len)
2248 return -1;
2249
2250 bev.header = old_bev.header;
2251
2252 /*
2253 * As the pid is the missing value, we need to fill
2254 * it properly. The header.misc value gives us a nice hint.
2255 */
2256 bev.pid = HOST_KERNEL_ID;
2257 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
2258 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
2259 bev.pid = DEFAULT_GUEST_KERNEL_ID;
2260
2261 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
2262 __event_process_build_id(&bev, filename, session);
2263
2264 offset += bev.header.size;
2265 }
2266
2267 return 0;
2268}
2269
2270static int perf_header__read_build_ids(struct perf_header *header,
2271 int input, u64 offset, u64 size)
2272{
2273 struct perf_session *session = container_of(header, struct perf_session, header);
2274 struct perf_record_header_build_id bev;
2275 char filename[PATH_MAX];
2276 u64 limit = offset + size, orig_offset = offset;
2277 int err = -1;
2278
2279 while (offset < limit) {
2280 ssize_t len;
2281
2282 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
2283 goto out;
2284
2285 if (header->needs_swap)
2286 perf_event_header__bswap(&bev.header);
2287
2288 len = bev.header.size - sizeof(bev);
2289 if (readn(input, filename, len) != len)
2290 goto out;
2291 /*
2292 * The a1645ce1 changeset:
2293 *
2294 * "perf: 'perf kvm' tool for monitoring guest performance from host"
2295 *
2296 * Added a field to struct perf_record_header_build_id that broke the file
2297 * format.
2298 *
2299 * Since the kernel build-id is the first entry, process the
2300 * table using the old format if the well known
2301 * '[kernel.kallsyms]' string for the kernel build-id has the
2302 * first 4 characters chopped off (where the pid_t sits).
2303 */
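		/*
		 * i.e. when an old-format table is read through the new struct,
		 * the extra pid_t field shifts the filename by 4 bytes, so
		 * "[kernel.kallsyms]" shows up here as "nel.kallsyms]".
		 */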
2304 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
2305 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
2306 return -1;
2307 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
2308 }
2309
2310 __event_process_build_id(&bev, filename, session);
2311
2312 offset += bev.header.size;
2313 }
2314 err = 0;
2315out:
2316 return err;
2317}
2318
2319/* Macro for features that simply need to read and store a string. */
2320#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
2321static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
2322{\
2323 ff->ph->env.__feat_env = do_read_string(ff); \
2324 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
2325}
2326
2327FEAT_PROCESS_STR_FUN(hostname, hostname);
2328FEAT_PROCESS_STR_FUN(osrelease, os_release);
2329FEAT_PROCESS_STR_FUN(version, version);
2330FEAT_PROCESS_STR_FUN(arch, arch);
2331FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
2332FEAT_PROCESS_STR_FUN(cpuid, cpuid);
2333
2334static int process_tracing_data(struct feat_fd *ff, void *data)
2335{
2336 ssize_t ret = trace_report(ff->fd, data, false);
2337
2338 return ret < 0 ? -1 : 0;
2339}
2340
2341static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
2342{
2343 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
2344 pr_debug("Failed to read buildids, continuing...\n");
2345 return 0;
2346}
2347
2348static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
2349{
2350 int ret;
2351 u32 nr_cpus_avail, nr_cpus_online;
2352
2353 ret = do_read_u32(ff, &nr_cpus_avail);
2354 if (ret)
2355 return ret;
2356
2357 ret = do_read_u32(ff, &nr_cpus_online);
2358 if (ret)
2359 return ret;
2360 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
2361 ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
2362 return 0;
2363}
2364
2365static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
2366{
2367 u64 total_mem;
2368 int ret;
2369
2370 ret = do_read_u64(ff, &total_mem);
2371 if (ret)
2372 return -1;
2373 ff->ph->env.total_mem = (unsigned long long)total_mem;
2374 return 0;
2375}
2376
2377static struct evsel *evlist__find_by_index(struct evlist *evlist, int idx)
2378{
2379 struct evsel *evsel;
2380
2381 evlist__for_each_entry(evlist, evsel) {
2382 if (evsel->core.idx == idx)
2383 return evsel;
2384 }
2385
2386 return NULL;
2387}
2388
2389static void evlist__set_event_name(struct evlist *evlist, struct evsel *event)
2390{
2391 struct evsel *evsel;
2392
2393 if (!event->name)
2394 return;
2395
2396 evsel = evlist__find_by_index(evlist, event->core.idx);
2397 if (!evsel)
2398 return;
2399
2400 if (evsel->name)
2401 return;
2402
2403 evsel->name = strdup(event->name);
2404}
2405
2406static int
2407process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
2408{
2409 struct perf_session *session;
2410 struct evsel *evsel, *events = read_event_desc(ff);
2411
2412 if (!events)
2413 return 0;
2414
2415 session = container_of(ff->ph, struct perf_session, header);
2416
2417 if (session->data->is_pipe) {
2418 /* Save events for reading later by print_event_desc,
2419 * since they can't be read again in pipe mode. */
2420 ff->events = events;
2421 }
2422
2423 for (evsel = events; evsel->core.attr.size; evsel++)
2424 evlist__set_event_name(session->evlist, evsel);
2425
2426 if (!session->data->is_pipe)
2427 free_event_desc(events);
2428
2429 return 0;
2430}
2431
2432static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
2433{
2434 char *str, *cmdline = NULL, **argv = NULL;
2435 u32 nr, i, len = 0;
2436
2437 if (do_read_u32(ff, &nr))
2438 return -1;
2439
2440 ff->ph->env.nr_cmdline = nr;
2441
2442 cmdline = zalloc(ff->size + nr + 1);
2443 if (!cmdline)
2444 return -1;
2445
2446 argv = zalloc(sizeof(char *) * (nr + 1));
2447 if (!argv)
2448 goto error;
2449
2450 for (i = 0; i < nr; i++) {
2451 str = do_read_string(ff);
2452 if (!str)
2453 goto error;
2454
2455 argv[i] = cmdline + len;
2456 memcpy(argv[i], str, strlen(str) + 1);
2457 len += strlen(str) + 1;
2458 free(str);
2459 }
2460 ff->ph->env.cmdline = cmdline;
2461 ff->ph->env.cmdline_argv = (const char **) argv;
2462 return 0;
2463
2464error:
2465 free(argv);
2466 free(cmdline);
2467 return -1;
2468}
2469
2470static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
2471{
2472 u32 nr, i;
2473 char *str;
2474 struct strbuf sb;
2475 int cpu_nr = ff->ph->env.nr_cpus_avail;
2476 u64 size = 0;
2477 struct perf_header *ph = ff->ph;
2478 bool do_core_id_test = true;
2479
2480 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
2481 if (!ph->env.cpu)
2482 return -1;
2483
2484 if (do_read_u32(ff, &nr))
2485 goto free_cpu;
2486
2487 ph->env.nr_sibling_cores = nr;
2488 size += sizeof(u32);
2489 if (strbuf_init(&sb, 128) < 0)
2490 goto free_cpu;
2491
2492 for (i = 0; i < nr; i++) {
2493 str = do_read_string(ff);
2494 if (!str)
2495 goto error;
2496
2497 /* include a NULL character at the end */
2498 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2499 goto error;
2500 size += string_size(str);
2501 free(str);
2502 }
2503 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
2504
2505 if (do_read_u32(ff, &nr))
2506 return -1;
2507
2508 ph->env.nr_sibling_threads = nr;
2509 size += sizeof(u32);
2510
2511 for (i = 0; i < nr; i++) {
2512 str = do_read_string(ff);
2513 if (!str)
2514 goto error;
2515
2516 /* include a NULL character at the end */
2517 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2518 goto error;
2519 size += string_size(str);
2520 free(str);
2521 }
2522 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
2523
2524 /*
2525 * The header may be from an older perf,
2526 * which doesn't include core id and socket id information.
2527 */
2528 if (ff->size <= size) {
2529 zfree(&ph->env.cpu);
2530 return 0;
2531 }
2532
2533 /* On s390 the socket_id number is not related to the number of cpus.
2534 * The socket_id number might be higher than the number of cpus.
2535 * This depends on the configuration.
2536 * AArch64 is the same.
2537 */
2538 if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
2539 || !strncmp(ph->env.arch, "aarch64", 7)))
2540 do_core_id_test = false;
2541
2542 for (i = 0; i < (u32)cpu_nr; i++) {
2543 if (do_read_u32(ff, &nr))
2544 goto free_cpu;
2545
2546 ph->env.cpu[i].core_id = nr;
2547 size += sizeof(u32);
2548
2549 if (do_read_u32(ff, &nr))
2550 goto free_cpu;
2551
2552 if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
2553 pr_debug("socket_id number is too big. "
2554 "You may need to upgrade the perf tool.\n");
2555 goto free_cpu;
2556 }
2557
2558 ph->env.cpu[i].socket_id = nr;
2559 size += sizeof(u32);
2560 }
2561
2562 /*
2563 * The header may be from an older perf,
2564 * which doesn't include die information.
2565 */
2566 if (ff->size <= size)
2567 return 0;
2568
2569 if (do_read_u32(ff, &nr))
2570 return -1;
2571
2572 ph->env.nr_sibling_dies = nr;
2573 size += sizeof(u32);
2574
2575 for (i = 0; i < nr; i++) {
2576 str = do_read_string(ff);
2577 if (!str)
2578 goto error;
2579
2580 /* include a NULL character at the end */
2581 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2582 goto error;
2583 size += string_size(str);
2584 free(str);
2585 }
2586 ph->env.sibling_dies = strbuf_detach(&sb, NULL);
2587
2588 for (i = 0; i < (u32)cpu_nr; i++) {
2589 if (do_read_u32(ff, &nr))
2590 goto free_cpu;
2591
2592 ph->env.cpu[i].die_id = nr;
2593 }
2594
2595 return 0;
2596
2597error:
2598 strbuf_release(&sb);
2599free_cpu:
2600 zfree(&ph->env.cpu);
2601 return -1;
2602}
2603
2604static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
2605{
2606 struct numa_node *nodes, *n;
2607 u32 nr, i;
2608 char *str;
2609
2610 /* nr nodes */
2611 if (do_read_u32(ff, &nr))
2612 return -1;
2613
2614 nodes = zalloc(sizeof(*nodes) * nr);
2615 if (!nodes)
2616 return -ENOMEM;
2617
2618 for (i = 0; i < nr; i++) {
2619 n = &nodes[i];
2620
2621 /* node number */
2622 if (do_read_u32(ff, &n->node))
2623 goto error;
2624
2625 if (do_read_u64(ff, &n->mem_total))
2626 goto error;
2627
2628 if (do_read_u64(ff, &n->mem_free))
2629 goto error;
2630
2631 str = do_read_string(ff);
2632 if (!str)
2633 goto error;
2634
2635 n->map = perf_cpu_map__new(str);
2636 if (!n->map)
2637 goto error;
2638
2639 free(str);
2640 }
2641 ff->ph->env.nr_numa_nodes = nr;
2642 ff->ph->env.numa_nodes = nodes;
2643 return 0;
2644
2645error:
2646 free(nodes);
2647 return -1;
2648}
2649
2650static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
2651{
2652 char *name;
2653 u32 pmu_num;
2654 u32 type;
2655 struct strbuf sb;
2656
2657 if (do_read_u32(ff, &pmu_num))
2658 return -1;
2659
2660 if (!pmu_num) {
2661 pr_debug("pmu mappings not available\n");
2662 return 0;
2663 }
2664
2665 ff->ph->env.nr_pmu_mappings = pmu_num;
2666 if (strbuf_init(&sb, 128) < 0)
2667 return -1;
2668
2669 while (pmu_num) {
2670 if (do_read_u32(ff, &type))
2671 goto error;
2672
2673 name = do_read_string(ff);
2674 if (!name)
2675 goto error;
2676
2677 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2678 goto error;
2679 /* include a NULL character at the end */
2680 if (strbuf_add(&sb, "", 1) < 0)
2681 goto error;
2682
2683 if (!strcmp(name, "msr"))
2684 ff->ph->env.msr_pmu_type = type;
2685
2686 free(name);
2687 pmu_num--;
2688 }
2689 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2690 return 0;
2691
2692error:
2693 strbuf_release(&sb);
2694 return -1;
2695}
2696
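/*
 * Read the GROUP_DESC section (a u32 number of groups, then a name string,
 * leader index and member count per group) and rebuild the leader/member
 * relationships in the session's evlist from it.
 */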
2697static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
2698{
2699 int ret = -1;
2700 u32 i, nr, nr_groups;
2701 struct perf_session *session;
2702 struct evsel *evsel, *leader = NULL;
2703 struct group_desc {
2704 char *name;
2705 u32 leader_idx;
2706 u32 nr_members;
2707 } *desc;
2708
2709 if (do_read_u32(ff, &nr_groups))
2710 return -1;
2711
2712 ff->ph->env.nr_groups = nr_groups;
2713 if (!nr_groups) {
2714 pr_debug("group desc not available\n");
2715 return 0;
2716 }
2717
2718 desc = calloc(nr_groups, sizeof(*desc));
2719 if (!desc)
2720 return -1;
2721
2722 for (i = 0; i < nr_groups; i++) {
2723 desc[i].name = do_read_string(ff);
2724 if (!desc[i].name)
2725 goto out_free;
2726
2727 if (do_read_u32(ff, &desc[i].leader_idx))
2728 goto out_free;
2729
2730 if (do_read_u32(ff, &desc[i].nr_members))
2731 goto out_free;
2732 }
2733
2734 /*
2735 * Rebuild group relationship based on the group_desc
2736 */
2737 session = container_of(ff->ph, struct perf_session, header);
2738 session->evlist->core.nr_groups = nr_groups;
2739
2740 i = nr = 0;
2741 evlist__for_each_entry(session->evlist, evsel) {
2742 if (evsel->core.idx == (int) desc[i].leader_idx) {
2743 evsel__set_leader(evsel, evsel);
2744 /* {anon_group} is a dummy name */
2745 if (strcmp(desc[i].name, "{anon_group}")) {
2746 evsel->group_name = desc[i].name;
2747 desc[i].name = NULL;
2748 }
2749 evsel->core.nr_members = desc[i].nr_members;
2750
2751 if (i >= nr_groups || nr > 0) {
2752 pr_debug("invalid group desc\n");
2753 goto out_free;
2754 }
2755
2756 leader = evsel;
2757 nr = evsel->core.nr_members - 1;
2758 i++;
2759 } else if (nr) {
2760 /* This is a group member */
2761 evsel__set_leader(evsel, leader);
2762
2763 nr--;
2764 }
2765 }
2766
2767 if (i != nr_groups || nr != 0) {
2768 pr_debug("invalid group desc\n");
2769 goto out_free;
2770 }
2771
2772 ret = 0;
2773out_free:
2774 for (i = 0; i < nr_groups; i++)
2775 zfree(&desc[i].name);
2776 free(desc);
2777
2778 return ret;
2779}
2780
2781static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
2782{
2783 struct perf_session *session;
2784 int err;
2785
2786 session = container_of(ff->ph, struct perf_session, header);
2787
2788 err = auxtrace_index__process(ff->fd, ff->size, session,
2789 ff->ph->needs_swap);
2790 if (err < 0)
2791 pr_err("Failed to process auxtrace index\n");
2792 return err;
2793}
2794
2795static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
2796{
2797 struct cpu_cache_level *caches;
2798 u32 cnt, i, version;
2799
2800 if (do_read_u32(ff, &version))
2801 return -1;
2802
2803 if (version != 1)
2804 return -1;
2805
2806 if (do_read_u32(ff, &cnt))
2807 return -1;
2808
2809 caches = zalloc(sizeof(*caches) * cnt);
2810 if (!caches)
2811 return -1;
2812
2813 for (i = 0; i < cnt; i++) {
2814 struct cpu_cache_level c;
2815
2816 #define _R(v) \
2817 if (do_read_u32(ff, &c.v))\
2818 goto out_free_caches; \
2819
2820 _R(level)
2821 _R(line_size)
2822 _R(sets)
2823 _R(ways)
2824 #undef _R
2825
2826 #define _R(v) \
2827 c.v = do_read_string(ff); \
2828 if (!c.v) \
2829 goto out_free_caches;
2830
2831 _R(type)
2832 _R(size)
2833 _R(map)
2834 #undef _R
2835
2836 caches[i] = c;
2837 }
2838
2839 ff->ph->env.caches = caches;
2840 ff->ph->env.caches_cnt = cnt;
2841 return 0;
2842out_free_caches:
2843 free(caches);
2844 return -1;
2845}
2846
2847static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2848{
2849 struct perf_session *session;
2850 u64 first_sample_time, last_sample_time;
2851 int ret;
2852
2853 session = container_of(ff->ph, struct perf_session, header);
2854
2855 ret = do_read_u64(ff, &first_sample_time);
2856 if (ret)
2857 return -1;
2858
2859 ret = do_read_u64(ff, &last_sample_time);
2860 if (ret)
2861 return -1;
2862
2863 session->evlist->first_sample_time = first_sample_time;
2864 session->evlist->last_sample_time = last_sample_time;
2865 return 0;
2866}
2867
2868static int process_mem_topology(struct feat_fd *ff,
2869 void *data __maybe_unused)
2870{
2871 struct memory_node *nodes;
2872 u64 version, i, nr, bsize;
2873 int ret = -1;
2874
2875 if (do_read_u64(ff, &version))
2876 return -1;
2877
2878 if (version != 1)
2879 return -1;
2880
2881 if (do_read_u64(ff, &bsize))
2882 return -1;
2883
2884 if (do_read_u64(ff, &nr))
2885 return -1;
2886
2887 nodes = zalloc(sizeof(*nodes) * nr);
2888 if (!nodes)
2889 return -1;
2890
2891 for (i = 0; i < nr; i++) {
2892 struct memory_node n;
2893
2894 #define _R(v) \
2895 if (do_read_u64(ff, &n.v)) \
2896 goto out; \
2897
2898 _R(node)
2899 _R(size)
2900
2901 #undef _R
2902
2903 if (do_read_bitmap(ff, &n.set, &n.size))
2904 goto out;
2905
2906 nodes[i] = n;
2907 }
2908
2909 ff->ph->env.memory_bsize = bsize;
2910 ff->ph->env.memory_nodes = nodes;
2911 ff->ph->env.nr_memory_nodes = nr;
2912 ret = 0;
2913
2914out:
2915 if (ret)
2916 free(nodes);
2917 return ret;
2918}
2919
2920static int process_clockid(struct feat_fd *ff,
2921 void *data __maybe_unused)
2922{
2923 if (do_read_u64(ff, &ff->ph->env.clock.clockid_res_ns))
2924 return -1;
2925
2926 return 0;
2927}
2928
2929static int process_clock_data(struct feat_fd *ff,
2930 void *_data __maybe_unused)
2931{
2932 u32 data32;
2933 u64 data64;
2934
2935 /* version */
2936 if (do_read_u32(ff, &data32))
2937 return -1;
2938
2939 if (data32 != 1)
2940 return -1;
2941
2942 /* clockid */
2943 if (do_read_u32(ff, &data32))
2944 return -1;
2945
2946 ff->ph->env.clock.clockid = data32;
2947
2948 /* TOD ref time */
2949 if (do_read_u64(ff, &data64))
2950 return -1;
2951
2952 ff->ph->env.clock.tod_ns = data64;
2953
2954 /* clockid ref time */
2955 if (do_read_u64(ff, &data64))
2956 return -1;
2957
2958 ff->ph->env.clock.clockid_ns = data64;
2959 ff->ph->env.clock.enabled = true;
2960 return 0;
2961}
2962
2963static int process_hybrid_topology(struct feat_fd *ff,
2964 void *data __maybe_unused)
2965{
2966 struct hybrid_node *nodes, *n;
2967 u32 nr, i;
2968
2969 /* nr nodes */
2970 if (do_read_u32(ff, &nr))
2971 return -1;
2972
2973 nodes = zalloc(sizeof(*nodes) * nr);
2974 if (!nodes)
2975 return -ENOMEM;
2976
2977 for (i = 0; i < nr; i++) {
2978 n = &nodes[i];
2979
2980 n->pmu_name = do_read_string(ff);
2981 if (!n->pmu_name)
2982 goto error;
2983
2984 n->cpus = do_read_string(ff);
2985 if (!n->cpus)
2986 goto error;
2987 }
2988
2989 ff->ph->env.nr_hybrid_nodes = nr;
2990 ff->ph->env.hybrid_nodes = nodes;
2991 return 0;
2992
2993error:
2994 for (i = 0; i < nr; i++) {
2995 free(nodes[i].pmu_name);
2996 free(nodes[i].cpus);
2997 }
2998
2999 free(nodes);
3000 return -1;
3001}
3002
3003static int process_dir_format(struct feat_fd *ff,
3004 void *_data __maybe_unused)
3005{
3006 struct perf_session *session;
3007 struct perf_data *data;
3008
3009 session = container_of(ff->ph, struct perf_session, header);
3010 data = session->data;
3011
3012 if (WARN_ON(!perf_data__is_dir(data)))
3013 return -1;
3014
3015 return do_read_u64(ff, &data->dir.version);
3016}
3017
3018#ifdef HAVE_LIBBPF_SUPPORT
3019static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
3020{
3021 struct bpf_prog_info_linear *info_linear;
3022 struct bpf_prog_info_node *info_node;
3023 struct perf_env *env = &ff->ph->env;
3024 u32 count, i;
3025 int err = -1;
3026
3027 if (ff->ph->needs_swap) {
3028 pr_warning("interpreting bpf_prog_info from systems with a different endianness is not yet supported\n");
3029 return 0;
3030 }
3031
3032 if (do_read_u32(ff, &count))
3033 return -1;
3034
3035 down_write(&env->bpf_progs.lock);
3036
3037 for (i = 0; i < count; ++i) {
3038 u32 info_len, data_len;
3039
3040 info_linear = NULL;
3041 info_node = NULL;
3042 if (do_read_u32(ff, &info_len))
3043 goto out;
3044 if (do_read_u32(ff, &data_len))
3045 goto out;
3046
3047 if (info_len > sizeof(struct bpf_prog_info)) {
3048 pr_warning("detected invalid bpf_prog_info\n");
3049 goto out;
3050 }
3051
3052 info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
3053 data_len);
3054 if (!info_linear)
3055 goto out;
3056 info_linear->info_len = sizeof(struct bpf_prog_info);
3057 info_linear->data_len = data_len;
3058 if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
3059 goto out;
3060 if (__do_read(ff, &info_linear->info, info_len))
3061 goto out;
3062 if (info_len < sizeof(struct bpf_prog_info))
3063 memset(((void *)(&info_linear->info)) + info_len, 0,
3064 sizeof(struct bpf_prog_info) - info_len);
3065
3066 if (__do_read(ff, info_linear->data, data_len))
3067 goto out;
3068
3069 info_node = malloc(sizeof(struct bpf_prog_info_node));
3070 if (!info_node)
3071 goto out;
3072
3073 /* after reading from file, translate offset to address */
3074 bpf_program__bpil_offs_to_addr(info_linear);
3075 info_node->info_linear = info_linear;
3076 perf_env__insert_bpf_prog_info(env, info_node);
3077 }
3078
3079 up_write(&env->bpf_progs.lock);
3080 return 0;
3081out:
3082 free(info_linear);
3083 free(info_node);
3084 up_write(&env->bpf_progs.lock);
3085 return err;
3086}
3087
3088static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
3089{
3090 struct perf_env *env = &ff->ph->env;
3091 struct btf_node *node = NULL;
3092 u32 count, i;
3093 int err = -1;
3094
3095 if (ff->ph->needs_swap) {
3096 pr_warning("interpreting btf from systems with a different endianness is not yet supported\n");
3097 return 0;
3098 }
3099
3100 if (do_read_u32(ff, &count))
3101 return -1;
3102
3103 down_write(&env->bpf_progs.lock);
3104
3105 for (i = 0; i < count; ++i) {
3106 u32 id, data_size;
3107
3108 if (do_read_u32(ff, &id))
3109 goto out;
3110 if (do_read_u32(ff, &data_size))
3111 goto out;
3112
3113 node = malloc(sizeof(struct btf_node) + data_size);
3114 if (!node)
3115 goto out;
3116
3117 node->id = id;
3118 node->data_size = data_size;
3119
3120 if (__do_read(ff, node->data, data_size))
3121 goto out;
3122
3123 perf_env__insert_btf(env, node);
3124 node = NULL;
3125 }
3126
3127 err = 0;
3128out:
3129 up_write(&env->bpf_progs.lock);
3130 free(node);
3131 return err;
3132}
3133#endif // HAVE_LIBBPF_SUPPORT
3134
3135static int process_compressed(struct feat_fd *ff,
3136 void *data __maybe_unused)
3137{
3138 if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
3139 return -1;
3140
3141 if (do_read_u32(ff, &(ff->ph->env.comp_type)))
3142 return -1;
3143
3144 if (do_read_u32(ff, &(ff->ph->env.comp_level)))
3145 return -1;
3146
3147 if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
3148 return -1;
3149
3150 if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
3151 return -1;
3152
3153 return 0;
3154}
3155
3156static int process_per_cpu_pmu_caps(struct feat_fd *ff, int *nr_cpu_pmu_caps,
3157 char **cpu_pmu_caps,
3158 unsigned int *max_branches)
3159{
3160 char *name, *value;
3161 struct strbuf sb;
3162 u32 nr_caps;
3163
3164 if (do_read_u32(ff, &nr_caps))
3165 return -1;
3166
3167 if (!nr_caps) {
3168 pr_debug("cpu pmu capabilities not available\n");
3169 return 0;
3170 }
3171
3172 *nr_cpu_pmu_caps = nr_caps;
3173
3174 if (strbuf_init(&sb, 128) < 0)
3175 return -1;
3176
3177 while (nr_caps--) {
3178 name = do_read_string(ff);
3179 if (!name)
3180 goto error;
3181
3182 value = do_read_string(ff);
3183 if (!value)
3184 goto free_name;
3185
3186 if (strbuf_addf(&sb, "%s=%s", name, value) < 0)
3187 goto free_value;
3188
3189 /* include a NULL character at the end */
3190 if (strbuf_add(&sb, "", 1) < 0)
3191 goto free_value;
3192
3193 if (!strcmp(name, "branches"))
3194 *max_branches = atoi(value);
3195
3196 free(value);
3197 free(name);
3198 }
3199 *cpu_pmu_caps = strbuf_detach(&sb, NULL);
3200 return 0;
3201
3202free_value:
3203 free(value);
3204free_name:
3205 free(name);
3206error:
3207 strbuf_release(&sb);
3208 return -1;
3209}
3210
3211static int process_cpu_pmu_caps(struct feat_fd *ff,
3212 void *data __maybe_unused)
3213{
3214 return process_per_cpu_pmu_caps(ff, &ff->ph->env.nr_cpu_pmu_caps,
3215 &ff->ph->env.cpu_pmu_caps,
3216 &ff->ph->env.max_branches);
3217}
3218
3219static int process_hybrid_cpu_pmu_caps(struct feat_fd *ff,
3220 void *data __maybe_unused)
3221{
3222 struct hybrid_cpc_node *nodes;
3223 u32 nr_pmu, i;
3224 int ret;
3225
3226 if (do_read_u32(ff, &nr_pmu))
3227 return -1;
3228
3229 if (!nr_pmu) {
3230 pr_debug("hybrid cpu pmu capabilities not available\n");
3231 return 0;
3232 }
3233
3234 nodes = zalloc(sizeof(*nodes) * nr_pmu);
3235 if (!nodes)
3236 return -ENOMEM;
3237
3238 for (i = 0; i < nr_pmu; i++) {
3239 struct hybrid_cpc_node *n = &nodes[i];
3240
3241 ret = process_per_cpu_pmu_caps(ff, &n->nr_cpu_pmu_caps,
3242 &n->cpu_pmu_caps,
3243 &n->max_branches);
3244 if (ret)
3245 goto err;
3246
3247 n->pmu_name = do_read_string(ff);
3248 if (!n->pmu_name) {
3249 ret = -1;
3250 goto err;
3251 }
3252 }
3253
3254 ff->ph->env.nr_hybrid_cpc_nodes = nr_pmu;
3255 ff->ph->env.hybrid_cpc_nodes = nodes;
3256 return 0;
3257
3258err:
3259 for (i = 0; i < nr_pmu; i++) {
3260 free(nodes[i].cpu_pmu_caps);
3261 free(nodes[i].pmu_name);
3262 }
3263
3264 free(nodes);
3265 return ret;
3266}
3267
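/*
 * The two macros below differ only in the .synthesize flag: FEAT_OPR sets it,
 * FEAT_OPN leaves it unset.
 */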
3268#define FEAT_OPR(n, func, __full_only) \
3269 [HEADER_##n] = { \
3270 .name = __stringify(n), \
3271 .write = write_##func, \
3272 .print = print_##func, \
3273 .full_only = __full_only, \
3274 .process = process_##func, \
3275 .synthesize = true \
3276 }
3277
3278#define FEAT_OPN(n, func, __full_only) \
3279 [HEADER_##n] = { \
3280 .name = __stringify(n), \
3281 .write = write_##func, \
3282 .print = print_##func, \
3283 .full_only = __full_only, \
3284 .process = process_##func \
3285 }
3286
3287/* feature_ops not implemented: */
3288#define print_tracing_data NULL
3289#define print_build_id NULL
3290
3291#define process_branch_stack NULL
3292#define process_stat NULL
3293
3294// Only used in util/synthetic-events.c
3295const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
3296
3297const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
3298 FEAT_OPN(TRACING_DATA, tracing_data, false),
3299 FEAT_OPN(BUILD_ID, build_id, false),
3300 FEAT_OPR(HOSTNAME, hostname, false),
3301 FEAT_OPR(OSRELEASE, osrelease, false),
3302 FEAT_OPR(VERSION, version, false),
3303 FEAT_OPR(ARCH, arch, false),
3304 FEAT_OPR(NRCPUS, nrcpus, false),
3305 FEAT_OPR(CPUDESC, cpudesc, false),
3306 FEAT_OPR(CPUID, cpuid, false),
3307 FEAT_OPR(TOTAL_MEM, total_mem, false),
3308 FEAT_OPR(EVENT_DESC, event_desc, false),
3309 FEAT_OPR(CMDLINE, cmdline, false),
3310 FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true),
3311 FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true),
3312 FEAT_OPN(BRANCH_STACK, branch_stack, false),
3313 FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false),
3314 FEAT_OPR(GROUP_DESC, group_desc, false),
3315 FEAT_OPN(AUXTRACE, auxtrace, false),
3316 FEAT_OPN(STAT, stat, false),
3317 FEAT_OPN(CACHE, cache, true),
3318 FEAT_OPR(SAMPLE_TIME, sample_time, false),
3319 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
3320 FEAT_OPR(CLOCKID, clockid, false),
3321 FEAT_OPN(DIR_FORMAT, dir_format, false),
3322#ifdef HAVE_LIBBPF_SUPPORT
3323 FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
3324 FEAT_OPR(BPF_BTF, bpf_btf, false),
3325#endif
3326 FEAT_OPR(COMPRESSED, compressed, false),
3327 FEAT_OPR(CPU_PMU_CAPS, cpu_pmu_caps, false),
3328 FEAT_OPR(CLOCK_DATA, clock_data, false),
3329 FEAT_OPN(HYBRID_TOPOLOGY, hybrid_topology, true),
3330 FEAT_OPR(HYBRID_CPU_PMU_CAPS, hybrid_cpu_pmu_caps, false),
3331};
3332
3333struct header_print_data {
3334 FILE *fp;
3335 bool full; /* extended list of headers */
3336};
3337
3338static int perf_file_section__fprintf_info(struct perf_file_section *section,
3339 struct perf_header *ph,
3340 int feat, int fd, void *data)
3341{
3342 struct header_print_data *hd = data;
3343 struct feat_fd ff;
3344
3345 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
3346 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
3347 "%d, continuing...\n", section->offset, feat);
3348 return 0;
3349 }
3350 if (feat >= HEADER_LAST_FEATURE) {
3351 pr_warning("unknown feature %d\n", feat);
3352 return 0;
3353 }
3354 if (!feat_ops[feat].print)
3355 return 0;
3356
3357 ff = (struct feat_fd) {
3358 .fd = fd,
3359 .ph = ph,
3360 };
3361
3362 if (!feat_ops[feat].full_only || hd->full)
3363 feat_ops[feat].print(&ff, hd->fp);
3364 else
3365 fprintf(hd->fp, "# %s info available, use -I to display\n",
3366 feat_ops[feat].name);
3367
3368 return 0;
3369}
3370
3371int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
3372{
3373 struct header_print_data hd;
3374 struct perf_header *header = &session->header;
3375 int fd = perf_data__fd(session->data);
3376 struct stat st;
3377 time_t stctime;
3378 int ret, bit;
3379
3380 hd.fp = fp;
3381 hd.full = full;
3382
3383 ret = fstat(fd, &st);
3384 if (ret == -1)
3385 return -1;
3386
3387 stctime = st.st_mtime;
3388 fprintf(fp, "# captured on : %s", ctime(&stctime));
3389
3390 fprintf(fp, "# header version : %u\n", header->version);
3391 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
3392 fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
3393 fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);
3394
3395 perf_header__process_sections(header, fd, &hd,
3396 perf_file_section__fprintf_info);
3397
3398 if (session->data->is_pipe)
3399 return 0;
3400
3401 fprintf(fp, "# missing features: ");
3402 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
3403 if (bit)
3404 fprintf(fp, "%s ", feat_ops[bit].name);
3405 }
3406
3407 fprintf(fp, "\n");
3408 return 0;
3409}
3410
3411static int do_write_feat(struct feat_fd *ff, int type,
3412 struct perf_file_section **p,
3413 struct evlist *evlist)
3414{
3415 int err;
3416 int ret = 0;
3417
3418 if (perf_header__has_feat(ff->ph, type)) {
3419 if (!feat_ops[type].write)
3420 return -1;
3421
3422 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
3423 return -1;
3424
3425 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
3426
3427 err = feat_ops[type].write(ff, evlist);
3428 if (err < 0) {
3429 pr_debug("failed to write feature %s\n", feat_ops[type].name);
3430
3431 /* undo anything written */
3432 lseek(ff->fd, (*p)->offset, SEEK_SET);
3433
3434 return -1;
3435 }
3436 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
3437 (*p)++;
3438 }
3439 return ret;
3440}
3441
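/*
 * Write all opted-in feature sections: space for the section table is skipped
 * first, each feature payload is written after it (clearing the feature bit on
 * failure), and finally the table of offsets and sizes is written back at
 * header->feat_offset.
 */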
3442static int perf_header__adds_write(struct perf_header *header,
3443 struct evlist *evlist, int fd)
3444{
3445 int nr_sections;
3446 struct feat_fd ff;
3447 struct perf_file_section *feat_sec, *p;
3448 int sec_size;
3449 u64 sec_start;
3450 int feat;
3451 int err;
3452
3453 ff = (struct feat_fd){
3454 .fd = fd,
3455 .ph = header,
3456 };
3457
3458 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
3459 if (!nr_sections)
3460 return 0;
3461
3462 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
3463 if (feat_sec == NULL)
3464 return -ENOMEM;
3465
3466 sec_size = sizeof(*feat_sec) * nr_sections;
3467
3468 sec_start = header->feat_offset;
3469 lseek(fd, sec_start + sec_size, SEEK_SET);
3470
3471 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
3472 if (do_write_feat(&ff, feat, &p, evlist))
3473 perf_header__clear_feat(header, feat);
3474 }
3475
3476 lseek(fd, sec_start, SEEK_SET);
3477 /*
3478 * may write more than needed due to a dropped feature, but
3479 * this is okay, the reader will skip the missing entries
3480 */
3481 err = do_write(&ff, feat_sec, sec_size);
3482 if (err < 0)
3483 pr_debug("failed to write feature section\n");
3484 free(feat_sec);
3485 return err;
3486}
3487
3488int perf_header__write_pipe(int fd)
3489{
3490 struct perf_pipe_file_header f_header;
3491 struct feat_fd ff;
3492 int err;
3493
3494 ff = (struct feat_fd){ .fd = fd };
3495
3496 f_header = (struct perf_pipe_file_header){
3497 .magic = PERF_MAGIC,
3498 .size = sizeof(f_header),
3499 };
3500
3501 err = do_write(&ff, &f_header, sizeof(f_header));
3502 if (err < 0) {
3503 pr_debug("failed to write perf pipe header\n");
3504 return err;
3505 }
3506
3507 return 0;
3508}
3509
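/*
 * On-disk layout produced here: the file header is reserved at offset 0,
 * followed by the per-event sample ids, the perf_file_attr table, the sample
 * data region and (when at_exit is set) the feature sections; the completed
 * file header is then written back at offset 0.
 */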
3510int perf_session__write_header(struct perf_session *session,
3511 struct evlist *evlist,
3512 int fd, bool at_exit)
3513{
3514 struct perf_file_header f_header;
3515 struct perf_file_attr f_attr;
3516 struct perf_header *header = &session->header;
3517 struct evsel *evsel;
3518 struct feat_fd ff;
3519 u64 attr_offset;
3520 int err;
3521
3522 ff = (struct feat_fd){ .fd = fd};
3523 lseek(fd, sizeof(f_header), SEEK_SET);
3524
3525 evlist__for_each_entry(session->evlist, evsel) {
3526 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
3527 err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
3528 if (err < 0) {
3529 pr_debug("failed to write perf header\n");
3530 return err;
3531 }
3532 }
3533
3534 attr_offset = lseek(ff.fd, 0, SEEK_CUR);
3535
3536 evlist__for_each_entry(evlist, evsel) {
3537 if (evsel->core.attr.size < sizeof(evsel->core.attr)) {
3538 /*
3539 * We are likely in "perf inject" and have read
3540 * from an older file. Update attr size so that
3541 * the reader gets the right offset to the ids.
3542 */
3543 evsel->core.attr.size = sizeof(evsel->core.attr);
3544 }
3545 f_attr = (struct perf_file_attr){
3546 .attr = evsel->core.attr,
3547 .ids = {
3548 .offset = evsel->id_offset,
3549 .size = evsel->core.ids * sizeof(u64),
3550 }
3551 };
3552 err = do_write(&ff, &f_attr, sizeof(f_attr));
3553 if (err < 0) {
3554 pr_debug("failed to write perf header attribute\n");
3555 return err;
3556 }
3557 }
3558
3559 if (!header->data_offset)
3560 header->data_offset = lseek(fd, 0, SEEK_CUR);
3561 header->feat_offset = header->data_offset + header->data_size;
3562
3563 if (at_exit) {
3564 err = perf_header__adds_write(header, evlist, fd);
3565 if (err < 0)
3566 return err;
3567 }
3568
3569 f_header = (struct perf_file_header){
3570 .magic = PERF_MAGIC,
3571 .size = sizeof(f_header),
3572 .attr_size = sizeof(f_attr),
3573 .attrs = {
3574 .offset = attr_offset,
3575 .size = evlist->core.nr_entries * sizeof(f_attr),
3576 },
3577 .data = {
3578 .offset = header->data_offset,
3579 .size = header->data_size,
3580 },
3581 /* event_types is ignored, store zeros */
3582 };
3583
3584 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
3585
3586 lseek(fd, 0, SEEK_SET);
3587 err = do_write(&ff, &f_header, sizeof(f_header));
3588 if (err < 0) {
3589 pr_debug("failed to write perf header\n");
3590 return err;
3591 }
3592 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
3593
3594 return 0;
3595}

static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}

int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}

static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};

/*
 * In the legacy file format, the magic number is not used to encode
 * endianness; hdr_sz was used for that instead. But since hdr_sz can vary
 * with the ABI revision, we have to try every known ABI size in both byte
 * orders to detect the endianness.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	uint64_t ref_size, attr_size;
	int i;

	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
		ref_size = attr_file_abi_sizes[i]
			 + sizeof(struct perf_file_section);
		if (hdr_sz != ref_size) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
			 i,
			 ph->needs_swap);
		return 0;
	}
	/* could not determine endianness */
	return -1;
}
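
/*
 * Worked example for the loop above: a natively written v0 header has
 * attr_size == PERF_ATTR_SIZE_VER0 + sizeof(struct perf_file_section),
 * i.e. 64 + 16 = 80 bytes with the current definitions. The same header
 * produced on an opposite-endian host shows up as bswap_64(80), so only
 * the byte-swapped value matches ref_size and needs_swap gets set.
 */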

#define PERF_PIPE_HDR_VER0	16

static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};

/*
 * In the legacy pipe format, there is an implicit assumption that the host
 * recording the samples and the host parsing them share the same
 * endianness. That is not always true: the pipe output can always be
 * redirected into a file and analyzed on a different machine, possibly
 * with a different endianness and a different perf_event ABI revision in
 * the perf tool itself.
 */
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	u64 attr_size;
	int i;

	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
		if (hdr_sz != attr_pipe_abi_sizes[i]) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != attr_pipe_abi_sizes[i])
				continue;

			ph->needs_swap = true;
		}
		pr_debug("Pipe ABI%d perf.data file detected\n", i);
		return 0;
	}
	return -1;
}

bool is_perf_magic(u64 magic)
{
	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
		|| magic == __perf_magic2
		|| magic == __perf_magic2_sw)
		return true;

	return false;
}

static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
	ph->version = PERF_HEADER_VERSION_2;

	/* check magic number with one endianness */
	if (magic == __perf_magic2)
		return 0;

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
		return -1;

	ph->needs_swap = true;

	return 0;
}
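
/*
 * Concretely: the v2 magic written as a u64 reads back as
 * 0x32454c4946524550 (__perf_magic2) when writer and reader share a byte
 * order, and as 0x50455246494c4532 (__perf_magic2_sw) when they do not;
 * on a little-endian host the on-disk bytes spell out "PERFILE2". The
 * legacy v1 magic is a plain string, so it carries no endian information
 * and the ABI-size probing above has to be used instead.
 */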

int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (i.e., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not set, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fall back to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			     BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				     BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				     BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset = header->data.offset;
	ph->data_size = header->data.size;
	ph->feat_offset = header->data.offset + header->data.size;
	return 0;
}
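
/*
 * A note on the bitmap heuristic in perf_file_header__read(): the feature
 * bits live in an array of unsigned long, so a 32-bit writer keeps the low
 * feature bits, HEADER_HOSTNAME among them, in the first 4-byte word while
 * a 64-bit writer keeps them in the first 8-byte word. Swapping with the
 * wrong width lands the bit in the other half of the word, the
 * HEADER_HOSTNAME test fails, and the code moves on to the next guess.
 */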

static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data)
{
	struct feat_fd fdd = {
		.fd = fd,
		.ph = ph,
		.size = section->size,
		.offset = section->offset,
	};

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	if (feat >= HEADER_LAST_FEATURE) {
		pr_debug("unknown feature %d, continuing...\n", feat);
		return 0;
	}

	if (!feat_ops[feat].process)
		return 0;

	return feat_ops[feat].process(&fdd, data);
}

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph,
				       struct perf_data *data,
				       bool repipe)
{
	struct feat_fd ff = {
		.fd = STDOUT_FILENO,
		.ph = ph,
	};
	ssize_t ret;

	ret = perf_data__read(data, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");
		return -1;
	}

	if (ph->needs_swap)
		header->size = bswap_64(header->size);

	if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
		return -1;

	return 0;
}

static int perf_header__read_pipe(struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header, session->data,
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	return f_header.size == sizeof(f_header) ? 0 : -1;
}

static int read_attr(int fd, struct perf_header *ph,
		     struct perf_file_attr *f_attr)
{
	struct perf_event_attr *attr = &f_attr->attr;
	size_t sz, left;
	size_t our_sz = sizeof(f_attr->attr);
	ssize_t ret;

	memset(f_attr, 0, sizeof(*f_attr));

	/* read minimal guaranteed structure */
	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
	if (ret <= 0) {
		pr_debug("cannot read %d bytes of header attr\n",
			 PERF_ATTR_SIZE_VER0);
		return -1;
	}

	/* on file perf_event_attr size */
	sz = attr->size;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	if (sz == 0) {
		/* assume ABI0 */
		sz = PERF_ATTR_SIZE_VER0;
	} else if (sz > our_sz) {
		pr_debug("file uses a more recent and unsupported ABI"
			 " (%zu bytes extra)\n", sz - our_sz);
		return -1;
	}
	/* what we have not yet read and that we know about */
	left = sz - PERF_ATTR_SIZE_VER0;
	if (left) {
		void *ptr = attr;
		ptr += PERF_ATTR_SIZE_VER0;

		ret = readn(fd, ptr, left);
		if (ret <= 0)
			return -1;
	}
	/* read perf_file_section, ids are read in caller */
	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));

	return ret <= 0 ? -1 : 0;
}
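
/*
 * read_attr() keeps older data files readable: an on-file attr->size
 * smaller than our perf_event_attr leaves the tail zero-filled by the
 * memset() above, while a size larger than ours (a newer, unknown ABI)
 * is rejected outright.
 */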

static int evsel__prepare_tracepoint_event(struct evsel *evsel, struct tep_handle *pevent)
{
	struct tep_event *event;
	char bf[128];

	/* already prepared */
	if (evsel->tp_format)
		return 0;

	if (pevent == NULL) {
		pr_debug("broken or missing trace data\n");
		return -1;
	}

	event = tep_find_event(pevent, evsel->core.attr.config);
	if (event == NULL) {
		pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
		return -1;
	}

	if (!evsel->name) {
		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
		evsel->name = strdup(bf);
		if (evsel->name == NULL)
			return -1;
	}

	evsel->tp_format = event;
	return 0;
}

static int evlist__prepare_tracepoint_events(struct evlist *evlist, struct tep_handle *pevent)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    evsel__prepare_tracepoint_event(pos, pevent))
			return -1;
	}

	return 0;
}

int perf_session__read_header(struct perf_session *session)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header f_header;
	struct perf_file_attr f_attr;
	u64 f_id;
	int nr_attrs, nr_ids, i, j, err;
	int fd = perf_data__fd(data);

	session->evlist = evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;

	/*
	 * We can read a 'pipe' data event from a regular file,
	 * so check for the pipe header regardless of source.
	 */
	err = perf_header__read_pipe(session);
	if (!err || perf_data__is_pipe(data)) {
		data->is_pipe = true;
		return err;
	}

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	if (header->needs_swap && data->in_place_update) {
		pr_err("In-place update not supported when byte-swapping is required\n");
		return -EINVAL;
	}

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information. Just warn the user and process as much of it as we can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   data->file.path);
	}

	if (f_header.attr_size == 0) {
		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
		       "Was the 'perf record' command properly terminated?\n",
		       data->file.path);
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at evlist__delete().
		 */
		evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(&evsel->core, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(&session->evlist->core, &evsel->core, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}

int perf_event__process_feature(struct perf_session *session,
				union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct feat_fd ff = { .fd = 0 };
	struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
	int type = fe->header.type;
	u64 feat = fe->feat_id;

	if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return 0;
	}
	if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
		pr_warning("invalid feature id %" PRI_lu64 " in pipe-mode\n", feat);
		return -1;
	}

	if (!feat_ops[feat].process)
		return 0;

	ff.buf = (void *)fe->data;
	ff.size = event->header.size - sizeof(*fe);
	ff.ph = &session->header;

	if (feat_ops[feat].process(&ff, NULL))
		return -1;

	if (!feat_ops[feat].print || !tool->show_feat_hdr)
		return 0;

	if (!feat_ops[feat].full_only ||
	    tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
		feat_ops[feat].print(&ff, stdout);
	} else {
		fprintf(stdout, "# %s info available, use -I to display\n",
			feat_ops[feat].name);
	}

	return 0;
}
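
/*
 * In pipe mode the feature payload arrives inline in the
 * PERF_RECORD_HEADER_FEATURE event, so the feat_fd built above is backed
 * by ff.buf (the event data) rather than a file descriptor, and the same
 * feat_ops ->process()/->print() callbacks run on that in-memory copy.
 */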

size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
{
	struct perf_record_event_update *ev = &event->event_update;
	struct perf_record_event_update_scale *ev_scale;
	struct perf_record_event_update_cpus *ev_cpus;
	struct perf_cpu_map *map;
	size_t ret;

	ret = fprintf(fp, "\n... id: %" PRI_lu64 "\n", ev->id);

	switch (ev->type) {
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct perf_record_event_update_scale *)ev->data;
		ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
		break;
	case PERF_EVENT_UPDATE__UNIT:
		ret += fprintf(fp, "... unit: %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		ret += fprintf(fp, "... name: %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
		ret += fprintf(fp, "... ");

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			ret += cpu_map__fprintf(map, fp);
		else
			ret += fprintf(fp, "failed to get cpus\n");
		break;
	default:
		ret += fprintf(fp, "... unknown type\n");
		break;
	}

	return ret;
}

int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct evsel *evsel;
	struct evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
	}

	return 0;
}

int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct evlist **pevlist)
{
	struct perf_record_event_update *ev = &event->event_update;
	struct perf_record_event_update_scale *ev_scale;
	struct perf_record_event_update_cpus *ev_cpus;
	struct evlist *evlist;
	struct evsel *evsel;
	struct perf_cpu_map *map;

	if (!pevlist || *pevlist == NULL)
		return -EINVAL;

	evlist = *pevlist;

	evsel = evlist__id2evsel(evlist, ev->id);
	if (evsel == NULL)
		return -EINVAL;

	switch (ev->type) {
	case PERF_EVENT_UPDATE__UNIT:
		evsel->unit = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		evsel->name = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct perf_record_event_update_scale *)ev->data;
		evsel->scale = ev_scale->scale;
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct perf_record_event_update_cpus *)ev->data;

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			evsel->core.own_cpus = map;
		else
			pr_err("failed to get event_update cpus\n");
		break;
	default:
		break;
	}

	return 0;
}

int perf_event__process_tracing_data(struct perf_session *session,
				     union perf_event *event)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data__fd(session->data);
	char buf[BUFSIZ];

	/*
	 * The pipe fd is already in proper place and in any case
	 * we can't move it, and we'd screw the case where we read
	 * 'pipe' data from regular file. The trace_report reads
	 * data from 'fd' so we need to set it directly behind the
	 * event, where the tracing data starts.
	 */
	if (!perf_data__is_pipe(session->data)) {
		off_t offset = lseek(fd, 0, SEEK_CUR);

		/* setup for reading amidst mmap */
		lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
		      SEEK_SET);
	}

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent);

	return size_read + padding;
}

int perf_event__process_build_id(struct perf_session *session,
				 union perf_event *event)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}