1/*
2 * builtin-trace.c
3 *
4 * Builtin 'trace' command:
5 *
6 * Display a continuously updated trace of any workload, CPU, specific PID,
7 * system wide, etc. Default format is loosely strace like, but any other
8 * event may be specified using --event.
9 *
10 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
11 *
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
13 *
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
15 */
16
17#include "util/record.h"
18#include <api/fs/tracing_path.h>
19#ifdef HAVE_LIBBPF_SUPPORT
20#include <bpf/bpf.h>
21#endif
22#include "util/bpf_map.h"
23#include "util/rlimit.h"
24#include "builtin.h"
25#include "util/cgroup.h"
26#include "util/color.h"
27#include "util/config.h"
28#include "util/debug.h"
29#include "util/dso.h"
30#include "util/env.h"
31#include "util/event.h"
32#include "util/evsel.h"
33#include "util/evsel_fprintf.h"
34#include "util/synthetic-events.h"
35#include "util/evlist.h"
36#include "util/evswitch.h"
37#include "util/mmap.h"
38#include <subcmd/pager.h>
39#include <subcmd/exec-cmd.h>
40#include "util/machine.h"
41#include "util/map.h"
42#include "util/symbol.h"
43#include "util/path.h"
44#include "util/session.h"
45#include "util/thread.h"
46#include <subcmd/parse-options.h>
47#include "util/strlist.h"
48#include "util/intlist.h"
49#include "util/thread_map.h"
50#include "util/stat.h"
51#include "util/tool.h"
52#include "util/util.h"
53#include "trace/beauty/beauty.h"
54#include "trace-event.h"
55#include "util/parse-events.h"
56#include "util/bpf-loader.h"
57#include "util/tracepoint.h"
58#include "callchain.h"
59#include "print_binary.h"
60#include "string2.h"
61#include "syscalltbl.h"
62#include "rb_resort.h"
63#include "../perf.h"
64
65#include <errno.h>
66#include <inttypes.h>
67#include <poll.h>
68#include <signal.h>
69#include <stdlib.h>
70#include <string.h>
71#include <linux/err.h>
72#include <linux/filter.h>
73#include <linux/kernel.h>
74#include <linux/random.h>
75#include <linux/stringify.h>
76#include <linux/time64.h>
77#include <linux/zalloc.h>
78#include <fcntl.h>
79#include <sys/sysmacros.h>
80
81#include <linux/ctype.h>
82#include <perf/mmap.h>
83
84#ifdef HAVE_LIBTRACEEVENT
85#include <traceevent/event-parse.h>
86#endif
87
88#ifndef O_CLOEXEC
89# define O_CLOEXEC 02000000
90#endif
91
92#ifndef F_LINUX_SPECIFIC_BASE
93# define F_LINUX_SPECIFIC_BASE 1024
94#endif
95
96#define RAW_SYSCALL_ARGS_NUM 6
97
98/*
 * strtoul: Go from a string to a value, e.g. for msr: MSR_FS_BASE to 0xc0000100
100 */
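/*
 * scnprintf: formats the raw argument value for display.
 * mask_val: optionally masks bits off the raw value before it is
 *           formatted, as done for the 'mount' flags argument.
 * show_zero: print the argument even when its value is zero.
 */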
101struct syscall_arg_fmt {
102 size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
103 bool (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
104 unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
105 void *parm;
106 const char *name;
107 u16 nr_entries; // for arrays
108 bool show_zero;
109};
110
111struct syscall_fmt {
112 const char *name;
113 const char *alias;
114 struct {
115 const char *sys_enter,
116 *sys_exit;
117 } bpf_prog_name;
118 struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
119 u8 nr_args;
120 bool errpid;
121 bool timeout;
122 bool hexret;
123};
124
125struct trace {
126 struct perf_tool tool;
127 struct syscalltbl *sctbl;
128 struct {
129 struct syscall *table;
130 struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
131 struct bpf_map *sys_enter,
132 *sys_exit;
133 } prog_array;
134 struct {
135 struct evsel *sys_enter,
136 *sys_exit,
137 *augmented;
138 } events;
139 struct bpf_program *unaugmented_prog;
140 } syscalls;
141 struct {
142 struct bpf_map *map;
143 } dump;
144 struct record_opts opts;
145 struct evlist *evlist;
146 struct machine *host;
147 struct thread *current;
148 struct bpf_object *bpf_obj;
149 struct cgroup *cgroup;
150 u64 base_time;
151 FILE *output;
152 unsigned long nr_events;
153 unsigned long nr_events_printed;
154 unsigned long max_events;
155 struct evswitch evswitch;
156 struct strlist *ev_qualifier;
157 struct {
158 size_t nr;
159 int *entries;
160 } ev_qualifier_ids;
161 struct {
162 size_t nr;
163 pid_t *entries;
164 struct bpf_map *map;
165 } filter_pids;
166 double duration_filter;
167 double runtime_ms;
168 struct {
169 u64 vfs_getname,
170 proc_getname;
171 } stats;
172 unsigned int max_stack;
173 unsigned int min_stack;
174 int raw_augmented_syscalls_args_size;
175 bool raw_augmented_syscalls;
176 bool fd_path_disabled;
177 bool sort_events;
178 bool not_ev_qualifier;
179 bool live;
180 bool full_time;
181 bool sched;
182 bool multiple_threads;
183 bool summary;
184 bool summary_only;
185 bool errno_summary;
186 bool failure_only;
187 bool show_comm;
188 bool print_sample;
189 bool show_tool_stats;
190 bool trace_syscalls;
191 bool libtraceevent_print;
192 bool kernel_syscallchains;
193 s16 args_alignment;
194 bool show_tstamp;
195 bool show_duration;
196 bool show_zeros;
197 bool show_arg_names;
198 bool show_string_prefix;
199 bool force;
200 bool vfs_getname;
201 int trace_pgfaults;
202 char *perfconfig_events;
203 struct {
204 struct ordered_events data;
205 u64 last;
206 } oe;
207};
208
209struct tp_field {
210 int offset;
211 union {
212 u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
213 void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
214 };
215};
216
217#define TP_UINT_FIELD(bits) \
218static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
219{ \
220 u##bits value; \
221 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
222 return value; \
223}
224
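/*
 * TP_UINT_FIELD(32) expands to tp_field__u32(), which memcpy()s a u32 out
 * of sample->raw_data at field->offset; the __SWAPPED variants below do
 * the same but byte-swap, for samples recorded with the other endianness.
 */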
225TP_UINT_FIELD(8);
226TP_UINT_FIELD(16);
227TP_UINT_FIELD(32);
228TP_UINT_FIELD(64);
229
230#define TP_UINT_FIELD__SWAPPED(bits) \
231static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
232{ \
233 u##bits value; \
234 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
235 return bswap_##bits(value);\
236}
237
238TP_UINT_FIELD__SWAPPED(16);
239TP_UINT_FIELD__SWAPPED(32);
240TP_UINT_FIELD__SWAPPED(64);
241
242static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
243{
244 field->offset = offset;
245
246 switch (size) {
247 case 1:
248 field->integer = tp_field__u8;
249 break;
250 case 2:
251 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
252 break;
253 case 4:
254 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
255 break;
256 case 8:
257 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
258 break;
259 default:
260 return -1;
261 }
262
263 return 0;
264}
265
266static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
267{
268 return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
269}
270
271static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
272{
273 return sample->raw_data + field->offset;
274}
275
276static int __tp_field__init_ptr(struct tp_field *field, int offset)
277{
278 field->offset = offset;
279 field->pointer = tp_field__ptr;
280 return 0;
281}
282
283static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
284{
285 return __tp_field__init_ptr(field, format_field->offset);
286}
287
288struct syscall_tp {
289 struct tp_field id;
290 union {
291 struct tp_field args, ret;
292 };
293};
294
295/*
296 * The evsel->priv as used by 'perf trace'
297 * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
298 * fmt: for all the other tracepoints
299 */
300struct evsel_trace {
301 struct syscall_tp sc;
302 struct syscall_arg_fmt *fmt;
303};
304
305static struct evsel_trace *evsel_trace__new(void)
306{
307 return zalloc(sizeof(struct evsel_trace));
308}
309
310static void evsel_trace__delete(struct evsel_trace *et)
311{
312 if (et == NULL)
313 return;
314
315 zfree(&et->fmt);
316 free(et);
317}
318
319/*
320 * Used with raw_syscalls:sys_{enter,exit} and with the
321 * syscalls:sys_{enter,exit}_SYSCALL tracepoints
322 */
323static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
324{
325 struct evsel_trace *et = evsel->priv;
326
327 return &et->sc;
328}
329
330static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
331{
332 if (evsel->priv == NULL) {
333 evsel->priv = evsel_trace__new();
334 if (evsel->priv == NULL)
335 return NULL;
336 }
337
338 return __evsel__syscall_tp(evsel);
339}
340
341/*
342 * Used with all the other tracepoints.
343 */
344static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
345{
346 struct evsel_trace *et = evsel->priv;
347
348 return et->fmt;
349}
350
351static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
352{
353 struct evsel_trace *et = evsel->priv;
354
355 if (evsel->priv == NULL) {
356 et = evsel->priv = evsel_trace__new();
357
358 if (et == NULL)
359 return NULL;
360 }
361
362 if (et->fmt == NULL) {
363 et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
364 if (et->fmt == NULL)
365 goto out_delete;
366 }
367
368 return __evsel__syscall_arg_fmt(evsel);
369
370out_delete:
371 evsel_trace__delete(evsel->priv);
372 evsel->priv = NULL;
373 return NULL;
374}
375
376static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
377{
378 struct tep_format_field *format_field = evsel__field(evsel, name);
379
380 if (format_field == NULL)
381 return -1;
382
383 return tp_field__init_uint(field, format_field, evsel->needs_swap);
384}
385
386#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
387 ({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
388 evsel__init_tp_uint_field(evsel, &sc->name, #name); })
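/*
 * The field name is stringified, so perf_evsel__init_sc_tp_uint_field(evsel, id)
 * wires up sc->id from the tracepoint field literally named "id".
 */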
389
390static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
391{
392 struct tep_format_field *format_field = evsel__field(evsel, name);
393
394 if (format_field == NULL)
395 return -1;
396
397 return tp_field__init_ptr(field, format_field);
398}
399
400#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
401 ({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
402 evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
403
404static void evsel__delete_priv(struct evsel *evsel)
405{
406 zfree(&evsel->priv);
407 evsel__delete(evsel);
408}
409
410static int evsel__init_syscall_tp(struct evsel *evsel)
411{
412 struct syscall_tp *sc = evsel__syscall_tp(evsel);
413
414 if (sc != NULL) {
415 if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
416 evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
417 return -ENOENT;
418 return 0;
419 }
420
421 return -ENOMEM;
422}
423
424static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
425{
426 struct syscall_tp *sc = evsel__syscall_tp(evsel);
427
428 if (sc != NULL) {
429 struct tep_format_field *syscall_id = evsel__field(tp, "id");
430 if (syscall_id == NULL)
431 syscall_id = evsel__field(tp, "__syscall_nr");
432 if (syscall_id == NULL ||
433 __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
434 return -EINVAL;
435
436 return 0;
437 }
438
439 return -ENOMEM;
440}
441
442static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
443{
444 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
445
446 return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
447}
448
449static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
450{
451 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
452
453 return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
454}
455
456static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
457{
458 if (evsel__syscall_tp(evsel) != NULL) {
459 if (perf_evsel__init_sc_tp_uint_field(evsel, id))
460 return -ENOENT;
461
462 evsel->handler = handler;
463 return 0;
464 }
465
466 return -ENOMEM;
467}
468
469static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
470{
471 struct evsel *evsel = evsel__newtp("raw_syscalls", direction);
472
	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
474 if (IS_ERR(evsel))
475 evsel = evsel__newtp("syscalls", direction);
476
477 if (IS_ERR(evsel))
478 return NULL;
479
480 if (evsel__init_raw_syscall_tp(evsel, handler))
481 goto out_delete;
482
483 return evsel;
484
485out_delete:
486 evsel__delete_priv(evsel);
487 return NULL;
488}
489
490#define perf_evsel__sc_tp_uint(evsel, name, sample) \
491 ({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
492 fields->name.integer(&fields->name, sample); })
493
494#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
495 ({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
496 fields->name.pointer(&fields->name, sample); })
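/*
 * perf_evsel__sc_tp_uint(evsel, id, sample) reads the 'id' member set up by
 * the init macros above back from a sample; the _ptr variant returns a
 * pointer into sample->raw_data instead.
 */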
497
498size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
499{
500 int idx = val - sa->offset;
501
502 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
503 size_t printed = scnprintf(bf, size, intfmt, val);
504 if (show_suffix)
505 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
506 return printed;
507 }
508
509 return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
510}
511
512size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
513{
514 int idx = val - sa->offset;
515
516 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
517 size_t printed = scnprintf(bf, size, intfmt, val);
518 if (show_prefix)
519 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
520 return printed;
521 }
522
523 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
524}
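/*
 * Example: with the "whences" strarray defined below (prefix "SEEK_",
 * offset 0), strarray__scnprintf(&strarray__whences, bf, size, "%d", true, 1)
 * emits "SEEK_CUR"; out-of-range values fall back to the intfmt plus a
 * "SEEK_???" note when the prefix is requested.
 */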
525
526static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
527 const char *intfmt,
528 struct syscall_arg *arg)
529{
530 return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
531}
532
533static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
534 struct syscall_arg *arg)
535{
536 return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
537}
538
539#define SCA_STRARRAY syscall_arg__scnprintf_strarray
540
541bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
542{
543 return strarray__strtoul(arg->parm, bf, size, ret);
544}
545
546bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
547{
548 return strarray__strtoul_flags(arg->parm, bf, size, ret);
549}
550
551bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
552{
553 return strarrays__strtoul(arg->parm, bf, size, ret);
554}
555
556size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
557{
558 return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
559}
560
561size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
562{
563 size_t printed;
564 int i;
565
566 for (i = 0; i < sas->nr_entries; ++i) {
567 struct strarray *sa = sas->entries[i];
568 int idx = val - sa->offset;
569
570 if (idx >= 0 && idx < sa->nr_entries) {
571 if (sa->entries[idx] == NULL)
572 break;
573 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
574 }
575 }
576
577 printed = scnprintf(bf, size, intfmt, val);
578 if (show_prefix)
579 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
580 return printed;
581}
582
583bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
584{
585 int i;
586
587 for (i = 0; i < sa->nr_entries; ++i) {
588 if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
589 *ret = sa->offset + i;
590 return true;
591 }
592 }
593
594 return false;
595}
596
597bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret)
598{
599 u64 val = 0;
600 char *tok = bf, *sep, *end;
601
602 *ret = 0;
603
604 while (size != 0) {
605 int toklen = size;
606
607 sep = memchr(tok, '|', size);
608 if (sep != NULL) {
609 size -= sep - tok + 1;
610
611 end = sep - 1;
612 while (end > tok && isspace(*end))
613 --end;
614
615 toklen = end - tok + 1;
616 }
617
618 while (isspace(*tok))
619 ++tok;
620
621 if (isalpha(*tok) || *tok == '_') {
622 if (!strarray__strtoul(sa, tok, toklen, &val))
623 return false;
624 } else
625 val = strtoul(tok, NULL, 0);
626
627 *ret |= (1 << (val - 1));
628
629 if (sep == NULL)
630 break;
631 tok = sep + 1;
632 }
633
634 return true;
635}
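/*
 * Parses strings like "CLOEXEC|NONBLOCK" back into a bitmask: each token
 * is mapped to a value, either by name via strarray__strtoul() or by
 * strtoul() for numeric tokens, and bit (value - 1) is OR'ed into *ret.
 */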
636
637bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
638{
639 int i;
640
641 for (i = 0; i < sas->nr_entries; ++i) {
642 struct strarray *sa = sas->entries[i];
643
644 if (strarray__strtoul(sa, bf, size, ret))
645 return true;
646 }
647
648 return false;
649}
650
651size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
652 struct syscall_arg *arg)
653{
654 return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
655}
656
657#ifndef AT_FDCWD
658#define AT_FDCWD -100
659#endif
660
661static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
662 struct syscall_arg *arg)
663{
664 int fd = arg->val;
665 const char *prefix = "AT_FD";
666
667 if (fd == AT_FDCWD)
668 return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");
669
670 return syscall_arg__scnprintf_fd(bf, size, arg);
671}
672
673#define SCA_FDAT syscall_arg__scnprintf_fd_at
674
675static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
676 struct syscall_arg *arg);
677
678#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
679
680size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
681{
682 return scnprintf(bf, size, "%#lx", arg->val);
683}
684
685size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
686{
687 if (arg->val == 0)
688 return scnprintf(bf, size, "NULL");
689 return syscall_arg__scnprintf_hex(bf, size, arg);
690}
691
692size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
693{
694 return scnprintf(bf, size, "%d", arg->val);
695}
696
697size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
698{
699 return scnprintf(bf, size, "%ld", arg->val);
700}
701
702static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
703{
704 // XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
705 // fill missing comms using thread__set_comm()...
706 // here or in a special syscall_arg__scnprintf_pid_sched_tp...
707 return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
708}
709
710#define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array
711
712static const char *bpf_cmd[] = {
713 "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
714 "MAP_GET_NEXT_KEY", "PROG_LOAD", "OBJ_PIN", "OBJ_GET", "PROG_ATTACH",
715 "PROG_DETACH", "PROG_TEST_RUN", "PROG_GET_NEXT_ID", "MAP_GET_NEXT_ID",
716 "PROG_GET_FD_BY_ID", "MAP_GET_FD_BY_ID", "OBJ_GET_INFO_BY_FD",
717 "PROG_QUERY", "RAW_TRACEPOINT_OPEN", "BTF_LOAD", "BTF_GET_FD_BY_ID",
718 "TASK_FD_QUERY", "MAP_LOOKUP_AND_DELETE_ELEM", "MAP_FREEZE",
719 "BTF_GET_NEXT_ID", "MAP_LOOKUP_BATCH", "MAP_LOOKUP_AND_DELETE_BATCH",
720 "MAP_UPDATE_BATCH", "MAP_DELETE_BATCH", "LINK_CREATE", "LINK_UPDATE",
721 "LINK_GET_FD_BY_ID", "LINK_GET_NEXT_ID", "ENABLE_STATS", "ITER_CREATE",
722 "LINK_DETACH", "PROG_BIND_MAP",
723};
724static DEFINE_STRARRAY(bpf_cmd, "BPF_");
725
726static const char *fsmount_flags[] = {
727 [1] = "CLOEXEC",
728};
729static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");
730
731#include "trace/beauty/generated/fsconfig_arrays.c"
732
733static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");
734
735static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
736static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
737
738static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
739static DEFINE_STRARRAY(itimers, "ITIMER_");
740
741static const char *keyctl_options[] = {
742 "GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
743 "SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
744 "INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
745 "ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
746 "INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
747};
748static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");
749
750static const char *whences[] = { "SET", "CUR", "END",
751#ifdef SEEK_DATA
752"DATA",
753#endif
754#ifdef SEEK_HOLE
755"HOLE",
756#endif
757};
758static DEFINE_STRARRAY(whences, "SEEK_");
759
760static const char *fcntl_cmds[] = {
761 "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
762 "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
763 "SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
764 "GETOWNER_UIDS",
765};
766static DEFINE_STRARRAY(fcntl_cmds, "F_");
767
768static const char *fcntl_linux_specific_cmds[] = {
769 "SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
770 "SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
771 "GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
772};
773
774static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);
775
776static struct strarray *fcntl_cmds_arrays[] = {
777 &strarray__fcntl_cmds,
778 &strarray__fcntl_linux_specific_cmds,
779};
780
781static DEFINE_STRARRAYS(fcntl_cmds_arrays);
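/*
 * fcntl commands live in two ranges: the classic ones above and the
 * Linux-specific ones starting at F_LINUX_SPECIFIC_BASE (1024), so both
 * strarrays are wrapped together and searched with a single lookup.
 */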
782
783static const char *rlimit_resources[] = {
784 "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
785 "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
786 "RTTIME",
787};
788static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");
789
790static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
791static DEFINE_STRARRAY(sighow, "SIG_");
792
793static const char *clockid[] = {
794 "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
795 "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
796 "REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
797};
798static DEFINE_STRARRAY(clockid, "CLOCK_");
799
800static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
801 struct syscall_arg *arg)
802{
803 bool show_prefix = arg->show_string_prefix;
804 const char *suffix = "_OK";
805 size_t printed = 0;
806 int mode = arg->val;
807
808 if (mode == F_OK) /* 0 */
809 return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
810#define P_MODE(n) \
811 if (mode & n##_OK) { \
812 printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
813 mode &= ~n##_OK; \
814 }
815
816 P_MODE(R);
817 P_MODE(W);
818 P_MODE(X);
819#undef P_MODE
820
821 if (mode)
822 printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
823
824 return printed;
825}
826
827#define SCA_ACCMODE syscall_arg__scnprintf_access_mode
828
829static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
830 struct syscall_arg *arg);
831
832#define SCA_FILENAME syscall_arg__scnprintf_filename
833
834static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
835 struct syscall_arg *arg)
836{
837 bool show_prefix = arg->show_string_prefix;
838 const char *prefix = "O_";
839 int printed = 0, flags = arg->val;
840
841#define P_FLAG(n) \
842 if (flags & O_##n) { \
843 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
844 flags &= ~O_##n; \
845 }
846
847 P_FLAG(CLOEXEC);
848 P_FLAG(NONBLOCK);
849#undef P_FLAG
850
851 if (flags)
852 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
853
854 return printed;
855}
856
857#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
858
859#ifndef GRND_NONBLOCK
860#define GRND_NONBLOCK 0x0001
861#endif
862#ifndef GRND_RANDOM
863#define GRND_RANDOM 0x0002
864#endif
865
866static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
867 struct syscall_arg *arg)
868{
869 bool show_prefix = arg->show_string_prefix;
870 const char *prefix = "GRND_";
871 int printed = 0, flags = arg->val;
872
873#define P_FLAG(n) \
874 if (flags & GRND_##n) { \
875 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
876 flags &= ~GRND_##n; \
877 }
878
879 P_FLAG(RANDOM);
880 P_FLAG(NONBLOCK);
881#undef P_FLAG
882
883 if (flags)
884 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
885
886 return printed;
887}
888
889#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
890
891#define STRARRAY(name, array) \
892 { .scnprintf = SCA_STRARRAY, \
893 .strtoul = STUL_STRARRAY, \
894 .parm = &strarray__##array, }
895
896#define STRARRAY_FLAGS(name, array) \
897 { .scnprintf = SCA_STRARRAY_FLAGS, \
898 .strtoul = STUL_STRARRAY_FLAGS, \
899 .parm = &strarray__##array, }
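/*
 * These initializers are used in the syscall_fmts table below, e.g.
 * { .name = "bpf", .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, } makes the
 * first 'bpf' argument print as "MAP_CREATE", "PROG_LOAD", etc.
 */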
900
901#include "trace/beauty/arch_errno_names.c"
902#include "trace/beauty/eventfd.c"
903#include "trace/beauty/futex_op.c"
904#include "trace/beauty/futex_val3.c"
905#include "trace/beauty/mmap.c"
906#include "trace/beauty/mode_t.c"
907#include "trace/beauty/msg_flags.c"
908#include "trace/beauty/open_flags.c"
909#include "trace/beauty/perf_event_open.c"
910#include "trace/beauty/pid.c"
911#include "trace/beauty/sched_policy.c"
912#include "trace/beauty/seccomp.c"
913#include "trace/beauty/signum.c"
914#include "trace/beauty/socket_type.c"
915#include "trace/beauty/waitid_options.c"
916
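/*
 * Keep this table sorted by ->name: syscall_fmt__find() does a bsearch()
 * over it.
 */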
917static struct syscall_fmt syscall_fmts[] = {
918 { .name = "access",
919 .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
920 { .name = "arch_prctl",
921 .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
922 [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
923 { .name = "bind",
924 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
925 [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
926 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
927 { .name = "bpf",
928 .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
929 { .name = "brk", .hexret = true,
930 .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
931 { .name = "clock_gettime",
932 .arg = { [0] = STRARRAY(clk_id, clockid), }, },
933 { .name = "clock_nanosleep",
934 .arg = { [2] = { .scnprintf = SCA_TIMESPEC, /* rqtp */ }, }, },
935 { .name = "clone", .errpid = true, .nr_args = 5,
936 .arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, },
937 [1] = { .name = "child_stack", .scnprintf = SCA_HEX, },
938 [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
939 [3] = { .name = "child_tidptr", .scnprintf = SCA_HEX, },
940 [4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, },
941 { .name = "close",
942 .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
943 { .name = "connect",
944 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
945 [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
946 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
947 { .name = "epoll_ctl",
948 .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
949 { .name = "eventfd2",
950 .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
951 { .name = "fchmodat",
952 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
953 { .name = "fchownat",
954 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
955 { .name = "fcntl",
956 .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
957 .strtoul = STUL_STRARRAYS,
958 .parm = &strarrays__fcntl_cmds_arrays,
959 .show_zero = true, },
960 [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
961 { .name = "flock",
962 .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
963 { .name = "fsconfig",
964 .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
965 { .name = "fsmount",
966 .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
967 [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
968 { .name = "fspick",
969 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
970 [1] = { .scnprintf = SCA_FILENAME, /* path */ },
971 [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
972 { .name = "fstat", .alias = "newfstat", },
973 { .name = "fstatat", .alias = "newfstatat", },
974 { .name = "futex",
975 .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
976 [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
977 { .name = "futimesat",
978 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
979 { .name = "getitimer",
980 .arg = { [0] = STRARRAY(which, itimers), }, },
981 { .name = "getpid", .errpid = true, },
982 { .name = "getpgid", .errpid = true, },
983 { .name = "getppid", .errpid = true, },
984 { .name = "getrandom",
985 .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
986 { .name = "getrlimit",
987 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
988 { .name = "getsockopt",
989 .arg = { [1] = STRARRAY(level, socket_level), }, },
990 { .name = "gettid", .errpid = true, },
991 { .name = "ioctl",
992 .arg = {
993#if defined(__i386__) || defined(__x86_64__)
994/*
995 * FIXME: Make this available to all arches.
996 */
997 [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
998 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
999#else
1000 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
1001#endif
1002 { .name = "kcmp", .nr_args = 5,
1003 .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, },
1004 [1] = { .name = "pid2", .scnprintf = SCA_PID, },
1005 [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, },
1006 [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, },
1007 [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, },
1008 { .name = "keyctl",
1009 .arg = { [0] = STRARRAY(option, keyctl_options), }, },
1010 { .name = "kill",
1011 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1012 { .name = "linkat",
1013 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1014 { .name = "lseek",
1015 .arg = { [2] = STRARRAY(whence, whences), }, },
1016 { .name = "lstat", .alias = "newlstat", },
1017 { .name = "madvise",
1018 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1019 [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
1020 { .name = "mkdirat",
1021 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1022 { .name = "mknodat",
1023 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1024 { .name = "mmap", .hexret = true,
1025/* The standard mmap maps to old_mmap on s390x */
1026#if defined(__s390x__)
1027 .alias = "old_mmap",
1028#endif
1029 .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
1030 [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */
1031 .strtoul = STUL_STRARRAY_FLAGS,
1032 .parm = &strarray__mmap_flags, },
1033 [5] = { .scnprintf = SCA_HEX, /* offset */ }, }, },
1034 { .name = "mount",
1035 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
1036 [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
1037 .mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
1038 { .name = "move_mount",
1039 .arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ },
1040 [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
1041 [2] = { .scnprintf = SCA_FDAT, /* to_dfd */ },
1042 [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
1043 [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
1044 { .name = "mprotect",
1045 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1046 [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, },
1047 { .name = "mq_unlink",
1048 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
1049 { .name = "mremap", .hexret = true,
1050 .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
1051 { .name = "name_to_handle_at",
1052 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1053 { .name = "newfstatat",
1054 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1055 { .name = "open",
1056 .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1057 { .name = "open_by_handle_at",
1058 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
1059 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1060 { .name = "openat",
1061 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
1062 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1063 { .name = "perf_event_open",
1064 .arg = { [0] = { .scnprintf = SCA_PERF_ATTR, /* attr */ },
1065 [2] = { .scnprintf = SCA_INT, /* cpu */ },
1066 [3] = { .scnprintf = SCA_FD, /* group_fd */ },
1067 [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
1068 { .name = "pipe2",
1069 .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
1070 { .name = "pkey_alloc",
1071 .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, },
1072 { .name = "pkey_free",
1073 .arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, },
1074 { .name = "pkey_mprotect",
1075 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1076 [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
1077 [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
1078 { .name = "poll", .timeout = true, },
1079 { .name = "ppoll", .timeout = true, },
1080 { .name = "prctl",
1081 .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
1082 .strtoul = STUL_STRARRAY,
1083 .parm = &strarray__prctl_options, },
1084 [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
1085 [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
1086 { .name = "pread", .alias = "pread64", },
1087 { .name = "preadv", .alias = "pread", },
1088 { .name = "prlimit64",
1089 .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
1090 { .name = "pwrite", .alias = "pwrite64", },
1091 { .name = "readlinkat",
1092 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1093 { .name = "recvfrom",
1094 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1095 { .name = "recvmmsg",
1096 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1097 { .name = "recvmsg",
1098 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1099 { .name = "renameat",
1100 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
1101 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
1102 { .name = "renameat2",
1103 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
1104 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
1105 [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
1106 { .name = "rt_sigaction",
1107 .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1108 { .name = "rt_sigprocmask",
1109 .arg = { [0] = STRARRAY(how, sighow), }, },
1110 { .name = "rt_sigqueueinfo",
1111 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1112 { .name = "rt_tgsigqueueinfo",
1113 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1114 { .name = "sched_setscheduler",
1115 .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
1116 { .name = "seccomp",
1117 .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ },
1118 [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
1119 { .name = "select", .timeout = true, },
1120 { .name = "sendfile", .alias = "sendfile64", },
1121 { .name = "sendmmsg",
1122 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1123 { .name = "sendmsg",
1124 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1125 { .name = "sendto",
1126 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
1127 [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
1128 { .name = "set_tid_address", .errpid = true, },
1129 { .name = "setitimer",
1130 .arg = { [0] = STRARRAY(which, itimers), }, },
1131 { .name = "setrlimit",
1132 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
1133 { .name = "setsockopt",
1134 .arg = { [1] = STRARRAY(level, socket_level), }, },
1135 { .name = "socket",
1136 .arg = { [0] = STRARRAY(family, socket_families),
1137 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
1138 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
1139 { .name = "socketpair",
1140 .arg = { [0] = STRARRAY(family, socket_families),
1141 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
1142 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
1143 { .name = "stat", .alias = "newstat", },
1144 { .name = "statx",
1145 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ },
1146 [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } ,
1147 [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, },
1148 { .name = "swapoff",
1149 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
1150 { .name = "swapon",
1151 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
1152 { .name = "symlinkat",
1153 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1154 { .name = "sync_file_range",
1155 .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
1156 { .name = "tgkill",
1157 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1158 { .name = "tkill",
1159 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1160 { .name = "umount2", .alias = "umount",
1161 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
1162 { .name = "uname", .alias = "newuname", },
1163 { .name = "unlinkat",
1164 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1165 { .name = "utimensat",
1166 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
1167 { .name = "wait4", .errpid = true,
1168 .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
1169 { .name = "waitid", .errpid = true,
1170 .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
1171};
1172
1173static int syscall_fmt__cmp(const void *name, const void *fmtp)
1174{
1175 const struct syscall_fmt *fmt = fmtp;
1176 return strcmp(name, fmt->name);
1177}
1178
1179static struct syscall_fmt *__syscall_fmt__find(struct syscall_fmt *fmts, const int nmemb, const char *name)
1180{
1181 return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
1182}
1183
1184static struct syscall_fmt *syscall_fmt__find(const char *name)
1185{
1186 const int nmemb = ARRAY_SIZE(syscall_fmts);
1187 return __syscall_fmt__find(syscall_fmts, nmemb, name);
1188}
1189
1190static struct syscall_fmt *__syscall_fmt__find_by_alias(struct syscall_fmt *fmts, const int nmemb, const char *alias)
1191{
1192 int i;
1193
1194 for (i = 0; i < nmemb; ++i) {
1195 if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
1196 return &fmts[i];
1197 }
1198
1199 return NULL;
1200}
1201
1202static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
1203{
1204 const int nmemb = ARRAY_SIZE(syscall_fmts);
1205 return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
1206}
1207
1208/*
1209 * is_exit: is this "exit" or "exit_group"?
1210 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
1211 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
1212 * nonexistent: Just a hole in the syscall table, syscall id not allocated
1213 */
1214struct syscall {
1215 struct tep_event *tp_format;
1216 int nr_args;
1217 int args_size;
1218 struct {
1219 struct bpf_program *sys_enter,
1220 *sys_exit;
1221 } bpf_prog;
1222 bool is_exit;
1223 bool is_open;
1224 bool nonexistent;
1225 struct tep_format_field *args;
1226 const char *name;
1227 struct syscall_fmt *fmt;
1228 struct syscall_arg_fmt *arg_fmt;
1229};
1230
/*
 * We need this 'calculated' boolean because in some cases we really don't
 * know what the duration of a syscall is, for instance, when we start a
 * session and some threads are already waiting for a syscall to finish,
 * say 'poll', in which case all we can do is print "( ? )" for the
 * duration and for the start timestamp.
 */
1238static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
1239{
1240 double duration = (double)t / NSEC_PER_MSEC;
1241 size_t printed = fprintf(fp, "(");
1242
1243 if (!calculated)
1244 printed += fprintf(fp, " ");
1245 else if (duration >= 1.0)
1246 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
1247 else if (duration >= 0.01)
1248 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
1249 else
1250 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
1251 return printed + fprintf(fp, "): ");
1252}
1253
1254/**
1255 * filename.ptr: The filename char pointer that will be vfs_getname'd
1256 * filename.entry_str_pos: Where to insert the string translated from
1257 * filename.ptr by the vfs_getname tracepoint/kprobe.
1258 * ret_scnprintf: syscall args may set this to a different syscall return
1259 * formatter, for instance, fcntl may return fds, file flags, etc.
1260 */
1261struct thread_trace {
1262 u64 entry_time;
1263 bool entry_pending;
1264 unsigned long nr_events;
1265 unsigned long pfmaj, pfmin;
1266 char *entry_str;
1267 double runtime_ms;
1268 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
1269 struct {
1270 unsigned long ptr;
1271 short int entry_str_pos;
1272 bool pending_open;
1273 unsigned int namelen;
1274 char *name;
1275 } filename;
1276 struct {
1277 int max;
1278 struct file *table;
1279 } files;
1280
1281 struct intlist *syscall_stats;
1282};
1283
1284static struct thread_trace *thread_trace__new(void)
1285{
1286 struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
1287
1288 if (ttrace) {
1289 ttrace->files.max = -1;
1290 ttrace->syscall_stats = intlist__new(NULL);
1291 }
1292
1293 return ttrace;
1294}
1295
1296static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
1297{
1298 struct thread_trace *ttrace;
1299
1300 if (thread == NULL)
1301 goto fail;
1302
1303 if (thread__priv(thread) == NULL)
1304 thread__set_priv(thread, thread_trace__new());
1305
1306 if (thread__priv(thread) == NULL)
1307 goto fail;
1308
1309 ttrace = thread__priv(thread);
1310 ++ttrace->nr_events;
1311
1312 return ttrace;
1313fail:
1314 color_fprintf(fp, PERF_COLOR_RED,
1315 "WARNING: not enough memory, dropping samples!\n");
1316 return NULL;
1317}
1318
1319
1320void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
1321 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
1322{
1323 struct thread_trace *ttrace = thread__priv(arg->thread);
1324
1325 ttrace->ret_scnprintf = ret_scnprintf;
1326}
1327
1328#define TRACE_PFMAJ (1 << 0)
1329#define TRACE_PFMIN (1 << 1)
1330
1331static const size_t trace__entry_str_size = 2048;
1332
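/*
 * Grow the per-thread fd -> struct file table on demand, so that
 * thread__fd_path() can cache the pathname resolved for each descriptor.
 */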
1333static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
1334{
1335 if (fd < 0)
1336 return NULL;
1337
1338 if (fd > ttrace->files.max) {
1339 struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
1340
1341 if (nfiles == NULL)
1342 return NULL;
1343
1344 if (ttrace->files.max != -1) {
1345 memset(nfiles + ttrace->files.max + 1, 0,
1346 (fd - ttrace->files.max) * sizeof(struct file));
1347 } else {
1348 memset(nfiles, 0, (fd + 1) * sizeof(struct file));
1349 }
1350
1351 ttrace->files.table = nfiles;
1352 ttrace->files.max = fd;
1353 }
1354
1355 return ttrace->files.table + fd;
1356}
1357
1358struct file *thread__files_entry(struct thread *thread, int fd)
1359{
1360 return thread_trace__files_entry(thread__priv(thread), fd);
1361}
1362
1363static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
1364{
1365 struct thread_trace *ttrace = thread__priv(thread);
1366 struct file *file = thread_trace__files_entry(ttrace, fd);
1367
1368 if (file != NULL) {
1369 struct stat st;
1370 if (stat(pathname, &st) == 0)
1371 file->dev_maj = major(st.st_rdev);
1372 file->pathname = strdup(pathname);
1373 if (file->pathname)
1374 return 0;
1375 }
1376
1377 return -1;
1378}
1379
1380static int thread__read_fd_path(struct thread *thread, int fd)
1381{
1382 char linkname[PATH_MAX], pathname[PATH_MAX];
1383 struct stat st;
1384 int ret;
1385
1386 if (thread->pid_ == thread->tid) {
1387 scnprintf(linkname, sizeof(linkname),
1388 "/proc/%d/fd/%d", thread->pid_, fd);
1389 } else {
1390 scnprintf(linkname, sizeof(linkname),
1391 "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
1392 }
1393
1394 if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
1395 return -1;
1396
1397 ret = readlink(linkname, pathname, sizeof(pathname));
1398
1399 if (ret < 0 || ret > st.st_size)
1400 return -1;
1401
1402 pathname[ret] = '\0';
1403 return trace__set_fd_pathname(thread, fd, pathname);
1404}
1405
1406static const char *thread__fd_path(struct thread *thread, int fd,
1407 struct trace *trace)
1408{
1409 struct thread_trace *ttrace = thread__priv(thread);
1410
1411 if (ttrace == NULL || trace->fd_path_disabled)
1412 return NULL;
1413
1414 if (fd < 0)
1415 return NULL;
1416
1417 if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
1418 if (!trace->live)
1419 return NULL;
1420 ++trace->stats.proc_getname;
1421 if (thread__read_fd_path(thread, fd))
1422 return NULL;
1423 }
1424
1425 return ttrace->files.table[fd].pathname;
1426}
1427
1428size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
1429{
1430 int fd = arg->val;
1431 size_t printed = scnprintf(bf, size, "%d", fd);
1432 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1433
1434 if (path)
1435 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1436
1437 return printed;
1438}
1439
1440size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1441{
1442 size_t printed = scnprintf(bf, size, "%d", fd);
1443 struct thread *thread = machine__find_thread(trace->host, pid, pid);
1444
1445 if (thread) {
1446 const char *path = thread__fd_path(thread, fd, trace);
1447
1448 if (path)
1449 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1450
1451 thread__put(thread);
1452 }
1453
1454 return printed;
1455}
1456
1457static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
1458 struct syscall_arg *arg)
1459{
1460 int fd = arg->val;
1461 size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
1462 struct thread_trace *ttrace = thread__priv(arg->thread);
1463
1464 if (ttrace && fd >= 0 && fd <= ttrace->files.max)
1465 zfree(&ttrace->files.table[fd].pathname);
1466
1467 return printed;
1468}
1469
1470static void thread__set_filename_pos(struct thread *thread, const char *bf,
1471 unsigned long ptr)
1472{
1473 struct thread_trace *ttrace = thread__priv(thread);
1474
1475 ttrace->filename.ptr = ptr;
1476 ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1477}
1478
1479static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
1480{
1481 struct augmented_arg *augmented_arg = arg->augmented.args;
1482 size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
1483 /*
1484 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
1485 * we would have two strings, each prefixed by its size.
1486 */
1487 int consumed = sizeof(*augmented_arg) + augmented_arg->size;
1488
1489 arg->augmented.args = ((void *)arg->augmented.args) + consumed;
1490 arg->augmented.size -= consumed;
1491
1492 return printed;
1493}
1494
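/*
 * When the filename isn't available as an augmented arg, record where in
 * ttrace->entry_str the string should go; the probe:vfs_getname handler
 * is expected to paste the resolved pathname at that position later.
 */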
1495static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1496 struct syscall_arg *arg)
1497{
1498 unsigned long ptr = arg->val;
1499
1500 if (arg->augmented.args)
1501 return syscall_arg__scnprintf_augmented_string(arg, bf, size);
1502
1503 if (!arg->trace->vfs_getname)
1504 return scnprintf(bf, size, "%#x", ptr);
1505
1506 thread__set_filename_pos(arg->thread, bf, ptr);
1507 return 0;
1508}
1509
1510static bool trace__filter_duration(struct trace *trace, double t)
1511{
1512 return t < (trace->duration_filter * NSEC_PER_MSEC);
1513}
1514
1515static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1516{
1517 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1518
1519 return fprintf(fp, "%10.3f ", ts);
1520}
1521
/*
 * We handle tstamp=0 as an undefined tstamp, i.e. as when we use
 * ttrace->entry_time for a thread that receives a sys_exit without
 * first having received a sys_enter (a "poll" issued before the tracing
 * session started, or a sys_enter lost to a ring buffer overflow).
 */
1528static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1529{
1530 if (tstamp > 0)
1531 return __trace__fprintf_tstamp(trace, tstamp, fp);
1532
1533 return fprintf(fp, " ? ");
1534}
1535
1536static pid_t workload_pid = -1;
1537static volatile sig_atomic_t done = false;
1538static volatile sig_atomic_t interrupted = false;
1539
1540static void sighandler_interrupt(int sig __maybe_unused)
1541{
1542 done = interrupted = true;
1543}
1544
1545static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
1546 void *context __maybe_unused)
1547{
1548 if (info->si_pid == workload_pid)
1549 done = true;
1550}
1551
1552static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
1553{
1554 size_t printed = 0;
1555
1556 if (trace->multiple_threads) {
1557 if (trace->show_comm)
1558 printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1559 printed += fprintf(fp, "%d ", thread->tid);
1560 }
1561
1562 return printed;
1563}
1564
1565static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1566 u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
1567{
1568 size_t printed = 0;
1569
1570 if (trace->show_tstamp)
1571 printed = trace__fprintf_tstamp(trace, tstamp, fp);
1572 if (trace->show_duration)
1573 printed += fprintf_duration(duration, duration_calculated, fp);
1574 return printed + trace__fprintf_comm_tid(trace, thread, fp);
1575}
1576
1577static int trace__process_event(struct trace *trace, struct machine *machine,
1578 union perf_event *event, struct perf_sample *sample)
1579{
1580 int ret = 0;
1581
1582 switch (event->header.type) {
1583 case PERF_RECORD_LOST:
1584 color_fprintf(trace->output, PERF_COLOR_RED,
1585 "LOST %" PRIu64 " events!\n", event->lost.lost);
1586 ret = machine__process_lost_event(machine, event, sample);
1587 break;
1588 default:
1589 ret = machine__process_event(machine, event, sample);
1590 break;
1591 }
1592
1593 return ret;
1594}
1595
1596static int trace__tool_process(struct perf_tool *tool,
1597 union perf_event *event,
1598 struct perf_sample *sample,
1599 struct machine *machine)
1600{
1601 struct trace *trace = container_of(tool, struct trace, tool);
1602 return trace__process_event(trace, machine, event, sample);
1603}
1604
1605static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
1606{
1607 struct machine *machine = vmachine;
1608
1609 if (machine->kptr_restrict_warned)
1610 return NULL;
1611
1612 if (symbol_conf.kptr_restrict) {
1613 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1614 "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
1615 "Kernel samples will not be resolved.\n");
1616 machine->kptr_restrict_warned = true;
1617 return NULL;
1618 }
1619
1620 return machine__resolve_kernel_addr(vmachine, addrp, modp);
1621}
1622
1623static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
1624{
1625 int err = symbol__init(NULL);
1626
1627 if (err)
1628 return err;
1629
1630 trace->host = machine__new_host();
1631 if (trace->host == NULL)
1632 return -ENOMEM;
1633
1634 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1635 if (err < 0)
1636 goto out;
1637
1638 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1639 evlist->core.threads, trace__tool_process,
1640 true, false, 1);
1641out:
1642 if (err)
1643 symbol__exit();
1644
1645 return err;
1646}
1647
1648static void trace__symbols__exit(struct trace *trace)
1649{
1650 machine__exit(trace->host);
1651 trace->host = NULL;
1652
1653 symbol__exit();
1654}
1655
1656static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
1657{
1658 int idx;
1659
1660 if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
1661 nr_args = sc->fmt->nr_args;
1662
1663 sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
1664 if (sc->arg_fmt == NULL)
1665 return -1;
1666
1667 for (idx = 0; idx < nr_args; ++idx) {
1668 if (sc->fmt)
1669 sc->arg_fmt[idx] = sc->fmt->arg[idx];
1670 }
1671
1672 sc->nr_args = nr_args;
1673 return 0;
1674}
1675
1676static struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
1677 { .name = "msr", .scnprintf = SCA_X86_MSR, .strtoul = STUL_X86_MSR, },
1678 { .name = "vector", .scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, },
1679};
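/* Like syscall_fmts, this array is bsearch()ed by ->name, so keep it sorted. */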
1680
1681static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
1682{
1683 const struct syscall_arg_fmt *fmt = fmtp;
1684 return strcmp(name, fmt->name);
1685}
1686
1687static struct syscall_arg_fmt *
1688__syscall_arg_fmt__find_by_name(struct syscall_arg_fmt *fmts, const int nmemb, const char *name)
1689{
1690 return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
1691}
1692
1693static struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
1694{
1695 const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
1696 return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
1697}
1698
1699static struct tep_format_field *
1700syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
1701{
1702 struct tep_format_field *last_field = NULL;
1703 int len;
1704
1705 for (; field; field = field->next, ++arg) {
1706 last_field = field;
1707
1708 if (arg->scnprintf)
1709 continue;
1710
1711 len = strlen(field->name);
1712
1713 if (strcmp(field->type, "const char *") == 0 &&
1714 ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
1715 strstr(field->name, "path") != NULL))
1716 arg->scnprintf = SCA_FILENAME;
1717 else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
1718 arg->scnprintf = SCA_PTR;
1719 else if (strcmp(field->type, "pid_t") == 0)
1720 arg->scnprintf = SCA_PID;
1721 else if (strcmp(field->type, "umode_t") == 0)
1722 arg->scnprintf = SCA_MODE_T;
1723 else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
1724 arg->scnprintf = SCA_CHAR_ARRAY;
1725 arg->nr_entries = field->arraylen;
1726 } else if ((strcmp(field->type, "int") == 0 ||
1727 strcmp(field->type, "unsigned int") == 0 ||
1728 strcmp(field->type, "long") == 0) &&
1729 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
1730 /*
1731 * /sys/kernel/tracing/events/syscalls/sys_enter*
1732 * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
1733 * 65 int
1734 * 23 unsigned int
1735 * 7 unsigned long
1736 */
1737 arg->scnprintf = SCA_FD;
1738 } else {
1739 struct syscall_arg_fmt *fmt = syscall_arg_fmt__find_by_name(field->name);
1740
1741 if (fmt) {
1742 arg->scnprintf = fmt->scnprintf;
1743 arg->strtoul = fmt->strtoul;
1744 }
1745 }
1746 }
1747
1748 return last_field;
1749}
1750
1751static int syscall__set_arg_fmts(struct syscall *sc)
1752{
1753 struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);
1754
1755 if (last_field)
1756 sc->args_size = last_field->offset + last_field->size;
1757
1758 return 0;
1759}
1760
1761static int trace__read_syscall_info(struct trace *trace, int id)
1762{
1763 char tp_name[128];
1764 struct syscall *sc;
1765 const char *name = syscalltbl__name(trace->sctbl, id);
1766
1767#ifdef HAVE_SYSCALL_TABLE_SUPPORT
1768 if (trace->syscalls.table == NULL) {
1769 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
1770 if (trace->syscalls.table == NULL)
1771 return -ENOMEM;
1772 }
1773#else
1774 if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
		// When using libaudit we don't know beforehand what the max syscall id is
1776 struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
1777
1778 if (table == NULL)
1779 return -ENOMEM;
1780
		// Zero the whole table if it is brand new, otherwise just the newly added entries
1782 if (trace->syscalls.table == NULL)
1783 memset(table, 0, (id + 1) * sizeof(*sc));
1784 else
1785 memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
1786
1787 trace->syscalls.table = table;
1788 trace->sctbl->syscalls.max_id = id;
1789 }
1790#endif
1791 sc = trace->syscalls.table + id;
1792 if (sc->nonexistent)
1793 return -EEXIST;
1794
1795 if (name == NULL) {
1796 sc->nonexistent = true;
1797 return -EEXIST;
1798 }
1799
1800 sc->name = name;
1801 sc->fmt = syscall_fmt__find(sc->name);
1802
1803 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
1804 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1805
1806 if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
1807 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
1808 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1809 }
1810
1811 /*
1812	 * If we fail to read the tracepoint format via the sysfs node, the
1813	 * tracepoint doesn't exist. Set the 'nonexistent' flag to true.
1814 */
1815 if (IS_ERR(sc->tp_format)) {
1816 sc->nonexistent = true;
1817 return PTR_ERR(sc->tp_format);
1818 }
1819
1820 if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
1821 RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
1822 return -ENOMEM;
1823
1824 sc->args = sc->tp_format->format.fields;
1825 /*
1826	 * We need to check and discard the first field, '__syscall_nr' or 'nr',
1827	 * which holds the syscall number and is not needed here ('nr' is the
1828	 * name used on older kernels), so drop it.
1829 */
1830 if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
1831 sc->args = sc->args->next;
1832 --sc->nr_args;
1833 }
1834
1835 sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
1836 sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
1837
1838 return syscall__set_arg_fmts(sc);
1839}
1840
1841static int evsel__init_tp_arg_scnprintf(struct evsel *evsel)
1842{
1843 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
1844
1845 if (fmt != NULL) {
1846 syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields);
1847 return 0;
1848 }
1849
1850 return -ENOMEM;
1851}
1852
1853static int intcmp(const void *a, const void *b)
1854{
1855 const int *one = a, *another = b;
1856
1857 return *one - *another;
1858}
1859
1860static int trace__validate_ev_qualifier(struct trace *trace)
1861{
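	/*
	 * Illustrative note: the qualifier list (typically built from 'perf trace -e ...')
	 * may contain globs, so an invocation such as 'perf trace -e "open*"' gets
	 * expanded below via syscalltbl__strglobmatch_first()/_next() into open, openat,
	 * openat2, etc., the exact set depending on this architecture's syscall table.
	 */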
1862 int err = 0;
1863 bool printed_invalid_prefix = false;
1864 struct str_node *pos;
1865 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
1866
1867 trace->ev_qualifier_ids.entries = malloc(nr_allocated *
1868 sizeof(trace->ev_qualifier_ids.entries[0]));
1869
1870 if (trace->ev_qualifier_ids.entries == NULL) {
1871 fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
1872 trace->output);
1873 err = -EINVAL;
1874 goto out;
1875 }
1876
1877 strlist__for_each_entry(pos, trace->ev_qualifier) {
1878 const char *sc = pos->s;
1879 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
1880
1881 if (id < 0) {
1882 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
1883 if (id >= 0)
1884 goto matches;
1885
1886 if (!printed_invalid_prefix) {
1887 pr_debug("Skipping unknown syscalls: ");
1888 printed_invalid_prefix = true;
1889 } else {
1890 pr_debug(", ");
1891 }
1892
1893 pr_debug("%s", sc);
1894 continue;
1895 }
1896matches:
1897 trace->ev_qualifier_ids.entries[nr_used++] = id;
1898 if (match_next == -1)
1899 continue;
1900
1901 while (1) {
1902 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
1903 if (id < 0)
1904 break;
1905 if (nr_allocated == nr_used) {
1906 void *entries;
1907
1908 nr_allocated += 8;
1909 entries = realloc(trace->ev_qualifier_ids.entries,
1910 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
1911 if (entries == NULL) {
1912 err = -ENOMEM;
1913 fputs("\nError:\t Not enough memory for parsing\n", trace->output);
1914 goto out_free;
1915 }
1916 trace->ev_qualifier_ids.entries = entries;
1917 }
1918 trace->ev_qualifier_ids.entries[nr_used++] = id;
1919 }
1920 }
1921
1922 trace->ev_qualifier_ids.nr = nr_used;
1923 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
1924out:
1925 if (printed_invalid_prefix)
1926 pr_debug("\n");
1927 return err;
1928out_free:
1929 zfree(&trace->ev_qualifier_ids.entries);
1930 trace->ev_qualifier_ids.nr = 0;
1931 goto out;
1932}
1933
1934static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
1935{
1936 bool in_ev_qualifier;
1937
1938 if (trace->ev_qualifier_ids.nr == 0)
1939 return true;
1940
1941 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
1942 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
1943
1944 if (in_ev_qualifier)
1945 return !trace->not_ev_qualifier;
1946
1947 return trace->not_ev_qualifier;
1948}
1949
1950/*
1951 * args is to be interpreted as a series of longs but we need to handle
1952 * 8-byte unaligned accesses. args points to raw_data within the event
1953 * and raw_data is guaranteed to be 8-byte unaligned because it is
1954 * preceded by raw_size which is a u32. So we need to copy args to a temp
1955 * variable to read it. Most notably this avoids extended load instructions
1956 * on unaligned addresses.
1957 */
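/*
 * A rough sketch of the layout this handles (sizes assumed for a 64-bit
 * target; it is the u32 raw_size that pushes raw_data off 8-byte alignment):
 *
 *   ... | u32 raw_size | raw_data: [arg0][arg1]...[arg5] | ...
 *                        ^ starts 4 bytes past an 8-byte boundary, so each
 *                          8-byte arg load may be unaligned, hence the memcpy().
 */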
1958unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
1959{
1960 unsigned long val;
1961 unsigned char *p = arg->args + sizeof(unsigned long) * idx;
1962
1963 memcpy(&val, p, sizeof(val));
1964 return val;
1965}
1966
1967static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
1968 struct syscall_arg *arg)
1969{
1970 if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
1971 return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);
1972
1973 return scnprintf(bf, size, "arg%d: ", arg->idx);
1974}
1975
1976/*
1977 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
1978 * as mount 'flags' argument that needs ignoring some magic flag, see comment
1979 * in tools/perf/trace/beauty/mount_flags.c
1980 */
1981static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
1982{
1983 if (fmt && fmt->mask_val)
1984 return fmt->mask_val(arg, val);
1985
1986 return val;
1987}
1988
1989static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
1990 struct syscall_arg *arg, unsigned long val)
1991{
1992 if (fmt && fmt->scnprintf) {
1993 arg->val = val;
1994 if (fmt->parm)
1995 arg->parm = fmt->parm;
1996 return fmt->scnprintf(bf, size, arg);
1997 }
1998 return scnprintf(bf, size, "%ld", val);
1999}
2000
2001static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
2002 unsigned char *args, void *augmented_args, int augmented_args_size,
2003 struct trace *trace, struct thread *thread)
2004{
2005 size_t printed = 0;
2006 unsigned long val;
2007 u8 bit = 1;
2008 struct syscall_arg arg = {
2009 .args = args,
2010 .augmented = {
2011 .size = augmented_args_size,
2012 .args = augmented_args,
2013 },
2014 .idx = 0,
2015 .mask = 0,
2016 .trace = trace,
2017 .thread = thread,
2018 .show_string_prefix = trace->show_string_prefix,
2019 };
2020 struct thread_trace *ttrace = thread__priv(thread);
2021
2022 /*
2023 * Things like fcntl will set this in its 'cmd' formatter to pick the
2024 * right formatter for the return value (an fd? file flags?), which is
2025 * not needed for syscalls that always return a given type, say an fd.
2026 */
2027 ttrace->ret_scnprintf = NULL;
2028
2029 if (sc->args != NULL) {
2030 struct tep_format_field *field;
2031
2032 for (field = sc->args; field;
2033 field = field->next, ++arg.idx, bit <<= 1) {
2034 if (arg.mask & bit)
2035 continue;
2036
2037 arg.fmt = &sc->arg_fmt[arg.idx];
2038 val = syscall_arg__val(&arg, arg.idx);
2039 /*
2040 * Some syscall args need some mask, most don't and
2041 * return val untouched.
2042 */
2043 val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val);
2044
2045 /*
2046			 * Suppress this argument if its value is zero and
2047			 * we don't have a string associated in a
2048			 * strarray for it.
2049 */
2050 if (val == 0 &&
2051 !trace->show_zeros &&
2052 !(sc->arg_fmt &&
2053 (sc->arg_fmt[arg.idx].show_zero ||
2054 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
2055 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
2056 sc->arg_fmt[arg.idx].parm))
2057 continue;
2058
2059 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
2060
2061 if (trace->show_arg_names)
2062 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
2063
2064 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx],
2065 bf + printed, size - printed, &arg, val);
2066 }
2067 } else if (IS_ERR(sc->tp_format)) {
2068 /*
2069 * If we managed to read the tracepoint /format file, then we
2070 * may end up not having any args, like with gettid(), so only
2071 * print the raw args when we didn't manage to read it.
2072 */
2073 while (arg.idx < sc->nr_args) {
2074 if (arg.mask & bit)
2075 goto next_arg;
2076 val = syscall_arg__val(&arg, arg.idx);
2077 if (printed)
2078 printed += scnprintf(bf + printed, size - printed, ", ");
2079 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
2080 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val);
2081next_arg:
2082 ++arg.idx;
2083 bit <<= 1;
2084 }
2085 }
2086
2087 return printed;
2088}
2089
2090typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
2091 union perf_event *event,
2092 struct perf_sample *sample);
2093
2094static struct syscall *trace__syscall_info(struct trace *trace,
2095 struct evsel *evsel, int id)
2096{
2097 int err = 0;
2098
2099 if (id < 0) {
2100
2101 /*
2102 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
2103 * before that, leaving at a higher verbosity level till that is
2104 * explained. Reproduced with plain ftrace with:
2105 *
2106 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
2107 * grep "NR -1 " /t/trace_pipe
2108 *
2109 * After generating some load on the machine.
2110 */
2111 if (verbose > 1) {
2112 static u64 n;
2113 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
2114 id, evsel__name(evsel), ++n);
2115 }
2116 return NULL;
2117 }
2118
2119 err = -EINVAL;
2120
2121#ifdef HAVE_SYSCALL_TABLE_SUPPORT
2122 if (id > trace->sctbl->syscalls.max_id) {
2123#else
2124 if (id >= trace->sctbl->syscalls.max_id) {
2125 /*
2126		 * With libaudit we don't know beforehand what the max_id is,
2127 * so we let trace__read_syscall_info() figure that out as we
2128 * go on reading syscalls.
2129 */
2130 err = trace__read_syscall_info(trace, id);
2131 if (err)
2132#endif
2133 goto out_cant_read;
2134 }
2135
2136 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2137 (err = trace__read_syscall_info(trace, id)) != 0)
2138 goto out_cant_read;
2139
2140 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
2141 goto out_cant_read;
2142
2143 return &trace->syscalls.table[id];
2144
2145out_cant_read:
2146 if (verbose > 0) {
2147 char sbuf[STRERR_BUFSIZE];
2148 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
2149 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2150 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
2151 fputs(" information\n", trace->output);
2152 }
2153 return NULL;
2154}
2155
2156struct syscall_stats {
2157 struct stats stats;
2158 u64 nr_failures;
2159 int max_errno;
2160 u32 *errnos;
2161};
2162
2163static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace,
2164 int id, struct perf_sample *sample, long err, bool errno_summary)
2165{
2166 struct int_node *inode;
2167 struct syscall_stats *stats;
2168 u64 duration = 0;
2169
2170 inode = intlist__findnew(ttrace->syscall_stats, id);
2171 if (inode == NULL)
2172 return;
2173
2174 stats = inode->priv;
2175 if (stats == NULL) {
2176 stats = zalloc(sizeof(*stats));
2177 if (stats == NULL)
2178 return;
2179
2180 init_stats(&stats->stats);
2181 inode->priv = stats;
2182 }
2183
2184 if (ttrace->entry_time && sample->time > ttrace->entry_time)
2185 duration = sample->time - ttrace->entry_time;
2186
2187 update_stats(&stats->stats, duration);
2188
2189 if (err < 0) {
2190 ++stats->nr_failures;
2191
2192 if (!errno_summary)
2193 return;
2194
2195 err = -err;
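		/*
		 * errnos[] is indexed by errno - 1, so e.g. an ENOENT (2) failure
		 * is counted in errnos[1]; the array grows lazily up to max_errno.
		 */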
2196 if (err > stats->max_errno) {
2197 u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32));
2198
2199 if (new_errnos) {
2200 memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32));
2201 } else {
2202 pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n",
2203 thread__comm_str(thread), thread->pid_, thread->tid);
2204 return;
2205 }
2206
2207 stats->errnos = new_errnos;
2208 stats->max_errno = err;
2209 }
2210
2211 ++stats->errnos[err - 1];
2212 }
2213}
2214
2215static int trace__printf_interrupted_entry(struct trace *trace)
2216{
2217 struct thread_trace *ttrace;
2218 size_t printed;
2219 int len;
2220
2221 if (trace->failure_only || trace->current == NULL)
2222 return 0;
2223
2224 ttrace = thread__priv(trace->current);
2225
2226 if (!ttrace->entry_pending)
2227 return 0;
2228
2229 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
2230 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2231
2232 if (len < trace->args_alignment - 4)
2233 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2234
2235 printed += fprintf(trace->output, " ...\n");
2236
2237 ttrace->entry_pending = false;
2238 ++trace->nr_events_printed;
2239
2240 return printed;
2241}
2242
2243static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
2244 struct perf_sample *sample, struct thread *thread)
2245{
2246 int printed = 0;
2247
2248 if (trace->print_sample) {
2249 double ts = (double)sample->time / NSEC_PER_MSEC;
2250
2251 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2252 evsel__name(evsel), ts,
2253 thread__comm_str(thread),
2254 sample->pid, sample->tid, sample->cpu);
2255 }
2256
2257 return printed;
2258}
2259
2260static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
2261{
2262 void *augmented_args = NULL;
2263 /*
2264	 * For now, with BPF raw_augmented, we hook into raw_syscalls:sys_enter,
2265	 * where we get all 6 syscall args plus the tracepoint common fields
2266	 * (calculated at the start) and the syscall_nr (another long).
2267	 * So we check if that is the case and, if so, don't look for the
2268	 * augmented args after sc->args_size but after the full
2269	 * raw_syscalls:sys_enter payload, which has a fixed size.
2270	 *
2271	 * We'll revisit this later to pass sc->args_size to the BPF augmenter
2272	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
2273	 * copies only what we need for each syscall, like what happens when we
2274	 * use syscalls:sys_enter_NAME, reducing the kernel/userspace traffic
2275	 * to just what is needed for each syscall.
2276 */
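	/*
	 * Rough payload sketch (illustrative; common-field sizes vary per kernel):
	 *
	 *   raw_data: [ common fields | syscall_nr | up to 6 args ][ augmented args ... ]
	 *             '<-------------- args_size ---------------->'
	 */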
2277 int args_size = raw_augmented_args_size ?: sc->args_size;
2278
2279 *augmented_args_size = sample->raw_size - args_size;
2280 if (*augmented_args_size > 0)
2281 augmented_args = sample->raw_data + args_size;
2282
2283 return augmented_args;
2284}
2285
2286static void syscall__exit(struct syscall *sc)
2287{
2288 if (!sc)
2289 return;
2290
2291 free(sc->arg_fmt);
2292}
2293
2294static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
2295 union perf_event *event __maybe_unused,
2296 struct perf_sample *sample)
2297{
2298 char *msg;
2299 void *args;
2300 int printed = 0;
2301 struct thread *thread;
2302 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2303 int augmented_args_size = 0;
2304 void *augmented_args = NULL;
2305 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2306 struct thread_trace *ttrace;
2307
2308 if (sc == NULL)
2309 return -1;
2310
2311 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2312 ttrace = thread__trace(thread, trace->output);
2313 if (ttrace == NULL)
2314 goto out_put;
2315
2316 trace__fprintf_sample(trace, evsel, sample, thread);
2317
2318 args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2319
2320 if (ttrace->entry_str == NULL) {
2321 ttrace->entry_str = malloc(trace__entry_str_size);
2322 if (!ttrace->entry_str)
2323 goto out_put;
2324 }
2325
2326 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2327 trace__printf_interrupted_entry(trace);
2328 /*
2329	 * If this is raw_syscalls.sys_enter, then it always comes with the 6 possible
2330	 * arguments, even if the syscall being handled, say "openat", uses only 4.
2331	 * That breaks the syscall__augmented_args() check for augmented args, as we
2332	 * calculate syscall->args_size using each syscalls:sys_enter_NAME tracefs
2333	 * format file. So when handling, say, the openat syscall, we would get 6 args
2334	 * for the raw_syscalls:sys_enter event when we expected just 4, and mistakenly
2335	 * think the extra 2 u64 args are the augmented filename. So just check here and
2336	 * avoid using augmented args when the evsel is the raw_syscalls one.
2337 */
2338 if (evsel != trace->syscalls.events.sys_enter)
2339 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2340 ttrace->entry_time = sample->time;
2341 msg = ttrace->entry_str;
2342 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
2343
2344 printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
2345 args, augmented_args, augmented_args_size, trace, thread);
2346
2347 if (sc->is_exit) {
2348 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2349 int alignment = 0;
2350
2351 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2352 printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2353 if (trace->args_alignment > printed)
2354 alignment = trace->args_alignment - printed;
2355 fprintf(trace->output, "%*s= ?\n", alignment, " ");
2356 }
2357 } else {
2358 ttrace->entry_pending = true;
2359 /* See trace__vfs_getname & trace__sys_exit */
2360 ttrace->filename.pending_open = false;
2361 }
2362
2363 if (trace->current != thread) {
2364 thread__put(trace->current);
2365 trace->current = thread__get(thread);
2366 }
2367 err = 0;
2368out_put:
2369 thread__put(thread);
2370 return err;
2371}
2372
2373static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
2374 struct perf_sample *sample)
2375{
2376 struct thread_trace *ttrace;
2377 struct thread *thread;
2378 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2379 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2380 char msg[1024];
2381 void *args, *augmented_args = NULL;
2382 int augmented_args_size;
2383
2384 if (sc == NULL)
2385 return -1;
2386
2387 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2388 ttrace = thread__trace(thread, trace->output);
2389 /*
2390	 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
2391	 * and the rest of the beautifiers touch it via struct syscall_arg.
2392 */
2393 if (ttrace == NULL)
2394 goto out_put;
2395
2396 args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2397 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2398 syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
2399 fprintf(trace->output, "%s", msg);
2400 err = 0;
2401out_put:
2402 thread__put(thread);
2403 return err;
2404}
2405
2406static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
2407 struct perf_sample *sample,
2408 struct callchain_cursor *cursor)
2409{
2410 struct addr_location al;
2411 int max_stack = evsel->core.attr.sample_max_stack ?
2412 evsel->core.attr.sample_max_stack :
2413 trace->max_stack;
2414 int err;
2415
2416 if (machine__resolve(trace->host, &al, sample) < 0)
2417 return -1;
2418
2419 err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
2420 addr_location__put(&al);
2421 return err;
2422}
2423
2424static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
2425{
2426 /* TODO: user-configurable print_opts */
2427 const unsigned int print_opts = EVSEL__PRINT_SYM |
2428 EVSEL__PRINT_DSO |
2429 EVSEL__PRINT_UNKNOWN_AS_ADDR;
2430
2431 return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
2432}
2433
2434static const char *errno_to_name(struct evsel *evsel, int err)
2435{
2436 struct perf_env *env = evsel__env(evsel);
2437 const char *arch_name = perf_env__arch(env);
2438
2439 return arch_syscalls__strerrno(arch_name, err);
2440}
2441
2442static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
2443 union perf_event *event __maybe_unused,
2444 struct perf_sample *sample)
2445{
2446 long ret;
2447 u64 duration = 0;
2448 bool duration_calculated = false;
2449 struct thread *thread;
2450 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
2451 int alignment = trace->args_alignment;
2452 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2453 struct thread_trace *ttrace;
2454
2455 if (sc == NULL)
2456 return -1;
2457
2458 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2459 ttrace = thread__trace(thread, trace->output);
2460 if (ttrace == NULL)
2461 goto out_put;
2462
2463 trace__fprintf_sample(trace, evsel, sample, thread);
2464
2465 ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
2466
2467 if (trace->summary)
2468 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);
2469
2470 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2471 trace__set_fd_pathname(thread, ret, ttrace->filename.name);
2472 ttrace->filename.pending_open = false;
2473 ++trace->stats.vfs_getname;
2474 }
2475
2476 if (ttrace->entry_time) {
2477 duration = sample->time - ttrace->entry_time;
2478 if (trace__filter_duration(trace, duration))
2479 goto out;
2480 duration_calculated = true;
2481 } else if (trace->duration_filter)
2482 goto out;
2483
2484 if (sample->callchain) {
2485 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2486 if (callchain_ret == 0) {
2487 if (callchain_cursor.nr < trace->min_stack)
2488 goto out;
2489 callchain_ret = 1;
2490 }
2491 }
2492
2493 if (trace->summary_only || (ret >= 0 && trace->failure_only))
2494 goto out;
2495
2496 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
2497
2498 if (ttrace->entry_pending) {
2499 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2500 } else {
2501 printed += fprintf(trace->output, " ... [");
2502 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2503 printed += 9;
2504 printed += fprintf(trace->output, "]: %s()", sc->name);
2505 }
2506
2507 printed++; /* the closing ')' */
2508
2509 if (alignment > printed)
2510 alignment -= printed;
2511 else
2512 alignment = 0;
2513
2514 fprintf(trace->output, ")%*s= ", alignment, " ");
2515
2516 if (sc->fmt == NULL) {
2517 if (ret < 0)
2518 goto errno_print;
2519signed_print:
2520 fprintf(trace->output, "%ld", ret);
2521 } else if (ret < 0) {
2522errno_print: {
2523 char bf[STRERR_BUFSIZE];
2524 const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
2525 *e = errno_to_name(evsel, -ret);
2526
2527 fprintf(trace->output, "-1 %s (%s)", e, emsg);
2528 }
2529 } else if (ret == 0 && sc->fmt->timeout)
2530 fprintf(trace->output, "0 (Timeout)");
2531 else if (ttrace->ret_scnprintf) {
2532 char bf[1024];
2533 struct syscall_arg arg = {
2534 .val = ret,
2535 .thread = thread,
2536 .trace = trace,
2537 };
2538 ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
2539 ttrace->ret_scnprintf = NULL;
2540 fprintf(trace->output, "%s", bf);
2541 } else if (sc->fmt->hexret)
2542 fprintf(trace->output, "%#lx", ret);
2543 else if (sc->fmt->errpid) {
2544 struct thread *child = machine__find_thread(trace->host, ret, ret);
2545
2546 if (child != NULL) {
2547 fprintf(trace->output, "%ld", ret);
2548 if (child->comm_set)
2549 fprintf(trace->output, " (%s)", thread__comm_str(child));
2550 thread__put(child);
2551 }
2552 } else
2553 goto signed_print;
2554
2555 fputc('\n', trace->output);
2556
2557 /*
2558	 * For the sake of --max-events, we only count as an 'event' a non-filtered
2559	 * sys_enter + sys_exit pair and other tracepoint events.
2560 */
2561 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2562 interrupted = true;
2563
2564 if (callchain_ret > 0)
2565 trace__fprintf_callchain(trace, sample);
2566 else if (callchain_ret < 0)
2567 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
2568out:
2569 ttrace->entry_pending = false;
2570 err = 0;
2571out_put:
2572 thread__put(thread);
2573 return err;
2574}
2575
2576static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
2577 union perf_event *event __maybe_unused,
2578 struct perf_sample *sample)
2579{
2580 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2581 struct thread_trace *ttrace;
2582 size_t filename_len, entry_str_len, to_move;
2583 ssize_t remaining_space;
2584 char *pos;
2585 const char *filename = evsel__rawptr(evsel, sample, "pathname");
2586
2587 if (!thread)
2588 goto out;
2589
2590 ttrace = thread__priv(thread);
2591 if (!ttrace)
2592 goto out_put;
2593
2594 filename_len = strlen(filename);
2595 if (filename_len == 0)
2596 goto out_put;
2597
2598 if (ttrace->filename.namelen < filename_len) {
2599 char *f = realloc(ttrace->filename.name, filename_len + 1);
2600
2601 if (f == NULL)
2602 goto out_put;
2603
2604 ttrace->filename.namelen = filename_len;
2605 ttrace->filename.name = f;
2606 }
2607
2608 strcpy(ttrace->filename.name, filename);
2609 ttrace->filename.pending_open = true;
2610
2611 if (!ttrace->filename.ptr)
2612 goto out_put;
2613
2614 entry_str_len = strlen(ttrace->entry_str);
2615 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
2616 if (remaining_space <= 0)
2617 goto out_put;
2618
2619 if (filename_len > (size_t)remaining_space) {
2620 filename += filename_len - remaining_space;
2621 filename_len = remaining_space;
2622 }
2623
2624 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
2625 pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
2626 memmove(pos + filename_len, pos, to_move);
2627 memcpy(pos, filename, filename_len);
2628
2629 ttrace->filename.ptr = 0;
2630 ttrace->filename.entry_str_pos = 0;
2631out_put:
2632 thread__put(thread);
2633out:
2634 return 0;
2635}
2636
2637static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
2638 union perf_event *event __maybe_unused,
2639 struct perf_sample *sample)
2640{
2641 u64 runtime = evsel__intval(evsel, sample, "runtime");
2642 double runtime_ms = (double)runtime / NSEC_PER_MSEC;
2643 struct thread *thread = machine__findnew_thread(trace->host,
2644 sample->pid,
2645 sample->tid);
2646 struct thread_trace *ttrace = thread__trace(thread, trace->output);
2647
2648 if (ttrace == NULL)
2649 goto out_dump;
2650
2651 ttrace->runtime_ms += runtime_ms;
2652 trace->runtime_ms += runtime_ms;
2653out_put:
2654 thread__put(thread);
2655 return 0;
2656
2657out_dump:
2658 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
2659 evsel->name,
2660 evsel__strval(evsel, sample, "comm"),
2661 (pid_t)evsel__intval(evsel, sample, "pid"),
2662 runtime,
2663 evsel__intval(evsel, sample, "vruntime"));
2664 goto out_put;
2665}
2666
2667static int bpf_output__printer(enum binary_printer_ops op,
2668 unsigned int val, void *extra __maybe_unused, FILE *fp)
2669{
2670 unsigned char ch = (unsigned char)val;
2671
2672 switch (op) {
2673 case BINARY_PRINT_CHAR_DATA:
2674 return fprintf(fp, "%c", isprint(ch) ? ch : '.');
2675 case BINARY_PRINT_DATA_BEGIN:
2676 case BINARY_PRINT_LINE_BEGIN:
2677 case BINARY_PRINT_ADDR:
2678 case BINARY_PRINT_NUM_DATA:
2679 case BINARY_PRINT_NUM_PAD:
2680 case BINARY_PRINT_SEP:
2681 case BINARY_PRINT_CHAR_PAD:
2682 case BINARY_PRINT_LINE_END:
2683 case BINARY_PRINT_DATA_END:
2684 default:
2685 break;
2686 }
2687
2688 return 0;
2689}
2690
2691static void bpf_output__fprintf(struct trace *trace,
2692 struct perf_sample *sample)
2693{
2694 binary__fprintf(sample->raw_data, sample->raw_size, 8,
2695 bpf_output__printer, NULL, trace->output);
2696 ++trace->nr_events_printed;
2697}
2698
2699static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
2700 struct thread *thread, void *augmented_args, int augmented_args_size)
2701{
2702 char bf[2048];
2703 size_t size = sizeof(bf);
2704 struct tep_format_field *field = evsel->tp_format->format.fields;
2705 struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel);
2706 size_t printed = 0;
2707 unsigned long val;
2708 u8 bit = 1;
2709 struct syscall_arg syscall_arg = {
2710 .augmented = {
2711 .size = augmented_args_size,
2712 .args = augmented_args,
2713 },
2714 .idx = 0,
2715 .mask = 0,
2716 .trace = trace,
2717 .thread = thread,
2718 .show_string_prefix = trace->show_string_prefix,
2719 };
2720
2721 for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) {
2722 if (syscall_arg.mask & bit)
2723 continue;
2724
2725 syscall_arg.len = 0;
2726 syscall_arg.fmt = arg;
2727 if (field->flags & TEP_FIELD_IS_ARRAY) {
2728 int offset = field->offset;
2729
2730 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
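				/*
				 * Dynamic array fields encode "len << 16 | offset" in their
				 * 32-bit value, so split it here to locate the payload within
				 * raw_data.
				 */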
2731 offset = format_field__intval(field, sample, evsel->needs_swap);
2732 syscall_arg.len = offset >> 16;
2733 offset &= 0xffff;
2734#ifdef HAVE_LIBTRACEEVENT_TEP_FIELD_IS_RELATIVE
2735 if (field->flags & TEP_FIELD_IS_RELATIVE)
2736 offset += field->offset + field->size;
2737#endif
2738 }
2739
2740 val = (uintptr_t)(sample->raw_data + offset);
2741 } else
2742 val = format_field__intval(field, sample, evsel->needs_swap);
2743 /*
2744 * Some syscall args need some mask, most don't and
2745 * return val untouched.
2746 */
2747 val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);
2748
2749 /*
2750 * Suppress this argument if its value is zero and
2751		 * we don't have a string associated in a
2752 * strarray for it.
2753 */
2754 if (val == 0 &&
2755 !trace->show_zeros &&
2756 !((arg->show_zero ||
2757 arg->scnprintf == SCA_STRARRAY ||
2758 arg->scnprintf == SCA_STRARRAYS) &&
2759 arg->parm))
2760 continue;
2761
2762 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
2763
2764 if (trace->show_arg_names)
2765 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
2766
2767 printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
2768 }
2769
2770 return printed + fprintf(trace->output, "%s", bf);
2771}
2772
2773static int trace__event_handler(struct trace *trace, struct evsel *evsel,
2774 union perf_event *event __maybe_unused,
2775 struct perf_sample *sample)
2776{
2777 struct thread *thread;
2778 int callchain_ret = 0;
2779 /*
2780 * Check if we called perf_evsel__disable(evsel) due to, for instance,
2781 * this event's max_events having been hit and this is an entry coming
2782 * from the ring buffer that we should discard, since the max events
2783 * have already been considered/printed.
2784 */
2785 if (evsel->disabled)
2786 return 0;
2787
2788 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2789
2790 if (sample->callchain) {
2791 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2792 if (callchain_ret == 0) {
2793 if (callchain_cursor.nr < trace->min_stack)
2794 goto out;
2795 callchain_ret = 1;
2796 }
2797 }
2798
2799 trace__printf_interrupted_entry(trace);
2800 trace__fprintf_tstamp(trace, sample->time, trace->output);
2801
2802 if (trace->trace_syscalls && trace->show_duration)
2803 fprintf(trace->output, "( ): ");
2804
2805 if (thread)
2806 trace__fprintf_comm_tid(trace, thread, trace->output);
2807
2808 if (evsel == trace->syscalls.events.augmented) {
2809 int id = perf_evsel__sc_tp_uint(evsel, id, sample);
2810 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2811
2812 if (sc) {
2813 fprintf(trace->output, "%s(", sc->name);
2814 trace__fprintf_sys_enter(trace, evsel, sample);
2815 fputc(')', trace->output);
2816 goto newline;
2817 }
2818
2819 /*
2820 * XXX: Not having the associated syscall info or not finding/adding
2821 * the thread should never happen, but if it does...
2822 * fall thru and print it as a bpf_output event.
2823 */
2824 }
2825
2826 fprintf(trace->output, "%s(", evsel->name);
2827
2828 if (evsel__is_bpf_output(evsel)) {
2829 bpf_output__fprintf(trace, sample);
2830 } else if (evsel->tp_format) {
2831 if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
2832 trace__fprintf_sys_enter(trace, evsel, sample)) {
2833 if (trace->libtraceevent_print) {
2834 event_format__fprintf(evsel->tp_format, sample->cpu,
2835 sample->raw_data, sample->raw_size,
2836 trace->output);
2837 } else {
2838 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
2839 }
2840 }
2841 }
2842
2843newline:
2844 fprintf(trace->output, ")\n");
2845
2846 if (callchain_ret > 0)
2847 trace__fprintf_callchain(trace, sample);
2848 else if (callchain_ret < 0)
2849 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
2850
2851 ++trace->nr_events_printed;
2852
2853 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
2854 evsel__disable(evsel);
2855 evsel__close(evsel);
2856 }
2857out:
2858 thread__put(thread);
2859 return 0;
2860}
2861
2862static void print_location(FILE *f, struct perf_sample *sample,
2863 struct addr_location *al,
2864 bool print_dso, bool print_sym)
2865{
2866
2867 if ((verbose > 0 || print_dso) && al->map)
2868 fprintf(f, "%s@", al->map->dso->long_name);
2869
2870 if ((verbose > 0 || print_sym) && al->sym)
2871 fprintf(f, "%s+0x%" PRIx64, al->sym->name,
2872 al->addr - al->sym->start);
2873 else if (al->map)
2874 fprintf(f, "0x%" PRIx64, al->addr);
2875 else
2876 fprintf(f, "0x%" PRIx64, sample->addr);
2877}
2878
2879static int trace__pgfault(struct trace *trace,
2880 struct evsel *evsel,
2881 union perf_event *event __maybe_unused,
2882 struct perf_sample *sample)
2883{
2884 struct thread *thread;
2885 struct addr_location al;
2886 char map_type = 'd';
2887 struct thread_trace *ttrace;
2888 int err = -1;
2889 int callchain_ret = 0;
2890
2891 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2892
2893 if (sample->callchain) {
2894 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2895 if (callchain_ret == 0) {
2896 if (callchain_cursor.nr < trace->min_stack)
2897 goto out_put;
2898 callchain_ret = 1;
2899 }
2900 }
2901
2902 ttrace = thread__trace(thread, trace->output);
2903 if (ttrace == NULL)
2904 goto out_put;
2905
2906 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
2907 ttrace->pfmaj++;
2908 else
2909 ttrace->pfmin++;
2910
2911 if (trace->summary_only)
2912 goto out;
2913
2914 thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
2915
2916 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2917
2918 fprintf(trace->output, "%sfault [",
2919 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
2920 "maj" : "min");
2921
2922 print_location(trace->output, sample, &al, false, true);
2923
2924 fprintf(trace->output, "] => ");
2925
2926 thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2927
2928 if (!al.map) {
2929 thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2930
2931 if (al.map)
2932 map_type = 'x';
2933 else
2934 map_type = '?';
2935 }
2936
2937 print_location(trace->output, sample, &al, true, false);
2938
2939 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2940
2941 if (callchain_ret > 0)
2942 trace__fprintf_callchain(trace, sample);
2943 else if (callchain_ret < 0)
2944 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
2945
2946 ++trace->nr_events_printed;
2947out:
2948 err = 0;
2949out_put:
2950 thread__put(thread);
2951 return err;
2952}
2953
2954static void trace__set_base_time(struct trace *trace,
2955 struct evsel *evsel,
2956 struct perf_sample *sample)
2957{
2958 /*
2959 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
2960	 * and don't use sample->time unconditionally; we may end up having
2961 * some other event in the future without PERF_SAMPLE_TIME for good
2962 * reason, i.e. we may not be interested in its timestamps, just in
2963 * it taking place, picking some piece of information when it
2964 * appears in our event stream (vfs_getname comes to mind).
2965 */
2966 if (trace->base_time == 0 && !trace->full_time &&
2967 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
2968 trace->base_time = sample->time;
2969}
2970
2971static int trace__process_sample(struct perf_tool *tool,
2972 union perf_event *event,
2973 struct perf_sample *sample,
2974 struct evsel *evsel,
2975 struct machine *machine __maybe_unused)
2976{
2977 struct trace *trace = container_of(tool, struct trace, tool);
2978 struct thread *thread;
2979 int err = 0;
2980
2981 tracepoint_handler handler = evsel->handler;
2982
2983 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2984 if (thread && thread__is_filtered(thread))
2985 goto out;
2986
2987 trace__set_base_time(trace, evsel, sample);
2988
2989 if (handler) {
2990 ++trace->nr_events;
2991 handler(trace, evsel, event, sample);
2992 }
2993out:
2994 thread__put(thread);
2995 return err;
2996}
2997
2998static int trace__record(struct trace *trace, int argc, const char **argv)
2999{
3000 unsigned int rec_argc, i, j;
3001 const char **rec_argv;
3002 const char * const record_args[] = {
3003 "record",
3004 "-R",
3005 "-m", "1024",
3006 "-c", "1",
3007 };
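	/*
	 * A sketch of what this ends up running (illustrative; the event and
	 * filter strings depend on the options and on the running kernel):
	 *
	 *   perf record -R -m 1024 -c 1 \
	 *        -e raw_syscalls:sys_enter,raw_syscalls:sys_exit \
	 *        --filter <pid filter> [-e major-faults] [-e minor-faults] <argv>
	 */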
3008 pid_t pid = getpid();
3009 char *filter = asprintf__tp_filter_pids(1, &pid);
3010 const char * const sc_args[] = { "-e", };
3011 unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
3012 const char * const majpf_args[] = { "-e", "major-faults" };
3013 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
3014 const char * const minpf_args[] = { "-e", "minor-faults" };
3015 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
3016 int err = -1;
3017
3018 /* +3 is for the event string below and the pid filter */
3019 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 +
3020 majpf_args_nr + minpf_args_nr + argc;
3021 rec_argv = calloc(rec_argc + 1, sizeof(char *));
3022
3023 if (rec_argv == NULL || filter == NULL)
3024 goto out_free;
3025
3026 j = 0;
3027 for (i = 0; i < ARRAY_SIZE(record_args); i++)
3028 rec_argv[j++] = record_args[i];
3029
3030 if (trace->trace_syscalls) {
3031 for (i = 0; i < sc_args_nr; i++)
3032 rec_argv[j++] = sc_args[i];
3033
3034 /* event string may be different for older kernels - e.g., RHEL6 */
3035 if (is_valid_tracepoint("raw_syscalls:sys_enter"))
3036 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
3037 else if (is_valid_tracepoint("syscalls:sys_enter"))
3038 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
3039 else {
3040 pr_err("Neither raw_syscalls nor syscalls events exist.\n");
3041 goto out_free;
3042 }
3043 }
3044
3045 rec_argv[j++] = "--filter";
3046 rec_argv[j++] = filter;
3047
3048 if (trace->trace_pgfaults & TRACE_PFMAJ)
3049 for (i = 0; i < majpf_args_nr; i++)
3050 rec_argv[j++] = majpf_args[i];
3051
3052 if (trace->trace_pgfaults & TRACE_PFMIN)
3053 for (i = 0; i < minpf_args_nr; i++)
3054 rec_argv[j++] = minpf_args[i];
3055
3056 for (i = 0; i < (unsigned int)argc; i++)
3057 rec_argv[j++] = argv[i];
3058
3059 err = cmd_record(j, rec_argv);
3060out_free:
3061 free(filter);
3062 free(rec_argv);
3063 return err;
3064}
3065
3066static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3067
3068static bool evlist__add_vfs_getname(struct evlist *evlist)
3069{
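	/*
	 * This only finds something if a vfs_getname probe was set up beforehand,
	 * e.g. with something like (illustrative, the line/offset varies per kernel):
	 *
	 *   perf probe 'vfs_getname=getname_flags:72 pathname=result->name:string'
	 */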
3070 bool found = false;
3071 struct evsel *evsel, *tmp;
3072 struct parse_events_error err;
3073 int ret;
3074
3075 parse_events_error__init(&err);
3076 ret = parse_events(evlist, "probe:vfs_getname*", &err);
3077 parse_events_error__exit(&err);
3078 if (ret)
3079 return false;
3080
3081 evlist__for_each_entry_safe(evlist, evsel, tmp) {
3082 if (!strstarts(evsel__name(evsel), "probe:vfs_getname"))
3083 continue;
3084
3085 if (evsel__field(evsel, "pathname")) {
3086 evsel->handler = trace__vfs_getname;
3087 found = true;
3088 continue;
3089 }
3090
3091 list_del_init(&evsel->core.node);
3092 evsel->evlist = NULL;
3093 evsel__delete(evsel);
3094 }
3095
3096 return found;
3097}
3098
3099static struct evsel *evsel__new_pgfault(u64 config)
3100{
3101 struct evsel *evsel;
3102 struct perf_event_attr attr = {
3103 .type = PERF_TYPE_SOFTWARE,
3104 .mmap_data = 1,
3105 };
3106
3107 attr.config = config;
3108 attr.sample_period = 1;
3109
3110 event_attr_init(&attr);
3111
3112 evsel = evsel__new(&attr);
3113 if (evsel)
3114 evsel->handler = trace__pgfault;
3115
3116 return evsel;
3117}
3118
3119static void evlist__free_syscall_tp_fields(struct evlist *evlist)
3120{
3121 struct evsel *evsel;
3122
3123 evlist__for_each_entry(evlist, evsel) {
3124 struct evsel_trace *et = evsel->priv;
3125
3126 if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls"))
3127 continue;
3128
3129 free(et->fmt);
3130 free(et);
3131 }
3132}
3133
3134static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
3135{
3136 const u32 type = event->header.type;
3137 struct evsel *evsel;
3138
3139 if (type != PERF_RECORD_SAMPLE) {
3140 trace__process_event(trace, trace->host, event, sample);
3141 return;
3142 }
3143
3144 evsel = evlist__id2evsel(trace->evlist, sample->id);
3145 if (evsel == NULL) {
3146 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
3147 return;
3148 }
3149
3150 if (evswitch__discard(&trace->evswitch, evsel))
3151 return;
3152
3153 trace__set_base_time(trace, evsel, sample);
3154
3155 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
3156 sample->raw_data == NULL) {
3157 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
3158 evsel__name(evsel), sample->tid,
3159 sample->cpu, sample->raw_size);
3160 } else {
3161 tracepoint_handler handler = evsel->handler;
3162 handler(trace, evsel, event, sample);
3163 }
3164
3165 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
3166 interrupted = true;
3167}
3168
3169static int trace__add_syscall_newtp(struct trace *trace)
3170{
3171 int ret = -1;
3172 struct evlist *evlist = trace->evlist;
3173 struct evsel *sys_enter, *sys_exit;
3174
3175 sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
3176 if (sys_enter == NULL)
3177 goto out;
3178
3179 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
3180 goto out_delete_sys_enter;
3181
3182 sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
3183 if (sys_exit == NULL)
3184 goto out_delete_sys_enter;
3185
3186 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
3187 goto out_delete_sys_exit;
3188
3189 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
3190 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
3191
3192 evlist__add(evlist, sys_enter);
3193 evlist__add(evlist, sys_exit);
3194
3195 if (callchain_param.enabled && !trace->kernel_syscallchains) {
3196 /*
3197 * We're interested only in the user space callchain
3198		 * leading to the syscall; allow overriding that for
3199		 * debugging reasons using --kernel_syscall_callchains.
3200 */
3201 sys_exit->core.attr.exclude_callchain_kernel = 1;
3202 }
3203
3204 trace->syscalls.events.sys_enter = sys_enter;
3205 trace->syscalls.events.sys_exit = sys_exit;
3206
3207 ret = 0;
3208out:
3209 return ret;
3210
3211out_delete_sys_exit:
3212 evsel__delete_priv(sys_exit);
3213out_delete_sys_enter:
3214 evsel__delete_priv(sys_enter);
3215 goto out;
3216}
3217
3218static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
3219{
3220 int err = -1;
3221 struct evsel *sys_exit;
3222 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3223 trace->ev_qualifier_ids.nr,
3224 trace->ev_qualifier_ids.entries);
3225
3226 if (filter == NULL)
3227 goto out_enomem;
3228
3229 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3230 sys_exit = trace->syscalls.events.sys_exit;
3231 err = evsel__append_tp_filter(sys_exit, filter);
3232 }
3233
3234 free(filter);
3235out:
3236 return err;
3237out_enomem:
3238 errno = ENOMEM;
3239 goto out;
3240}
3241
3242#ifdef HAVE_LIBBPF_SUPPORT
3243static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
3244{
3245 if (trace->bpf_obj == NULL)
3246 return NULL;
3247
3248 return bpf_object__find_map_by_name(trace->bpf_obj, name);
3249}
3250
3251static void trace__set_bpf_map_filtered_pids(struct trace *trace)
3252{
3253 trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
3254}
3255
3256static void trace__set_bpf_map_syscalls(struct trace *trace)
3257{
3258 trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
3259 trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
3260}
3261
3262static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
3263{
3264 struct bpf_program *pos, *prog = NULL;
3265 const char *sec_name;
3266
3267 if (trace->bpf_obj == NULL)
3268 return NULL;
3269
3270 bpf_object__for_each_program(pos, trace->bpf_obj) {
3271 sec_name = bpf_program__section_name(pos);
3272 if (sec_name && !strcmp(sec_name, name)) {
3273 prog = pos;
3274 break;
3275 }
3276 }
3277
3278 return prog;
3279}
3280
3281static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
3282 const char *prog_name, const char *type)
3283{
3284 struct bpf_program *prog;
3285
3286 if (prog_name == NULL) {
3287 char default_prog_name[256];
3288 scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
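		/* e.g. "!syscalls:sys_enter_openat" when looking for the openat sys_enter augmenter */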
3289 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3290 if (prog != NULL)
3291 goto out_found;
3292 if (sc->fmt && sc->fmt->alias) {
3293 scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
3294 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3295 if (prog != NULL)
3296 goto out_found;
3297 }
3298 goto out_unaugmented;
3299 }
3300
3301 prog = trace__find_bpf_program_by_title(trace, prog_name);
3302
3303 if (prog != NULL) {
3304out_found:
3305 return prog;
3306 }
3307
3308 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
3309 prog_name, type, sc->name);
3310out_unaugmented:
3311 return trace->syscalls.unaugmented_prog;
3312}
3313
3314static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
3315{
3316 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3317
3318 if (sc == NULL)
3319 return;
3320
3321 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3322 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
3323}
3324
3325static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
3326{
3327 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3328 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
3329}
3330
3331static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
3332{
3333 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3334 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
3335}
3336
3337static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
3338{
3339 struct tep_format_field *field, *candidate_field;
3340 int id;
3341
3342 /*
3343 * We're only interested in syscalls that have a pointer:
3344 */
3345 for (field = sc->args; field; field = field->next) {
3346 if (field->flags & TEP_FIELD_IS_POINTER)
3347 goto try_to_find_pair;
3348 }
3349
3350 return NULL;
3351
3352try_to_find_pair:
3353 for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
3354 struct syscall *pair = trace__syscall_info(trace, NULL, id);
3355 struct bpf_program *pair_prog;
3356 bool is_candidate = false;
3357
3358 if (pair == NULL || pair == sc ||
3359 pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
3360 continue;
3361
3362 for (field = sc->args, candidate_field = pair->args;
3363 field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
3364 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
3365 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;
3366
3367 if (is_pointer) {
3368 if (!candidate_is_pointer) {
3369					// The candidate just doesn't copy our pointer arg, but it might copy other pointers we want.
3370 continue;
3371 }
3372 } else {
3373 if (candidate_is_pointer) {
3374 // The candidate might copy a pointer we don't have, skip it.
3375 goto next_candidate;
3376 }
3377 continue;
3378 }
3379
3380 if (strcmp(field->type, candidate_field->type))
3381 goto next_candidate;
3382
3383 is_candidate = true;
3384 }
3385
3386 if (!is_candidate)
3387 goto next_candidate;
3388
3389 /*
3390		 * Check if the tentative pair syscall augmenter has more pointers; if it has,
3391		 * it may be collecting those too and then we can't use it, as it would collect
3392 * more than what is common to the two syscalls.
3393 */
3394 if (candidate_field) {
3395 for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
3396 if (candidate_field->flags & TEP_FIELD_IS_POINTER)
3397 goto next_candidate;
3398 }
3399
3400 pair_prog = pair->bpf_prog.sys_enter;
3401 /*
3402 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
3403 * have been searched for, so search it here and if it returns the
3404 * unaugmented one, then ignore it, otherwise we'll reuse that BPF
3405 * program for a filtered syscall on a non-filtered one.
3406 *
3407 * For instance, we have "!syscalls:sys_enter_renameat" and that is
3408 * useful for "renameat2".
3409 */
3410 if (pair_prog == NULL) {
3411 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3412 if (pair_prog == trace->syscalls.unaugmented_prog)
3413 goto next_candidate;
3414 }
3415
3416 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
3417 return pair_prog;
3418 next_candidate:
3419 continue;
3420 }
3421
3422 return NULL;
3423}
3424
3425static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
3426{
3427 int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
3428 map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
3429 int err = 0, key;
3430
3431 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3432 int prog_fd;
3433
3434 if (!trace__syscall_enabled(trace, key))
3435 continue;
3436
3437 trace__init_syscall_bpf_progs(trace, key);
3438
3439 // It'll get at least the "!raw_syscalls:unaugmented"
3440 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
3441 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3442 if (err)
3443 break;
3444 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
3445 err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
3446 if (err)
3447 break;
3448 }
3449
3450 /*
3451	 * Now let's do a second pass looking for enabled syscalls without
3452 * an augmenter that have a signature that is a superset of another
3453 * syscall with an augmenter so that we can auto-reuse it.
3454 *
3455 * I.e. if we have an augmenter for the "open" syscall that has
3456 * this signature:
3457 *
3458 * int open(const char *pathname, int flags, mode_t mode);
3459 *
3460 * I.e. that will collect just the first string argument, then we
3461 * can reuse it for the 'creat' syscall, that has this signature:
3462 *
3463 * int creat(const char *pathname, mode_t mode);
3464 *
3465 * and for:
3466 *
3467 * int stat(const char *pathname, struct stat *statbuf);
3468 * int lstat(const char *pathname, struct stat *statbuf);
3469 *
3470 * Because the 'open' augmenter will collect the first arg as a string,
3471 * and leave alone all the other args, which already helps with
3472 * beautifying 'stat' and 'lstat''s pathname arg.
3473 *
3474	 * Then, in time, when 'stat' gets an augmenter that collects both
3475	 * first and second args (this one on the raw_syscalls:sys_exit prog
3476	 * array tail call), that one will be used.
3477 */
3478 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3479 struct syscall *sc = trace__syscall_info(trace, NULL, key);
3480 struct bpf_program *pair_prog;
3481 int prog_fd;
3482
3483 if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
3484 continue;
3485
3486 /*
3487 * For now we're just reusing the sys_enter prog, and if it
3488 * already has an augmenter, we don't need to find one.
3489 */
3490 if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
3491 continue;
3492
3493 /*
3494 * Look at all the other syscalls for one that has a signature
3495 * that is close enough that we can share:
3496 */
3497 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
3498 if (pair_prog == NULL)
3499 continue;
3500
3501 sc->bpf_prog.sys_enter = pair_prog;
3502
3503 /*
3504		 * Update the BPF_MAP_TYPE_PROG_ARRAY for raw_syscalls:sys_enter
3505 * with the fd for the program we're reusing:
3506 */
3507 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
3508 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3509 if (err)
3510 break;
3511 }
3512
3513
3514 return err;
3515}
3516
3517static void trace__delete_augmented_syscalls(struct trace *trace)
3518{
3519 struct evsel *evsel, *tmp;
3520
3521 evlist__remove(trace->evlist, trace->syscalls.events.augmented);
3522 evsel__delete(trace->syscalls.events.augmented);
3523 trace->syscalls.events.augmented = NULL;
3524
3525 evlist__for_each_entry_safe(trace->evlist, tmp, evsel) {
3526 if (evsel->bpf_obj == trace->bpf_obj) {
3527 evlist__remove(trace->evlist, evsel);
3528 evsel__delete(evsel);
3529 }
3530
3531 }
3532
3533 bpf_object__close(trace->bpf_obj);
3534 trace->bpf_obj = NULL;
3535}
3536#else // HAVE_LIBBPF_SUPPORT
3537static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
3538 const char *name __maybe_unused)
3539{
3540 return NULL;
3541}
3542
3543static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused)
3544{
3545}
3546
3547static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
3548{
3549}
3550
3551static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
3552 const char *name __maybe_unused)
3553{
3554 return NULL;
3555}
3556
3557static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
3558{
3559 return 0;
3560}
3561
3562static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused)
3563{
3564}
3565#endif // HAVE_LIBBPF_SUPPORT
3566
3567static bool trace__only_augmented_syscalls_evsels(struct trace *trace)
3568{
3569 struct evsel *evsel;
3570
3571 evlist__for_each_entry(trace->evlist, evsel) {
3572 if (evsel == trace->syscalls.events.augmented ||
3573 evsel->bpf_obj == trace->bpf_obj)
3574 continue;
3575
3576 return false;
3577 }
3578
3579 return true;
3580}
3581
3582static int trace__set_ev_qualifier_filter(struct trace *trace)
3583{
3584 if (trace->syscalls.events.sys_enter)
3585 return trace__set_ev_qualifier_tp_filter(trace);
3586 return 0;
3587}
3588
3589static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
3590 size_t npids __maybe_unused, pid_t *pids __maybe_unused)
3591{
3592 int err = 0;
3593#ifdef HAVE_LIBBPF_SUPPORT
3594 bool value = true;
3595 int map_fd = bpf_map__fd(map);
3596 size_t i;
3597
3598 for (i = 0; i < npids; ++i) {
3599 err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
3600 if (err)
3601 break;
3602 }
3603#endif
3604 return err;
3605}
3606
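/*
 * Filter out the pids that would otherwise generate a feedback loop: 'perf
 * trace' itself plus, when running under a terminal emulator or an ssh
 * session, the process that redraws the screen for every line we print,
 * which in turn performs more syscalls for us to trace, and so on. That is
 * why the loop below walks up the parents looking for sshd/gnome-terminal
 * and adds them to the list of pids to filter.
 */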
3607static int trace__set_filter_loop_pids(struct trace *trace)
3608{
3609 unsigned int nr = 1, err;
3610 pid_t pids[32] = {
3611 getpid(),
3612 };
3613 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3614
3615 while (thread && nr < ARRAY_SIZE(pids)) {
3616 struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
3617
3618 if (parent == NULL)
3619 break;
3620
3621 if (!strcmp(thread__comm_str(parent), "sshd") ||
3622 strstarts(thread__comm_str(parent), "gnome-terminal")) {
3623 pids[nr++] = parent->tid;
3624 break;
3625 }
3626 thread = parent;
3627 }
3628
3629 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
3630 if (!err && trace->filter_pids.map)
3631 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3632
3633 return err;
3634}
3635
3636static int trace__set_filter_pids(struct trace *trace)
3637{
3638 int err = 0;
3639 /*
3640 * Better not use !target__has_task() here because we need to cover the
3641 * case where no threads were specified in the command line, but a
3642 * workload was, and in that case we will fill in the thread_map when
3643 * we fork the workload in evlist__prepare_workload.
3644 */
3645 if (trace->filter_pids.nr > 0) {
3646 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3647 trace->filter_pids.entries);
3648 if (!err && trace->filter_pids.map) {
3649 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3650 trace->filter_pids.entries);
3651 }
3652 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3653 err = trace__set_filter_loop_pids(trace);
3654 }
3655
3656 return err;
3657}
3658
3659static int __trace__deliver_event(struct trace *trace, union perf_event *event)
3660{
3661 struct evlist *evlist = trace->evlist;
3662 struct perf_sample sample;
3663 int err = evlist__parse_sample(evlist, event, &sample);
3664
3665 if (err)
3666 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3667 else
3668 trace__handle_event(trace, event, &sample);
3669
3670 return 0;
3671}
3672
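/*
 * With --sort-events samples get queued in trace->oe.data and are only
 * flushed once they are at least one second (NSEC_PER_SEC) older than the
 * newest timestamp seen so far, on the assumption that nothing older than
 * that will still arrive from some other ring buffer.
 */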
3673static int __trace__flush_events(struct trace *trace)
3674{
3675 u64 first = ordered_events__first_time(&trace->oe.data);
3676 u64 flush = trace->oe.last - NSEC_PER_SEC;
3677
3678	/* Is there something to flush? */
3679 if (first && first < flush)
3680 return ordered_events__flush_time(&trace->oe.data, flush);
3681
3682 return 0;
3683}
3684
3685static int trace__flush_events(struct trace *trace)
3686{
3687 return !trace->sort_events ? 0 : __trace__flush_events(trace);
3688}
3689
3690static int trace__deliver_event(struct trace *trace, union perf_event *event)
3691{
3692 int err;
3693
3694 if (!trace->sort_events)
3695 return __trace__deliver_event(trace, event);
3696
3697 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3698 if (err && err != -1)
3699 return err;
3700
3701 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
3702 if (err)
3703 return err;
3704
3705 return trace__flush_events(trace);
3706}
3707
3708static int ordered_events__deliver_event(struct ordered_events *oe,
3709 struct ordered_event *event)
3710{
3711 struct trace *trace = container_of(oe, struct trace, oe.data);
3712
3713 return __trace__deliver_event(trace, event->event);
3714}
3715
3716static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg)
3717{
3718 struct tep_format_field *field;
3719 struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
3720
3721 if (evsel->tp_format == NULL || fmt == NULL)
3722 return NULL;
3723
3724 for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt)
3725 if (strcmp(field->name, arg) == 0)
3726 return fmt;
3727
3728 return NULL;
3729}
3730
3731static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
3732{
3733 char *tok, *left = evsel->filter, *new_filter = evsel->filter;
3734
3735 while ((tok = strpbrk(left, "=<>!")) != NULL) {
3736 char *right = tok + 1, *right_end;
3737
3738 if (*right == '=')
3739 ++right;
3740
3741 while (isspace(*right))
3742 ++right;
3743
3744 if (*right == '\0')
3745 break;
3746
3747 while (!isalpha(*left))
3748 if (++left == tok) {
3749 /*
3750				 * Bail out: we can't find the name of the argument being used in
3751				 * the filter, so let it try to set this filter as is; it will fail later.
3752 */
3753 return 0;
3754 }
3755
3756 right_end = right + 1;
3757 while (isalnum(*right_end) || *right_end == '_' || *right_end == '|')
3758 ++right_end;
3759
3760 if (isalpha(*right)) {
3761 struct syscall_arg_fmt *fmt;
3762 int left_size = tok - left,
3763 right_size = right_end - right;
3764 char arg[128];
3765
3766 while (isspace(left[left_size - 1]))
3767 --left_size;
3768
3769 scnprintf(arg, sizeof(arg), "%.*s", left_size, left);
3770
3771 fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg);
3772 if (fmt == NULL) {
3773 pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
3774 arg, evsel->name, evsel->filter);
3775 return -1;
3776 }
3777
3778 pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
3779 arg, (int)(right - tok), tok, right_size, right);
3780
3781 if (fmt->strtoul) {
3782 u64 val;
3783 struct syscall_arg syscall_arg = {
3784 .parm = fmt->parm,
3785 };
3786
3787 if (fmt->strtoul(right, right_size, &syscall_arg, &val)) {
3788 char *n, expansion[19];
3789					int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val);
3790 int expansion_offset = right - new_filter;
3791
3792 pr_debug("%s", expansion);
3793
3794 if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) {
3795 pr_debug(" out of memory!\n");
3796 free(new_filter);
3797 return -1;
3798 }
3799 if (new_filter != evsel->filter)
3800 free(new_filter);
3801					left = n + expansion_offset + expansion_length;
3802 new_filter = n;
3803 } else {
3804 pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n",
3805 right_size, right, arg, evsel->name, evsel->filter);
3806 return -1;
3807 }
3808 } else {
3809 pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n",
3810 arg, evsel->name, evsel->filter);
3811 return -1;
3812 }
3813
3814 pr_debug("\n");
3815 } else {
3816 left = right_end;
3817 }
3818 }
3819
3820 if (new_filter != evsel->filter) {
3821 pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
3822 evsel__set_filter(evsel, new_filter);
3823 free(new_filter);
3824 }
3825
3826 return 0;
3827}
3828
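/*
 * Example of the expansion performed by trace__expand_filter() on each evsel
 * below (illustrative, the msr tracepoints are just one of the users of the
 * ->strtoul() resolvers):
 *
 *	perf trace -e msr:write_msr --filter="msr==MSR_FS_BASE"
 *
 * ends up with the tracepoint filter rewritten to something like
 * "msr==0xc0000100" before being handed to the kernel, which understands
 * only numeric literals.
 */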
3829static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
3830{
3831 struct evlist *evlist = trace->evlist;
3832 struct evsel *evsel;
3833
3834 evlist__for_each_entry(evlist, evsel) {
3835 if (evsel->filter == NULL)
3836 continue;
3837
3838 if (trace__expand_filter(trace, evsel)) {
3839 *err_evsel = evsel;
3840 return -1;
3841 }
3842 }
3843
3844 return 0;
3845}
3846
3847static int trace__run(struct trace *trace, int argc, const char **argv)
3848{
3849 struct evlist *evlist = trace->evlist;
3850 struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
3851 int err = -1, i;
3852 unsigned long before;
3853 const bool forks = argc > 0;
3854 bool draining = false;
3855
3856 trace->live = true;
3857
3858 if (!trace->raw_augmented_syscalls) {
3859 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
3860 goto out_error_raw_syscalls;
3861
3862 if (trace->trace_syscalls)
3863 trace->vfs_getname = evlist__add_vfs_getname(evlist);
3864 }
3865
3866 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
3867 pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
3868 if (pgfault_maj == NULL)
3869 goto out_error_mem;
3870 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
3871 evlist__add(evlist, pgfault_maj);
3872 }
3873
3874 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
3875 pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
3876 if (pgfault_min == NULL)
3877 goto out_error_mem;
3878 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
3879 evlist__add(evlist, pgfault_min);
3880 }
3881
3882 /* Enable ignoring missing threads when -u/-p option is defined. */
3883 trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid;
3884
3885 if (trace->sched &&
3886 evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
3887 goto out_error_sched_stat_runtime;
3888 /*
3889 * If a global cgroup was set, apply it to all the events without an
3890 * explicit cgroup. I.e.:
3891 *
3892 * trace -G A -e sched:*switch
3893 *
3894 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
3895 * _and_ sched:sched_switch to the 'A' cgroup, while:
3896 *
3897 * trace -e sched:*switch -G A
3898 *
3899 * will only set the sched:sched_switch event to the 'A' cgroup, all the
3900	 * other events (raw_syscalls:sys_{enter,exit}, etc) are left "without"
3901 * a cgroup (on the root cgroup, sys wide, etc).
3902 *
3903 * Multiple cgroups:
3904 *
3905 * trace -G A -e sched:*switch -G B
3906 *
3907 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
3908 * to the 'B' cgroup.
3909 *
3910 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
3911 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
3912 */
3913 if (trace->cgroup)
3914 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
3915
3916 err = evlist__create_maps(evlist, &trace->opts.target);
3917 if (err < 0) {
3918 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
3919 goto out_delete_evlist;
3920 }
3921
3922 err = trace__symbols_init(trace, evlist);
3923 if (err < 0) {
3924 fprintf(trace->output, "Problems initializing symbol libraries!\n");
3925 goto out_delete_evlist;
3926 }
3927
3928 evlist__config(evlist, &trace->opts, &callchain_param);
3929
3930 if (forks) {
3931 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
3932 if (err < 0) {
3933 fprintf(trace->output, "Couldn't run the workload!\n");
3934 goto out_delete_evlist;
3935 }
3936 workload_pid = evlist->workload.pid;
3937 }
3938
3939 err = evlist__open(evlist);
3940 if (err < 0)
3941 goto out_error_open;
3942
3943 err = bpf__apply_obj_config();
3944 if (err) {
3945 char errbuf[BUFSIZ];
3946
3947 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
3948 pr_err("ERROR: Apply config to BPF failed: %s\n",
3949 errbuf);
3950 goto out_error_open;
3951 }
3952
3953 err = trace__set_filter_pids(trace);
3954 if (err < 0)
3955 goto out_error_mem;
3956
3957 if (trace->syscalls.prog_array.sys_enter)
3958 trace__init_syscalls_bpf_prog_array_maps(trace);
3959
3960 if (trace->ev_qualifier_ids.nr > 0) {
3961 err = trace__set_ev_qualifier_filter(trace);
3962 if (err < 0)
3963 goto out_errno;
3964
3965 if (trace->syscalls.events.sys_exit) {
3966 pr_debug("event qualifier tracepoint filter: %s\n",
3967 trace->syscalls.events.sys_exit->filter);
3968 }
3969 }
3970
3971 /*
3972 * If the "close" syscall is not traced, then we will not have the
3973	 * opportunity to, in syscall_arg__scnprintf_close_fd(), invalidate the
3974	 * fd->pathname table and would end up showing the last value set by
3975 * syscalls opening a pathname and associating it with a descriptor or
3976 * reading it from /proc/pid/fd/ in cases where that doesn't make
3977 * sense.
3978 *
3979 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
3980 * not in use.
3981 */
3982 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
3983
3984 err = trace__expand_filters(trace, &evsel);
3985 if (err)
3986 goto out_delete_evlist;
3987 err = evlist__apply_filters(evlist, &evsel);
3988 if (err < 0)
3989 goto out_error_apply_filters;
3990
3991 if (trace->dump.map)
3992 bpf_map__fprintf(trace->dump.map, trace->output);
3993
3994 err = evlist__mmap(evlist, trace->opts.mmap_pages);
3995 if (err < 0)
3996 goto out_error_mmap;
3997
3998 if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
3999 evlist__enable(evlist);
4000
4001 if (forks)
4002 evlist__start_workload(evlist);
4003
4004 if (trace->opts.initial_delay) {
4005 usleep(trace->opts.initial_delay * 1000);
4006 evlist__enable(evlist);
4007 }
4008
4009 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
4010 perf_thread_map__nr(evlist->core.threads) > 1 ||
4011 evlist__first(evlist)->core.attr.inherit;
4012
4013 /*
4014 * Now that we already used evsel->core.attr to ask the kernel to setup the
4015	 * events, let's reuse evsel->core.attr.sample_max_stack as the limit in
4016 * trace__resolve_callchain(), allowing per-event max-stack settings
4017 * to override an explicitly set --max-stack global setting.
4018 */
4019 evlist__for_each_entry(evlist, evsel) {
4020 if (evsel__has_callchain(evsel) &&
4021 evsel->core.attr.sample_max_stack == 0)
4022 evsel->core.attr.sample_max_stack = trace->max_stack;
4023 }
4024again:
4025 before = trace->nr_events;
4026
4027 for (i = 0; i < evlist->core.nr_mmaps; i++) {
4028 union perf_event *event;
4029 struct mmap *md;
4030
4031 md = &evlist->mmap[i];
4032 if (perf_mmap__read_init(&md->core) < 0)
4033 continue;
4034
4035 while ((event = perf_mmap__read_event(&md->core)) != NULL) {
4036 ++trace->nr_events;
4037
4038 err = trace__deliver_event(trace, event);
4039 if (err)
4040 goto out_disable;
4041
4042 perf_mmap__consume(&md->core);
4043
4044 if (interrupted)
4045 goto out_disable;
4046
4047 if (done && !draining) {
4048 evlist__disable(evlist);
4049 draining = true;
4050 }
4051 }
4052 perf_mmap__read_done(&md->core);
4053 }
4054
4055 if (trace->nr_events == before) {
4056 int timeout = done ? 100 : -1;
4057
4058 if (!draining && evlist__poll(evlist, timeout) > 0) {
4059 if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
4060 draining = true;
4061
4062 goto again;
4063 } else {
4064 if (trace__flush_events(trace))
4065 goto out_disable;
4066 }
4067 } else {
4068 goto again;
4069 }
4070
4071out_disable:
4072 thread__zput(trace->current);
4073
4074 evlist__disable(evlist);
4075
4076 if (trace->sort_events)
4077 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
4078
4079 if (!err) {
4080 if (trace->summary)
4081 trace__fprintf_thread_summary(trace, trace->output);
4082
4083 if (trace->show_tool_stats) {
4084 fprintf(trace->output, "Stats:\n "
4085 " vfs_getname : %" PRIu64 "\n"
4086 " proc_getname: %" PRIu64 "\n",
4087 trace->stats.vfs_getname,
4088 trace->stats.proc_getname);
4089 }
4090 }
4091
4092out_delete_evlist:
4093 trace__symbols__exit(trace);
4094 evlist__free_syscall_tp_fields(evlist);
4095 evlist__delete(evlist);
4096 cgroup__put(trace->cgroup);
4097 trace->evlist = NULL;
4098 trace->live = false;
4099 return err;
4100{
4101 char errbuf[BUFSIZ];
4102
4103out_error_sched_stat_runtime:
4104 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
4105 goto out_error;
4106
4107out_error_raw_syscalls:
4108 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
4109 goto out_error;
4110
4111out_error_mmap:
4112 evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
4113 goto out_error;
4114
4115out_error_open:
4116 evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
4117
4118out_error:
4119 fprintf(trace->output, "%s\n", errbuf);
4120 goto out_delete_evlist;
4121
4122out_error_apply_filters:
4123 fprintf(trace->output,
4124 "Failed to set filter \"%s\" on event %s with %d (%s)\n",
4125 evsel->filter, evsel__name(evsel), errno,
4126 str_error_r(errno, errbuf, sizeof(errbuf)));
4127 goto out_delete_evlist;
4128}
4129out_error_mem:
4130 fprintf(trace->output, "Not enough memory to run!\n");
4131 goto out_delete_evlist;
4132
4133out_errno:
4134 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
4135 goto out_delete_evlist;
4136}
4137
4138static int trace__replay(struct trace *trace)
4139{
4140 const struct evsel_str_handler handlers[] = {
4141 { "probe:vfs_getname", trace__vfs_getname, },
4142 };
4143 struct perf_data data = {
4144 .path = input_name,
4145 .mode = PERF_DATA_MODE_READ,
4146 .force = trace->force,
4147 };
4148 struct perf_session *session;
4149 struct evsel *evsel;
4150 int err = -1;
4151
4152 trace->tool.sample = trace__process_sample;
4153 trace->tool.mmap = perf_event__process_mmap;
4154 trace->tool.mmap2 = perf_event__process_mmap2;
4155 trace->tool.comm = perf_event__process_comm;
4156 trace->tool.exit = perf_event__process_exit;
4157 trace->tool.fork = perf_event__process_fork;
4158 trace->tool.attr = perf_event__process_attr;
4159 trace->tool.tracing_data = perf_event__process_tracing_data;
4160 trace->tool.build_id = perf_event__process_build_id;
4161 trace->tool.namespaces = perf_event__process_namespaces;
4162
4163 trace->tool.ordered_events = true;
4164 trace->tool.ordering_requires_timestamps = true;
4165
4166 /* add tid to output */
4167 trace->multiple_threads = true;
4168
4169 session = perf_session__new(&data, &trace->tool);
4170 if (IS_ERR(session))
4171 return PTR_ERR(session);
4172
4173 if (trace->opts.target.pid)
4174 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
4175
4176 if (trace->opts.target.tid)
4177 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
4178
4179 if (symbol__init(&session->header.env) < 0)
4180 goto out;
4181
4182 trace->host = &session->machines.host;
4183
4184 err = perf_session__set_tracepoints_handlers(session, handlers);
4185 if (err)
4186 goto out;
4187
4188 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
4189 trace->syscalls.events.sys_enter = evsel;
4190 /* older kernels have syscalls tp versus raw_syscalls */
4191 if (evsel == NULL)
4192 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");
4193
4194 if (evsel &&
4195 (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
4196 perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
4197		pr_err("Error initializing the raw_syscalls:sys_enter event\n");
4198 goto out;
4199 }
4200
4201 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
4202 trace->syscalls.events.sys_exit = evsel;
4203 if (evsel == NULL)
4204 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
4205 if (evsel &&
4206 (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
4207 perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
4208		pr_err("Error initializing the raw_syscalls:sys_exit event\n");
4209 goto out;
4210 }
4211
4212 evlist__for_each_entry(session->evlist, evsel) {
4213 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
4214 (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
4215 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
4216 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
4217 evsel->handler = trace__pgfault;
4218 }
4219
4220 setup_pager();
4221
4222 err = perf_session__process_events(session);
4223 if (err)
4224		pr_err("Failed to process events, error %d\n", err);
4226 else if (trace->summary)
4227 trace__fprintf_thread_summary(trace, trace->output);
4228
4229out:
4230 perf_session__delete(session);
4231
4232 return err;
4233}
4234
4235static size_t trace__fprintf_threads_header(FILE *fp)
4236{
4237 size_t printed;
4238
4239 printed = fprintf(fp, "\n Summary of events:\n\n");
4240
4241 return printed;
4242}
4243
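/*
 * Re-sort the per-thread syscall stats, kept in an intlist indexed by
 * syscall id, into a rb_tree ordered by the total time (msecs) spent in each
 * syscall, so that thread__dump_stats() below prints the most expensive ones
 * first. The block right after DEFINE_RESORT_RB() fills in each re-sorted
 * entry.
 */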
4244DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
4245 struct syscall_stats *stats;
4246 double msecs;
4247 int syscall;
4248)
4249{
4250 struct int_node *source = rb_entry(nd, struct int_node, rb_node);
4251 struct syscall_stats *stats = source->priv;
4252
4253 entry->syscall = source->i;
4254 entry->stats = stats;
4255 entry->msecs = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0;
4256}
4257
4258static size_t thread__dump_stats(struct thread_trace *ttrace,
4259 struct trace *trace, FILE *fp)
4260{
4261 size_t printed = 0;
4262 struct syscall *sc;
4263 struct rb_node *nd;
4264 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
4265
4266 if (syscall_stats == NULL)
4267 return 0;
4268
4269 printed += fprintf(fp, "\n");
4270
4271 printed += fprintf(fp, " syscall calls errors total min avg max stddev\n");
4272 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
4273 printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n");
4274
4275 resort_rb__for_each_entry(nd, syscall_stats) {
4276 struct syscall_stats *stats = syscall_stats_entry->stats;
4277 if (stats) {
4278 double min = (double)(stats->stats.min) / NSEC_PER_MSEC;
4279 double max = (double)(stats->stats.max) / NSEC_PER_MSEC;
4280 double avg = avg_stats(&stats->stats);
4281 double pct;
4282 u64 n = (u64)stats->stats.n;
4283
4284 pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
4285 avg /= NSEC_PER_MSEC;
4286
4287 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
4288 printed += fprintf(fp, " %-15s", sc->name);
4289 printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f",
4290 n, stats->nr_failures, syscall_stats_entry->msecs, min, avg);
4291 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
4292
4293 if (trace->errno_summary && stats->nr_failures) {
4294 const char *arch_name = perf_env__arch(trace->host->env);
4295 int e;
4296
4297 for (e = 0; e < stats->max_errno; ++e) {
4298 if (stats->errnos[e] != 0)
4299 fprintf(fp, "\t\t\t\t%s: %d\n", arch_syscalls__strerrno(arch_name, e + 1), stats->errnos[e]);
4300 }
4301 }
4302 }
4303 }
4304
4305 resort_rb__delete(syscall_stats);
4306 printed += fprintf(fp, "\n\n");
4307
4308 return printed;
4309}
4310
4311static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
4312{
4313 size_t printed = 0;
4314 struct thread_trace *ttrace = thread__priv(thread);
4315 double ratio;
4316
4317 if (ttrace == NULL)
4318 return 0;
4319
4320 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
4321
4322 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
4323 printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
4324 printed += fprintf(fp, "%.1f%%", ratio);
4325 if (ttrace->pfmaj)
4326 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
4327 if (ttrace->pfmin)
4328 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
4329 if (trace->sched)
4330 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
4331 else if (fputc('\n', fp) != EOF)
4332 ++printed;
4333
4334 printed += thread__dump_stats(ttrace, trace, fp);
4335
4336 return printed;
4337}
4338
4339static unsigned long thread__nr_events(struct thread_trace *ttrace)
4340{
4341 return ttrace ? ttrace->nr_events : 0;
4342}
4343
4344DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
4345 struct thread *thread;
4346)
4347{
4348 entry->thread = rb_entry(nd, struct thread, rb_node);
4349}
4350
4351static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
4352{
4353 size_t printed = trace__fprintf_threads_header(fp);
4354 struct rb_node *nd;
4355 int i;
4356
4357 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
4358 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
4359
4360 if (threads == NULL) {
4361 fprintf(fp, "%s", "Error sorting output by nr_events!\n");
4362 return 0;
4363 }
4364
4365 resort_rb__for_each_entry(nd, threads)
4366 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
4367
4368 resort_rb__delete(threads);
4369 }
4370 return printed;
4371}
4372
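/*
 * --duration is in milliseconds, e.g. 'perf trace --duration 2.5' shows only
 * the events that took longer than 2.5 ms (see the option help below).
 */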
4373static int trace__set_duration(const struct option *opt, const char *str,
4374 int unset __maybe_unused)
4375{
4376 struct trace *trace = opt->value;
4377
4378 trace->duration_filter = atof(str);
4379 return 0;
4380}
4381
4382static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
4383 int unset __maybe_unused)
4384{
4385 int ret = -1;
4386 size_t i;
4387 struct trace *trace = opt->value;
4388 /*
4389	 * FIXME: introduce an intarray class, plainly parse the csv and create a
4390 * { int nr, int entries[] } struct...
4391 */
4392 struct intlist *list = intlist__new(str);
4393
4394 if (list == NULL)
4395 return -1;
4396
4397 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
4398 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
4399
4400 if (trace->filter_pids.entries == NULL)
4401 goto out;
4402
4403 trace->filter_pids.entries[0] = getpid();
4404
4405 for (i = 1; i < trace->filter_pids.nr; ++i)
4406 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
4407
4408 intlist__delete(list);
4409 ret = 0;
4410out:
4411 return ret;
4412}
4413
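/*
 * Open the -o/--output file, rotating a previous non-empty one to
 * "<name>.old" instead of silently truncating it.
 */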
4414static int trace__open_output(struct trace *trace, const char *filename)
4415{
4416 struct stat st;
4417
4418 if (!stat(filename, &st) && st.st_size) {
4419 char oldname[PATH_MAX];
4420
4421 scnprintf(oldname, sizeof(oldname), "%s.old", filename);
4422 unlink(oldname);
4423 rename(filename, oldname);
4424 }
4425
4426 trace->output = fopen(filename, "w");
4427
4428 return trace->output == NULL ? -errno : 0;
4429}
4430
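/*
 * Parses the -F/--pf option argument, e.g. (illustrative):
 *
 *	perf trace -F min ls        # minor page faults only
 *	perf trace -F all sleep 1   # both major and minor
 *
 * When -F is used without a value, OPT_CALLBACK_DEFAULT() below makes it
 * default to "maj".
 */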
4431static int parse_pagefaults(const struct option *opt, const char *str,
4432 int unset __maybe_unused)
4433{
4434 int *trace_pgfaults = opt->value;
4435
4436 if (strcmp(str, "all") == 0)
4437 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
4438 else if (strcmp(str, "maj") == 0)
4439 *trace_pgfaults |= TRACE_PFMAJ;
4440 else if (strcmp(str, "min") == 0)
4441 *trace_pgfaults |= TRACE_PFMIN;
4442 else
4443 return -1;
4444
4445 return 0;
4446}
4447
4448static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler)
4449{
4450 struct evsel *evsel;
4451
4452 evlist__for_each_entry(evlist, evsel) {
4453 if (evsel->handler == NULL)
4454 evsel->handler = handler;
4455 }
4456}
4457
4458static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name)
4459{
4460 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
4461
4462 if (fmt) {
4463 struct syscall_fmt *scfmt = syscall_fmt__find(name);
4464
4465 if (scfmt) {
4466 int skip = 0;
4467
4468 if (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 ||
4469 strcmp(evsel->tp_format->format.fields->name, "nr") == 0)
4470 ++skip;
4471
4472 memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt));
4473 }
4474 }
4475}
4476
4477static int evlist__set_syscall_tp_fields(struct evlist *evlist)
4478{
4479 struct evsel *evsel;
4480
4481 evlist__for_each_entry(evlist, evsel) {
4482 if (evsel->priv || !evsel->tp_format)
4483 continue;
4484
4485 if (strcmp(evsel->tp_format->system, "syscalls")) {
4486 evsel__init_tp_arg_scnprintf(evsel);
4487 continue;
4488 }
4489
4490 if (evsel__init_syscall_tp(evsel))
4491 return -1;
4492
4493 if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
4494 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
4495
4496 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
4497 return -1;
4498
4499 evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1);
4500 } else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
4501 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
4502
4503 if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
4504 return -1;
4505
4506 evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1);
4507 }
4508 }
4509
4510 return 0;
4511}
4512
4513/*
4514 * XXX: Hackish, just splitting the combined -e+--event (syscalls
4515 * (raw_syscalls:sys_{enter,exit}) + events (tracepoints, HW, SW, etc)) to use
4516 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
4517 *
4518 * It'd be better to introduce a parse_options() variant that would return a
4519 * list with the terms it didn't match to an event...
4520 */
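/*
 * For instance (illustrative):
 *
 *	perf trace -e open*,close,sched:sched_switch
 *
 * ends up with "open*,close" in the syscall list (lists[1], feeding the
 * ev_qualifier) and "sched:sched_switch" in the event list (lists[0]), the
 * latter being handed to parse_events_option() just as if it had been passed
 * via --event.
 */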
4521static int trace__parse_events_option(const struct option *opt, const char *str,
4522 int unset __maybe_unused)
4523{
4524 struct trace *trace = (struct trace *)opt->value;
4525 const char *s = str;
4526 char *sep = NULL, *lists[2] = { NULL, NULL, };
4527 int len = strlen(str) + 1, err = -1, list, idx;
4528 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
4529 char group_name[PATH_MAX];
4530 struct syscall_fmt *fmt;
4531
4532 if (strace_groups_dir == NULL)
4533 return -1;
4534
4535 if (*s == '!') {
4536 ++s;
4537 trace->not_ev_qualifier = true;
4538 }
4539
4540 while (1) {
4541 if ((sep = strchr(s, ',')) != NULL)
4542 *sep = '\0';
4543
4544 list = 0;
4545 if (syscalltbl__id(trace->sctbl, s) >= 0 ||
4546 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
4547 list = 1;
4548 goto do_concat;
4549 }
4550
4551 fmt = syscall_fmt__find_by_alias(s);
4552 if (fmt != NULL) {
4553 list = 1;
4554 s = fmt->name;
4555 } else {
4556 path__join(group_name, sizeof(group_name), strace_groups_dir, s);
4557 if (access(group_name, R_OK) == 0)
4558 list = 1;
4559 }
4560do_concat:
4561 if (lists[list]) {
4562 sprintf(lists[list] + strlen(lists[list]), ",%s", s);
4563 } else {
4564 lists[list] = malloc(len);
4565 if (lists[list] == NULL)
4566 goto out;
4567 strcpy(lists[list], s);
4568 }
4569
4570 if (!sep)
4571 break;
4572
4573 *sep = ',';
4574 s = sep + 1;
4575 }
4576
4577 if (lists[1] != NULL) {
4578 struct strlist_config slist_config = {
4579 .dirname = strace_groups_dir,
4580 };
4581
4582 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
4583 if (trace->ev_qualifier == NULL) {
4584 fputs("Not enough memory to parse event qualifier", trace->output);
4585 goto out;
4586 }
4587
4588 if (trace__validate_ev_qualifier(trace))
4589 goto out;
4590 trace->trace_syscalls = true;
4591 }
4592
4593 err = 0;
4594
4595 if (lists[0]) {
4596 struct option o = {
4597 .value = &trace->evlist,
4598 };
4599 err = parse_events_option(&o, lists[0], 0);
4600 }
4601out:
4602 free(strace_groups_dir);
4603 free(lists[0]);
4604 free(lists[1]);
4605 if (sep)
4606 *sep = ',';
4607
4608 return err;
4609}
4610
4611static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
4612{
4613 struct trace *trace = opt->value;
4614
4615 if (!list_empty(&trace->evlist->core.entries)) {
4616 struct option o = {
4617 .value = &trace->evlist,
4618 };
4619 return parse_cgroups(&o, str, unset);
4620 }
4621 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4622
4623 return 0;
4624}
4625
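/*
 * Handles the 'trace' section of ~/.perfconfig, e.g. (illustrative values,
 * the path is just an example):
 *
 *	[trace]
 *		add_events = /path/to/augmented_raw_syscalls.c
 *		show_duration = no
 *		show_zeros = yes
 *		args_alignment = 40
 *		tracepoint_beautifiers = libtraceevent
 */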
4626static int trace__config(const char *var, const char *value, void *arg)
4627{
4628 struct trace *trace = arg;
4629 int err = 0;
4630
4631 if (!strcmp(var, "trace.add_events")) {
4632 trace->perfconfig_events = strdup(value);
4633 if (trace->perfconfig_events == NULL) {
4634 pr_err("Not enough memory for %s\n", "trace.add_events");
4635 return -1;
4636 }
4637 } else if (!strcmp(var, "trace.show_timestamp")) {
4638 trace->show_tstamp = perf_config_bool(var, value);
4639 } else if (!strcmp(var, "trace.show_duration")) {
4640 trace->show_duration = perf_config_bool(var, value);
4641 } else if (!strcmp(var, "trace.show_arg_names")) {
4642 trace->show_arg_names = perf_config_bool(var, value);
4643 if (!trace->show_arg_names)
4644 trace->show_zeros = true;
4645 } else if (!strcmp(var, "trace.show_zeros")) {
4646 bool new_show_zeros = perf_config_bool(var, value);
4647 if (!trace->show_arg_names && !new_show_zeros) {
4648 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
4649 goto out;
4650 }
4651 trace->show_zeros = new_show_zeros;
4652 } else if (!strcmp(var, "trace.show_prefix")) {
4653 trace->show_string_prefix = perf_config_bool(var, value);
4654 } else if (!strcmp(var, "trace.no_inherit")) {
4655 trace->opts.no_inherit = perf_config_bool(var, value);
4656 } else if (!strcmp(var, "trace.args_alignment")) {
4657 int args_alignment = 0;
4658 if (perf_config_int(&args_alignment, var, value) == 0)
4659 trace->args_alignment = args_alignment;
4660 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
4661 if (strcasecmp(value, "libtraceevent") == 0)
4662 trace->libtraceevent_print = true;
4663 else if (strcasecmp(value, "libbeauty") == 0)
4664 trace->libtraceevent_print = false;
4665 }
4666out:
4667 return err;
4668}
4669
4670static void trace__exit(struct trace *trace)
4671{
4672 int i;
4673
4674 strlist__delete(trace->ev_qualifier);
4675 free(trace->ev_qualifier_ids.entries);
4676 if (trace->syscalls.table) {
4677 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
4678 syscall__exit(&trace->syscalls.table[i]);
4679 free(trace->syscalls.table);
4680 }
4681 syscalltbl__delete(trace->sctbl);
4682 zfree(&trace->perfconfig_events);
4683}
4684
4685int cmd_trace(int argc, const char **argv)
4686{
4687 const char *trace_usage[] = {
4688 "perf trace [<options>] [<command>]",
4689 "perf trace [<options>] -- <command> [<options>]",
4690 "perf trace record [<options>] [<command>]",
4691 "perf trace record [<options>] -- <command> [<options>]",
4692 NULL
4693 };
4694 struct trace trace = {
4695 .opts = {
4696 .target = {
4697 .uid = UINT_MAX,
4698 .uses_mmap = true,
4699 },
4700 .user_freq = UINT_MAX,
4701 .user_interval = ULLONG_MAX,
4702 .no_buffering = true,
4703 .mmap_pages = UINT_MAX,
4704 },
4705 .output = stderr,
4706 .show_comm = true,
4707 .show_tstamp = true,
4708 .show_duration = true,
4709 .show_arg_names = true,
4710 .args_alignment = 70,
4711 .trace_syscalls = false,
4712 .kernel_syscallchains = false,
4713 .max_stack = UINT_MAX,
4714 .max_events = ULONG_MAX,
4715 };
4716 const char *map_dump_str = NULL;
4717 const char *output_name = NULL;
4718 const struct option trace_options[] = {
4719 OPT_CALLBACK('e', "event", &trace, "event",
4720 "event/syscall selector. use 'perf list' to list available events",
4721 trace__parse_events_option),
4722 OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
4723 "event filter", parse_filter),
4724 OPT_BOOLEAN(0, "comm", &trace.show_comm,
4725 "show the thread COMM next to its id"),
4726 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
4727 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
4728 trace__parse_events_option),
4729 OPT_STRING('o', "output", &output_name, "file", "output file name"),
4730 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
4731 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
4732 "trace events on existing process id"),
4733 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
4734 "trace events on existing thread id"),
4735 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
4736 "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
4737 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
4738 "system-wide collection from all CPUs"),
4739 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
4740 "list of cpus to monitor"),
4741 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
4742 "child tasks do not inherit counters"),
4743 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
4744 "number of mmap data pages", evlist__parse_mmap_pages),
4745 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
4746 "user to profile"),
4747 OPT_CALLBACK(0, "duration", &trace, "float",
4748 "show only events with duration > N.M ms",
4749 trace__set_duration),
4750#ifdef HAVE_LIBBPF_SUPPORT
4751 OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
4752#endif
4753 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
4754 OPT_INCR('v', "verbose", &verbose, "be more verbose"),
4755 OPT_BOOLEAN('T', "time", &trace.full_time,
4756 "Show full timestamp, not time relative to first start"),
4757 OPT_BOOLEAN(0, "failure", &trace.failure_only,
4758 "Show only syscalls that failed"),
4759 OPT_BOOLEAN('s', "summary", &trace.summary_only,
4760 "Show only syscall summary with statistics"),
4761 OPT_BOOLEAN('S', "with-summary", &trace.summary,
4762 "Show all syscalls and summary with statistics"),
4763 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
4764 "Show errno stats per syscall, use with -s or -S"),
4765 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
4766 "Trace pagefaults", parse_pagefaults, "maj"),
4767 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
4768 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
4769 OPT_CALLBACK(0, "call-graph", &trace.opts,
4770 "record_mode[,record_size]", record_callchain_help,
4771 &record_parse_callchain_opt),
4772 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
4773 "Use libtraceevent to print the tracepoint arguments."),
4774 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
4775 "Show the kernel callchains on the syscall exit path"),
4776 OPT_ULONG(0, "max-events", &trace.max_events,
4777 "Set the maximum number of events to print, exit after that is reached. "),
4778 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
4779 "Set the minimum stack depth when parsing the callchain, "
4780 "anything below the specified depth will be ignored."),
4781 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
4782 "Set the maximum stack depth when parsing the callchain, "
4783 "anything beyond the specified depth will be ignored. "
4784 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
4785 OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
4786 "Sort batch of events before processing, use if getting out of order events"),
4787 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
4788 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
4789 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
4790 "per thread proc mmap processing timeout in ms"),
4791 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
4792 trace__parse_cgroups),
4793 OPT_INTEGER('D', "delay", &trace.opts.initial_delay,
4794 "ms to wait before starting measurement after program "
4795 "start"),
4796 OPTS_EVSWITCH(&trace.evswitch),
4797 OPT_END()
4798 };
4799 bool __maybe_unused max_stack_user_set = true;
4800 bool mmap_pages_user_set = true;
4801 struct evsel *evsel;
4802 const char * const trace_subcommands[] = { "record", NULL };
4803 int err = -1;
4804 char bf[BUFSIZ];
4805 struct sigaction sigchld_act;
4806
4807 signal(SIGSEGV, sighandler_dump_stack);
4808 signal(SIGFPE, sighandler_dump_stack);
4809 signal(SIGINT, sighandler_interrupt);
4810
4811 memset(&sigchld_act, 0, sizeof(sigchld_act));
4812 sigchld_act.sa_flags = SA_SIGINFO;
4813 sigchld_act.sa_sigaction = sighandler_chld;
4814 sigaction(SIGCHLD, &sigchld_act, NULL);
4815
4816 trace.evlist = evlist__new();
4817 trace.sctbl = syscalltbl__new();
4818
4819 if (trace.evlist == NULL || trace.sctbl == NULL) {
4820 pr_err("Not enough memory to run!\n");
4821 err = -ENOMEM;
4822 goto out;
4823 }
4824
4825 /*
4826	 * Parsing .perfconfig may entail creating a BPF event that may need
4827 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
4828 * is too small. This affects just this process, not touching the
4829 * global setting. If it fails we'll get something in 'perf trace -v'
4830 * to help diagnose the problem.
4831 */
4832 rlimit__bump_memlock();
4833
4834 err = perf_config(trace__config, &trace);
4835 if (err)
4836 goto out;
4837
4838 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
4839 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
4840
4841 /*
4842	 * Here we have already passed through trace__parse_events_option() and it
4843	 * has already figured out if -e syscall_name was used; if not, but --event
4844	 * foo:bar was used, then the user is interested _just_ in those, say,
4845	 * tracepoint events, not in the strace-like syscall-name-based mode.
4846	 *
4847	 * This is important because we need to check if strace-like mode is
4848	 * needed to decide if we should filter out the eBPF
4849	 * __augmented_syscalls__ code, if it is in the mix, say, via
4850	 * .perfconfig trace.add_events.
4851 */
4852 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
4853 trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
4854 trace.trace_syscalls = true;
4855 }
4856 /*
4857	 * Now that we have --verbose figured out, let's see if we need to parse
4858 * events from .perfconfig, so that if those events fail parsing, say some
4859 * BPF program fails, then we'll be able to use --verbose to see what went
4860 * wrong in more detail.
4861 */
4862 if (trace.perfconfig_events != NULL) {
4863 struct parse_events_error parse_err;
4864
4865 parse_events_error__init(&parse_err);
4866 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
4867 if (err)
4868 parse_events_error__print(&parse_err, trace.perfconfig_events);
4869 parse_events_error__exit(&parse_err);
4870 if (err)
4871 goto out;
4872 }
4873
4874 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
4875 usage_with_options_msg(trace_usage, trace_options,
4876 "cgroup monitoring only available in system-wide mode");
4877 }
4878
4879 evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
4880 if (IS_ERR(evsel)) {
4881 bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
4882 pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
4883 goto out;
4884 }
4885
4886 if (evsel) {
4887 trace.syscalls.events.augmented = evsel;
4888
4889 evsel = evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
4890 if (evsel == NULL) {
4891 pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
4892 goto out;
4893 }
4894
4895 if (evsel->bpf_obj == NULL) {
4896 pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
4897 goto out;
4898 }
4899
4900 trace.bpf_obj = evsel->bpf_obj;
4901
4902 /*
4903		 * If we have _just_ the augmenter event but don't have an
4904		 * explicit --syscalls, then assume we want all strace-like
4905 * syscalls:
4906 */
4907 if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace))
4908 trace.trace_syscalls = true;
4909 /*
4910 * So, if we have a syscall augmenter, but trace_syscalls, aka
4911		 * strace-like syscall tracing, is not set, then we need to throw
4912 * away the augmenter, i.e. all the events that were created
4913 * from that BPF object file.
4914 *
4915 * This is more to fix the current .perfconfig trace.add_events
4916		 * style of setting up the strace-like eBPF based syscall tracepoint
4917 * payload augmenter.
4918 *
4919 * All this complexity will be avoided by adding an alternative
4920 * to trace.add_events in the form of
4921 * trace.bpf_augmented_syscalls, that will be only parsed if we
4922 * need it.
4923 *
4924		 * .perfconfig trace.add_events is still useful if we want to, for
4925		 * instance, have msr_write.msr in some .perfconfig profile based
4926 * 'perf trace --config determinism.profile' mode, where for some
4927 * particular goal/workload type we want a set of events and
4928 * output mode (with timings, etc) instead of having to add
4929 * all via the command line.
4930 *
4931 * Also --config to specify an alternate .perfconfig file needs
4932 * to be implemented.
4933 */
4934 if (!trace.trace_syscalls) {
4935 trace__delete_augmented_syscalls(&trace);
4936 } else {
4937 trace__set_bpf_map_filtered_pids(&trace);
4938 trace__set_bpf_map_syscalls(&trace);
4939 trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
4940 }
4941 }
4942
4943 err = bpf__setup_stdout(trace.evlist);
4944 if (err) {
4945 bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
4946 pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
4947 goto out;
4948 }
4949
4950 err = -1;
4951
4952 if (map_dump_str) {
4953 trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
4954 if (trace.dump.map == NULL) {
4955 pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
4956 goto out;
4957 }
4958 }
4959
4960 if (trace.trace_pgfaults) {
4961 trace.opts.sample_address = true;
4962 trace.opts.sample_time = true;
4963 }
4964
4965 if (trace.opts.mmap_pages == UINT_MAX)
4966 mmap_pages_user_set = false;
4967
4968 if (trace.max_stack == UINT_MAX) {
4969 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
4970 max_stack_user_set = false;
4971 }
4972
4973#ifdef HAVE_DWARF_UNWIND_SUPPORT
4974 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
4975 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
4976 }
4977#endif
4978
4979 if (callchain_param.enabled) {
4980 if (!mmap_pages_user_set && geteuid() == 0)
4981 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
4982
4983 symbol_conf.use_callchain = true;
4984 }
4985
4986 if (trace.evlist->core.nr_entries > 0) {
4987 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
4988 if (evlist__set_syscall_tp_fields(trace.evlist)) {
4989 perror("failed to set syscalls:* tracepoint fields");
4990 goto out;
4991 }
4992 }
4993
4994 if (trace.sort_events) {
4995 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
4996 ordered_events__set_copy_on_queue(&trace.oe.data, true);
4997 }
4998
4999 /*
5000 * If we are augmenting syscalls, then combine what we put in the
5001 * __augmented_syscalls__ BPF map with what is in the
5002 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
5003 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
5004 *
5005 * We'll switch to look at two BPF maps, one for sys_enter and the
5006 * other for sys_exit when we start augmenting the sys_exit paths with
5007 * buffers that are being copied from kernel to userspace, think 'read'
5008 * syscall.
5009 */
5010 if (trace.syscalls.events.augmented) {
5011 evlist__for_each_entry(trace.evlist, evsel) {
5012 bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
5013
5014 if (raw_syscalls_sys_exit) {
5015 trace.raw_augmented_syscalls = true;
5016 goto init_augmented_syscall_tp;
5017 }
5018
5019 if (trace.syscalls.events.augmented->priv == NULL &&
5020 strstr(evsel__name(evsel), "syscalls:sys_enter")) {
5021 struct evsel *augmented = trace.syscalls.events.augmented;
5022 if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
5023 evsel__init_augmented_syscall_tp_args(augmented))
5024 goto out;
5025 /*
5026				 * Augmented is the __augmented_syscalls__ BPF_OUTPUT event.
5027				 * Above we made sure we can get from the payload the tp fields
5028				 * that we get from the syscalls:sys_enter tracefs format file.
5029 */
5030 augmented->handler = trace__sys_enter;
5031 /*
5032 * Now we do the same for the *syscalls:sys_enter event so that
5033 * if we handle it directly, i.e. if the BPF prog returns 0 so
5034 * as not to filter it, then we'll handle it just like we would
5035 * for the BPF_OUTPUT one:
5036 */
5037 if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
5038 evsel__init_augmented_syscall_tp_args(evsel))
5039 goto out;
5040 evsel->handler = trace__sys_enter;
5041 }
5042
5043 if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
5044 struct syscall_tp *sc;
5045init_augmented_syscall_tp:
5046 if (evsel__init_augmented_syscall_tp(evsel, evsel))
5047 goto out;
5048 sc = __evsel__syscall_tp(evsel);
5049 /*
5050 * For now with BPF raw_augmented we hook into
5051 * raw_syscalls:sys_enter and there we get all
5052 * 6 syscall args plus the tracepoint common
5053 * fields and the syscall_nr (another long).
5054 * So we check if that is the case and if so
5055 * don't look after the sc->args_size but
5056 * always after the full raw_syscalls:sys_enter
5057 * payload, which is fixed.
5058 *
5059 * We'll revisit this later to pass
5060			 * sc->args_size to the BPF augmenter (now
5061 * tools/perf/examples/bpf/augmented_raw_syscalls.c,
5062 * so that it copies only what we need for each
5063 * syscall, like what happens when we use
5064 * syscalls:sys_enter_NAME, so that we reduce
5065 * the kernel/userspace traffic to just what is
5066 * needed for each syscall.
5067 */
5068 if (trace.raw_augmented_syscalls)
5069 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
5070 evsel__init_augmented_syscall_tp_ret(evsel);
5071 evsel->handler = trace__sys_exit;
5072 }
5073 }
5074 }
5075
5076 if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
5077 return trace__record(&trace, argc-1, &argv[1]);
5078
5079 /* Using just --errno-summary will trigger --summary */
5080 if (trace.errno_summary && !trace.summary && !trace.summary_only)
5081 trace.summary_only = true;
5082
5083 /* summary_only implies summary option, but don't overwrite summary if set */
5084 if (trace.summary_only)
5085 trace.summary = trace.summary_only;
5086
5087 if (output_name != NULL) {
5088 err = trace__open_output(&trace, output_name);
5089 if (err < 0) {
5090 perror("failed to create output file");
5091 goto out;
5092 }
5093 }
5094
5095 err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
5096 if (err)
5097 goto out_close;
5098
5099 err = target__validate(&trace.opts.target);
5100 if (err) {
5101 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5102 fprintf(trace.output, "%s", bf);
5103 goto out_close;
5104 }
5105
5106 err = target__parse_uid(&trace.opts.target);
5107 if (err) {
5108 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5109 fprintf(trace.output, "%s", bf);
5110 goto out_close;
5111 }
5112
5113 if (!argc && target__none(&trace.opts.target))
5114 trace.opts.target.system_wide = true;
5115
5116 if (input_name)
5117 err = trace__replay(&trace);
5118 else
5119 err = trace__run(&trace, argc, argv);
5120
5121out_close:
5122 if (output_name != NULL)
5123 fclose(trace.output);
5124out:
5125 trace__exit(&trace);
5126 return err;
5127}
1/*
2 * builtin-trace.c
3 *
4 * Builtin 'trace' command:
5 *
6 * Display a continuously updated trace of any workload, CPU, specific PID,
7 * system wide, etc. Default format is loosely strace like, but any other
8 * event may be specified using --event.
9 *
10 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
11 *
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
13 *
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
15 */
16
17#include "util/record.h"
18#include <traceevent/event-parse.h>
19#include <api/fs/tracing_path.h>
20#include <bpf/bpf.h>
21#include "util/bpf_map.h"
22#include "util/rlimit.h"
23#include "builtin.h"
24#include "util/cgroup.h"
25#include "util/color.h"
26#include "util/config.h"
27#include "util/debug.h"
28#include "util/dso.h"
29#include "util/env.h"
30#include "util/event.h"
31#include "util/evsel.h"
32#include "util/evsel_fprintf.h"
33#include "util/synthetic-events.h"
34#include "util/evlist.h"
35#include "util/evswitch.h"
36#include "util/mmap.h"
37#include <subcmd/pager.h>
38#include <subcmd/exec-cmd.h>
39#include "util/machine.h"
40#include "util/map.h"
41#include "util/symbol.h"
42#include "util/path.h"
43#include "util/session.h"
44#include "util/thread.h"
45#include <subcmd/parse-options.h>
46#include "util/strlist.h"
47#include "util/intlist.h"
48#include "util/thread_map.h"
49#include "util/stat.h"
50#include "util/tool.h"
51#include "util/util.h"
52#include "trace/beauty/beauty.h"
53#include "trace-event.h"
54#include "util/parse-events.h"
55#include "util/bpf-loader.h"
56#include "callchain.h"
57#include "print_binary.h"
58#include "string2.h"
59#include "syscalltbl.h"
60#include "rb_resort.h"
61#include "../perf.h"
62
63#include <errno.h>
64#include <inttypes.h>
65#include <poll.h>
66#include <signal.h>
67#include <stdlib.h>
68#include <string.h>
69#include <linux/err.h>
70#include <linux/filter.h>
71#include <linux/kernel.h>
72#include <linux/random.h>
73#include <linux/stringify.h>
74#include <linux/time64.h>
75#include <linux/zalloc.h>
76#include <fcntl.h>
77#include <sys/sysmacros.h>
78
79#include <linux/ctype.h>
80
81#ifndef O_CLOEXEC
82# define O_CLOEXEC 02000000
83#endif
84
85#ifndef F_LINUX_SPECIFIC_BASE
86# define F_LINUX_SPECIFIC_BASE 1024
87#endif
88
89struct trace {
90 struct perf_tool tool;
91 struct syscalltbl *sctbl;
92 struct {
93 struct syscall *table;
94 struct bpf_map *map;
95 struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
96 struct bpf_map *sys_enter,
97 *sys_exit;
98 } prog_array;
99 struct {
100 struct evsel *sys_enter,
101 *sys_exit,
102 *augmented;
103 } events;
104 struct bpf_program *unaugmented_prog;
105 } syscalls;
106 struct {
107 struct bpf_map *map;
108 } dump;
109 struct record_opts opts;
110 struct evlist *evlist;
111 struct machine *host;
112 struct thread *current;
113 struct bpf_object *bpf_obj;
114 struct cgroup *cgroup;
115 u64 base_time;
116 FILE *output;
117 unsigned long nr_events;
118 unsigned long nr_events_printed;
119 unsigned long max_events;
120 struct evswitch evswitch;
121 struct strlist *ev_qualifier;
122 struct {
123 size_t nr;
124 int *entries;
125 } ev_qualifier_ids;
126 struct {
127 size_t nr;
128 pid_t *entries;
129 struct bpf_map *map;
130 } filter_pids;
131 double duration_filter;
132 double runtime_ms;
133 struct {
134 u64 vfs_getname,
135 proc_getname;
136 } stats;
137 unsigned int max_stack;
138 unsigned int min_stack;
139 int raw_augmented_syscalls_args_size;
140 bool raw_augmented_syscalls;
141 bool fd_path_disabled;
142 bool sort_events;
143 bool not_ev_qualifier;
144 bool live;
145 bool full_time;
146 bool sched;
147 bool multiple_threads;
148 bool summary;
149 bool summary_only;
150 bool failure_only;
151 bool show_comm;
152 bool print_sample;
153 bool show_tool_stats;
154 bool trace_syscalls;
155 bool kernel_syscallchains;
156 s16 args_alignment;
157 bool show_tstamp;
158 bool show_duration;
159 bool show_zeros;
160 bool show_arg_names;
161 bool show_string_prefix;
162 bool force;
163 bool vfs_getname;
164 int trace_pgfaults;
165 struct {
166 struct ordered_events data;
167 u64 last;
168 } oe;
169};
170
171struct tp_field {
172 int offset;
173 union {
174 u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
175 void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
176 };
177};
178
179#define TP_UINT_FIELD(bits) \
180static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
181{ \
182 u##bits value; \
183 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
184 return value; \
185}
186
187TP_UINT_FIELD(8);
188TP_UINT_FIELD(16);
189TP_UINT_FIELD(32);
190TP_UINT_FIELD(64);
191
192#define TP_UINT_FIELD__SWAPPED(bits) \
193static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
194{ \
195 u##bits value; \
196 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
197 return bswap_##bits(value);\
198}
199
200TP_UINT_FIELD__SWAPPED(16);
201TP_UINT_FIELD__SWAPPED(32);
202TP_UINT_FIELD__SWAPPED(64);
203
204static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
205{
206 field->offset = offset;
207
208 switch (size) {
209 case 1:
210 field->integer = tp_field__u8;
211 break;
212 case 2:
213 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
214 break;
215 case 4:
216 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
217 break;
218 case 8:
219 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
220 break;
221 default:
222 return -1;
223 }
224
225 return 0;
226}
227
228static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
229{
230 return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
231}
232
233static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
234{
235 return sample->raw_data + field->offset;
236}
237
238static int __tp_field__init_ptr(struct tp_field *field, int offset)
239{
240 field->offset = offset;
241 field->pointer = tp_field__ptr;
242 return 0;
243}
244
245static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
246{
247 return __tp_field__init_ptr(field, format_field->offset);
248}
249
250struct syscall_tp {
251 struct tp_field id;
252 union {
253 struct tp_field args, ret;
254 };
255};
256
257static int perf_evsel__init_tp_uint_field(struct evsel *evsel,
258 struct tp_field *field,
259 const char *name)
260{
261 struct tep_format_field *format_field = perf_evsel__field(evsel, name);
262
263 if (format_field == NULL)
264 return -1;
265
266 return tp_field__init_uint(field, format_field, evsel->needs_swap);
267}
268
269#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
270 ({ struct syscall_tp *sc = evsel->priv;\
271 perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
272
273static int perf_evsel__init_tp_ptr_field(struct evsel *evsel,
274 struct tp_field *field,
275 const char *name)
276{
277 struct tep_format_field *format_field = perf_evsel__field(evsel, name);
278
279 if (format_field == NULL)
280 return -1;
281
282 return tp_field__init_ptr(field, format_field);
283}
284
285#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
286 ({ struct syscall_tp *sc = evsel->priv;\
287 perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
288
289static void evsel__delete_priv(struct evsel *evsel)
290{
291 zfree(&evsel->priv);
292 evsel__delete(evsel);
293}
294
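/*
 * The syscall number field is called "__syscall_nr" on newer kernels and just
 * "nr" on older ones, hence the two lookups below.
 */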
295static int perf_evsel__init_syscall_tp(struct evsel *evsel)
296{
297 struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));
298
299 if (evsel->priv != NULL) {
300 if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
301 perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
302 goto out_delete;
303 return 0;
304 }
305
306 return -ENOMEM;
307out_delete:
308 zfree(&evsel->priv);
309 return -ENOENT;
310}
311
312static int perf_evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
313{
314 struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));
315
316 if (evsel->priv != NULL) {
317 struct tep_format_field *syscall_id = perf_evsel__field(tp, "id");
318 if (syscall_id == NULL)
319 syscall_id = perf_evsel__field(tp, "__syscall_nr");
320 if (syscall_id == NULL)
321 goto out_delete;
322 if (__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
323 goto out_delete;
324
325 return 0;
326 }
327
328 return -ENOMEM;
329out_delete:
330 zfree(&evsel->priv);
331 return -EINVAL;
332}
333
334static int perf_evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
335{
336 struct syscall_tp *sc = evsel->priv;
337
338 return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
339}
340
341static int perf_evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
342{
343 struct syscall_tp *sc = evsel->priv;
344
345 return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
346}
347
348static int perf_evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
349{
350 evsel->priv = malloc(sizeof(struct syscall_tp));
351 if (evsel->priv != NULL) {
352 if (perf_evsel__init_sc_tp_uint_field(evsel, id))
353 goto out_delete;
354
355 evsel->handler = handler;
356 return 0;
357 }
358
359 return -ENOMEM;
360
361out_delete:
362 zfree(&evsel->priv);
363 return -ENOENT;
364}
365
366static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
367{
368 struct evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
369
	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
371 if (IS_ERR(evsel))
372 evsel = perf_evsel__newtp("syscalls", direction);
373
374 if (IS_ERR(evsel))
375 return NULL;
376
377 if (perf_evsel__init_raw_syscall_tp(evsel, handler))
378 goto out_delete;
379
380 return evsel;
381
382out_delete:
383 evsel__delete_priv(evsel);
384 return NULL;
385}
386
387#define perf_evsel__sc_tp_uint(evsel, name, sample) \
388 ({ struct syscall_tp *fields = evsel->priv; \
389 fields->name.integer(&fields->name, sample); })
390
391#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
392 ({ struct syscall_tp *fields = evsel->priv; \
393 fields->name.pointer(&fields->name, sample); })
394
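/*
 * Map an integer value to its string in the table, e.g. with the itimers
 * strarray defined below, a val of 0 prints as "REAL" ("ITIMER_REAL" with
 * show_prefix). Values outside the table fall back to intfmt, with the prefix
 * and "???" appended as a hint when show_prefix is set.
 */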
395size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
396{
397 int idx = val - sa->offset;
398
399 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
400 size_t printed = scnprintf(bf, size, intfmt, val);
401 if (show_prefix)
402 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
403 return printed;
404 }
405
406 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
407}
408
409static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
410 const char *intfmt,
411 struct syscall_arg *arg)
412{
413 return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
414}
415
416static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
417 struct syscall_arg *arg)
418{
419 return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
420}
421
422#define SCA_STRARRAY syscall_arg__scnprintf_strarray
423
424size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
425{
426 return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
427}
428
429size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
430{
431 size_t printed;
432 int i;
433
434 for (i = 0; i < sas->nr_entries; ++i) {
435 struct strarray *sa = sas->entries[i];
436 int idx = val - sa->offset;
437
438 if (idx >= 0 && idx < sa->nr_entries) {
439 if (sa->entries[idx] == NULL)
440 break;
441 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
442 }
443 }
444
445 printed = scnprintf(bf, size, intfmt, val);
446 if (show_prefix)
447 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
448 return printed;
449}
450
451size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
452 struct syscall_arg *arg)
453{
454 return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
455}
456
457#ifndef AT_FDCWD
458#define AT_FDCWD -100
459#endif
460
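/*
 * For the dirfd argument of the *at() syscalls: AT_FDCWD renders as "CWD"
 * ("AT_FDCWD" with show_prefix), anything else is printed like a regular fd,
 * with its path appended when known.
 */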
461static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
462 struct syscall_arg *arg)
463{
464 int fd = arg->val;
465 const char *prefix = "AT_FD";
466
467 if (fd == AT_FDCWD)
468 return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");
469
470 return syscall_arg__scnprintf_fd(bf, size, arg);
471}
472
473#define SCA_FDAT syscall_arg__scnprintf_fd_at
474
475static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
476 struct syscall_arg *arg);
477
478#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
479
480size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
481{
482 return scnprintf(bf, size, "%#lx", arg->val);
483}
484
485size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
486{
487 if (arg->val == 0)
488 return scnprintf(bf, size, "NULL");
489 return syscall_arg__scnprintf_hex(bf, size, arg);
490}
491
492size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
493{
494 return scnprintf(bf, size, "%d", arg->val);
495}
496
497size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
498{
499 return scnprintf(bf, size, "%ld", arg->val);
500}
501
502static const char *bpf_cmd[] = {
503 "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
504 "MAP_GET_NEXT_KEY", "PROG_LOAD",
505};
506static DEFINE_STRARRAY(bpf_cmd, "BPF_");
507
508static const char *fsmount_flags[] = {
509 [1] = "CLOEXEC",
510};
511static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");
512
513#include "trace/beauty/generated/fsconfig_arrays.c"
514
515static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");
516
517static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
518static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
519
520static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
521static DEFINE_STRARRAY(itimers, "ITIMER_");
522
523static const char *keyctl_options[] = {
524 "GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
525 "SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
526 "INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
527 "ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
528 "INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
529};
530static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");
531
532static const char *whences[] = { "SET", "CUR", "END",
533#ifdef SEEK_DATA
534"DATA",
535#endif
536#ifdef SEEK_HOLE
537"HOLE",
538#endif
539};
540static DEFINE_STRARRAY(whences, "SEEK_");
541
542static const char *fcntl_cmds[] = {
543 "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
544 "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
545 "SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
546 "GETOWNER_UIDS",
547};
548static DEFINE_STRARRAY(fcntl_cmds, "F_");
549
550static const char *fcntl_linux_specific_cmds[] = {
551 "SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
552 "SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
553 "GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
554};
555
556static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);
557
558static struct strarray *fcntl_cmds_arrays[] = {
559 &strarray__fcntl_cmds,
560 &strarray__fcntl_linux_specific_cmds,
561};
562
563static DEFINE_STRARRAYS(fcntl_cmds_arrays);
564
565static const char *rlimit_resources[] = {
566 "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
567 "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
568 "RTTIME",
569};
570static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");
571
572static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
573static DEFINE_STRARRAY(sighow, "SIG_");
574
575static const char *clockid[] = {
576 "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
577 "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
578 "REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
579};
580static DEFINE_STRARRAY(clockid, "CLOCK_");
581
582static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
583 struct syscall_arg *arg)
584{
585 bool show_prefix = arg->show_string_prefix;
586 const char *suffix = "_OK";
587 size_t printed = 0;
588 int mode = arg->val;
589
590 if (mode == F_OK) /* 0 */
591 return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
592#define P_MODE(n) \
593 if (mode & n##_OK) { \
594 printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
595 mode &= ~n##_OK; \
596 }
597
598 P_MODE(R);
599 P_MODE(W);
600 P_MODE(X);
601#undef P_MODE
602
603 if (mode)
604 printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
605
606 return printed;
607}
608
609#define SCA_ACCMODE syscall_arg__scnprintf_access_mode
610
611static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
612 struct syscall_arg *arg);
613
614#define SCA_FILENAME syscall_arg__scnprintf_filename
615
616static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
617 struct syscall_arg *arg)
618{
619 bool show_prefix = arg->show_string_prefix;
620 const char *prefix = "O_";
621 int printed = 0, flags = arg->val;
622
623#define P_FLAG(n) \
624 if (flags & O_##n) { \
625 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
626 flags &= ~O_##n; \
627 }
628
629 P_FLAG(CLOEXEC);
630 P_FLAG(NONBLOCK);
631#undef P_FLAG
632
633 if (flags)
634 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
635
636 return printed;
637}
638
639#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
640
641#ifndef GRND_NONBLOCK
642#define GRND_NONBLOCK 0x0001
643#endif
644#ifndef GRND_RANDOM
645#define GRND_RANDOM 0x0002
646#endif
647
648static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
649 struct syscall_arg *arg)
650{
651 bool show_prefix = arg->show_string_prefix;
652 const char *prefix = "GRND_";
653 int printed = 0, flags = arg->val;
654
655#define P_FLAG(n) \
656 if (flags & GRND_##n) { \
657 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
658 flags &= ~GRND_##n; \
659 }
660
661 P_FLAG(RANDOM);
662 P_FLAG(NONBLOCK);
663#undef P_FLAG
664
665 if (flags)
666 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
667
668 return printed;
669}
670
671#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
672
673#define STRARRAY(name, array) \
674 { .scnprintf = SCA_STRARRAY, \
675 .parm = &strarray__##array, }
676
677#define STRARRAY_FLAGS(name, array) \
678 { .scnprintf = SCA_STRARRAY_FLAGS, \
679 .parm = &strarray__##array, }
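/*
 * These are used in the syscall_fmts[] table below, e.g.
 * .arg = { [0] = STRARRAY(cmd, bpf_cmd), } hooks the bpf_cmd string table to
 * the first argument of the bpf() syscall.
 */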
680
681#include "trace/beauty/arch_errno_names.c"
682#include "trace/beauty/eventfd.c"
683#include "trace/beauty/futex_op.c"
684#include "trace/beauty/futex_val3.c"
685#include "trace/beauty/mmap.c"
686#include "trace/beauty/mode_t.c"
687#include "trace/beauty/msg_flags.c"
688#include "trace/beauty/open_flags.c"
689#include "trace/beauty/perf_event_open.c"
690#include "trace/beauty/pid.c"
691#include "trace/beauty/sched_policy.c"
692#include "trace/beauty/seccomp.c"
693#include "trace/beauty/signum.c"
694#include "trace/beauty/socket_type.c"
695#include "trace/beauty/waitid_options.c"
696
697struct syscall_arg_fmt {
698 size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
699 unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
700 void *parm;
701 const char *name;
702 bool show_zero;
703};
704
705static struct syscall_fmt {
706 const char *name;
707 const char *alias;
708 struct {
709 const char *sys_enter,
710 *sys_exit;
711 } bpf_prog_name;
712 struct syscall_arg_fmt arg[6];
713 u8 nr_args;
714 bool errpid;
715 bool timeout;
716 bool hexret;
717} syscall_fmts[] = {
718 { .name = "access",
719 .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
720 { .name = "arch_prctl",
721 .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
722 [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
723 { .name = "bind",
724 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
725 [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
726 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
727 { .name = "bpf",
728 .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
729 { .name = "brk", .hexret = true,
730 .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
731 { .name = "clock_gettime",
732 .arg = { [0] = STRARRAY(clk_id, clockid), }, },
733 { .name = "clone", .errpid = true, .nr_args = 5,
734 .arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, },
735 [1] = { .name = "child_stack", .scnprintf = SCA_HEX, },
736 [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
737 [3] = { .name = "child_tidptr", .scnprintf = SCA_HEX, },
738 [4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, },
739 { .name = "close",
740 .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
741 { .name = "connect",
742 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
743 [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
744 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
745 { .name = "epoll_ctl",
746 .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
747 { .name = "eventfd2",
748 .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
749 { .name = "fchmodat",
750 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
751 { .name = "fchownat",
752 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
753 { .name = "fcntl",
754 .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
755 .parm = &strarrays__fcntl_cmds_arrays,
756 .show_zero = true, },
757 [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
758 { .name = "flock",
759 .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
760 { .name = "fsconfig",
761 .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
762 { .name = "fsmount",
763 .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
764 [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
765 { .name = "fspick",
766 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
767 [1] = { .scnprintf = SCA_FILENAME, /* path */ },
768 [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
769 { .name = "fstat", .alias = "newfstat", },
770 { .name = "fstatat", .alias = "newfstatat", },
771 { .name = "futex",
772 .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
773 [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
774 { .name = "futimesat",
775 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
776 { .name = "getitimer",
777 .arg = { [0] = STRARRAY(which, itimers), }, },
778 { .name = "getpid", .errpid = true, },
779 { .name = "getpgid", .errpid = true, },
780 { .name = "getppid", .errpid = true, },
781 { .name = "getrandom",
782 .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
783 { .name = "getrlimit",
784 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
785 { .name = "gettid", .errpid = true, },
786 { .name = "ioctl",
787 .arg = {
788#if defined(__i386__) || defined(__x86_64__)
789/*
790 * FIXME: Make this available to all arches.
791 */
792 [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
793 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
794#else
795 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
796#endif
797 { .name = "kcmp", .nr_args = 5,
798 .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, },
799 [1] = { .name = "pid2", .scnprintf = SCA_PID, },
800 [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, },
801 [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, },
802 [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, },
803 { .name = "keyctl",
804 .arg = { [0] = STRARRAY(option, keyctl_options), }, },
805 { .name = "kill",
806 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
807 { .name = "linkat",
808 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
809 { .name = "lseek",
810 .arg = { [2] = STRARRAY(whence, whences), }, },
811 { .name = "lstat", .alias = "newlstat", },
812 { .name = "madvise",
813 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
814 [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
815 { .name = "mkdirat",
816 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
817 { .name = "mknodat",
818 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
819 { .name = "mmap", .hexret = true,
820/* The standard mmap maps to old_mmap on s390x */
821#if defined(__s390x__)
822 .alias = "old_mmap",
823#endif
824 .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
825 [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */ },
826 [5] = { .scnprintf = SCA_HEX, /* offset */ }, }, },
827 { .name = "mount",
828 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
829 [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
830 .mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
831 { .name = "move_mount",
832 .arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ },
833 [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
834 [2] = { .scnprintf = SCA_FDAT, /* to_dfd */ },
835 [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
836 [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
837 { .name = "mprotect",
838 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
839 [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, },
840 { .name = "mq_unlink",
841 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
842 { .name = "mremap", .hexret = true,
843 .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
844 { .name = "name_to_handle_at",
845 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
846 { .name = "newfstatat",
847 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
848 { .name = "open",
849 .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
850 { .name = "open_by_handle_at",
851 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
852 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
853 { .name = "openat",
854 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
855 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
856 { .name = "perf_event_open",
857 .arg = { [2] = { .scnprintf = SCA_INT, /* cpu */ },
858 [3] = { .scnprintf = SCA_FD, /* group_fd */ },
859 [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
860 { .name = "pipe2",
861 .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
862 { .name = "pkey_alloc",
863 .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, },
864 { .name = "pkey_free",
865 .arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, },
866 { .name = "pkey_mprotect",
867 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
868 [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
869 [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
870 { .name = "poll", .timeout = true, },
871 { .name = "ppoll", .timeout = true, },
872 { .name = "prctl",
873 .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ },
874 [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
875 [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
876 { .name = "pread", .alias = "pread64", },
877 { .name = "preadv", .alias = "pread", },
878 { .name = "prlimit64",
879 .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
880 { .name = "pwrite", .alias = "pwrite64", },
881 { .name = "readlinkat",
882 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
883 { .name = "recvfrom",
884 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
885 { .name = "recvmmsg",
886 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
887 { .name = "recvmsg",
888 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
889 { .name = "renameat",
890 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
891 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
892 { .name = "renameat2",
893 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
894 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
895 [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
896 { .name = "rt_sigaction",
897 .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
898 { .name = "rt_sigprocmask",
899 .arg = { [0] = STRARRAY(how, sighow), }, },
900 { .name = "rt_sigqueueinfo",
901 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
902 { .name = "rt_tgsigqueueinfo",
903 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
904 { .name = "sched_setscheduler",
905 .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
906 { .name = "seccomp",
907 .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ },
908 [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
909 { .name = "select", .timeout = true, },
910 { .name = "sendfile", .alias = "sendfile64", },
911 { .name = "sendmmsg",
912 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
913 { .name = "sendmsg",
914 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
915 { .name = "sendto",
916 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
917 [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
918 { .name = "set_tid_address", .errpid = true, },
919 { .name = "setitimer",
920 .arg = { [0] = STRARRAY(which, itimers), }, },
921 { .name = "setrlimit",
922 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
923 { .name = "socket",
924 .arg = { [0] = STRARRAY(family, socket_families),
925 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
926 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
927 { .name = "socketpair",
928 .arg = { [0] = STRARRAY(family, socket_families),
929 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
930 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
931 { .name = "stat", .alias = "newstat", },
932 { .name = "statx",
933 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ },
934 [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } ,
935 [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, },
936 { .name = "swapoff",
937 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
938 { .name = "swapon",
939 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
940 { .name = "symlinkat",
941 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
942 { .name = "sync_file_range",
943 .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
944 { .name = "tgkill",
945 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
946 { .name = "tkill",
947 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
948 { .name = "umount2", .alias = "umount",
949 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
950 { .name = "uname", .alias = "newuname", },
951 { .name = "unlinkat",
952 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
953 { .name = "utimensat",
954 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
955 { .name = "wait4", .errpid = true,
956 .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
957 { .name = "waitid", .errpid = true,
958 .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
959};
960
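/*
 * syscall_fmt__find() does a bsearch(), so syscall_fmts[] must be kept sorted
 * by ->name.
 */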
961static int syscall_fmt__cmp(const void *name, const void *fmtp)
962{
963 const struct syscall_fmt *fmt = fmtp;
964 return strcmp(name, fmt->name);
965}
966
967static struct syscall_fmt *syscall_fmt__find(const char *name)
968{
969 const int nmemb = ARRAY_SIZE(syscall_fmts);
970 return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
971}
972
973static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
974{
975 int i, nmemb = ARRAY_SIZE(syscall_fmts);
976
977 for (i = 0; i < nmemb; ++i) {
978 if (syscall_fmts[i].alias && strcmp(syscall_fmts[i].alias, alias) == 0)
979 return &syscall_fmts[i];
980 }
981
982 return NULL;
983}
984
985/*
986 * is_exit: is this "exit" or "exit_group"?
987 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
988 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
989 * nonexistent: Just a hole in the syscall table, syscall id not allocated
990 */
991struct syscall {
992 struct tep_event *tp_format;
993 int nr_args;
994 int args_size;
995 struct {
996 struct bpf_program *sys_enter,
997 *sys_exit;
998 } bpf_prog;
999 bool is_exit;
1000 bool is_open;
1001 bool nonexistent;
1002 struct tep_format_field *args;
1003 const char *name;
1004 struct syscall_fmt *fmt;
1005 struct syscall_arg_fmt *arg_fmt;
1006};
1007
1008/*
1009 * Must match what is in the BPF program:
1010 *
1011 * tools/perf/examples/bpf/augmented_raw_syscalls.c
1012 */
1013struct bpf_map_syscall_entry {
1014 bool enabled;
1015 u16 string_args_len[6];
1016};
1017
/*
 * We need this 'calculated' boolean because in some cases we really don't
 * know the duration of a syscall; for instance, when we start a session some
 * threads may already be waiting for a syscall to finish, say 'poll', in
 * which case all we can do is print "( ? )" for the duration and the start
 * timestamp.
 */
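/*
 * Durations >= 1 ms print in red, >= 0.01 ms in yellow, anything shorter in
 * the normal color.
 */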
1025static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
1026{
1027 double duration = (double)t / NSEC_PER_MSEC;
1028 size_t printed = fprintf(fp, "(");
1029
1030 if (!calculated)
1031 printed += fprintf(fp, " ");
1032 else if (duration >= 1.0)
1033 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
1034 else if (duration >= 0.01)
1035 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
1036 else
1037 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
1038 return printed + fprintf(fp, "): ");
1039}
1040
1041/**
1042 * filename.ptr: The filename char pointer that will be vfs_getname'd
1043 * filename.entry_str_pos: Where to insert the string translated from
1044 * filename.ptr by the vfs_getname tracepoint/kprobe.
1045 * ret_scnprintf: syscall args may set this to a different syscall return
1046 * formatter, for instance, fcntl may return fds, file flags, etc.
1047 */
1048struct thread_trace {
1049 u64 entry_time;
1050 bool entry_pending;
1051 unsigned long nr_events;
1052 unsigned long pfmaj, pfmin;
1053 char *entry_str;
1054 double runtime_ms;
1055 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
1056 struct {
1057 unsigned long ptr;
1058 short int entry_str_pos;
1059 bool pending_open;
1060 unsigned int namelen;
1061 char *name;
1062 } filename;
1063 struct {
1064 int max;
1065 struct file *table;
1066 } files;
1067
1068 struct intlist *syscall_stats;
1069};
1070
1071static struct thread_trace *thread_trace__new(void)
1072{
1073 struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
1074
1075 if (ttrace) {
1076 ttrace->files.max = -1;
1077 ttrace->syscall_stats = intlist__new(NULL);
1078 }
1079
1080 return ttrace;
1081}
1082
1083static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
1084{
1085 struct thread_trace *ttrace;
1086
1087 if (thread == NULL)
1088 goto fail;
1089
1090 if (thread__priv(thread) == NULL)
1091 thread__set_priv(thread, thread_trace__new());
1092
1093 if (thread__priv(thread) == NULL)
1094 goto fail;
1095
1096 ttrace = thread__priv(thread);
1097 ++ttrace->nr_events;
1098
1099 return ttrace;
1100fail:
1101 color_fprintf(fp, PERF_COLOR_RED,
1102 "WARNING: not enough memory, dropping samples!\n");
1103 return NULL;
1104}
1105
1106
1107void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
1108 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
1109{
1110 struct thread_trace *ttrace = thread__priv(arg->thread);
1111
1112 ttrace->ret_scnprintf = ret_scnprintf;
1113}
1114
1115#define TRACE_PFMAJ (1 << 0)
1116#define TRACE_PFMIN (1 << 1)
1117
1118static const size_t trace__entry_str_size = 2048;
1119
1120static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
1121{
1122 if (fd < 0)
1123 return NULL;
1124
1125 if (fd > ttrace->files.max) {
1126 struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
1127
1128 if (nfiles == NULL)
1129 return NULL;
1130
1131 if (ttrace->files.max != -1) {
1132 memset(nfiles + ttrace->files.max + 1, 0,
1133 (fd - ttrace->files.max) * sizeof(struct file));
1134 } else {
1135 memset(nfiles, 0, (fd + 1) * sizeof(struct file));
1136 }
1137
1138 ttrace->files.table = nfiles;
1139 ttrace->files.max = fd;
1140 }
1141
1142 return ttrace->files.table + fd;
1143}
1144
1145struct file *thread__files_entry(struct thread *thread, int fd)
1146{
1147 return thread_trace__files_entry(thread__priv(thread), fd);
1148}
1149
1150static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
1151{
1152 struct thread_trace *ttrace = thread__priv(thread);
1153 struct file *file = thread_trace__files_entry(ttrace, fd);
1154
1155 if (file != NULL) {
1156 struct stat st;
1157 if (stat(pathname, &st) == 0)
1158 file->dev_maj = major(st.st_rdev);
1159 file->pathname = strdup(pathname);
1160 if (file->pathname)
1161 return 0;
1162 }
1163
1164 return -1;
1165}
1166
1167static int thread__read_fd_path(struct thread *thread, int fd)
1168{
1169 char linkname[PATH_MAX], pathname[PATH_MAX];
1170 struct stat st;
1171 int ret;
1172
1173 if (thread->pid_ == thread->tid) {
1174 scnprintf(linkname, sizeof(linkname),
1175 "/proc/%d/fd/%d", thread->pid_, fd);
1176 } else {
1177 scnprintf(linkname, sizeof(linkname),
1178 "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
1179 }
1180
1181 if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
1182 return -1;
1183
1184 ret = readlink(linkname, pathname, sizeof(pathname));
1185
1186 if (ret < 0 || ret > st.st_size)
1187 return -1;
1188
1189 pathname[ret] = '\0';
1190 return trace__set_fd_pathname(thread, fd, pathname);
1191}
1192
1193static const char *thread__fd_path(struct thread *thread, int fd,
1194 struct trace *trace)
1195{
1196 struct thread_trace *ttrace = thread__priv(thread);
1197
1198 if (ttrace == NULL || trace->fd_path_disabled)
1199 return NULL;
1200
1201 if (fd < 0)
1202 return NULL;
1203
1204 if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
1205 if (!trace->live)
1206 return NULL;
1207 ++trace->stats.proc_getname;
1208 if (thread__read_fd_path(thread, fd))
1209 return NULL;
1210 }
1211
1212 return ttrace->files.table[fd].pathname;
1213}
1214
1215size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
1216{
1217 int fd = arg->val;
1218 size_t printed = scnprintf(bf, size, "%d", fd);
1219 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1220
1221 if (path)
1222 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1223
1224 return printed;
1225}
1226
1227size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1228{
1229 size_t printed = scnprintf(bf, size, "%d", fd);
1230 struct thread *thread = machine__find_thread(trace->host, pid, pid);
1231
1232 if (thread) {
1233 const char *path = thread__fd_path(thread, fd, trace);
1234
1235 if (path)
1236 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1237
1238 thread__put(thread);
1239 }
1240
1241 return printed;
1242}
1243
1244static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
1245 struct syscall_arg *arg)
1246{
1247 int fd = arg->val;
1248 size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
1249 struct thread_trace *ttrace = thread__priv(arg->thread);
1250
1251 if (ttrace && fd >= 0 && fd <= ttrace->files.max)
1252 zfree(&ttrace->files.table[fd].pathname);
1253
1254 return printed;
1255}
1256
1257static void thread__set_filename_pos(struct thread *thread, const char *bf,
1258 unsigned long ptr)
1259{
1260 struct thread_trace *ttrace = thread__priv(thread);
1261
1262 ttrace->filename.ptr = ptr;
1263 ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1264}
1265
1266static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
1267{
1268 struct augmented_arg *augmented_arg = arg->augmented.args;
1269 size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
1270 /*
1271 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
1272 * we would have two strings, each prefixed by its size.
1273 */
1274 int consumed = sizeof(*augmented_arg) + augmented_arg->size;
1275
1276 arg->augmented.args = ((void *)arg->augmented.args) + consumed;
1277 arg->augmented.size -= consumed;
1278
1279 return printed;
1280}
1281
1282static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1283 struct syscall_arg *arg)
1284{
1285 unsigned long ptr = arg->val;
1286
1287 if (arg->augmented.args)
1288 return syscall_arg__scnprintf_augmented_string(arg, bf, size);
1289
1290 if (!arg->trace->vfs_getname)
		return scnprintf(bf, size, "%#lx", ptr);
1292
1293 thread__set_filename_pos(arg->thread, bf, ptr);
1294 return 0;
1295}
1296
1297static bool trace__filter_duration(struct trace *trace, double t)
1298{
1299 return t < (trace->duration_filter * NSEC_PER_MSEC);
1300}
1301
1302static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1303{
1304 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1305
1306 return fprintf(fp, "%10.3f ", ts);
1307}
1308
/*
 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
 * using ttrace->entry_time for a thread that receives a sys_exit without
 * first having received a sys_enter ("poll" issued before the tracing
 * session starts, or a sys_enter lost due to ring buffer overflow).
 */
1315static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1316{
1317 if (tstamp > 0)
1318 return __trace__fprintf_tstamp(trace, tstamp, fp);
1319
1320 return fprintf(fp, " ? ");
1321}
1322
1323static bool done = false;
1324static bool interrupted = false;
1325
1326static void sig_handler(int sig)
1327{
1328 done = true;
1329 interrupted = sig == SIGINT;
1330}
1331
1332static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
1333{
1334 size_t printed = 0;
1335
1336 if (trace->multiple_threads) {
1337 if (trace->show_comm)
1338 printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1339 printed += fprintf(fp, "%d ", thread->tid);
1340 }
1341
1342 return printed;
1343}
1344
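/*
 * Prints the strace-like prefix of an event line, something like (illustrative):
 *
 *    107.301 ( 0.004 ms): cat/12345 ...
 *
 * with each piece gated by show_tstamp, show_duration, show_comm and
 * multiple_threads.
 */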
1345static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1346 u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
1347{
1348 size_t printed = 0;
1349
1350 if (trace->show_tstamp)
1351 printed = trace__fprintf_tstamp(trace, tstamp, fp);
1352 if (trace->show_duration)
1353 printed += fprintf_duration(duration, duration_calculated, fp);
1354 return printed + trace__fprintf_comm_tid(trace, thread, fp);
1355}
1356
1357static int trace__process_event(struct trace *trace, struct machine *machine,
1358 union perf_event *event, struct perf_sample *sample)
1359{
1360 int ret = 0;
1361
1362 switch (event->header.type) {
1363 case PERF_RECORD_LOST:
1364 color_fprintf(trace->output, PERF_COLOR_RED,
1365 "LOST %" PRIu64 " events!\n", event->lost.lost);
1366 ret = machine__process_lost_event(machine, event, sample);
1367 break;
1368 default:
1369 ret = machine__process_event(machine, event, sample);
1370 break;
1371 }
1372
1373 return ret;
1374}
1375
1376static int trace__tool_process(struct perf_tool *tool,
1377 union perf_event *event,
1378 struct perf_sample *sample,
1379 struct machine *machine)
1380{
1381 struct trace *trace = container_of(tool, struct trace, tool);
1382 return trace__process_event(trace, machine, event, sample);
1383}
1384
1385static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
1386{
1387 struct machine *machine = vmachine;
1388
1389 if (machine->kptr_restrict_warned)
1390 return NULL;
1391
1392 if (symbol_conf.kptr_restrict) {
1393 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1394 "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
1395 "Kernel samples will not be resolved.\n");
1396 machine->kptr_restrict_warned = true;
1397 return NULL;
1398 }
1399
1400 return machine__resolve_kernel_addr(vmachine, addrp, modp);
1401}
1402
1403static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
1404{
1405 int err = symbol__init(NULL);
1406
1407 if (err)
1408 return err;
1409
1410 trace->host = machine__new_host();
1411 if (trace->host == NULL)
1412 return -ENOMEM;
1413
1414 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1415 if (err < 0)
1416 goto out;
1417
1418 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1419 evlist->core.threads, trace__tool_process, false,
1420 1);
1421out:
1422 if (err)
1423 symbol__exit();
1424
1425 return err;
1426}
1427
1428static void trace__symbols__exit(struct trace *trace)
1429{
1430 machine__exit(trace->host);
1431 trace->host = NULL;
1432
1433 symbol__exit();
1434}
1435
1436static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
1437{
1438 int idx;
1439
1440 if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
1441 nr_args = sc->fmt->nr_args;
1442
1443 sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
1444 if (sc->arg_fmt == NULL)
1445 return -1;
1446
1447 for (idx = 0; idx < nr_args; ++idx) {
1448 if (sc->fmt)
1449 sc->arg_fmt[idx] = sc->fmt->arg[idx];
1450 }
1451
1452 sc->nr_args = nr_args;
1453 return 0;
1454}
1455
1456static int syscall__set_arg_fmts(struct syscall *sc)
1457{
1458 struct tep_format_field *field, *last_field = NULL;
1459 int idx = 0, len;
1460
1461 for (field = sc->args; field; field = field->next, ++idx) {
1462 last_field = field;
1463
1464 if (sc->fmt && sc->fmt->arg[idx].scnprintf)
1465 continue;
1466
1467 len = strlen(field->name);
1468
1469 if (strcmp(field->type, "const char *") == 0 &&
1470 ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
1471 strstr(field->name, "path") != NULL))
1472 sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
1473 else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
1474 sc->arg_fmt[idx].scnprintf = SCA_PTR;
1475 else if (strcmp(field->type, "pid_t") == 0)
1476 sc->arg_fmt[idx].scnprintf = SCA_PID;
1477 else if (strcmp(field->type, "umode_t") == 0)
1478 sc->arg_fmt[idx].scnprintf = SCA_MODE_T;
1479 else if ((strcmp(field->type, "int") == 0 ||
1480 strcmp(field->type, "unsigned int") == 0 ||
1481 strcmp(field->type, "long") == 0) &&
1482 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
1483 /*
1484 * /sys/kernel/tracing/events/syscalls/sys_enter*
1485 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
1486 * 65 int
1487 * 23 unsigned int
1488 * 7 unsigned long
1489 */
1490 sc->arg_fmt[idx].scnprintf = SCA_FD;
1491 }
1492 }
1493
1494 if (last_field)
1495 sc->args_size = last_field->offset + last_field->size;
1496
1497 return 0;
1498}
1499
1500static int trace__read_syscall_info(struct trace *trace, int id)
1501{
1502 char tp_name[128];
1503 struct syscall *sc;
1504 const char *name = syscalltbl__name(trace->sctbl, id);
1505
1506 if (trace->syscalls.table == NULL) {
1507 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
1508 if (trace->syscalls.table == NULL)
1509 return -ENOMEM;
1510 }
1511
1512 sc = trace->syscalls.table + id;
1513 if (sc->nonexistent)
1514 return 0;
1515
1516 if (name == NULL) {
1517 sc->nonexistent = true;
1518 return 0;
1519 }
1520
1521 sc->name = name;
1522 sc->fmt = syscall_fmt__find(sc->name);
1523
1524 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
1525 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1526
1527 if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
1528 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
1529 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1530 }
1531
1532 if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
1533 return -ENOMEM;
1534
1535 if (IS_ERR(sc->tp_format))
1536 return PTR_ERR(sc->tp_format);
1537
1538 sc->args = sc->tp_format->format.fields;
	/*
	 * Check for and discard the first field, '__syscall_nr' (called just
	 * 'nr' on older kernels): it carries the syscall number, which we
	 * already have, so it is redundant here.
	 */
1544 if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
1545 sc->args = sc->args->next;
1546 --sc->nr_args;
1547 }
1548
1549 sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
1550 sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
1551
1552 return syscall__set_arg_fmts(sc);
1553}
1554
1555static int intcmp(const void *a, const void *b)
1556{
1557 const int *one = a, *another = b;
1558
1559 return *one - *another;
1560}
1561
1562static int trace__validate_ev_qualifier(struct trace *trace)
1563{
1564 int err = 0;
1565 bool printed_invalid_prefix = false;
1566 struct str_node *pos;
1567 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
1568
1569 trace->ev_qualifier_ids.entries = malloc(nr_allocated *
1570 sizeof(trace->ev_qualifier_ids.entries[0]));
1571
1572 if (trace->ev_qualifier_ids.entries == NULL) {
1573 fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
1574 trace->output);
1575 err = -EINVAL;
1576 goto out;
1577 }
1578
1579 strlist__for_each_entry(pos, trace->ev_qualifier) {
1580 const char *sc = pos->s;
1581 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
1582
1583 if (id < 0) {
1584 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
1585 if (id >= 0)
1586 goto matches;
1587
1588 if (!printed_invalid_prefix) {
1589 pr_debug("Skipping unknown syscalls: ");
1590 printed_invalid_prefix = true;
1591 } else {
1592 pr_debug(", ");
1593 }
1594
1595 pr_debug("%s", sc);
1596 continue;
1597 }
1598matches:
1599 trace->ev_qualifier_ids.entries[nr_used++] = id;
1600 if (match_next == -1)
1601 continue;
1602
1603 while (1) {
1604 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
1605 if (id < 0)
1606 break;
1607 if (nr_allocated == nr_used) {
1608 void *entries;
1609
1610 nr_allocated += 8;
1611 entries = realloc(trace->ev_qualifier_ids.entries,
1612 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
1613 if (entries == NULL) {
1614 err = -ENOMEM;
1615 fputs("\nError:\t Not enough memory for parsing\n", trace->output);
1616 goto out_free;
1617 }
1618 trace->ev_qualifier_ids.entries = entries;
1619 }
1620 trace->ev_qualifier_ids.entries[nr_used++] = id;
1621 }
1622 }
1623
1624 trace->ev_qualifier_ids.nr = nr_used;
1625 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
1626out:
1627 if (printed_invalid_prefix)
1628 pr_debug("\n");
1629 return err;
1630out_free:
1631 zfree(&trace->ev_qualifier_ids.entries);
1632 trace->ev_qualifier_ids.nr = 0;
1633 goto out;
1634}
1635
1636static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
1637{
1638 bool in_ev_qualifier;
1639
1640 if (trace->ev_qualifier_ids.nr == 0)
1641 return true;
1642
1643 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
1644 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
1645
1646 if (in_ev_qualifier)
1647 return !trace->not_ev_qualifier;
1648
1649 return trace->not_ev_qualifier;
1650}
1651
/*
 * args is to be interpreted as a series of longs but we need to handle
 * 8-byte unaligned accesses: args points to raw_data within the event
 * and raw_data is not guaranteed to be 8-byte aligned because it is
 * preceded by raw_size, which is a u32. So we need to copy args to a temp
 * variable to read it. Most notably this avoids extended load instructions
 * on unaligned addresses.
 */
1660unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
1661{
1662 unsigned long val;
1663 unsigned char *p = arg->args + sizeof(unsigned long) * idx;
1664
1665 memcpy(&val, p, sizeof(val));
1666 return val;
1667}
1668
1669static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
1670 struct syscall_arg *arg)
1671{
1672 if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
1673 return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);
1674
1675 return scnprintf(bf, size, "arg%d: ", arg->idx);
1676}
1677
/*
 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
 * as the mount 'flags' argument, where some magic flag has to be ignored; see
 * the comment in tools/perf/trace/beauty/mount_flags.c
 */
1683static unsigned long syscall__mask_val(struct syscall *sc, struct syscall_arg *arg, unsigned long val)
1684{
1685 if (sc->arg_fmt && sc->arg_fmt[arg->idx].mask_val)
1686 return sc->arg_fmt[arg->idx].mask_val(arg, val);
1687
1688 return val;
1689}
1690
1691static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
1692 struct syscall_arg *arg, unsigned long val)
1693{
1694 if (sc->arg_fmt && sc->arg_fmt[arg->idx].scnprintf) {
1695 arg->val = val;
1696 if (sc->arg_fmt[arg->idx].parm)
1697 arg->parm = sc->arg_fmt[arg->idx].parm;
1698 return sc->arg_fmt[arg->idx].scnprintf(bf, size, arg);
1699 }
1700 return scnprintf(bf, size, "%ld", val);
1701}
1702
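/*
 * Formats the syscall argument list into 'bf'. With show_arg_names set this
 * produces something like (illustrative):
 *
 *   dfd: CWD, filename: "/etc/hosts", flags: RDONLY|CLOEXEC
 *
 * Zero-valued args are suppressed unless show_zeros is set or the arg's
 * formatter marks zero as meaningful (show_zero, or a strarray with a parm).
 */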
1703static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
1704 unsigned char *args, void *augmented_args, int augmented_args_size,
1705 struct trace *trace, struct thread *thread)
1706{
1707 size_t printed = 0;
1708 unsigned long val;
1709 u8 bit = 1;
1710 struct syscall_arg arg = {
1711 .args = args,
1712 .augmented = {
1713 .size = augmented_args_size,
1714 .args = augmented_args,
1715 },
1716 .idx = 0,
1717 .mask = 0,
1718 .trace = trace,
1719 .thread = thread,
1720 .show_string_prefix = trace->show_string_prefix,
1721 };
1722 struct thread_trace *ttrace = thread__priv(thread);
1723
1724 /*
1725 * Things like fcntl will set this in its 'cmd' formatter to pick the
1726 * right formatter for the return value (an fd? file flags?), which is
1727 * not needed for syscalls that always return a given type, say an fd.
1728 */
1729 ttrace->ret_scnprintf = NULL;
1730
1731 if (sc->args != NULL) {
1732 struct tep_format_field *field;
1733
1734 for (field = sc->args; field;
1735 field = field->next, ++arg.idx, bit <<= 1) {
1736 if (arg.mask & bit)
1737 continue;
1738
1739 val = syscall_arg__val(&arg, arg.idx);
1740 /*
1741 * Some syscall args need some mask, most don't and
1742 * return val untouched.
1743 */
1744 val = syscall__mask_val(sc, &arg, val);
1745
			/*
			 * Suppress this argument if its value is zero and
			 * we don't have a string associated with it in a
			 * strarray.
			 */
1751 if (val == 0 &&
1752 !trace->show_zeros &&
1753 !(sc->arg_fmt &&
1754 (sc->arg_fmt[arg.idx].show_zero ||
1755 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
1756 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
1757 sc->arg_fmt[arg.idx].parm))
1758 continue;
1759
1760 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
1761
1762 if (trace->show_arg_names)
1763 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
1764
1765 printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
1766 }
1767 } else if (IS_ERR(sc->tp_format)) {
1768 /*
1769 * If we managed to read the tracepoint /format file, then we
1770 * may end up not having any args, like with gettid(), so only
1771 * print the raw args when we didn't manage to read it.
1772 */
1773 while (arg.idx < sc->nr_args) {
1774 if (arg.mask & bit)
1775 goto next_arg;
1776 val = syscall_arg__val(&arg, arg.idx);
1777 if (printed)
1778 printed += scnprintf(bf + printed, size - printed, ", ");
1779 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
1780 printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
1781next_arg:
1782 ++arg.idx;
1783 bit <<= 1;
1784 }
1785 }
1786
1787 return printed;
1788}
1789
1790typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
1791 union perf_event *event,
1792 struct perf_sample *sample);
1793
1794static struct syscall *trace__syscall_info(struct trace *trace,
1795 struct evsel *evsel, int id)
1796{
1797 int err = 0;
1798
1799 if (id < 0) {
1800
1801 /*
1802 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
1803 * before that, leaving at a higher verbosity level till that is
1804 * explained. Reproduced with plain ftrace with:
1805 *
1806 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
1807 * grep "NR -1 " /t/trace_pipe
1808 *
1809 * After generating some load on the machine.
1810 */
1811 if (verbose > 1) {
1812 static u64 n;
1813 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
1814 id, perf_evsel__name(evsel), ++n);
1815 }
1816 return NULL;
1817 }
1818
1819 err = -EINVAL;
1820
1821 if (id > trace->sctbl->syscalls.max_id)
1822 goto out_cant_read;
1823
1824 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
1825 (err = trace__read_syscall_info(trace, id)) != 0)
1826 goto out_cant_read;
1827
1828 if (trace->syscalls.table[id].name == NULL) {
1829 if (trace->syscalls.table[id].nonexistent)
1830 return NULL;
1831 goto out_cant_read;
1832 }
1833
1834 return &trace->syscalls.table[id];
1835
1836out_cant_read:
1837 if (verbose > 0) {
1838 char sbuf[STRERR_BUFSIZE];
1839 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
1840 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
1841 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
1842 fputs(" information\n", trace->output);
1843 }
1844 return NULL;
1845}
1846
1847static void thread__update_stats(struct thread_trace *ttrace,
1848 int id, struct perf_sample *sample)
1849{
1850 struct int_node *inode;
1851 struct stats *stats;
1852 u64 duration = 0;
1853
1854 inode = intlist__findnew(ttrace->syscall_stats, id);
1855 if (inode == NULL)
1856 return;
1857
1858 stats = inode->priv;
1859 if (stats == NULL) {
1860 stats = malloc(sizeof(struct stats));
1861 if (stats == NULL)
1862 return;
1863 init_stats(stats);
1864 inode->priv = stats;
1865 }
1866
1867 if (ttrace->entry_time && sample->time > ttrace->entry_time)
1868 duration = sample->time - ttrace->entry_time;
1869
1870 update_stats(stats, duration);
1871}
1872
1873static int trace__printf_interrupted_entry(struct trace *trace)
1874{
1875 struct thread_trace *ttrace;
1876 size_t printed;
1877 int len;
1878
1879 if (trace->failure_only || trace->current == NULL)
1880 return 0;
1881
1882 ttrace = thread__priv(trace->current);
1883
1884 if (!ttrace->entry_pending)
1885 return 0;
1886
1887 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
1888 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
1889
1890 if (len < trace->args_alignment - 4)
1891 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
1892
1893 printed += fprintf(trace->output, " ...\n");
1894
1895 ttrace->entry_pending = false;
1896 ++trace->nr_events_printed;
1897
1898 return printed;
1899}
1900
1901static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
1902 struct perf_sample *sample, struct thread *thread)
1903{
1904 int printed = 0;
1905
1906 if (trace->print_sample) {
1907 double ts = (double)sample->time / NSEC_PER_MSEC;
1908
1909 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
1910 perf_evsel__name(evsel), ts,
1911 thread__comm_str(thread),
1912 sample->pid, sample->tid, sample->cpu);
1913 }
1914
1915 return printed;
1916}
1917
1918static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
1919{
1920 void *augmented_args = NULL;
	/*
	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
	 * and there we get all 6 syscall args plus the tracepoint common fields
	 * that get calculated at the start and the syscall_nr (another long).
	 * So we check if that is the case and, if so, don't look after
	 * sc->args_size but always after the full raw_syscalls:sys_enter payload,
	 * which is fixed.
	 *
	 * We'll revisit this later to pass s->args_size to the BPF augmenter
	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
	 * copies only what we need for each syscall, like what happens when we
	 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
	 * traffic to just what is needed for each syscall.
	 */
1935 int args_size = raw_augmented_args_size ?: sc->args_size;
1936
1937 *augmented_args_size = sample->raw_size - args_size;
1938 if (*augmented_args_size > 0)
1939 augmented_args = sample->raw_data + args_size;
1940
1941 return augmented_args;
1942}
1943
1944static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
1945 union perf_event *event __maybe_unused,
1946 struct perf_sample *sample)
1947{
1948 char *msg;
1949 void *args;
1950 int printed = 0;
1951 struct thread *thread;
1952 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
1953 int augmented_args_size = 0;
1954 void *augmented_args = NULL;
1955 struct syscall *sc = trace__syscall_info(trace, evsel, id);
1956 struct thread_trace *ttrace;
1957
1958 if (sc == NULL)
1959 return -1;
1960
1961 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1962 ttrace = thread__trace(thread, trace->output);
1963 if (ttrace == NULL)
1964 goto out_put;
1965
1966 trace__fprintf_sample(trace, evsel, sample, thread);
1967
1968 args = perf_evsel__sc_tp_ptr(evsel, args, sample);
1969
1970 if (ttrace->entry_str == NULL) {
1971 ttrace->entry_str = malloc(trace__entry_str_size);
1972 if (!ttrace->entry_str)
1973 goto out_put;
1974 }
1975
1976 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
1977 trace__printf_interrupted_entry(trace);
	/*
	 * If this is raw_syscalls.sys_enter, then it always comes with the 6
	 * possible arguments, even if the syscall being handled, say "openat",
	 * uses only 4. That breaks the syscall__augmented_args() check for
	 * augmented args, as we calculate syscall->args_size using each
	 * syscalls:sys_enter_NAME tracefs format file. So, when handling, say,
	 * the openat syscall, we end up getting 6 args for the
	 * raw_syscalls:sys_enter event when we expected just 4 and mistakenly
	 * take the extra 2 u64 args for the augmented filename. Just check here
	 * and avoid using augmented syscalls when the evsel is the raw_syscalls
	 * one.
	 */
1988 if (evsel != trace->syscalls.events.sys_enter)
1989 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
1990 ttrace->entry_time = sample->time;
1991 msg = ttrace->entry_str;
1992 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
1993
1994 printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
1995 args, augmented_args, augmented_args_size, trace, thread);
1996
1997 if (sc->is_exit) {
1998 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
1999 int alignment = 0;
2000
2001 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2002 printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2003 if (trace->args_alignment > printed)
2004 alignment = trace->args_alignment - printed;
2005 fprintf(trace->output, "%*s= ?\n", alignment, " ");
2006 }
2007 } else {
2008 ttrace->entry_pending = true;
2009 /* See trace__vfs_getname & trace__sys_exit */
2010 ttrace->filename.pending_open = false;
2011 }
2012
2013 if (trace->current != thread) {
2014 thread__put(trace->current);
2015 trace->current = thread__get(thread);
2016 }
2017 err = 0;
2018out_put:
2019 thread__put(thread);
2020 return err;
2021}
2022
2023static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
2024 struct perf_sample *sample)
2025{
2026 struct thread_trace *ttrace;
2027 struct thread *thread;
2028 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2029 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2030 char msg[1024];
2031 void *args, *augmented_args = NULL;
2032 int augmented_args_size;
2033
2034 if (sc == NULL)
2035 return -1;
2036
2037 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2038 ttrace = thread__trace(thread, trace->output);
2039 /*
2040	 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
2041	 * and the rest of the beautifiers access it via struct syscall_arg.
2042 */
2043 if (ttrace == NULL)
2044 goto out_put;
2045
2046 args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2047 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2048 syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
2049 fprintf(trace->output, "%s", msg);
2050 err = 0;
2051out_put:
2052 thread__put(thread);
2053 return err;
2054}
2055
2056static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
2057 struct perf_sample *sample,
2058 struct callchain_cursor *cursor)
2059{
2060 struct addr_location al;
2061 int max_stack = evsel->core.attr.sample_max_stack ?
2062 evsel->core.attr.sample_max_stack :
2063 trace->max_stack;
2064 int err;
2065
2066 if (machine__resolve(trace->host, &al, sample) < 0)
2067 return -1;
2068
2069 err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
2070 addr_location__put(&al);
2071 return err;
2072}
2073
2074static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
2075{
2076 /* TODO: user-configurable print_opts */
2077 const unsigned int print_opts = EVSEL__PRINT_SYM |
2078 EVSEL__PRINT_DSO |
2079 EVSEL__PRINT_UNKNOWN_AS_ADDR;
2080
2081 return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
2082}
2083
2084static const char *errno_to_name(struct evsel *evsel, int err)
2085{
2086 struct perf_env *env = perf_evsel__env(evsel);
2087 const char *arch_name = perf_env__arch(env);
2088
2089 return arch_syscalls__strerrno(arch_name, err);
2090}
2091
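/*
 * Handles raw_syscalls:sys_exit: pairs the exit with the pending entry for
 * this thread, computes the syscall duration, applies the duration and
 * failure-only filters and then prints the return value, using the
 * per-syscall ret_scnprintf/hexret/errpid handlers or the errno name plus
 * strerror() message for failures.
 */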
2092static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
2093 union perf_event *event __maybe_unused,
2094 struct perf_sample *sample)
2095{
2096 long ret;
2097 u64 duration = 0;
2098 bool duration_calculated = false;
2099 struct thread *thread;
2100 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
2101 int alignment = trace->args_alignment;
2102 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2103 struct thread_trace *ttrace;
2104
2105 if (sc == NULL)
2106 return -1;
2107
2108 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2109 ttrace = thread__trace(thread, trace->output);
2110 if (ttrace == NULL)
2111 goto out_put;
2112
2113 trace__fprintf_sample(trace, evsel, sample, thread);
2114
2115 if (trace->summary)
2116 thread__update_stats(ttrace, id, sample);
2117
2118 ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
2119
2120 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2121 trace__set_fd_pathname(thread, ret, ttrace->filename.name);
2122 ttrace->filename.pending_open = false;
2123 ++trace->stats.vfs_getname;
2124 }
2125
2126 if (ttrace->entry_time) {
2127 duration = sample->time - ttrace->entry_time;
2128 if (trace__filter_duration(trace, duration))
2129 goto out;
2130 duration_calculated = true;
2131 } else if (trace->duration_filter)
2132 goto out;
2133
2134 if (sample->callchain) {
2135 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2136 if (callchain_ret == 0) {
2137 if (callchain_cursor.nr < trace->min_stack)
2138 goto out;
2139 callchain_ret = 1;
2140 }
2141 }
2142
2143 if (trace->summary_only || (ret >= 0 && trace->failure_only))
2144 goto out;
2145
2146 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
2147
2148 if (ttrace->entry_pending) {
2149 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2150 } else {
2151 printed += fprintf(trace->output, " ... [");
2152 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2153 printed += 9;
2154 printed += fprintf(trace->output, "]: %s()", sc->name);
2155 }
2156
2157 printed++; /* the closing ')' */
2158
2159 if (alignment > printed)
2160 alignment -= printed;
2161 else
2162 alignment = 0;
2163
2164 fprintf(trace->output, ")%*s= ", alignment, " ");
2165
2166 if (sc->fmt == NULL) {
2167 if (ret < 0)
2168 goto errno_print;
2169signed_print:
2170 fprintf(trace->output, "%ld", ret);
2171 } else if (ret < 0) {
2172errno_print: {
2173 char bf[STRERR_BUFSIZE];
2174 const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
2175 *e = errno_to_name(evsel, -ret);
2176
2177 fprintf(trace->output, "-1 %s (%s)", e, emsg);
2178 }
2179 } else if (ret == 0 && sc->fmt->timeout)
2180 fprintf(trace->output, "0 (Timeout)");
2181 else if (ttrace->ret_scnprintf) {
2182 char bf[1024];
2183 struct syscall_arg arg = {
2184 .val = ret,
2185 .thread = thread,
2186 .trace = trace,
2187 };
2188 ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
2189 ttrace->ret_scnprintf = NULL;
2190 fprintf(trace->output, "%s", bf);
2191 } else if (sc->fmt->hexret)
2192 fprintf(trace->output, "%#lx", ret);
2193 else if (sc->fmt->errpid) {
2194 struct thread *child = machine__find_thread(trace->host, ret, ret);
2195
2196 if (child != NULL) {
2197 fprintf(trace->output, "%ld", ret);
2198 if (child->comm_set)
2199 fprintf(trace->output, " (%s)", thread__comm_str(child));
2200 thread__put(child);
2201 }
2202 } else
2203 goto signed_print;
2204
2205 fputc('\n', trace->output);
2206
2207 /*
2208	 * For the sake of --max-events we only count as an 'event' a non-filtered
2209	 * sys_enter + sys_exit pair, as well as other tracepoint events.
2210 */
2211 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2212 interrupted = true;
2213
2214 if (callchain_ret > 0)
2215 trace__fprintf_callchain(trace, sample);
2216 else if (callchain_ret < 0)
2217 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2218out:
2219 ttrace->entry_pending = false;
2220 err = 0;
2221out_put:
2222 thread__put(thread);
2223 return err;
2224}
2225
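/*
 * probe:vfs_getname handler: saves the pathname being resolved so that the
 * matching sys_exit can associate it with the returned fd and, if a syscall
 * entry is still pending, splices the filename into ttrace->entry_str at the
 * position recorded when the pathname pointer argument was formatted.
 */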
2226static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
2227 union perf_event *event __maybe_unused,
2228 struct perf_sample *sample)
2229{
2230 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2231 struct thread_trace *ttrace;
2232 size_t filename_len, entry_str_len, to_move;
2233 ssize_t remaining_space;
2234 char *pos;
2235 const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");
2236
2237 if (!thread)
2238 goto out;
2239
2240 ttrace = thread__priv(thread);
2241 if (!ttrace)
2242 goto out_put;
2243
2244 filename_len = strlen(filename);
2245 if (filename_len == 0)
2246 goto out_put;
2247
2248 if (ttrace->filename.namelen < filename_len) {
2249 char *f = realloc(ttrace->filename.name, filename_len + 1);
2250
2251 if (f == NULL)
2252 goto out_put;
2253
2254 ttrace->filename.namelen = filename_len;
2255 ttrace->filename.name = f;
2256 }
2257
2258 strcpy(ttrace->filename.name, filename);
2259 ttrace->filename.pending_open = true;
2260
2261 if (!ttrace->filename.ptr)
2262 goto out_put;
2263
2264 entry_str_len = strlen(ttrace->entry_str);
2265 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
2266 if (remaining_space <= 0)
2267 goto out_put;
2268
2269 if (filename_len > (size_t)remaining_space) {
2270 filename += filename_len - remaining_space;
2271 filename_len = remaining_space;
2272 }
2273
2274 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
2275 pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
2276 memmove(pos + filename_len, pos, to_move);
2277 memcpy(pos, filename, filename_len);
2278
2279 ttrace->filename.ptr = 0;
2280 ttrace->filename.entry_str_pos = 0;
2281out_put:
2282 thread__put(thread);
2283out:
2284 return 0;
2285}
2286
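/*
 * sched:sched_stat_runtime handler: accumulates per-thread and global
 * runtime so that the summary can show how much CPU time each thread used.
 */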
2287static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
2288 union perf_event *event __maybe_unused,
2289 struct perf_sample *sample)
2290{
2291 u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
2292 double runtime_ms = (double)runtime / NSEC_PER_MSEC;
2293 struct thread *thread = machine__findnew_thread(trace->host,
2294 sample->pid,
2295 sample->tid);
2296 struct thread_trace *ttrace = thread__trace(thread, trace->output);
2297
2298 if (ttrace == NULL)
2299 goto out_dump;
2300
2301 ttrace->runtime_ms += runtime_ms;
2302 trace->runtime_ms += runtime_ms;
2303out_put:
2304 thread__put(thread);
2305 return 0;
2306
2307out_dump:
2308	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 "\n",
2309 evsel->name,
2310 perf_evsel__strval(evsel, sample, "comm"),
2311 (pid_t)perf_evsel__intval(evsel, sample, "pid"),
2312 runtime,
2313 perf_evsel__intval(evsel, sample, "vruntime"));
2314 goto out_put;
2315}
2316
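/* Print bpf-output event payload bytes as printable chars, '.' otherwise. */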
2317static int bpf_output__printer(enum binary_printer_ops op,
2318 unsigned int val, void *extra __maybe_unused, FILE *fp)
2319{
2320 unsigned char ch = (unsigned char)val;
2321
2322 switch (op) {
2323 case BINARY_PRINT_CHAR_DATA:
2324 return fprintf(fp, "%c", isprint(ch) ? ch : '.');
2325 case BINARY_PRINT_DATA_BEGIN:
2326 case BINARY_PRINT_LINE_BEGIN:
2327 case BINARY_PRINT_ADDR:
2328 case BINARY_PRINT_NUM_DATA:
2329 case BINARY_PRINT_NUM_PAD:
2330 case BINARY_PRINT_SEP:
2331 case BINARY_PRINT_CHAR_PAD:
2332 case BINARY_PRINT_LINE_END:
2333 case BINARY_PRINT_DATA_END:
2334 default:
2335 break;
2336 }
2337
2338 return 0;
2339}
2340
2341static void bpf_output__fprintf(struct trace *trace,
2342 struct perf_sample *sample)
2343{
2344 binary__fprintf(sample->raw_data, sample->raw_size, 8,
2345 bpf_output__printer, NULL, trace->output);
2346 ++trace->nr_events_printed;
2347}
2348
2349static int trace__event_handler(struct trace *trace, struct evsel *evsel,
2350 union perf_event *event __maybe_unused,
2351 struct perf_sample *sample)
2352{
2353 struct thread *thread;
2354 int callchain_ret = 0;
2355 /*
2356 * Check if we called perf_evsel__disable(evsel) due to, for instance,
2357 * this event's max_events having been hit and this is an entry coming
2358 * from the ring buffer that we should discard, since the max events
2359 * have already been considered/printed.
2360 */
2361 if (evsel->disabled)
2362 return 0;
2363
2364 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2365
2366 if (sample->callchain) {
2367 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2368 if (callchain_ret == 0) {
2369 if (callchain_cursor.nr < trace->min_stack)
2370 goto out;
2371 callchain_ret = 1;
2372 }
2373 }
2374
2375 trace__printf_interrupted_entry(trace);
2376 trace__fprintf_tstamp(trace, sample->time, trace->output);
2377
2378 if (trace->trace_syscalls && trace->show_duration)
2379 fprintf(trace->output, "( ): ");
2380
2381 if (thread)
2382 trace__fprintf_comm_tid(trace, thread, trace->output);
2383
2384 if (evsel == trace->syscalls.events.augmented) {
2385 int id = perf_evsel__sc_tp_uint(evsel, id, sample);
2386 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2387
2388 if (sc) {
2389 fprintf(trace->output, "%s(", sc->name);
2390 trace__fprintf_sys_enter(trace, evsel, sample);
2391 fputc(')', trace->output);
2392 goto newline;
2393 }
2394
2395 /*
2396 * XXX: Not having the associated syscall info or not finding/adding
2397 * the thread should never happen, but if it does...
2398 * fall thru and print it as a bpf_output event.
2399 */
2400 }
2401
2402 fprintf(trace->output, "%s:", evsel->name);
2403
2404 if (perf_evsel__is_bpf_output(evsel)) {
2405 bpf_output__fprintf(trace, sample);
2406 } else if (evsel->tp_format) {
2407 if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
2408 trace__fprintf_sys_enter(trace, evsel, sample)) {
2409 event_format__fprintf(evsel->tp_format, sample->cpu,
2410 sample->raw_data, sample->raw_size,
2411 trace->output);
2412 ++trace->nr_events_printed;
2413
2414 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
2415 evsel__disable(evsel);
2416 evsel__close(evsel);
2417 }
2418 }
2419 }
2420
2421newline:
2422 fprintf(trace->output, "\n");
2423
2424 if (callchain_ret > 0)
2425 trace__fprintf_callchain(trace, sample);
2426 else if (callchain_ret < 0)
2427 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2428out:
2429 thread__put(thread);
2430 return 0;
2431}
2432
2433static void print_location(FILE *f, struct perf_sample *sample,
2434 struct addr_location *al,
2435 bool print_dso, bool print_sym)
2436{
2437
2438 if ((verbose > 0 || print_dso) && al->map)
2439 fprintf(f, "%s@", al->map->dso->long_name);
2440
2441 if ((verbose > 0 || print_sym) && al->sym)
2442 fprintf(f, "%s+0x%" PRIx64, al->sym->name,
2443 al->addr - al->sym->start);
2444 else if (al->map)
2445 fprintf(f, "0x%" PRIx64, al->addr);
2446 else
2447 fprintf(f, "0x%" PRIx64, sample->addr);
2448}
2449
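/*
 * Software page fault event handler: bumps the thread's major/minor fault
 * counters and, unless only the summary was asked for, prints where the
 * fault happened (ip) and which address was being accessed, resolving both
 * to symbols when possible.
 */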
2450static int trace__pgfault(struct trace *trace,
2451 struct evsel *evsel,
2452 union perf_event *event __maybe_unused,
2453 struct perf_sample *sample)
2454{
2455 struct thread *thread;
2456 struct addr_location al;
2457 char map_type = 'd';
2458 struct thread_trace *ttrace;
2459 int err = -1;
2460 int callchain_ret = 0;
2461
2462 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2463
2464 if (sample->callchain) {
2465 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2466 if (callchain_ret == 0) {
2467 if (callchain_cursor.nr < trace->min_stack)
2468 goto out_put;
2469 callchain_ret = 1;
2470 }
2471 }
2472
2473 ttrace = thread__trace(thread, trace->output);
2474 if (ttrace == NULL)
2475 goto out_put;
2476
2477 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
2478 ttrace->pfmaj++;
2479 else
2480 ttrace->pfmin++;
2481
2482 if (trace->summary_only)
2483 goto out;
2484
2485 thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
2486
2487 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2488
2489 fprintf(trace->output, "%sfault [",
2490 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
2491 "maj" : "min");
2492
2493 print_location(trace->output, sample, &al, false, true);
2494
2495 fprintf(trace->output, "] => ");
2496
2497 thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2498
2499 if (!al.map) {
2500 thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2501
2502 if (al.map)
2503 map_type = 'x';
2504 else
2505 map_type = '?';
2506 }
2507
2508 print_location(trace->output, sample, &al, true, false);
2509
2510 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2511
2512 if (callchain_ret > 0)
2513 trace__fprintf_callchain(trace, sample);
2514 else if (callchain_ret < 0)
2515 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2516
2517 ++trace->nr_events_printed;
2518out:
2519 err = 0;
2520out_put:
2521 thread__put(thread);
2522 return err;
2523}
2524
2525static void trace__set_base_time(struct trace *trace,
2526 struct evsel *evsel,
2527 struct perf_sample *sample)
2528{
2529 /*
2530	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
2531	 * and don't use sample->time unconditionally; we may end up having
2532	 * some other event in the future without PERF_SAMPLE_TIME for a good
2533	 * reason, i.e. we may not be interested in its timestamps, just in
2534	 * the fact that it took place, picking up some piece of information
2535	 * when it appears in our event stream (vfs_getname comes to mind).
2536 */
2537 if (trace->base_time == 0 && !trace->full_time &&
2538 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
2539 trace->base_time = sample->time;
2540}
2541
2542static int trace__process_sample(struct perf_tool *tool,
2543 union perf_event *event,
2544 struct perf_sample *sample,
2545 struct evsel *evsel,
2546 struct machine *machine __maybe_unused)
2547{
2548 struct trace *trace = container_of(tool, struct trace, tool);
2549 struct thread *thread;
2550 int err = 0;
2551
2552 tracepoint_handler handler = evsel->handler;
2553
2554 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2555 if (thread && thread__is_filtered(thread))
2556 goto out;
2557
2558 trace__set_base_time(trace, evsel, sample);
2559
2560 if (handler) {
2561 ++trace->nr_events;
2562 handler(trace, evsel, event, sample);
2563 }
2564out:
2565 thread__put(thread);
2566 return err;
2567}
2568
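/*
 * 'perf trace record': builds a 'perf record' command line with the
 * raw_syscalls (or the older syscalls) tracepoints and the requested page
 * fault events, then hands it over to cmd_record().
 */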
2569static int trace__record(struct trace *trace, int argc, const char **argv)
2570{
2571 unsigned int rec_argc, i, j;
2572 const char **rec_argv;
2573 const char * const record_args[] = {
2574 "record",
2575 "-R",
2576 "-m", "1024",
2577 "-c", "1",
2578 };
2579
2580 const char * const sc_args[] = { "-e", };
2581 unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
2582 const char * const majpf_args[] = { "-e", "major-faults" };
2583 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
2584 const char * const minpf_args[] = { "-e", "minor-faults" };
2585 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
2586
2587 /* +1 is for the event string below */
2588 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
2589 majpf_args_nr + minpf_args_nr + argc;
2590 rec_argv = calloc(rec_argc + 1, sizeof(char *));
2591
2592 if (rec_argv == NULL)
2593 return -ENOMEM;
2594
2595 j = 0;
2596 for (i = 0; i < ARRAY_SIZE(record_args); i++)
2597 rec_argv[j++] = record_args[i];
2598
2599 if (trace->trace_syscalls) {
2600 for (i = 0; i < sc_args_nr; i++)
2601 rec_argv[j++] = sc_args[i];
2602
2603 /* event string may be different for older kernels - e.g., RHEL6 */
2604 if (is_valid_tracepoint("raw_syscalls:sys_enter"))
2605 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
2606 else if (is_valid_tracepoint("syscalls:sys_enter"))
2607 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
2608 else {
2609 pr_err("Neither raw_syscalls nor syscalls events exist.\n");
2610 free(rec_argv);
2611 return -1;
2612 }
2613 }
2614
2615 if (trace->trace_pgfaults & TRACE_PFMAJ)
2616 for (i = 0; i < majpf_args_nr; i++)
2617 rec_argv[j++] = majpf_args[i];
2618
2619 if (trace->trace_pgfaults & TRACE_PFMIN)
2620 for (i = 0; i < minpf_args_nr; i++)
2621 rec_argv[j++] = minpf_args[i];
2622
2623 for (i = 0; i < (unsigned int)argc; i++)
2624 rec_argv[j++] = argv[i];
2625
2626 return cmd_record(j, rec_argv);
2627}
2628
2629static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
2630
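/*
 * Try to add the probe:vfs_getname probe event(s) to the evlist; returns
 * true if at least one variant with a "pathname" field was found, wiring it
 * up to trace__vfs_getname and dropping the variants without that field.
 */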
2631static bool evlist__add_vfs_getname(struct evlist *evlist)
2632{
2633 bool found = false;
2634 struct evsel *evsel, *tmp;
2635 struct parse_events_error err = { .idx = 0, };
2636 int ret = parse_events(evlist, "probe:vfs_getname*", &err);
2637
2638 if (ret)
2639 return false;
2640
2641 evlist__for_each_entry_safe(evlist, evsel, tmp) {
2642 if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
2643 continue;
2644
2645 if (perf_evsel__field(evsel, "pathname")) {
2646 evsel->handler = trace__vfs_getname;
2647 found = true;
2648 continue;
2649 }
2650
2651 list_del_init(&evsel->core.node);
2652 evsel->evlist = NULL;
2653 evsel__delete(evsel);
2654 }
2655
2656 return found;
2657}
2658
2659static struct evsel *perf_evsel__new_pgfault(u64 config)
2660{
2661 struct evsel *evsel;
2662 struct perf_event_attr attr = {
2663 .type = PERF_TYPE_SOFTWARE,
2664 .mmap_data = 1,
2665 };
2666
2667 attr.config = config;
2668 attr.sample_period = 1;
2669
2670 event_attr_init(&attr);
2671
2672 evsel = evsel__new(&attr);
2673 if (evsel)
2674 evsel->handler = trace__pgfault;
2675
2676 return evsel;
2677}
2678
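/*
 * Dispatches one ring buffer event: non-sample records go to
 * trace__process_event(), samples are routed to the handler of the evsel
 * they belong to, skipping evsels the evswitch machinery says to discard
 * and reporting tracepoint samples that arrive without a raw payload.
 */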
2679static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
2680{
2681 const u32 type = event->header.type;
2682 struct evsel *evsel;
2683
2684 if (type != PERF_RECORD_SAMPLE) {
2685 trace__process_event(trace, trace->host, event, sample);
2686 return;
2687 }
2688
2689 evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
2690 if (evsel == NULL) {
2691 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
2692 return;
2693 }
2694
2695 if (evswitch__discard(&trace->evswitch, evsel))
2696 return;
2697
2698 trace__set_base_time(trace, evsel, sample);
2699
2700 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
2701 sample->raw_data == NULL) {
2702 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
2703 perf_evsel__name(evsel), sample->tid,
2704 sample->cpu, sample->raw_size);
2705 } else {
2706 tracepoint_handler handler = evsel->handler;
2707 handler(trace, evsel, event, sample);
2708 }
2709
2710 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
2711 interrupted = true;
2712}
2713
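/*
 * Sets up the raw_syscalls:sys_enter/sys_exit tracepoint evsels, their
 * id/args/ret field accessors and callchain configuration, and adds them
 * to the evlist.
 */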
2714static int trace__add_syscall_newtp(struct trace *trace)
2715{
2716 int ret = -1;
2717 struct evlist *evlist = trace->evlist;
2718 struct evsel *sys_enter, *sys_exit;
2719
2720 sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
2721 if (sys_enter == NULL)
2722 goto out;
2723
2724 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
2725 goto out_delete_sys_enter;
2726
2727 sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
2728 if (sys_exit == NULL)
2729 goto out_delete_sys_enter;
2730
2731 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
2732 goto out_delete_sys_exit;
2733
2734 perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
2735 perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
2736
2737 evlist__add(evlist, sys_enter);
2738 evlist__add(evlist, sys_exit);
2739
2740 if (callchain_param.enabled && !trace->kernel_syscallchains) {
2741 /*
2742 * We're interested only in the user space callchain
2743 * leading to the syscall, allow overriding that for
2744 * debugging reasons using --kernel_syscall_callchains
2745 */
2746 sys_exit->core.attr.exclude_callchain_kernel = 1;
2747 }
2748
2749 trace->syscalls.events.sys_enter = sys_enter;
2750 trace->syscalls.events.sys_exit = sys_exit;
2751
2752 ret = 0;
2753out:
2754 return ret;
2755
2756out_delete_sys_exit:
2757 evsel__delete_priv(sys_exit);
2758out_delete_sys_enter:
2759 evsel__delete_priv(sys_enter);
2760 goto out;
2761}
2762
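/*
 * Builds a tracepoint filter expression on the syscall id from the event
 * qualifier list (e.g. something like "id == 0 || id == 1", negated when
 * the qualifier starts with '!') and appends it to both the sys_enter and
 * sys_exit evsels.
 */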
2763static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
2764{
2765 int err = -1;
2766 struct evsel *sys_exit;
2767 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
2768 trace->ev_qualifier_ids.nr,
2769 trace->ev_qualifier_ids.entries);
2770
2771 if (filter == NULL)
2772 goto out_enomem;
2773
2774 if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
2775 filter)) {
2776 sys_exit = trace->syscalls.events.sys_exit;
2777 err = perf_evsel__append_tp_filter(sys_exit, filter);
2778 }
2779
2780 free(filter);
2781out:
2782 return err;
2783out_enomem:
2784 errno = ENOMEM;
2785 goto out;
2786}
2787
2788#ifdef HAVE_LIBBPF_SUPPORT
2789static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
2790{
2791 if (trace->bpf_obj == NULL)
2792 return NULL;
2793
2794 return bpf_object__find_program_by_title(trace->bpf_obj, name);
2795}
2796
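/*
 * Picks the augmenter BPF program for a syscall: the one named in its
 * syscall_fmt (bpf_prog_name), or a "!syscalls:sys_{enter,exit}_NAME" (or
 * alias) program found in the BPF object, falling back to the generic
 * unaugmented program.
 */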
2797static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
2798 const char *prog_name, const char *type)
2799{
2800 struct bpf_program *prog;
2801
2802 if (prog_name == NULL) {
2803 char default_prog_name[256];
2804 scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
2805 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
2806 if (prog != NULL)
2807 goto out_found;
2808 if (sc->fmt && sc->fmt->alias) {
2809 scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
2810 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
2811 if (prog != NULL)
2812 goto out_found;
2813 }
2814 goto out_unaugmented;
2815 }
2816
2817 prog = trace__find_bpf_program_by_title(trace, prog_name);
2818
2819 if (prog != NULL) {
2820out_found:
2821 return prog;
2822 }
2823
2824 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
2825 prog_name, type, sc->name);
2826out_unaugmented:
2827 return trace->syscalls.unaugmented_prog;
2828}
2829
2830static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
2831{
2832 struct syscall *sc = trace__syscall_info(trace, NULL, id);
2833
2834 if (sc == NULL)
2835 return;
2836
2837 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
2838 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
2839}
2840
2841static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
2842{
2843 struct syscall *sc = trace__syscall_info(trace, NULL, id);
2844 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
2845}
2846
2847static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
2848{
2849 struct syscall *sc = trace__syscall_info(trace, NULL, id);
2850 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
2851}
2852
2853static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
2854{
2855 struct syscall *sc = trace__syscall_info(trace, NULL, id);
2856 int arg = 0;
2857
2858 if (sc == NULL)
2859 goto out;
2860
2861 for (; arg < sc->nr_args; ++arg) {
2862 entry->string_args_len[arg] = 0;
2863 if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
2864 /* Should be set like strace -s strsize */
2865 entry->string_args_len[arg] = PATH_MAX;
2866 }
2867 }
2868out:
2869 for (; arg < 6; ++arg)
2870 entry->string_args_len[arg] = 0;
2871}
2872static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
2873{
2874 int fd = bpf_map__fd(trace->syscalls.map);
2875 struct bpf_map_syscall_entry value = {
2876 .enabled = !trace->not_ev_qualifier,
2877 };
2878 int err = 0;
2879 size_t i;
2880
2881 for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
2882 int key = trace->ev_qualifier_ids.entries[i];
2883
2884 if (value.enabled) {
2885 trace__init_bpf_map_syscall_args(trace, key, &value);
2886 trace__init_syscall_bpf_progs(trace, key);
2887 }
2888
2889 err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
2890 if (err)
2891 break;
2892 }
2893
2894 return err;
2895}
2896
2897static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
2898{
2899 int fd = bpf_map__fd(trace->syscalls.map);
2900 struct bpf_map_syscall_entry value = {
2901 .enabled = enabled,
2902 };
2903 int err = 0, key;
2904
2905 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
2906 if (enabled)
2907 trace__init_bpf_map_syscall_args(trace, key, &value);
2908
2909 err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
2910 if (err)
2911 break;
2912 }
2913
2914 return err;
2915}
2916
2917static int trace__init_syscalls_bpf_map(struct trace *trace)
2918{
2919 bool enabled = true;
2920
2921 if (trace->ev_qualifier_ids.nr)
2922 enabled = trace->not_ev_qualifier;
2923
2924 return __trace__init_syscalls_bpf_map(trace, enabled);
2925}
2926
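/*
 * Tries to find another syscall whose sys_enter augmenter copies the same
 * pointer args (same types, no extra pointers) as 'sc', so that its BPF
 * program can be reused for 'sc' as well.
 */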
2927static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
2928{
2929 struct tep_format_field *field, *candidate_field;
2930 int id;
2931
2932 /*
2933 * We're only interested in syscalls that have a pointer:
2934 */
2935 for (field = sc->args; field; field = field->next) {
2936 if (field->flags & TEP_FIELD_IS_POINTER)
2937 goto try_to_find_pair;
2938 }
2939
2940 return NULL;
2941
2942try_to_find_pair:
2943 for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
2944 struct syscall *pair = trace__syscall_info(trace, NULL, id);
2945 struct bpf_program *pair_prog;
2946 bool is_candidate = false;
2947
2948 if (pair == NULL || pair == sc ||
2949 pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
2950 continue;
2951
2952 for (field = sc->args, candidate_field = pair->args;
2953 field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
2954 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
2955 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;
2956
2957 if (is_pointer) {
2958 if (!candidate_is_pointer) {
2959	// The candidate just doesn't copy our pointer arg; it might copy other pointers we want.
2960 continue;
2961 }
2962 } else {
2963 if (candidate_is_pointer) {
2964 // The candidate might copy a pointer we don't have, skip it.
2965 goto next_candidate;
2966 }
2967 continue;
2968 }
2969
2970 if (strcmp(field->type, candidate_field->type))
2971 goto next_candidate;
2972
2973 is_candidate = true;
2974 }
2975
2976 if (!is_candidate)
2977 goto next_candidate;
2978
2979 /*
2980	 * Check if the tentative pair syscall augmenter has more pointers; if it does,
2981	 * it may be collecting those and then we can't use it, as it would collect
2982 * more than what is common to the two syscalls.
2983 */
2984 if (candidate_field) {
2985 for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
2986 if (candidate_field->flags & TEP_FIELD_IS_POINTER)
2987 goto next_candidate;
2988 }
2989
2990 pair_prog = pair->bpf_prog.sys_enter;
2991 /*
2992	 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
2993	 * have been looked up yet, so look it up here; if that returns the
2994	 * unaugmented one, ignore it, otherwise we'll reuse the BPF program
2995	 * found for a filtered syscall on this non-filtered one.
2996 *
2997 * For instance, we have "!syscalls:sys_enter_renameat" and that is
2998 * useful for "renameat2".
2999 */
3000 if (pair_prog == NULL) {
3001 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3002 if (pair_prog == trace->syscalls.unaugmented_prog)
3003 goto next_candidate;
3004 }
3005
3006 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
3007 return pair_prog;
3008 next_candidate:
3009 continue;
3010 }
3011
3012 return NULL;
3013}
3014
3015static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
3016{
3017 int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
3018 map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
3019 int err = 0, key;
3020
3021 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3022 int prog_fd;
3023
3024 if (!trace__syscall_enabled(trace, key))
3025 continue;
3026
3027 trace__init_syscall_bpf_progs(trace, key);
3028
3029	// It'll get at least the "!raw_syscalls:unaugmented" program
3030 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
3031 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3032 if (err)
3033 break;
3034 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
3035 err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
3036 if (err)
3037 break;
3038 }
3039
3040 /*
3041	 * Now let's do a second pass looking for enabled syscalls without
3042	 * an augmenter that have a signature that is a superset of another
3043	 * syscall with an augmenter, so that we can auto-reuse it.
3044	 *
3045	 * I.e. if we have an augmenter for the "open" syscall that has
3046	 * this signature:
3047	 *
3048	 *   int open(const char *pathname, int flags, mode_t mode);
3049	 *
3050	 * and that collects just the first string argument, then we
3051	 * can reuse it for the 'creat' syscall, which has this signature:
3052	 *
3053	 *   int creat(const char *pathname, mode_t mode);
3054	 *
3055	 * and for:
3056	 *
3057	 *   int stat(const char *pathname, struct stat *statbuf);
3058	 *   int lstat(const char *pathname, struct stat *statbuf);
3059	 *
3060	 * because the 'open' augmenter will collect the first arg as a string
3061	 * and leave all the other args alone, which already helps with
3062	 * beautifying 'stat' and 'lstat''s pathname arg.
3063	 *
3064	 * Then, in time, when 'stat' gets an augmenter that collects both the
3065	 * first and second args (this one on the raw_syscalls:sys_exit prog
3066	 * array tail call), that one will be used instead.
3067 */
3068 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3069 struct syscall *sc = trace__syscall_info(trace, NULL, key);
3070 struct bpf_program *pair_prog;
3071 int prog_fd;
3072
3073 if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
3074 continue;
3075
3076 /*
3077 * For now we're just reusing the sys_enter prog, and if it
3078 * already has an augmenter, we don't need to find one.
3079 */
3080 if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
3081 continue;
3082
3083 /*
3084 * Look at all the other syscalls for one that has a signature
3085 * that is close enough that we can share:
3086 */
3087 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
3088 if (pair_prog == NULL)
3089 continue;
3090
3091 sc->bpf_prog.sys_enter = pair_prog;
3092
3093 /*
3094 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
3095 * with the fd for the program we're reusing:
3096 */
3097 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
3098 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3099 if (err)
3100 break;
3101 }
3102
3103
3104 return err;
3105}
3106#else
3107static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
3108{
3109 return 0;
3110}
3111
3112static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
3113{
3114 return 0;
3115}
3116
3117static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
3118 const char *name __maybe_unused)
3119{
3120 return NULL;
3121}
3122
3123static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
3124{
3125 return 0;
3126}
3127#endif // HAVE_LIBBPF_SUPPORT
3128
3129static int trace__set_ev_qualifier_filter(struct trace *trace)
3130{
3131 if (trace->syscalls.map)
3132 return trace__set_ev_qualifier_bpf_filter(trace);
3133 if (trace->syscalls.events.sys_enter)
3134 return trace__set_ev_qualifier_tp_filter(trace);
3135 return 0;
3136}
3137
3138static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
3139 size_t npids __maybe_unused, pid_t *pids __maybe_unused)
3140{
3141 int err = 0;
3142#ifdef HAVE_LIBBPF_SUPPORT
3143 bool value = true;
3144 int map_fd = bpf_map__fd(map);
3145 size_t i;
3146
3147 for (i = 0; i < npids; ++i) {
3148 err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
3149 if (err)
3150 break;
3151 }
3152#endif
3153 return err;
3154}
3155
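/*
 * Filter out perf trace's own pid and, when running under sshd or
 * gnome-terminal, the ancestor that is busy writing our output to the
 * terminal, to avoid a feedback loop where tracing those writes generates
 * ever more events to write.
 */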
3156static int trace__set_filter_loop_pids(struct trace *trace)
3157{
3158 unsigned int nr = 1, err;
3159 pid_t pids[32] = {
3160 getpid(),
3161 };
3162 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3163
3164 while (thread && nr < ARRAY_SIZE(pids)) {
3165 struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
3166
3167 if (parent == NULL)
3168 break;
3169
3170 if (!strcmp(thread__comm_str(parent), "sshd") ||
3171 strstarts(thread__comm_str(parent), "gnome-terminal")) {
3172 pids[nr++] = parent->tid;
3173 break;
3174 }
3175 thread = parent;
3176 }
3177
3178 err = perf_evlist__set_tp_filter_pids(trace->evlist, nr, pids);
3179 if (!err && trace->filter_pids.map)
3180 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3181
3182 return err;
3183}
3184
3185static int trace__set_filter_pids(struct trace *trace)
3186{
3187 int err = 0;
3188 /*
3189 * Better not use !target__has_task() here because we need to cover the
3190 * case where no threads were specified in the command line, but a
3191 * workload was, and in that case we will fill in the thread_map when
3192 * we fork the workload in perf_evlist__prepare_workload.
3193 */
3194 if (trace->filter_pids.nr > 0) {
3195 err = perf_evlist__set_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3196 trace->filter_pids.entries);
3197 if (!err && trace->filter_pids.map) {
3198 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3199 trace->filter_pids.entries);
3200 }
3201 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3202 err = trace__set_filter_loop_pids(trace);
3203 }
3204
3205 return err;
3206}
3207
3208static int __trace__deliver_event(struct trace *trace, union perf_event *event)
3209{
3210 struct evlist *evlist = trace->evlist;
3211 struct perf_sample sample;
3212 int err;
3213
3214 err = perf_evlist__parse_sample(evlist, event, &sample);
3215 if (err)
3216 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3217 else
3218 trace__handle_event(trace, event, &sample);
3219
3220 return 0;
3221}
3222
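/*
 * With --sort-events, samples are queued in an ordered_events buffer; flush
 * everything older than one second before the most recent timestamp seen.
 */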
3223static int __trace__flush_events(struct trace *trace)
3224{
3225 u64 first = ordered_events__first_time(&trace->oe.data);
3226 u64 flush = trace->oe.last - NSEC_PER_SEC;
3227
3228	/* Is there something to flush? */
3229 if (first && first < flush)
3230 return ordered_events__flush_time(&trace->oe.data, flush);
3231
3232 return 0;
3233}
3234
3235static int trace__flush_events(struct trace *trace)
3236{
3237 return !trace->sort_events ? 0 : __trace__flush_events(trace);
3238}
3239
3240static int trace__deliver_event(struct trace *trace, union perf_event *event)
3241{
3242 int err;
3243
3244 if (!trace->sort_events)
3245 return __trace__deliver_event(trace, event);
3246
3247 err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3248 if (err && err != -1)
3249 return err;
3250
3251 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
3252 if (err)
3253 return err;
3254
3255 return trace__flush_events(trace);
3256}
3257
3258static int ordered_events__deliver_event(struct ordered_events *oe,
3259 struct ordered_event *event)
3260{
3261 struct trace *trace = container_of(oe, struct trace, oe.data);
3262
3263 return __trace__deliver_event(trace, event->event);
3264}
3265
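/*
 * Live mode: sets up the evlist (syscall tracepoints, page faults,
 * vfs_getname, sched_stat_runtime), applies pid/cgroup/event filters,
 * mmaps the ring buffers and consumes events until the workload exits or
 * the user interrupts us.
 */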
3266static int trace__run(struct trace *trace, int argc, const char **argv)
3267{
3268 struct evlist *evlist = trace->evlist;
3269 struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
3270 int err = -1, i;
3271 unsigned long before;
3272 const bool forks = argc > 0;
3273 bool draining = false;
3274
3275 trace->live = true;
3276
3277 if (!trace->raw_augmented_syscalls) {
3278 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
3279 goto out_error_raw_syscalls;
3280
3281 if (trace->trace_syscalls)
3282 trace->vfs_getname = evlist__add_vfs_getname(evlist);
3283 }
3284
3285 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
3286 pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
3287 if (pgfault_maj == NULL)
3288 goto out_error_mem;
3289 perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
3290 evlist__add(evlist, pgfault_maj);
3291 }
3292
3293 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
3294 pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
3295 if (pgfault_min == NULL)
3296 goto out_error_mem;
3297 perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
3298 evlist__add(evlist, pgfault_min);
3299 }
3300
3301 if (trace->sched &&
3302 perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
3303 trace__sched_stat_runtime))
3304 goto out_error_sched_stat_runtime;
3305
3306 /*
3307 * If a global cgroup was set, apply it to all the events without an
3308 * explicit cgroup. I.e.:
3309 *
3310 * trace -G A -e sched:*switch
3311 *
3312 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
3313 * _and_ sched:sched_switch to the 'A' cgroup, while:
3314 *
3315 * trace -e sched:*switch -G A
3316 *
3317 * will only set the sched:sched_switch event to the 'A' cgroup, all the
3318	 * other events (raw_syscalls:sys_{enter,exit}, etc) are left "without"
3319 * a cgroup (on the root cgroup, sys wide, etc).
3320 *
3321 * Multiple cgroups:
3322 *
3323 * trace -G A -e sched:*switch -G B
3324 *
3325 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
3326 * to the 'B' cgroup.
3327 *
3328 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
3329 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
3330 */
3331 if (trace->cgroup)
3332 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
3333
3334 err = perf_evlist__create_maps(evlist, &trace->opts.target);
3335 if (err < 0) {
3336 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
3337 goto out_delete_evlist;
3338 }
3339
3340 err = trace__symbols_init(trace, evlist);
3341 if (err < 0) {
3342 fprintf(trace->output, "Problems initializing symbol libraries!\n");
3343 goto out_delete_evlist;
3344 }
3345
3346 perf_evlist__config(evlist, &trace->opts, &callchain_param);
3347
3348 signal(SIGCHLD, sig_handler);
3349 signal(SIGINT, sig_handler);
3350
3351 if (forks) {
3352 err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
3353 argv, false, NULL);
3354 if (err < 0) {
3355 fprintf(trace->output, "Couldn't run the workload!\n");
3356 goto out_delete_evlist;
3357 }
3358 }
3359
3360 err = evlist__open(evlist);
3361 if (err < 0)
3362 goto out_error_open;
3363
3364 err = bpf__apply_obj_config();
3365 if (err) {
3366 char errbuf[BUFSIZ];
3367
3368 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
3369 pr_err("ERROR: Apply config to BPF failed: %s\n",
3370 errbuf);
3371 goto out_error_open;
3372 }
3373
3374 err = trace__set_filter_pids(trace);
3375 if (err < 0)
3376 goto out_error_mem;
3377
3378 if (trace->syscalls.map)
3379 trace__init_syscalls_bpf_map(trace);
3380
3381 if (trace->syscalls.prog_array.sys_enter)
3382 trace__init_syscalls_bpf_prog_array_maps(trace);
3383
3384 if (trace->ev_qualifier_ids.nr > 0) {
3385 err = trace__set_ev_qualifier_filter(trace);
3386 if (err < 0)
3387 goto out_errno;
3388
3389 if (trace->syscalls.events.sys_exit) {
3390 pr_debug("event qualifier tracepoint filter: %s\n",
3391 trace->syscalls.events.sys_exit->filter);
3392 }
3393 }
3394
3395 /*
3396 * If the "close" syscall is not traced, then we will not have the
3397	 * opportunity to, in syscall_arg__scnprintf_close_fd(), invalidate the
3398	 * fd->pathname table, and we'd end up showing the last value set by
3399	 * syscalls opening a pathname and associating it with a descriptor, or
3400	 * reading it from /proc/pid/fd/, in cases where that doesn't make
3401 * sense.
3402 *
3403 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
3404 * not in use.
3405 */
3406 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
3407
3408 err = perf_evlist__apply_filters(evlist, &evsel);
3409 if (err < 0)
3410 goto out_error_apply_filters;
3411
3412 if (trace->dump.map)
3413 bpf_map__fprintf(trace->dump.map, trace->output);
3414
3415 err = evlist__mmap(evlist, trace->opts.mmap_pages);
3416 if (err < 0)
3417 goto out_error_mmap;
3418
3419 if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
3420 evlist__enable(evlist);
3421
3422 if (forks)
3423 perf_evlist__start_workload(evlist);
3424
3425 if (trace->opts.initial_delay) {
3426 usleep(trace->opts.initial_delay * 1000);
3427 evlist__enable(evlist);
3428 }
3429
3430 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
3431 evlist->core.threads->nr > 1 ||
3432 evlist__first(evlist)->core.attr.inherit;
3433
3434 /*
3435 * Now that we already used evsel->core.attr to ask the kernel to setup the
3436 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in
3437 * trace__resolve_callchain(), allowing per-event max-stack settings
3438 * to override an explicitly set --max-stack global setting.
3439 */
3440 evlist__for_each_entry(evlist, evsel) {
3441 if (evsel__has_callchain(evsel) &&
3442 evsel->core.attr.sample_max_stack == 0)
3443 evsel->core.attr.sample_max_stack = trace->max_stack;
3444 }
3445again:
3446 before = trace->nr_events;
3447
3448 for (i = 0; i < evlist->core.nr_mmaps; i++) {
3449 union perf_event *event;
3450 struct mmap *md;
3451
3452 md = &evlist->mmap[i];
3453 if (perf_mmap__read_init(md) < 0)
3454 continue;
3455
3456 while ((event = perf_mmap__read_event(md)) != NULL) {
3457 ++trace->nr_events;
3458
3459 err = trace__deliver_event(trace, event);
3460 if (err)
3461 goto out_disable;
3462
3463 perf_mmap__consume(md);
3464
3465 if (interrupted)
3466 goto out_disable;
3467
3468 if (done && !draining) {
3469 evlist__disable(evlist);
3470 draining = true;
3471 }
3472 }
3473 perf_mmap__read_done(md);
3474 }
3475
3476 if (trace->nr_events == before) {
3477 int timeout = done ? 100 : -1;
3478
3479 if (!draining && evlist__poll(evlist, timeout) > 0) {
3480 if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
3481 draining = true;
3482
3483 goto again;
3484 } else {
3485 if (trace__flush_events(trace))
3486 goto out_disable;
3487 }
3488 } else {
3489 goto again;
3490 }
3491
3492out_disable:
3493 thread__zput(trace->current);
3494
3495 evlist__disable(evlist);
3496
3497 if (trace->sort_events)
3498 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
3499
3500 if (!err) {
3501 if (trace->summary)
3502 trace__fprintf_thread_summary(trace, trace->output);
3503
3504 if (trace->show_tool_stats) {
3505 fprintf(trace->output, "Stats:\n "
3506 " vfs_getname : %" PRIu64 "\n"
3507 " proc_getname: %" PRIu64 "\n",
3508 trace->stats.vfs_getname,
3509 trace->stats.proc_getname);
3510 }
3511 }
3512
3513out_delete_evlist:
3514 trace__symbols__exit(trace);
3515
3516 evlist__delete(evlist);
3517 cgroup__put(trace->cgroup);
3518 trace->evlist = NULL;
3519 trace->live = false;
3520 return err;
3521{
3522 char errbuf[BUFSIZ];
3523
3524out_error_sched_stat_runtime:
3525 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
3526 goto out_error;
3527
3528out_error_raw_syscalls:
3529 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
3530 goto out_error;
3531
3532out_error_mmap:
3533 perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
3534 goto out_error;
3535
3536out_error_open:
3537 perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
3538
3539out_error:
3540 fprintf(trace->output, "%s\n", errbuf);
3541 goto out_delete_evlist;
3542
3543out_error_apply_filters:
3544 fprintf(trace->output,
3545 "Failed to set filter \"%s\" on event %s with %d (%s)\n",
3546 evsel->filter, perf_evsel__name(evsel), errno,
3547 str_error_r(errno, errbuf, sizeof(errbuf)));
3548 goto out_delete_evlist;
3549}
3550out_error_mem:
3551 fprintf(trace->output, "Not enough memory to run!\n");
3552 goto out_delete_evlist;
3553
3554out_errno:
3555 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
3556 goto out_delete_evlist;
3557}
3558
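/*
 * 'perf trace -i perf.data': replays a previously recorded session through
 * the same sys_enter/sys_exit/pgfault handlers used in live mode.
 */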
3559static int trace__replay(struct trace *trace)
3560{
3561 const struct evsel_str_handler handlers[] = {
3562 { "probe:vfs_getname", trace__vfs_getname, },
3563 };
3564 struct perf_data data = {
3565 .path = input_name,
3566 .mode = PERF_DATA_MODE_READ,
3567 .force = trace->force,
3568 };
3569 struct perf_session *session;
3570 struct evsel *evsel;
3571 int err = -1;
3572
3573 trace->tool.sample = trace__process_sample;
3574 trace->tool.mmap = perf_event__process_mmap;
3575 trace->tool.mmap2 = perf_event__process_mmap2;
3576 trace->tool.comm = perf_event__process_comm;
3577 trace->tool.exit = perf_event__process_exit;
3578 trace->tool.fork = perf_event__process_fork;
3579 trace->tool.attr = perf_event__process_attr;
3580 trace->tool.tracing_data = perf_event__process_tracing_data;
3581 trace->tool.build_id = perf_event__process_build_id;
3582 trace->tool.namespaces = perf_event__process_namespaces;
3583
3584 trace->tool.ordered_events = true;
3585 trace->tool.ordering_requires_timestamps = true;
3586
3587 /* add tid to output */
3588 trace->multiple_threads = true;
3589
3590 session = perf_session__new(&data, false, &trace->tool);
3591 if (IS_ERR(session))
3592 return PTR_ERR(session);
3593
3594 if (trace->opts.target.pid)
3595 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
3596
3597 if (trace->opts.target.tid)
3598 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
3599
3600 if (symbol__init(&session->header.env) < 0)
3601 goto out;
3602
3603 trace->host = &session->machines.host;
3604
3605 err = perf_session__set_tracepoints_handlers(session, handlers);
3606 if (err)
3607 goto out;
3608
3609 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3610 "raw_syscalls:sys_enter");
3611 /* older kernels have syscalls tp versus raw_syscalls */
3612 if (evsel == NULL)
3613 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3614 "syscalls:sys_enter");
3615
3616 if (evsel &&
3617 (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
3618 perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
3619	pr_err("Error during initialization of the raw_syscalls:sys_enter event\n");
3620 goto out;
3621 }
3622
3623 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3624 "raw_syscalls:sys_exit");
3625 if (evsel == NULL)
3626 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3627 "syscalls:sys_exit");
3628 if (evsel &&
3629 (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
3630 perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
3631	pr_err("Error during initialization of the raw_syscalls:sys_exit event\n");
3632 goto out;
3633 }
3634
3635 evlist__for_each_entry(session->evlist, evsel) {
3636 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
3637 (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
3638 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
3639 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
3640 evsel->handler = trace__pgfault;
3641 }
3642
3643 setup_pager();
3644
3645 err = perf_session__process_events(session);
3646 if (err)
3647 pr_err("Failed to process events, error %d", err);
3648
3649 else if (trace->summary)
3650 trace__fprintf_thread_summary(trace, trace->output);
3651
3652out:
3653 perf_session__delete(session);
3654
3655 return err;
3656}
3657
3658static size_t trace__fprintf_threads_header(FILE *fp)
3659{
3660 size_t printed;
3661
3662 printed = fprintf(fp, "\n Summary of events:\n\n");
3663
3664 return printed;
3665}
3666
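/*
 * Re-sorts the per-thread syscall stats intlist by the total time spent in
 * each syscall (msecs = number of calls * average duration) for the
 * summary table.
 */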
3667DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
3668 struct stats *stats;
3669 double msecs;
3670 int syscall;
3671)
3672{
3673 struct int_node *source = rb_entry(nd, struct int_node, rb_node);
3674 struct stats *stats = source->priv;
3675
3676 entry->syscall = source->i;
3677 entry->stats = stats;
3678 entry->msecs = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
3679}
3680
3681static size_t thread__dump_stats(struct thread_trace *ttrace,
3682 struct trace *trace, FILE *fp)
3683{
3684 size_t printed = 0;
3685 struct syscall *sc;
3686 struct rb_node *nd;
3687 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
3688
3689 if (syscall_stats == NULL)
3690 return 0;
3691
3692 printed += fprintf(fp, "\n");
3693
3694 printed += fprintf(fp, " syscall calls total min avg max stddev\n");
3695 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
3696 printed += fprintf(fp, " --------------- -------- --------- --------- --------- --------- ------\n");
3697
3698 resort_rb__for_each_entry(nd, syscall_stats) {
3699 struct stats *stats = syscall_stats_entry->stats;
3700 if (stats) {
3701 double min = (double)(stats->min) / NSEC_PER_MSEC;
3702 double max = (double)(stats->max) / NSEC_PER_MSEC;
3703 double avg = avg_stats(stats);
3704 double pct;
3705 u64 n = (u64) stats->n;
3706
3707 pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
3708 avg /= NSEC_PER_MSEC;
3709
3710 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
3711 printed += fprintf(fp, " %-15s", sc->name);
3712 printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
3713 n, syscall_stats_entry->msecs, min, avg);
3714 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
3715 }
3716 }
3717
3718 resort_rb__delete(syscall_stats);
3719 printed += fprintf(fp, "\n\n");
3720
3721 return printed;
3722}
3723
3724static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
3725{
3726 size_t printed = 0;
3727 struct thread_trace *ttrace = thread__priv(thread);
3728 double ratio;
3729
3730 if (ttrace == NULL)
3731 return 0;
3732
3733 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
3734
3735 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
3736 printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
3737 printed += fprintf(fp, "%.1f%%", ratio);
3738 if (ttrace->pfmaj)
3739 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
3740 if (ttrace->pfmin)
3741 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
3742 if (trace->sched)
3743 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
3744 else if (fputc('\n', fp) != EOF)
3745 ++printed;
3746
3747 printed += thread__dump_stats(ttrace, trace, fp);
3748
3749 return printed;
3750}
3751
3752static unsigned long thread__nr_events(struct thread_trace *ttrace)
3753{
3754 return ttrace ? ttrace->nr_events : 0;
3755}
3756
3757DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
3758 struct thread *thread;
3759)
3760{
3761 entry->thread = rb_entry(nd, struct thread, rb_node);
3762}
3763
3764static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
3765{
3766 size_t printed = trace__fprintf_threads_header(fp);
3767 struct rb_node *nd;
3768 int i;
3769
3770 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
3771 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
3772
3773 if (threads == NULL) {
3774 fprintf(fp, "%s", "Error sorting output by nr_events!\n");
3775 return 0;
3776 }
3777
3778 resort_rb__for_each_entry(nd, threads)
3779 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
3780
3781 resort_rb__delete(threads);
3782 }
3783 return printed;
3784}
3785
3786static int trace__set_duration(const struct option *opt, const char *str,
3787 int unset __maybe_unused)
3788{
3789 struct trace *trace = opt->value;
3790
3791 trace->duration_filter = atof(str);
3792 return 0;
3793}
3794
3795static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
3796 int unset __maybe_unused)
3797{
3798 int ret = -1;
3799 size_t i;
3800 struct trace *trace = opt->value;
3801 /*
3802	 * FIXME: introduce an intarray class, plainly parse the csv and create a
3803 * { int nr, int entries[] } struct...
3804 */
3805 struct intlist *list = intlist__new(str);
3806
3807 if (list == NULL)
3808 return -1;
3809
3810 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
3811 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
3812
3813 if (trace->filter_pids.entries == NULL)
3814 goto out;
3815
3816 trace->filter_pids.entries[0] = getpid();
3817
3818 for (i = 1; i < trace->filter_pids.nr; ++i)
3819 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
3820
3821 intlist__delete(list);
3822 ret = 0;
3823out:
3824 return ret;
3825}
3826
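/*
 * -o/--output: if the file already exists and is non-empty, rename it to
 * <name>.old before opening a fresh one for writing.
 */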
3827static int trace__open_output(struct trace *trace, const char *filename)
3828{
3829 struct stat st;
3830
3831 if (!stat(filename, &st) && st.st_size) {
3832 char oldname[PATH_MAX];
3833
3834 scnprintf(oldname, sizeof(oldname), "%s.old", filename);
3835 unlink(oldname);
3836 rename(filename, oldname);
3837 }
3838
3839 trace->output = fopen(filename, "w");
3840
3841 return trace->output == NULL ? -errno : 0;
3842}
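/*
 * E.g. (hypothetical file name): with '-o trace.txt' and a pre-existing,
 * non-empty trace.txt, the old file is first renamed to trace.txt.old
 * before the new output file is opened.
 */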
3843
3844static int parse_pagefaults(const struct option *opt, const char *str,
3845 int unset __maybe_unused)
3846{
3847 int *trace_pgfaults = opt->value;
3848
3849 if (strcmp(str, "all") == 0)
3850 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
3851 else if (strcmp(str, "maj") == 0)
3852 *trace_pgfaults |= TRACE_PFMAJ;
3853 else if (strcmp(str, "min") == 0)
3854 *trace_pgfaults |= TRACE_PFMIN;
3855 else
3856 return -1;
3857
3858 return 0;
3859}
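/*
 * Example: '--pf all' sets both TRACE_PFMAJ and TRACE_PFMIN, '--pf min' only
 * TRACE_PFMIN; "maj" is the default argument wired up in the '-F'/"pf"
 * OPT_CALLBACK_DEFAULT() entry below.
 */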
3860
3861static void evlist__set_evsel_handler(struct evlist *evlist, void *handler)
3862{
3863 struct evsel *evsel;
3864
3865 evlist__for_each_entry(evlist, evsel)
3866 evsel->handler = handler;
3867}
3868
3869static int evlist__set_syscall_tp_fields(struct evlist *evlist)
3870{
3871 struct evsel *evsel;
3872
3873 evlist__for_each_entry(evlist, evsel) {
3874 if (evsel->priv || !evsel->tp_format)
3875 continue;
3876
3877 if (strcmp(evsel->tp_format->system, "syscalls"))
3878 continue;
3879
3880 if (perf_evsel__init_syscall_tp(evsel))
3881 return -1;
3882
3883 if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
3884 struct syscall_tp *sc = evsel->priv;
3885
3886 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
3887 return -1;
3888 } else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
3889 struct syscall_tp *sc = evsel->priv;
3890
3891 if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
3892 return -1;
3893 }
3894 }
3895
3896 return 0;
3897}
3898
3899/*
3900 * XXX: Hackish, just splitting the combined -e/--event argument into syscalls
3901 * (raw_syscalls:{sys_{enter,exit}}) and other events (tracepoints, HW, SW, etc.)
3902 * to use the existing facilities unchanged (trace->ev_qualifier + parse_options()).
3903 *
3904 * It'd be better to introduce a parse_options() variant that would return a
3905 * list with the terms it didn't match to an event...
3906 */
3907static int trace__parse_events_option(const struct option *opt, const char *str,
3908 int unset __maybe_unused)
3909{
3910 struct trace *trace = (struct trace *)opt->value;
3911 const char *s = str;
3912 char *sep = NULL, *lists[2] = { NULL, NULL, };
3913 int len = strlen(str) + 1, err = -1, list, idx;
3914 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
3915 char group_name[PATH_MAX];
3916 struct syscall_fmt *fmt;
3917
3918 if (strace_groups_dir == NULL)
3919 return -1;
3920
3921 if (*s == '!') {
3922 ++s;
3923 trace->not_ev_qualifier = true;
3924 }
3925
3926 while (1) {
3927 if ((sep = strchr(s, ',')) != NULL)
3928 *sep = '\0';
3929
3930 list = 0;
3931 if (syscalltbl__id(trace->sctbl, s) >= 0 ||
3932 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
3933 list = 1;
3934 goto do_concat;
3935 }
3936
3937 fmt = syscall_fmt__find_by_alias(s);
3938 if (fmt != NULL) {
3939 list = 1;
3940 s = fmt->name;
3941 } else {
3942 path__join(group_name, sizeof(group_name), strace_groups_dir, s);
3943 if (access(group_name, R_OK) == 0)
3944 list = 1;
3945 }
3946do_concat:
3947 if (lists[list]) {
3948 sprintf(lists[list] + strlen(lists[list]), ",%s", s);
3949 } else {
3950 lists[list] = malloc(len);
3951 if (lists[list] == NULL)
3952 goto out;
3953 strcpy(lists[list], s);
3954 }
3955
3956 if (!sep)
3957 break;
3958
3959 *sep = ',';
3960 s = sep + 1;
3961 }
3962
3963 if (lists[1] != NULL) {
3964 struct strlist_config slist_config = {
3965 .dirname = strace_groups_dir,
3966 };
3967
3968 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
3969 if (trace->ev_qualifier == NULL) {
3970 fputs("Not enough memory to parse event qualifier\n", trace->output);
3971 goto out;
3972 }
3973
3974 if (trace__validate_ev_qualifier(trace))
3975 goto out;
3976 trace->trace_syscalls = true;
3977 }
3978
3979 err = 0;
3980
3981 if (lists[0]) {
3982 struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
3983 "event selector. use 'perf list' to list available events",
3984 parse_events_option);
3985 err = parse_events_option(&o, lists[0], 0);
3986 }
3987out:
3988 if (sep)
3989 *sep = ',';
3990
3991 return err;
3992}
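/*
 * Rough example (hypothetical argument): '-e openat,close,sched:sched_switch'
 * is split so that openat and close, which match the syscall table, end up in
 * lists[1] and become trace->ev_qualifier, while sched:sched_switch falls
 * through to lists[0] and is handed to parse_events_option(). A leading '!'
 * negates the syscall qualifier.
 */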
3993
3994static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
3995{
3996 struct trace *trace = opt->value;
3997
3998 if (!list_empty(&trace->evlist->core.entries))
3999 return parse_cgroups(opt, str, unset);
4000
4001 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4002
4003 return 0;
4004}
4005
4006static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
4007{
4008 if (trace->bpf_obj == NULL)
4009 return NULL;
4010
4011 return bpf_object__find_map_by_name(trace->bpf_obj, name);
4012}
4013
4014static void trace__set_bpf_map_filtered_pids(struct trace *trace)
4015{
4016 trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
4017}
4018
4019static void trace__set_bpf_map_syscalls(struct trace *trace)
4020{
4021 trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
4022 trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
4023 trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
4024}
4025
4026static int trace__config(const char *var, const char *value, void *arg)
4027{
4028 struct trace *trace = arg;
4029 int err = 0;
4030
4031 if (!strcmp(var, "trace.add_events")) {
4032 struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
4033 "event selector. use 'perf list' to list available events",
4034 parse_events_option);
4035 /*
4036 * We can't propagate parse_events_option()'s return value, as it is 1
4037 * for failure while perf_config() expects -1.
4038 */
4039 if (parse_events_option(&o, value, 0))
4040 err = -1;
4041 } else if (!strcmp(var, "trace.show_timestamp")) {
4042 trace->show_tstamp = perf_config_bool(var, value);
4043 } else if (!strcmp(var, "trace.show_duration")) {
4044 trace->show_duration = perf_config_bool(var, value);
4045 } else if (!strcmp(var, "trace.show_arg_names")) {
4046 trace->show_arg_names = perf_config_bool(var, value);
4047 if (!trace->show_arg_names)
4048 trace->show_zeros = true;
4049 } else if (!strcmp(var, "trace.show_zeros")) {
4050 bool new_show_zeros = perf_config_bool(var, value);
4051 if (!trace->show_arg_names && !new_show_zeros) {
4052 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
4053 goto out;
4054 }
4055 trace->show_zeros = new_show_zeros;
4056 } else if (!strcmp(var, "trace.show_prefix")) {
4057 trace->show_string_prefix = perf_config_bool(var, value);
4058 } else if (!strcmp(var, "trace.no_inherit")) {
4059 trace->opts.no_inherit = perf_config_bool(var, value);
4060 } else if (!strcmp(var, "trace.args_alignment")) {
4061 int args_alignment = 0;
4062 if (perf_config_int(&args_alignment, var, value) == 0)
4063 trace->args_alignment = args_alignment;
4064 }
4065out:
4066 return err;
4067}
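/*
 * A ~/.perfconfig snippet handled by trace__config() could look like this
 * (values are just for illustration):
 *
 *   [trace]
 *       add_events = sched:sched_switch
 *       show_duration = no
 *       show_zeros = yes
 *       args_alignment = 54
 */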
4068
4069int cmd_trace(int argc, const char **argv)
4070{
4071 const char *trace_usage[] = {
4072 "perf trace [<options>] [<command>]",
4073 "perf trace [<options>] -- <command> [<options>]",
4074 "perf trace record [<options>] [<command>]",
4075 "perf trace record [<options>] -- <command> [<options>]",
4076 NULL
4077 };
4078 struct trace trace = {
4079 .opts = {
4080 .target = {
4081 .uid = UINT_MAX,
4082 .uses_mmap = true,
4083 },
4084 .user_freq = UINT_MAX,
4085 .user_interval = ULLONG_MAX,
4086 .no_buffering = true,
4087 .mmap_pages = UINT_MAX,
4088 },
4089 .output = stderr,
4090 .show_comm = true,
4091 .show_tstamp = true,
4092 .show_duration = true,
4093 .show_arg_names = true,
4094 .args_alignment = 70,
4095 .trace_syscalls = false,
4096 .kernel_syscallchains = false,
4097 .max_stack = UINT_MAX,
4098 .max_events = ULONG_MAX,
4099 };
4100 const char *map_dump_str = NULL;
4101 const char *output_name = NULL;
4102 const struct option trace_options[] = {
4103 OPT_CALLBACK('e', "event", &trace, "event",
4104 "event/syscall selector. use 'perf list' to list available events",
4105 trace__parse_events_option),
4106 OPT_BOOLEAN(0, "comm", &trace.show_comm,
4107 "show the thread COMM next to its id"),
4108 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
4109 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
4110 trace__parse_events_option),
4111 OPT_STRING('o', "output", &output_name, "file", "output file name"),
4112 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
4113 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
4114 "trace events on existing process id"),
4115 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
4116 "trace events on existing thread id"),
4117 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
4118 "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
4119 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
4120 "system-wide collection from all CPUs"),
4121 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
4122 "list of cpus to monitor"),
4123 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
4124 "child tasks do not inherit counters"),
4125 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
4126 "number of mmap data pages",
4127 perf_evlist__parse_mmap_pages),
4128 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
4129 "user to profile"),
4130 OPT_CALLBACK(0, "duration", &trace, "float",
4131 "show only events with duration > N.M ms",
4132 trace__set_duration),
4133#ifdef HAVE_LIBBPF_SUPPORT
4134 OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
4135#endif
4136 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
4137 OPT_INCR('v', "verbose", &verbose, "be more verbose"),
4138 OPT_BOOLEAN('T', "time", &trace.full_time,
4139 "Show full timestamp, not time relative to first start"),
4140 OPT_BOOLEAN(0, "failure", &trace.failure_only,
4141 "Show only syscalls that failed"),
4142 OPT_BOOLEAN('s', "summary", &trace.summary_only,
4143 "Show only syscall summary with statistics"),
4144 OPT_BOOLEAN('S', "with-summary", &trace.summary,
4145 "Show all syscalls and summary with statistics"),
4146 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
4147 "Trace pagefaults", parse_pagefaults, "maj"),
4148 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
4149 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
4150 OPT_CALLBACK(0, "call-graph", &trace.opts,
4151 "record_mode[,record_size]", record_callchain_help,
4152 &record_parse_callchain_opt),
4153 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
4154 "Show the kernel callchains on the syscall exit path"),
4155 OPT_ULONG(0, "max-events", &trace.max_events,
4156 "Set the maximum number of events to print; exit after that is reached."),
4157 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
4158 "Set the minimum stack depth when parsing the callchain, "
4159 "anything below the specified depth will be ignored."),
4160 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
4161 "Set the maximum stack depth when parsing the callchain, "
4162 "anything beyond the specified depth will be ignored. "
4163 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
4164 OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
4165 "Sort batch of events before processing, use if getting out of order events"),
4166 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
4167 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
4168 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
4169 "per thread proc mmap processing timeout in ms"),
4170 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
4171 trace__parse_cgroups),
4172 OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
4173 "ms to wait before starting measurement after program "
4174 "start"),
4175 OPTS_EVSWITCH(&trace.evswitch),
4176 OPT_END()
4177 };
4178 bool __maybe_unused max_stack_user_set = true;
4179 bool mmap_pages_user_set = true;
4180 struct evsel *evsel;
4181 const char * const trace_subcommands[] = { "record", NULL };
4182 int err = -1;
4183 char bf[BUFSIZ];
4184
4185 signal(SIGSEGV, sighandler_dump_stack);
4186 signal(SIGFPE, sighandler_dump_stack);
4187
4188 trace.evlist = evlist__new();
4189 trace.sctbl = syscalltbl__new();
4190
4191 if (trace.evlist == NULL || trace.sctbl == NULL) {
4192 pr_err("Not enough memory to run!\n");
4193 err = -ENOMEM;
4194 goto out;
4195 }
4196
4197 /*
4198 * Parsing .perfconfig may entail creating a BPF event, that may need
4199 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
4200 * is too small. This affects just this process, not touching the
4201 * global setting. If it fails we'll get something in 'perf trace -v'
4202 * to help diagnose the problem.
4203 */
4204 rlimit__bump_memlock();
4205
4206 err = perf_config(trace__config, &trace);
4207 if (err)
4208 goto out;
4209
4210 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
4211 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
4212
4213 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
4214 usage_with_options_msg(trace_usage, trace_options,
4215 "cgroup monitoring only available in system-wide mode");
4216 }
4217
4218 evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
4219 if (IS_ERR(evsel)) {
4220 bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
4221 pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
4222 goto out;
4223 }
4224
4225 if (evsel) {
4226 trace.syscalls.events.augmented = evsel;
4227
4228 evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
4229 if (evsel == NULL) {
4230 pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
4231 goto out;
4232 }
4233
4234 if (evsel->bpf_obj == NULL) {
4235 pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
4236 goto out;
4237 }
4238
4239 trace.bpf_obj = evsel->bpf_obj;
4240
4241 trace__set_bpf_map_filtered_pids(&trace);
4242 trace__set_bpf_map_syscalls(&trace);
4243 trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
4244 }
4245
4246 err = bpf__setup_stdout(trace.evlist);
4247 if (err) {
4248 bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
4249 pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
4250 goto out;
4251 }
4252
4253 err = -1;
4254
4255 if (map_dump_str) {
4256 trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
4257 if (trace.dump.map == NULL) {
4258 pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
4259 goto out;
4260 }
4261 }
4262
4263 if (trace.trace_pgfaults) {
4264 trace.opts.sample_address = true;
4265 trace.opts.sample_time = true;
4266 }
4267
4268 if (trace.opts.mmap_pages == UINT_MAX)
4269 mmap_pages_user_set = false;
4270
4271 if (trace.max_stack == UINT_MAX) {
4272 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
4273 max_stack_user_set = false;
4274 }
4275
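/*
 * If a stack depth was requested but no callchain collection mode was chosen,
 * default to DWARF callchains when support is built in, e.g. (hypothetically)
 * 'perf trace --max-stack 16 ...' then behaves as if '--call-graph dwarf' had
 * also been passed.
 */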
4276#ifdef HAVE_DWARF_UNWIND_SUPPORT
4277 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
4278 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
4279 }
4280#endif
4281
4282 if (callchain_param.enabled) {
4283 if (!mmap_pages_user_set && geteuid() == 0)
4284 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
4285
4286 symbol_conf.use_callchain = true;
4287 }
4288
4289 if (trace.evlist->core.nr_entries > 0) {
4290 evlist__set_evsel_handler(trace.evlist, trace__event_handler);
4291 if (evlist__set_syscall_tp_fields(trace.evlist)) {
4292 perror("failed to set syscalls:* tracepoint fields");
4293 goto out;
4294 }
4295 }
4296
4297 if (trace.sort_events) {
4298 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
4299 ordered_events__set_copy_on_queue(&trace.oe.data, true);
4300 }
4301
4302 /*
4303 * If we are augmenting syscalls, then combine what we put in the
4304 * __augmented_syscalls__ BPF map with what is in the
4305 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
4306 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
4307 *
4308 * We'll switch to look at two BPF maps, one for sys_enter and the
4309 * other for sys_exit when we start augmenting the sys_exit paths with
4310 * buffers that are being copied from kernel to userspace, think 'read'
4311 * syscall.
4312 */
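/*
 * As a concrete (illustrative) case: for an augmented 'openat' the enter side,
 * including the filename string copied by the BPF program, arrives via the
 * __augmented_syscalls__ output event, while the return value still comes from
 * the syscalls:sys_exit_openat tracepoint.
 */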
4313 if (trace.syscalls.events.augmented) {
4314 evlist__for_each_entry(trace.evlist, evsel) {
4315 bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
4316
4317 if (raw_syscalls_sys_exit) {
4318 trace.raw_augmented_syscalls = true;
4319 goto init_augmented_syscall_tp;
4320 }
4321
4322 if (trace.syscalls.events.augmented->priv == NULL &&
4323 strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
4324 struct evsel *augmented = trace.syscalls.events.augmented;
4325 if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
4326 perf_evsel__init_augmented_syscall_tp_args(augmented))
4327 goto out;
4328 /*
4329 * Augmented is __augmented_syscalls__ BPF_OUTPUT event
4330 * Above we made sure we can get from the payload the tp fields
4331 * that we get from syscalls:sys_enter tracefs format file.
4332 */
4333 augmented->handler = trace__sys_enter;
4334 /*
4335 * Now we do the same for the *syscalls:sys_enter event so that
4336 * if we handle it directly, i.e. if the BPF prog returns 0 so
4337 * as not to filter it, then we'll handle it just like we would
4338 * for the BPF_OUTPUT one:
4339 */
4340 if (perf_evsel__init_augmented_syscall_tp(evsel, evsel) ||
4341 perf_evsel__init_augmented_syscall_tp_args(evsel))
4342 goto out;
4343 evsel->handler = trace__sys_enter;
4344 }
4345
4346 if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
4347 struct syscall_tp *sc;
4348init_augmented_syscall_tp:
4349 if (perf_evsel__init_augmented_syscall_tp(evsel, evsel))
4350 goto out;
4351 sc = evsel->priv;
4352 /*
4353 * For now with BPF raw_augmented we hook into
4354 * raw_syscalls:sys_enter and there we get all
4355 * 6 syscall args plus the tracepoint common
4356 * fields and the syscall_nr (another long).
4357 * So we check if that is the case and if so
4358 * don't use sc->args_size but instead the
4359 * full raw_syscalls:sys_enter payload size,
4360 * which is fixed.
4361 *
4362 * We'll revisit this later to pass
4363 * sc->args_size to the BPF augmenter (now
4364 * tools/perf/examples/bpf/augmented_raw_syscalls.c),
4365 * so that it copies only what we need for each
4366 * syscall, like what happens when we use
4367 * syscalls:sys_enter_NAME, so that we reduce
4368 * the kernel/userspace traffic to just what is
4369 * needed for each syscall.
4370 */
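/*
 * E.g. with 8-byte longs the fixed payload below works out to
 * sc->id.offset + (6 args + syscall_nr) * 8 = sc->id.offset + 56 bytes.
 */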
4371 if (trace.raw_augmented_syscalls)
4372 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
4373 perf_evsel__init_augmented_syscall_tp_ret(evsel);
4374 evsel->handler = trace__sys_exit;
4375 }
4376 }
4377 }
4378
4379 if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
4380 return trace__record(&trace, argc-1, &argv[1]);
4381
4382 /* summary_only implies summary option, but don't overwrite summary if set */
4383 if (trace.summary_only)
4384 trace.summary = trace.summary_only;
4385
4386 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
4387 trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
4388 trace.trace_syscalls = true;
4389 }
4390
4391 if (output_name != NULL) {
4392 err = trace__open_output(&trace, output_name);
4393 if (err < 0) {
4394 perror("failed to create output file");
4395 goto out;
4396 }
4397 }
4398
4399 err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
4400 if (err)
4401 goto out_close;
4402
4403 err = target__validate(&trace.opts.target);
4404 if (err) {
4405 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
4406 fprintf(trace.output, "%s", bf);
4407 goto out_close;
4408 }
4409
4410 err = target__parse_uid(&trace.opts.target);
4411 if (err) {
4412 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
4413 fprintf(trace.output, "%s", bf);
4414 goto out_close;
4415 }
4416
4417 if (!argc && target__none(&trace.opts.target))
4418 trace.opts.target.system_wide = true;
4419
4420 if (input_name)
4421 err = trace__replay(&trace);
4422 else
4423 err = trace__run(&trace, argc, argv);
4424
4425out_close:
4426 if (output_name != NULL)
4427 fclose(trace.output);
4428out:
4429 return err;
4430}