/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc. Default format is loosely strace-like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/debug.h"
#include "util/env.h"
#include "util/event.h"
#include "util/evlist.h"
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
#include "syscalltbl.h"
#include "rb_resort.h"

#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <fcntl.h>

#include "sane_ctype.h"

#ifndef O_CLOEXEC
# define O_CLOEXEC 02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE 1024
#endif

struct trace {
	struct perf_tool tool;
	struct syscalltbl *sctbl;
	struct {
		int max;
		struct syscall *table;
		struct {
			struct perf_evsel *sys_enter,
					  *sys_exit;
		} events;
	} syscalls;
	struct record_opts opts;
	struct perf_evlist *evlist;
	struct machine *host;
	struct thread *current;
	struct cgroup *cgroup;
	u64 base_time;
	FILE *output;
	unsigned long nr_events;
	struct strlist *ev_qualifier;
	struct {
		size_t nr;
		int *entries;
	} ev_qualifier_ids;
	struct {
		size_t nr;
		pid_t *entries;
	} filter_pids;
	double duration_filter;
	double runtime_ms;
	struct {
		u64 vfs_getname,
		    proc_getname;
	} stats;
	unsigned int max_stack;
	unsigned int min_stack;
	bool not_ev_qualifier;
	bool live;
	bool full_time;
	bool sched;
	bool multiple_threads;
	bool summary;
	bool summary_only;
	bool failure_only;
	bool show_comm;
	bool print_sample;
	bool show_tool_stats;
	bool trace_syscalls;
	bool kernel_syscallchains;
	bool force;
	bool vfs_getname;
	int trace_pgfaults;
	int open_id;
};

struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};

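/*
 * Generate the tp_field__u8()..tp_field__u64() accessors: the value is
 * memcpy'd out of the raw sample payload, since offsets into raw_data are
 * not guaranteed to be suitably aligned for direct loads.
 */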
#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value; \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);

#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);

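/*
 * Pick the accessor matching the field size, using the byte-swapping
 * variant when the recorded data and the host differ in endianness.
 */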
static int tp_field__init_uint(struct tp_field *field,
			       struct format_field *format_field,
			       bool needs_swap)
{
	field->offset = format_field->offset;

	switch (format_field->size) {
	case 1:
		field->integer = tp_field__u8;
		break;
	case 2:
		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
		break;
	case 4:
		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
		break;
	case 8:
		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
		break;
	default:
		return -1;
	}

	return 0;
}

static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}

static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field)
{
	field->offset = format_field->offset;
	field->pointer = tp_field__ptr;
	return 0;
}

struct syscall_tp {
	struct tp_field id;
	union {
		struct tp_field args, ret;
	};
};

static int perf_evsel__init_tp_uint_field(struct perf_evsel *evsel,
					  struct tp_field *field,
					  const char *name)
{
	struct format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

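/*
 * The 'name' argument does double duty here: it selects the struct
 * syscall_tp member to initialize and, via #name stringification, names
 * the tracepoint format field it is looked up from, e.g.
 * perf_evsel__init_sc_tp_uint_field(evsel, id) wires up both.
 */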
#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })

static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel,
					 struct tp_field *field,
					 const char *name)
{
	struct format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })

static void perf_evsel__delete_priv(struct perf_evsel *evsel)
{
	zfree(&evsel->priv);
	perf_evsel__delete(evsel);
}

static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler)
{
	evsel->priv = malloc(sizeof(struct syscall_tp));
	if (evsel->priv != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			goto out_delete;

		evsel->handler = handler;
		return 0;
	}

	return -ENOMEM;

out_delete:
	zfree(&evsel->priv);
	return -ENOENT;
}

static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);

	/* older kernels (e.g. RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = perf_evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (perf_evsel__init_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	perf_evsel__delete_priv(evsel);
	return NULL;
}

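/*
 * Fetch the 'id', 'args' or 'ret' field from the raw sample via the
 * accessors installed by perf_evsel__init_syscall_tp().
 */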
#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.pointer(&fields->name, sample); })

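/*
 * Map 'val' to a string table entry, honouring the table's base offset,
 * e.g. epoll_ctl_ops starts at 1, so val=1 prints as "ADD". Out-of-range
 * values fall back to being printed with 'intfmt'.
 */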
size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries)
		return scnprintf(bf, size, intfmt, val);

	return scnprintf(bf, size, "%s", sa->entries[idx]);
}

static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
						struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->val);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray

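/*
 * A set of string tables covering disjoint value ranges, for arguments
 * that draw from several of them, e.g. fcntl's generic commands plus the
 * Linux-specific ones based at F_LINUX_SPECIFIC_BASE.
 */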
struct strarrays {
	int nr_entries;
	struct strarray **entries;
};

#define DEFINE_STRARRAYS(array) struct strarrays strarrays__##array = { \
	.nr_entries = ARRAY_SIZE(array), \
	.entries = array, \
}

size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
					struct syscall_arg *arg)
{
	struct strarrays *sas = arg->parm;
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];
		int idx = arg->val - sa->offset;

		if (idx >= 0 && idx < sa->nr_entries) {
			if (sa->entries[idx] == NULL)
				break;
			return scnprintf(bf, size, "%s", sa->entries[idx]);
		}
	}

	return scnprintf(bf, size, "%d", arg->val);
}

#ifndef AT_FDCWD
#define AT_FDCWD -100
#endif

static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
					   struct syscall_arg *arg)
{
	int fd = arg->val;

	if (fd == AT_FDCWD)
		return scnprintf(bf, size, "CWD");

	return syscall_arg__scnprintf_fd(bf, size, arg);
}

#define SCA_FDAT syscall_arg__scnprintf_fd_at

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd

size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}

size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%d", arg->val);
}

size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%ld", arg->val);
}

static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD",
};
static DEFINE_STRARRAY(bpf_cmd);

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers);

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options);

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences);

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds);

static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, F_LINUX_SPECIFIC_BASE);

static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources);

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow);

static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid);

static const char *socket_families[] = {
	"UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
	"BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
	"SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
	"RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
	"BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
	"ALG", "NFC", "VSOCK",
};
static DEFINE_STRARRAY(socket_families);

static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	size_t printed = 0;
	int mode = arg->val;

	if (mode == F_OK) /* 0 */
		return scnprintf(bf, size, "F");
#define P_MODE(n) \
	if (mode & n##_OK) { \
		printed += scnprintf(bf + printed, size - printed, "%s", #n); \
		mode &= ~n##_OK; \
	}

	P_MODE(R);
	P_MODE(W);
	P_MODE(X);
#undef P_MODE

	if (mode)
		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);

	return printed;
}

#define SCA_ACCMODE syscall_arg__scnprintf_access_mode

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_FILENAME syscall_arg__scnprintf_filename

static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

#define P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags

#ifndef GRND_NONBLOCK
#define GRND_NONBLOCK	0x0001
#endif
#ifndef GRND_RANDOM
#define GRND_RANDOM	0x0002
#endif

static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
						     struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

#define P_FLAG(n) \
	if (flags & GRND_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~GRND_##n; \
	}

	P_FLAG(RANDOM);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags

#define STRARRAY(name, array) \
	{ .scnprintf = SCA_STRARRAY, \
	  .parm	     = &strarray__##array, }

#include "trace/beauty/arch_errno_names.c"
#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
#include "trace/beauty/open_flags.c"
#include "trace/beauty/perf_event_open.c"
#include "trace/beauty/pid.c"
#include "trace/beauty/sched_policy.c"
#include "trace/beauty/seccomp.c"
#include "trace/beauty/signum.c"
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"

struct syscall_arg_fmt {
	size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	void *parm;
	const char *name;
	bool show_zero;
};

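/*
 * Must be kept sorted by name, as syscall_fmt__find() does a bsearch() on it.
 */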
static struct syscall_fmt {
	const char *name;
	const char *alias;
	struct syscall_arg_fmt arg[6];
	u8 nr_args;
	bool errpid;
	bool timeout;
	bool hexret;
} syscall_fmts[] = {
	{ .name = "access",
	  .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
	{ .name = "bpf",
	  .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
	{ .name = "brk", .hexret = true,
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* brk */ }, }, },
	{ .name = "clock_gettime",
	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
	{ .name = "clone", .errpid = true, .nr_args = 5,
	  .arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, },
		   [1] = { .name = "child_stack", .scnprintf = SCA_HEX, },
		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
		   [3] = { .name = "child_tidptr", .scnprintf = SCA_HEX, },
		   [4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, },
	{ .name = "close",
	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
	{ .name = "epoll_ctl",
	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
	{ .name = "eventfd2",
	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
	{ .name = "fchmodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "fchownat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "fcntl",
	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
			   .parm = &strarrays__fcntl_cmds_arrays,
			   .show_zero = true, },
		   [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
	{ .name = "flock",
	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
	{ .name = "fstat", .alias = "newfstat", },
	{ .name = "fstatat", .alias = "newfstatat", },
	{ .name = "futex",
	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
	{ .name = "futimesat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "getitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name = "getpid", .errpid = true, },
	{ .name = "getpgid", .errpid = true, },
	{ .name = "getppid", .errpid = true, },
	{ .name = "getrandom",
	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
	{ .name = "getrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name = "gettid", .errpid = true, },
	{ .name = "ioctl",
	  .arg = {
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
 */
		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#else
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#endif
	{ .name = "kcmp", .nr_args = 5,
	  .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, },
		   [1] = { .name = "pid2", .scnprintf = SCA_PID, },
		   [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, },
		   [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, },
		   [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, },
	{ .name = "keyctl",
	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
	{ .name = "kill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "linkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "lseek",
	  .arg = { [2] = STRARRAY(whence, whences), }, },
	{ .name = "lstat", .alias = "newlstat", },
	{ .name = "madvise",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
	{ .name = "mkdirat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "mknodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "mlock",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* addr */ }, }, },
	{ .name = "mlockall",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* addr */ }, }, },
	{ .name = "mmap", .hexret = true,
/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
	.alias = "old_mmap",
#endif
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* addr */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
		   [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */ }, }, },
	{ .name = "mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, },
	{ .name = "mq_unlink",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
	{ .name = "mremap", .hexret = true,
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* addr */ },
		   [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ },
		   [4] = { .scnprintf = SCA_HEX, /* new_addr */ }, }, },
	{ .name = "munlock",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* addr */ }, }, },
	{ .name = "munmap",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* addr */ }, }, },
	{ .name = "name_to_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "newfstatat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "open",
	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name = "open_by_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name = "openat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name = "perf_event_open",
	  .arg = { [2] = { .scnprintf = SCA_INT, /* cpu */ },
		   [3] = { .scnprintf = SCA_FD, /* group_fd */ },
		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
	{ .name = "pipe2",
	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
	{ .name = "pkey_alloc",
	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, },
	{ .name = "pkey_free",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, },
	{ .name = "pkey_mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
		   [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
	{ .name = "poll", .timeout = true, },
	{ .name = "ppoll", .timeout = true, },
	{ .name = "prctl", .alias = "arch_prctl",
	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ },
		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
	{ .name = "pread", .alias = "pread64", },
	{ .name = "preadv", .alias = "pread", },
	{ .name = "prlimit64",
	  .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
	{ .name = "pwrite", .alias = "pwrite64", },
	{ .name = "readlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "recvfrom",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "recvmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "recvmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "renameat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "rt_sigaction",
	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "rt_sigprocmask",
	  .arg = { [0] = STRARRAY(how, sighow), }, },
	{ .name = "rt_sigqueueinfo",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "rt_tgsigqueueinfo",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "sched_setscheduler",
	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
	{ .name = "seccomp",
	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ },
		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
	{ .name = "select", .timeout = true, },
	{ .name = "sendmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "sendmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "sendto",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "set_tid_address", .errpid = true, },
	{ .name = "setitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name = "setrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name = "socket",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, }, },
	{ .name = "socketpair",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, }, },
	{ .name = "stat", .alias = "newstat", },
	{ .name = "statx",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ },
		   [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ },
		   [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, },
	{ .name = "swapoff",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name = "swapon",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name = "symlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "tgkill",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "tkill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "uname", .alias = "newuname", },
	{ .name = "unlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "utimensat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
	{ .name = "wait4", .errpid = true,
	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
	{ .name = "waitid", .errpid = true,
	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
};

static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

struct syscall {
	struct event_format *tp_format;
	int nr_args;
	struct format_field *args;
	const char *name;
	bool is_exit;
	struct syscall_fmt *fmt;
	struct syscall_arg_fmt *arg_fmt;
};

/*
 * We need this 'calculated' boolean because in some cases we really
 * don't know the duration of a syscall, for instance, when we start
 * a session and some threads are waiting for a syscall to finish, say 'poll',
 * in which case all we can do is print "( ? )" for the duration and for the
 * start timestamp.
 */
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, "         ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}

/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 *                formatter, for instance, fcntl may return fds, file flags, etc.
 */
struct thread_trace {
	u64 entry_time;
	bool entry_pending;
	unsigned long nr_events;
	unsigned long pfmaj, pfmin;
	char *entry_str;
	double runtime_ms;
	size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	struct {
		unsigned long ptr;
		short int entry_str_pos;
		bool pending_open;
		unsigned int namelen;
		char *name;
	} filename;
	struct {
		int max;
		char **table;
	} paths;

	struct intlist *syscall_stats;
};

static struct thread_trace *thread_trace__new(void)
{
	struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));

	/* only touch the new struct if the allocation succeeded */
	if (ttrace) {
		ttrace->paths.max = -1;
		ttrace->syscall_stats = intlist__new(NULL);
	}

	return ttrace;
}

static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread__priv(thread) == NULL)
		thread__set_priv(thread, thread_trace__new());

	if (thread__priv(thread) == NULL)
		goto fail;

	ttrace = thread__priv(thread);
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}

void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
{
	struct thread_trace *ttrace = thread__priv(arg->thread);

	ttrace->ret_scnprintf = ret_scnprintf;
}

#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

static const size_t trace__entry_str_size = 2048;

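/*
 * Grow the per-thread fd -> pathname table on demand, zeroing the newly
 * allocated slots, and remember the path for this fd.
 */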
static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (fd > ttrace->paths.max) {
		char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *));

		if (npath == NULL)
			return -1;

		if (ttrace->paths.max != -1) {
			memset(npath + ttrace->paths.max + 1, 0,
			       (fd - ttrace->paths.max) * sizeof(char *));
		} else {
			memset(npath, 0, (fd + 1) * sizeof(char *));
		}

		ttrace->paths.table = npath;
		ttrace->paths.max = fd;
	}

	ttrace->paths.table[fd] = strdup(pathname);

	return ttrace->paths.table[fd] != NULL ? 0 : -1;
}

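/*
 * Resolve an fd to a path by reading the /proc/<pid>/fd/<fd> symlink, or
 * the per-task variant when the tid is not the thread group leader.
 */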
static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread->pid_ == thread->tid) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread->pid_, fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
	}

	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';
	return trace__set_fd_pathname(thread, fd, pathname);
}

static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL)
		return NULL;

	if (fd < 0)
		return NULL;

	if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
		if (!trace->live)
			return NULL;
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->paths.table[fd];
}

size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = scnprintf(bf, size, "%d", fd);
	const char *path = thread__fd_path(arg->thread, fd, arg->trace);

	if (path)
		printed += scnprintf(bf + printed, size - printed, "<%s>", path);

	return printed;
}

size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
{
	size_t printed = scnprintf(bf, size, "%d", fd);
	struct thread *thread = machine__find_thread(trace->host, pid, pid);

	if (thread) {
		const char *path = thread__fd_path(thread, fd, trace);

		if (path)
			printed += scnprintf(bf + printed, size - printed, "<%s>", path);

		thread__put(thread);
	}

	return printed;
}

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <= ttrace->paths.max)
		zfree(&ttrace->paths.table[fd]);

	return printed;
}

static void thread__set_filename_pos(struct thread *thread, const char *bf,
				     unsigned long ptr)
{
	struct thread_trace *ttrace = thread__priv(thread);

	ttrace->filename.ptr = ptr;
	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	unsigned long ptr = arg->val;

	if (!arg->trace->vfs_getname)
		return scnprintf(bf, size, "%#lx", ptr);

	thread__set_filename_pos(arg->thread, bf, ptr);
	return 0;
}

static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}

/*
 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
 * using ttrace->entry_time for a thread that receives a sys_exit without
 * first having received a sys_enter ("poll" issued before the tracing
 * session starts, or a sys_enter lost to ring buffer overflow).
 */
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	if (tstamp > 0)
		return __trace__fprintf_tstamp(trace, tstamp, fp);

	return fprintf(fp, "         ? ");
}

static bool done = false;
static bool interrupted = false;

static void sig_handler(int sig)
{
	done = true;
	interrupted = sig == SIGINT;
}

static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
	size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
	printed += fprintf_duration(duration, duration_calculated, fp);

	if (trace->multiple_threads) {
		if (trace->show_comm)
			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
		printed += fprintf(fp, "%d ", thread->tid);
	}

	return printed;
}

static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event, struct perf_sample *sample)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event, sample);
		break;
	default:
		ret = machine__process_event(machine, event, sample);
		break;
	}

	return ret;
}

static int trace__tool_process(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	return trace__process_event(trace, machine, event, sample);
}

static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;

	if (machine->kptr_restrict_warned)
		return NULL;

	if (symbol_conf.kptr_restrict) {
		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
			   "Check /proc/sys/kernel/kptr_restrict.\n\n"
			   "Kernel samples will not be resolved.\n");
		machine->kptr_restrict_warned = true;
		return NULL;
	}

	return machine__resolve_kernel_addr(vmachine, addrp, modp);
}

static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
{
	int err = symbol__init(NULL);

	if (err)
		return err;

	trace->host = machine__new_host();
	if (trace->host == NULL)
		return -ENOMEM;

	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
	if (err < 0)
		goto out;

	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
					    evlist->threads, trace__tool_process, false,
					    trace->opts.proc_map_timeout, 1);
out:
	if (err)
		symbol__exit();

	return err;
}

static void trace__symbols__exit(struct trace *trace)
{
	machine__exit(trace->host);
	trace->host = NULL;

	symbol__exit();
}

static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
	int idx;

	if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
		nr_args = sc->fmt->nr_args;

	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
	if (sc->arg_fmt == NULL)
		return -1;

	for (idx = 0; idx < nr_args; ++idx) {
		if (sc->fmt)
			sc->arg_fmt[idx] = sc->fmt->arg[idx];
	}

	sc->nr_args = nr_args;
	return 0;
}

static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct format_field *field;
	int idx = 0, len;

	for (field = sc->args; field; field = field->next, ++idx) {
		if (sc->fmt && sc->fmt->arg[idx].scnprintf)
			continue;

		if (strcmp(field->type, "const char *") == 0 &&
		    (strcmp(field->name, "filename") == 0 ||
		     strcmp(field->name, "path") == 0 ||
		     strcmp(field->name, "pathname") == 0))
			sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
		else if (field->flags & FIELD_IS_POINTER)
			sc->arg_fmt[idx].scnprintf = syscall_arg__scnprintf_hex;
		else if (strcmp(field->type, "pid_t") == 0)
			sc->arg_fmt[idx].scnprintf = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			sc->arg_fmt[idx].scnprintf = SCA_MODE_T;
		else if ((strcmp(field->type, "int") == 0 ||
			  strcmp(field->type, "unsigned int") == 0 ||
			  strcmp(field->type, "long") == 0) &&
			 (len = strlen(field->name)) >= 2 &&
			 strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 * 65 int
			 * 23 unsigned int
			 * 7 unsigned long
			 */
			sc->arg_fmt[idx].scnprintf = SCA_FD;
		}
	}

	return 0;
}

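/*
 * Lazily grow the syscall table up to 'id' and fill in this entry: name,
 * formatter overrides, tracepoint format and per-argument printers.
 */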
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = syscalltbl__name(trace->sctbl, id);

	if (name == NULL)
		return -1;

	if (id > trace->syscalls.max) {
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)
			return -1;

		if (trace->syscalls.max != -1) {
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
		} else {
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
		}

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max = id;
	}

	sc = trace->syscalls.table + id;
	sc->name = name;

	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
		return -1;

	if (IS_ERR(sc->tp_format))
		return -1;

	sc->args = sc->tp_format->format.fields;
	/*
	 * The first field, '__syscall_nr' ('nr' on older kernels), carries
	 * the syscall number, which is redundant here, so skip it when
	 * present.
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");

	return syscall__set_arg_fmts(sc);
}

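/*
 * Translate the -e syscall name list (which may contain globs) into an
 * array of syscall ids, growing the array as glob matches are found.
 */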
static int trace__validate_ev_qualifier(struct trace *trace)
{
	int err = 0, i;
	size_t nr_allocated;
	struct str_node *pos;

	trace->ev_qualifier_ids.nr = strlist__nr_entries(trace->ev_qualifier);
	trace->ev_qualifier_ids.entries = malloc(trace->ev_qualifier_ids.nr *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
		      trace->output);
		err = -EINVAL;
		goto out;
	}

	nr_allocated = trace->ev_qualifier_ids.nr;
	i = 0;

	strlist__for_each_entry(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;

		if (id < 0) {
			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
			if (id >= 0)
				goto matches;

			if (err == 0) {
				fputs("Error:\tInvalid syscall ", trace->output);
				err = -EINVAL;
			} else {
				fputs(", ", trace->output);
			}

			fputs(sc, trace->output);
		}
matches:
		trace->ev_qualifier_ids.entries[i++] = id;
		if (match_next == -1)
			continue;

		while (1) {
			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
			if (id < 0)
				break;
			if (nr_allocated == trace->ev_qualifier_ids.nr) {
				void *entries;

				nr_allocated += 8;
				entries = realloc(trace->ev_qualifier_ids.entries,
						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
				if (entries == NULL) {
					err = -ENOMEM;
					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
					goto out_free;
				}
				trace->ev_qualifier_ids.entries = entries;
			}
			trace->ev_qualifier_ids.nr++;
			trace->ev_qualifier_ids.entries[i++] = id;
		}
	}

	if (err < 0) {
		fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
		      "\nHint:\tand: 'man syscalls'\n", trace->output);
out_free:
		zfree(&trace->ev_qualifier_ids.entries);
		trace->ev_qualifier_ids.nr = 0;
	}
out:
	return err;
}

/*
 * args is to be interpreted as a series of longs but we need to handle
 * 8-byte unaligned accesses. args points to raw_data within the event
 * and raw_data is not guaranteed to be 8-byte aligned, because it is
 * preceded by raw_size, which is a u32. So we need to copy args to a temp
 * variable to read it. Most notably this avoids extended load instructions
 * on unaligned addresses.
 */
unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
{
	unsigned long val;
	unsigned char *p = arg->args + sizeof(unsigned long) * idx;

	memcpy(&val, p, sizeof(val));
	return val;
}

static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
				      struct syscall_arg *arg)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);

	return scnprintf(bf, size, "arg%d: ", arg->idx);
}

static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
				     struct syscall_arg *arg, unsigned long val)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].scnprintf) {
		arg->val = val;
		if (sc->arg_fmt[arg->idx].parm)
			arg->parm = sc->arg_fmt[arg->idx].parm;
		return sc->arg_fmt[arg->idx].scnprintf(bf, size, arg);
	}
	return scnprintf(bf, size, "%ld", val);
}

static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, struct trace *trace,
				      struct thread *thread)
{
	size_t printed = 0;
	unsigned long val;
	u8 bit = 1;
	struct syscall_arg arg = {
		.args	= args,
		.idx	= 0,
		.mask	= 0,
		.trace	= trace,
		.thread	= thread,
	};
	struct thread_trace *ttrace = thread__priv(thread);

	/*
	 * Things like fcntl will set this in its 'cmd' formatter to pick the
	 * right formatter for the return value (an fd? file flags?), which is
	 * not needed for syscalls that always return a given type, say an fd.
	 */
	ttrace->ret_scnprintf = NULL;

	if (sc->args != NULL) {
		struct format_field *field;

		for (field = sc->args; field;
		     field = field->next, ++arg.idx, bit <<= 1) {
			if (arg.mask & bit)
				continue;

			val = syscall_arg__val(&arg, arg.idx);

			/*
			 * Suppress this argument if its value is zero and we
			 * don't have a string associated in a strarray for it.
			 */
			if (val == 0 &&
			    !(sc->arg_fmt &&
			      (sc->arg_fmt[arg.idx].show_zero ||
			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
			      sc->arg_fmt[arg.idx].parm))
				continue;

			printed += scnprintf(bf + printed, size - printed,
					     "%s%s: ", printed ? ", " : "", field->name);
			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
		}
	} else if (IS_ERR(sc->tp_format)) {
		/*
		 * If we managed to read the tracepoint /format file, then we
		 * may end up not having any args, like with gettid(), so only
		 * print the raw args when we didn't manage to read it.
		 */
		while (arg.idx < sc->nr_args) {
			if (arg.mask & bit)
				goto next_arg;
			val = syscall_arg__val(&arg, arg.idx);
			if (printed)
				printed += scnprintf(bf + printed, size - printed, ", ");
			printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
next_arg:
			++arg.idx;
			bit <<= 1;
		}
	}

	return printed;
}

typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
				  union perf_event *event,
				  struct perf_sample *sample);

static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel, int id)
{

	if (id < 0) {

		/*
		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
		 * before that, leaving at a higher verbosity level till that is
		 * explained. Reproduced with plain ftrace with:
		 *
		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
		 * grep "NR -1 " /t/trace_pipe
		 *
		 * After generating some load on the machine.
		 */
		if (verbose > 1) {
			static u64 n;
			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
				id, perf_evsel__name(evsel), ++n);
		}
		return NULL;
	}

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))
		goto out_cant_read;

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
		goto out_cant_read;

	return &trace->syscalls.table[id];

out_cant_read:
	if (verbose > 0) {
		fprintf(trace->output, "Problems reading syscall %d", id);
		if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
		fputs(" information\n", trace->output);
	}
	return NULL;
}

static void thread__update_stats(struct thread_trace *ttrace,
				 int id, struct perf_sample *sample)
{
	struct int_node *inode;
	struct stats *stats;
	u64 duration = 0;

	inode = intlist__findnew(ttrace->syscall_stats, id);
	if (inode == NULL)
		return;

	stats = inode->priv;
	if (stats == NULL) {
		stats = malloc(sizeof(struct stats));
		if (stats == NULL)
			return;
		init_stats(stats);
		inode->priv = stats;
	}

	if (ttrace->entry_time && sample->time > ttrace->entry_time)
		duration = sample->time - ttrace->entry_time;

	update_stats(stats, duration);
}

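/*
 * If another event arrives while a sys_enter is still pending for the
 * current thread, flush the half-formatted entry line, suffixed with
 * "...", so the matching sys_exit can be printed as a continuation.
 */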
static int trace__printf_interrupted_entry(struct trace *trace)
{
	struct thread_trace *ttrace;
	size_t printed;

	if (trace->failure_only || trace->current == NULL)
		return 0;

	ttrace = thread__priv(trace->current);

	if (!ttrace->entry_pending)
		return 0;

	printed  = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
	printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
	ttrace->entry_pending = false;

	return printed;
}

static int trace__fprintf_sample(struct trace *trace, struct perf_evsel *evsel,
				 struct perf_sample *sample, struct thread *thread)
{
	int printed = 0;

	if (trace->print_sample) {
		double ts = (double)sample->time / NSEC_PER_MSEC;

		printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
				   perf_evsel__name(evsel), ts,
				   thread__comm_str(thread),
				   sample->pid, sample->tid, sample->cpu);
	}

	return printed;
}

static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	size_t printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace);

	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, trace, thread);

	if (sc->is_exit) {
		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
			fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evsel,
				    struct perf_sample *sample,
				    struct callchain_cursor *cursor)
{
	struct addr_location al;
	int max_stack = evsel->attr.sample_max_stack ?
			evsel->attr.sample_max_stack :
			trace->max_stack;

	if (machine__resolve(trace->host, &al, sample) < 0 ||
	    thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack))
		return -1;

	return 0;
}

static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
{
	/* TODO: user-configurable print_opts */
	const unsigned int print_opts = EVSEL__PRINT_SYM |
					EVSEL__PRINT_DSO |
					EVSEL__PRINT_UNKNOWN_AS_ADDR;

	return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
}

static const char *errno_to_name(struct perf_evsel *evsel, int err)
{
	struct perf_env *env = perf_evsel__env(evsel);
	const char *arch_name = perf_env__arch(env);

	return arch_syscalls__strerrno(arch_name, err);
}

static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
			   union perf_event *event __maybe_unused,
			   struct perf_sample *sample)
{
	long ret;
	u64 duration = 0;
	bool duration_calculated = false;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	if (trace->summary)
		thread__update_stats(ttrace, id, sample);

	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);

	if (id == trace->open_id && ret >= 0 && ttrace->filename.pending_open) {
		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
		ttrace->filename.pending_open = false;
		++trace->stats.vfs_getname;
	}

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
		duration_calculated = true;
	} else if (trace->duration_filter)
		goto out;

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	if (trace->summary_only || (ret >= 0 && trace->failure_only))
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);

	if (ttrace->entry_pending) {
		fprintf(trace->output, "%-70s", ttrace->entry_str);
	} else {
		fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		fprintf(trace->output, "]: %s()", sc->name);
	}

	if (sc->fmt == NULL) {
		if (ret < 0)
			goto errno_print;
signed_print:
		fprintf(trace->output, ") = %ld", ret);
	} else if (ret < 0) {
errno_print: {
		char bf[STRERR_BUFSIZE];
		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
			   *e = errno_to_name(evsel, -ret);

		fprintf(trace->output, ") = -1 %s %s", e, emsg);
	}
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, ") = 0 Timeout");
	else if (ttrace->ret_scnprintf) {
		char bf[1024];
		struct syscall_arg arg = {
			.val	= ret,
			.thread	= thread,
			.trace	= trace,
		};
		ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
		ttrace->ret_scnprintf = NULL;
		fprintf(trace->output, ") = %s", bf);
	} else if (sc->fmt->hexret)
		fprintf(trace->output, ") = %#lx", ret);
	else if (sc->fmt->errpid) {
		struct thread *child = machine__find_thread(trace->host, ret, ret);

		if (child != NULL) {
			fprintf(trace->output, ") = %ld", ret);
			if (child->comm_set)
				fprintf(trace->output, " (%s)", thread__comm_str(child));
			thread__put(child);
		}
	} else
		goto signed_print;

	fputc('\n', trace->output);

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
out:
	ttrace->entry_pending = false;
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

1812static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
1813 union perf_event *event __maybe_unused,
1814 struct perf_sample *sample)
1815{
1816 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1817 struct thread_trace *ttrace;
1818 size_t filename_len, entry_str_len, to_move;
1819 ssize_t remaining_space;
1820 char *pos;
1821 const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");
1822
1823 if (!thread)
1824 goto out;
1825
1826 ttrace = thread__priv(thread);
1827 if (!ttrace)
1828 goto out_put;
1829
1830 filename_len = strlen(filename);
1831 if (filename_len == 0)
1832 goto out_put;
1833
1834 if (ttrace->filename.namelen < filename_len) {
1835 char *f = realloc(ttrace->filename.name, filename_len + 1);
1836
1837 if (f == NULL)
1838 goto out_put;
1839
1840 ttrace->filename.namelen = filename_len;
1841 ttrace->filename.name = f;
1842 }
1843
1844 strcpy(ttrace->filename.name, filename);
1845 ttrace->filename.pending_open = true;
1846
1847 if (!ttrace->filename.ptr)
1848 goto out_put;
1849
1850 entry_str_len = strlen(ttrace->entry_str);
1851 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
1852 if (remaining_space <= 0)
1853 goto out_put;
1854
1855 if (filename_len > (size_t)remaining_space) {
1856 filename += filename_len - remaining_space;
1857 filename_len = remaining_space;
1858 }
1859
1860 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
1861 pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
1862 memmove(pos + filename_len, pos, to_move);
1863 memcpy(pos, filename, filename_len);
1864
1865 ttrace->filename.ptr = 0;
1866 ttrace->filename.entry_str_pos = 0;
1867out_put:
1868 thread__put(thread);
1869out:
1870 return 0;
1871}
1872
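/*
 * sched:sched_stat_runtime handler: accumulate CPU time per thread and
 * globally, feeding the "msec" column printed with --sched in the summary.
 */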
1873static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
1874 union perf_event *event __maybe_unused,
1875 struct perf_sample *sample)
1876{
1877 u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
1878 double runtime_ms = (double)runtime / NSEC_PER_MSEC;
1879 struct thread *thread = machine__findnew_thread(trace->host,
1880 sample->pid,
1881 sample->tid);
1882 struct thread_trace *ttrace = thread__trace(thread, trace->output);
1883
1884 if (ttrace == NULL)
1885 goto out_dump;
1886
1887 ttrace->runtime_ms += runtime_ms;
1888 trace->runtime_ms += runtime_ms;
1889out_put:
1890 thread__put(thread);
1891 return 0;
1892
1893out_dump:
1894	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 "\n",
1895 evsel->name,
1896 perf_evsel__strval(evsel, sample, "comm"),
1897 (pid_t)perf_evsel__intval(evsel, sample, "pid"),
1898 runtime,
1899 perf_evsel__intval(evsel, sample, "vruntime"));
1900 goto out_put;
1901}
1902
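/*
 * binary__fprintf() callback used to dump the raw payload of BPF output
 * events: printable bytes are shown as-is, everything else as a dot.
 */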
1903static int bpf_output__printer(enum binary_printer_ops op,
1904 unsigned int val, void *extra __maybe_unused, FILE *fp)
1905{
1906 unsigned char ch = (unsigned char)val;
1907
1908 switch (op) {
1909 case BINARY_PRINT_CHAR_DATA:
1910 return fprintf(fp, "%c", isprint(ch) ? ch : '.');
1911 case BINARY_PRINT_DATA_BEGIN:
1912 case BINARY_PRINT_LINE_BEGIN:
1913 case BINARY_PRINT_ADDR:
1914 case BINARY_PRINT_NUM_DATA:
1915 case BINARY_PRINT_NUM_PAD:
1916 case BINARY_PRINT_SEP:
1917 case BINARY_PRINT_CHAR_PAD:
1918 case BINARY_PRINT_LINE_END:
1919 case BINARY_PRINT_DATA_END:
1920 default:
1921 break;
1922 }
1923
1924 return 0;
1925}
1926
1927static void bpf_output__fprintf(struct trace *trace,
1928 struct perf_sample *sample)
1929{
1930 binary__fprintf(sample->raw_data, sample->raw_size, 8,
1931 bpf_output__printer, NULL, trace->output);
1932}
1933
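/*
 * Handler for all the other (non-syscall, non-pagefault) events selected
 * with --event: print a timestamp, the event name and its payload, either
 * via the tracepoint format or, for BPF output events, as a printable-ASCII
 * dump of the raw data.
 */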
1934static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
1935 union perf_event *event __maybe_unused,
1936 struct perf_sample *sample)
1937{
1938 int callchain_ret = 0;
1939
1940 if (sample->callchain) {
1941 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
1942 if (callchain_ret == 0) {
1943 if (callchain_cursor.nr < trace->min_stack)
1944 goto out;
1945 callchain_ret = 1;
1946 }
1947 }
1948
1949 trace__printf_interrupted_entry(trace);
1950 trace__fprintf_tstamp(trace, sample->time, trace->output);
1951
1952 if (trace->trace_syscalls)
1953 fprintf(trace->output, "( ): ");
1954
1955 fprintf(trace->output, "%s:", evsel->name);
1956
1957 if (perf_evsel__is_bpf_output(evsel)) {
1958 bpf_output__fprintf(trace, sample);
1959 } else if (evsel->tp_format) {
1960 event_format__fprintf(evsel->tp_format, sample->cpu,
1961 sample->raw_data, sample->raw_size,
1962 trace->output);
1963 }
1964
1965 fprintf(trace->output, "\n");
1966
1967 if (callchain_ret > 0)
1968 trace__fprintf_callchain(trace, sample);
1969 else if (callchain_ret < 0)
1970 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
1971out:
1972 return 0;
1973}
1974
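/*
 * Print "dso@symbol+offset" for a resolved address, falling back to the
 * raw map or sample address when dso/symbol information is unavailable.
 */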
1975static void print_location(FILE *f, struct perf_sample *sample,
1976 struct addr_location *al,
1977 bool print_dso, bool print_sym)
1978{
1979
1980 if ((verbose > 0 || print_dso) && al->map)
1981 fprintf(f, "%s@", al->map->dso->long_name);
1982
1983 if ((verbose > 0 || print_sym) && al->sym)
1984 fprintf(f, "%s+0x%" PRIx64, al->sym->name,
1985 al->addr - al->sym->start);
1986 else if (al->map)
1987 fprintf(f, "0x%" PRIx64, al->addr);
1988 else
1989 fprintf(f, "0x%" PRIx64, sample->addr);
1990}
1991
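/*
 * Software page fault handler: count minor/major faults per thread and,
 * unless --summary is all that was asked for, print where the fault
 * happened (ip) and which map/symbol the faulting address resolves to.
 */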
1992static int trace__pgfault(struct trace *trace,
1993 struct perf_evsel *evsel,
1994 union perf_event *event __maybe_unused,
1995 struct perf_sample *sample)
1996{
1997 struct thread *thread;
1998 struct addr_location al;
1999 char map_type = 'd';
2000 struct thread_trace *ttrace;
2001 int err = -1;
2002 int callchain_ret = 0;
2003
2004 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2005
2006 if (sample->callchain) {
2007 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2008 if (callchain_ret == 0) {
2009 if (callchain_cursor.nr < trace->min_stack)
2010 goto out_put;
2011 callchain_ret = 1;
2012 }
2013 }
2014
2015 ttrace = thread__trace(thread, trace->output);
2016 if (ttrace == NULL)
2017 goto out_put;
2018
2019 if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
2020 ttrace->pfmaj++;
2021 else
2022 ttrace->pfmin++;
2023
2024 if (trace->summary_only)
2025 goto out;
2026
2027 thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION,
2028 sample->ip, &al);
2029
2030 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2031
2032 fprintf(trace->output, "%sfault [",
2033 evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
2034 "maj" : "min");
2035
2036 print_location(trace->output, sample, &al, false, true);
2037
2038 fprintf(trace->output, "] => ");
2039
2040 thread__find_addr_location(thread, sample->cpumode, MAP__VARIABLE,
2041 sample->addr, &al);
2042
2043 if (!al.map) {
2044 thread__find_addr_location(thread, sample->cpumode,
2045 MAP__FUNCTION, sample->addr, &al);
2046
2047 if (al.map)
2048 map_type = 'x';
2049 else
2050 map_type = '?';
2051 }
2052
2053 print_location(trace->output, sample, &al, true, false);
2054
2055 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2056
2057 if (callchain_ret > 0)
2058 trace__fprintf_callchain(trace, sample);
2059 else if (callchain_ret < 0)
2060 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2061out:
2062 err = 0;
2063out_put:
2064 thread__put(thread);
2065 return err;
2066}
2067
2068static void trace__set_base_time(struct trace *trace,
2069 struct perf_evsel *evsel,
2070 struct perf_sample *sample)
2071{
2072 /*
2073 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
2074 * and don't use sample->time unconditionally, we may end up having
2075 * some other event in the future without PERF_SAMPLE_TIME for good
2076 * reason, i.e. we may not be interested in its timestamps, just in
2077 * it taking place, picking some piece of information when it
2078 * appears in our event stream (vfs_getname comes to mind).
2079 */
2080 if (trace->base_time == 0 && !trace->full_time &&
2081 (evsel->attr.sample_type & PERF_SAMPLE_TIME))
2082 trace->base_time = sample->time;
2083}
2084
2085static int trace__process_sample(struct perf_tool *tool,
2086 union perf_event *event,
2087 struct perf_sample *sample,
2088 struct perf_evsel *evsel,
2089 struct machine *machine __maybe_unused)
2090{
2091 struct trace *trace = container_of(tool, struct trace, tool);
2092 struct thread *thread;
2093 int err = 0;
2094
2095 tracepoint_handler handler = evsel->handler;
2096
2097 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2098 if (thread && thread__is_filtered(thread))
2099 goto out;
2100
2101 trace__set_base_time(trace, evsel, sample);
2102
2103 if (handler) {
2104 ++trace->nr_events;
2105 handler(trace, evsel, event, sample);
2106 }
2107out:
2108 thread__put(thread);
2109 return err;
2110}
2111
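/*
 * 'perf trace record': synthesize a 'perf record' command line with raw
 * sample records (-R) and the raw_syscalls/page fault events implied by
 * the current options, append the user supplied arguments, then hand it
 * all over to cmd_record().
 */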
2112static int trace__record(struct trace *trace, int argc, const char **argv)
2113{
2114 unsigned int rec_argc, i, j;
2115 const char **rec_argv;
2116 const char * const record_args[] = {
2117 "record",
2118 "-R",
2119 "-m", "1024",
2120 "-c", "1",
2121 };
2122
2123 const char * const sc_args[] = { "-e", };
2124 unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
2125 const char * const majpf_args[] = { "-e", "major-faults" };
2126 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
2127 const char * const minpf_args[] = { "-e", "minor-faults" };
2128 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
2129
2130 /* +1 is for the event string below */
2131 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
2132 majpf_args_nr + minpf_args_nr + argc;
2133 rec_argv = calloc(rec_argc + 1, sizeof(char *));
2134
2135 if (rec_argv == NULL)
2136 return -ENOMEM;
2137
2138 j = 0;
2139 for (i = 0; i < ARRAY_SIZE(record_args); i++)
2140 rec_argv[j++] = record_args[i];
2141
2142 if (trace->trace_syscalls) {
2143 for (i = 0; i < sc_args_nr; i++)
2144 rec_argv[j++] = sc_args[i];
2145
2146 /* event string may be different for older kernels - e.g., RHEL6 */
2147 if (is_valid_tracepoint("raw_syscalls:sys_enter"))
2148 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
2149 else if (is_valid_tracepoint("syscalls:sys_enter"))
2150 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
2151 else {
2152 pr_err("Neither raw_syscalls nor syscalls events exist.\n");
2153 free(rec_argv);
2154 return -1;
2155 }
2156 }
2157
2158 if (trace->trace_pgfaults & TRACE_PFMAJ)
2159 for (i = 0; i < majpf_args_nr; i++)
2160 rec_argv[j++] = majpf_args[i];
2161
2162 if (trace->trace_pgfaults & TRACE_PFMIN)
2163 for (i = 0; i < minpf_args_nr; i++)
2164 rec_argv[j++] = minpf_args[i];
2165
2166 for (i = 0; i < (unsigned int)argc; i++)
2167 rec_argv[j++] = argv[i];
2168
2169 return cmd_record(j, rec_argv);
2170}
2171
2172static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
2173
2174static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
2175{
2176 struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
2177
2178 if (IS_ERR(evsel))
2179 return false;
2180
2181 if (perf_evsel__field(evsel, "pathname") == NULL) {
2182 perf_evsel__delete(evsel);
2183 return false;
2184 }
2185
2186 evsel->handler = trace__vfs_getname;
2187 perf_evlist__add(evlist, evsel);
2188 return true;
2189}
2190
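/*
 * Software page fault events, one sample per fault (sample_period = 1),
 * with mmap_data set so that data mmaps are recorded and faulting
 * addresses can later be resolved to a map/symbol.
 */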
2191static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
2192{
2193 struct perf_evsel *evsel;
2194 struct perf_event_attr attr = {
2195 .type = PERF_TYPE_SOFTWARE,
2196 .mmap_data = 1,
2197 };
2198
2199 attr.config = config;
2200 attr.sample_period = 1;
2201
2202 event_attr_init(&attr);
2203
2204 evsel = perf_evsel__new(&attr);
2205 if (evsel)
2206 evsel->handler = trace__pgfault;
2207
2208 return evsel;
2209}
2210
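/*
 * Demultiplex one mmaped event: side-band records go to
 * trace__process_event(), PERF_RECORD_SAMPLEs are matched to their evsel
 * by sample id and dispatched to the handler set up for that event.
 */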
2211static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
2212{
2213 const u32 type = event->header.type;
2214 struct perf_evsel *evsel;
2215
2216 if (type != PERF_RECORD_SAMPLE) {
2217 trace__process_event(trace, trace->host, event, sample);
2218 return;
2219 }
2220
2221 evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
2222 if (evsel == NULL) {
2223 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
2224 return;
2225 }
2226
2227 trace__set_base_time(trace, evsel, sample);
2228
2229 if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
2230 sample->raw_data == NULL) {
2231 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
2232 perf_evsel__name(evsel), sample->tid,
2233 sample->cpu, sample->raw_size);
2234 } else {
2235 tracepoint_handler handler = evsel->handler;
2236 handler(trace, evsel, event, sample);
2237 }
2238}
2239
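/*
 * Set up the raw_syscalls:sys_{enter,exit} tracepoints that drive the
 * strace-like output, configuring callchains and the per-field accessors
 * (args pointer on enter, return value on exit).
 */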
2240static int trace__add_syscall_newtp(struct trace *trace)
2241{
2242 int ret = -1;
2243 struct perf_evlist *evlist = trace->evlist;
2244 struct perf_evsel *sys_enter, *sys_exit;
2245
2246 sys_enter = perf_evsel__syscall_newtp("sys_enter", trace__sys_enter);
2247 if (sys_enter == NULL)
2248 goto out;
2249
2250 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
2251 goto out_delete_sys_enter;
2252
2253 sys_exit = perf_evsel__syscall_newtp("sys_exit", trace__sys_exit);
2254 if (sys_exit == NULL)
2255 goto out_delete_sys_enter;
2256
2257 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
2258 goto out_delete_sys_exit;
2259
2260 perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
2261 perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
2262
2263 perf_evlist__add(evlist, sys_enter);
2264 perf_evlist__add(evlist, sys_exit);
2265
2266 if (callchain_param.enabled && !trace->kernel_syscallchains) {
2267 /*
2268 * We're interested only in the user space callchain
2269 * leading to the syscall, allow overriding that for
2270		 * debugging reasons using --kernel-syscall-graph
2271 */
2272 sys_exit->attr.exclude_callchain_kernel = 1;
2273 }
2274
2275 trace->syscalls.events.sys_enter = sys_enter;
2276 trace->syscalls.events.sys_exit = sys_exit;
2277
2278 ret = 0;
2279out:
2280 return ret;
2281
2282out_delete_sys_exit:
2283 perf_evsel__delete_priv(sys_exit);
2284out_delete_sys_enter:
2285 perf_evsel__delete_priv(sys_enter);
2286 goto out;
2287}
2288
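/*
 * Turn the list of syscall ids selected with -e into a tracepoint filter
 * on the "id" field and append it to both sys_enter and sys_exit, i.e.
 * something like "id == 2 || id == 3" (or the negated "id != ..." form
 * when the qualifier started with '!').
 */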
2289static int trace__set_ev_qualifier_filter(struct trace *trace)
2290{
2291 int err = -1;
2292 struct perf_evsel *sys_exit;
2293 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
2294 trace->ev_qualifier_ids.nr,
2295 trace->ev_qualifier_ids.entries);
2296
2297 if (filter == NULL)
2298 goto out_enomem;
2299
2300 if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
2301 filter)) {
2302 sys_exit = trace->syscalls.events.sys_exit;
2303 err = perf_evsel__append_tp_filter(sys_exit, filter);
2304 }
2305
2306 free(filter);
2307out:
2308 return err;
2309out_enomem:
2310 errno = ENOMEM;
2311 goto out;
2312}
2313
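/*
 * When tracing system wide with no explicit --filter-pids list, at least
 * filter out our own pid and, if our ancestry reaches an sshd, that sshd's
 * pid as well, to avoid the feedback loop of tracing the events generated
 * by writing our own output over the ssh connection.
 */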
2314static int trace__set_filter_loop_pids(struct trace *trace)
2315{
2316 unsigned int nr = 1;
2317 pid_t pids[32] = {
2318 getpid(),
2319 };
2320 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
2321
2322 while (thread && nr < ARRAY_SIZE(pids)) {
2323 struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
2324
2325 if (parent == NULL)
2326 break;
2327
2328 if (!strcmp(thread__comm_str(parent), "sshd")) {
2329 pids[nr++] = parent->tid;
2330 break;
2331 }
2332 thread = parent;
2333 }
2334
2335 return perf_evlist__set_filter_pids(trace->evlist, nr, pids);
2336}
2337
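/*
 * Live mode: create the events, (optionally) fork the workload, then loop
 * over the mmaped ring buffers handling samples until interrupted or until
 * the workload finishes and the buffers drain.
 */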
2338static int trace__run(struct trace *trace, int argc, const char **argv)
2339{
2340 struct perf_evlist *evlist = trace->evlist;
2341 struct perf_evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
2342 int err = -1, i;
2343 unsigned long before;
2344 const bool forks = argc > 0;
2345 bool draining = false;
2346
2347 trace->live = true;
2348
2349 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
2350 goto out_error_raw_syscalls;
2351
2352 if (trace->trace_syscalls)
2353 trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);
2354
2355 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
2356 pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
2357 if (pgfault_maj == NULL)
2358 goto out_error_mem;
2359 perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
2360 perf_evlist__add(evlist, pgfault_maj);
2361 }
2362
2363 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
2364 pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
2365 if (pgfault_min == NULL)
2366 goto out_error_mem;
2367 perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
2368 perf_evlist__add(evlist, pgfault_min);
2369 }
2370
2371 if (trace->sched &&
2372 perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
2373 trace__sched_stat_runtime))
2374 goto out_error_sched_stat_runtime;
2375
2376 /*
2377 * If a global cgroup was set, apply it to all the events without an
2378 * explicit cgroup. I.e.:
2379 *
2380 * trace -G A -e sched:*switch
2381 *
2382 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
2383 * _and_ sched:sched_switch to the 'A' cgroup, while:
2384 *
2385 * trace -e sched:*switch -G A
2386 *
2387 * will only set the sched:sched_switch event to the 'A' cgroup, all the
2388	 * other events (raw_syscalls:sys_{enter,exit}, etc) are left "without"
2389 * a cgroup (on the root cgroup, sys wide, etc).
2390 *
2391 * Multiple cgroups:
2392 *
2393 * trace -G A -e sched:*switch -G B
2394 *
2395 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
2396 * to the 'B' cgroup.
2397 *
2398 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
2399 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
2400 */
2401 if (trace->cgroup)
2402 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
2403
2404 err = perf_evlist__create_maps(evlist, &trace->opts.target);
2405 if (err < 0) {
2406 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
2407 goto out_delete_evlist;
2408 }
2409
2410 err = trace__symbols_init(trace, evlist);
2411 if (err < 0) {
2412 fprintf(trace->output, "Problems initializing symbol libraries!\n");
2413 goto out_delete_evlist;
2414 }
2415
2416 perf_evlist__config(evlist, &trace->opts, &callchain_param);
2417
2418 signal(SIGCHLD, sig_handler);
2419 signal(SIGINT, sig_handler);
2420
2421 if (forks) {
2422 err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
2423 argv, false, NULL);
2424 if (err < 0) {
2425 fprintf(trace->output, "Couldn't run the workload!\n");
2426 goto out_delete_evlist;
2427 }
2428 }
2429
2430 err = perf_evlist__open(evlist);
2431 if (err < 0)
2432 goto out_error_open;
2433
2434 err = bpf__apply_obj_config();
2435 if (err) {
2436 char errbuf[BUFSIZ];
2437
2438 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
2439 pr_err("ERROR: Apply config to BPF failed: %s\n",
2440 errbuf);
2441 goto out_error_open;
2442 }
2443
2444 /*
2445 * Better not use !target__has_task() here because we need to cover the
2446 * case where no threads were specified in the command line, but a
2447 * workload was, and in that case we will fill in the thread_map when
2448 * we fork the workload in perf_evlist__prepare_workload.
2449 */
2450 if (trace->filter_pids.nr > 0)
2451 err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
2452 else if (thread_map__pid(evlist->threads, 0) == -1)
2453 err = trace__set_filter_loop_pids(trace);
2454
2455 if (err < 0)
2456 goto out_error_mem;
2457
2458 if (trace->ev_qualifier_ids.nr > 0) {
2459 err = trace__set_ev_qualifier_filter(trace);
2460 if (err < 0)
2461 goto out_errno;
2462
2463 pr_debug("event qualifier tracepoint filter: %s\n",
2464 trace->syscalls.events.sys_exit->filter);
2465 }
2466
2467 err = perf_evlist__apply_filters(evlist, &evsel);
2468 if (err < 0)
2469 goto out_error_apply_filters;
2470
2471 err = perf_evlist__mmap(evlist, trace->opts.mmap_pages);
2472 if (err < 0)
2473 goto out_error_mmap;
2474
2475 if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
2476 perf_evlist__enable(evlist);
2477
2478 if (forks)
2479 perf_evlist__start_workload(evlist);
2480
2481 if (trace->opts.initial_delay) {
2482 usleep(trace->opts.initial_delay * 1000);
2483 perf_evlist__enable(evlist);
2484 }
2485
2486 trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
2487 evlist->threads->nr > 1 ||
2488 perf_evlist__first(evlist)->attr.inherit;
2489
2490 /*
2491 * Now that we already used evsel->attr to ask the kernel to setup the
2492 * events, lets reuse evsel->attr.sample_max_stack as the limit in
2493 * trace__resolve_callchain(), allowing per-event max-stack settings
2494	 * to override an explicitly set --max-stack global setting.
2495 */
2496 evlist__for_each_entry(evlist, evsel) {
2497 if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) &&
2498 evsel->attr.sample_max_stack == 0)
2499 evsel->attr.sample_max_stack = trace->max_stack;
2500 }
2501again:
2502 before = trace->nr_events;
2503
2504 for (i = 0; i < evlist->nr_mmaps; i++) {
2505 union perf_event *event;
2506 struct perf_mmap *md;
2507
2508 md = &evlist->mmap[i];
2509 if (perf_mmap__read_init(md) < 0)
2510 continue;
2511
2512 while ((event = perf_mmap__read_event(md)) != NULL) {
2513 struct perf_sample sample;
2514
2515 ++trace->nr_events;
2516
2517 err = perf_evlist__parse_sample(evlist, event, &sample);
2518 if (err) {
2519 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
2520 goto next_event;
2521 }
2522
2523 trace__handle_event(trace, event, &sample);
2524next_event:
2525 perf_mmap__consume(md);
2526
2527 if (interrupted)
2528 goto out_disable;
2529
2530 if (done && !draining) {
2531 perf_evlist__disable(evlist);
2532 draining = true;
2533 }
2534 }
2535 perf_mmap__read_done(md);
2536 }
2537
2538 if (trace->nr_events == before) {
2539 int timeout = done ? 100 : -1;
2540
2541 if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
2542 if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
2543 draining = true;
2544
2545 goto again;
2546 }
2547 } else {
2548 goto again;
2549 }
2550
2551out_disable:
2552 thread__zput(trace->current);
2553
2554 perf_evlist__disable(evlist);
2555
2556 if (!err) {
2557 if (trace->summary)
2558 trace__fprintf_thread_summary(trace, trace->output);
2559
2560 if (trace->show_tool_stats) {
2561 fprintf(trace->output, "Stats:\n "
2562 " vfs_getname : %" PRIu64 "\n"
2563 " proc_getname: %" PRIu64 "\n",
2564 trace->stats.vfs_getname,
2565 trace->stats.proc_getname);
2566 }
2567 }
2568
2569out_delete_evlist:
2570 trace__symbols__exit(trace);
2571
2572 perf_evlist__delete(evlist);
2573 cgroup__put(trace->cgroup);
2574 trace->evlist = NULL;
2575 trace->live = false;
2576 return err;
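/*
 * The block below is only reachable via the goto labels it contains; the
 * braces merely scope errbuf for those error paths.
 */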
2577{
2578 char errbuf[BUFSIZ];
2579
2580out_error_sched_stat_runtime:
2581 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
2582 goto out_error;
2583
2584out_error_raw_syscalls:
2585 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
2586 goto out_error;
2587
2588out_error_mmap:
2589 perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
2590 goto out_error;
2591
2592out_error_open:
2593 perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
2594
2595out_error:
2596 fprintf(trace->output, "%s\n", errbuf);
2597 goto out_delete_evlist;
2598
2599out_error_apply_filters:
2600 fprintf(trace->output,
2601 "Failed to set filter \"%s\" on event %s with %d (%s)\n",
2602 evsel->filter, perf_evsel__name(evsel), errno,
2603 str_error_r(errno, errbuf, sizeof(errbuf)));
2604 goto out_delete_evlist;
2605}
2606out_error_mem:
2607 fprintf(trace->output, "Not enough memory to run!\n");
2608 goto out_delete_evlist;
2609
2610out_errno:
2611 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
2612 goto out_delete_evlist;
2613}
2614
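/*
 * 'perf trace -i perf.data': replay a previously recorded session, wiring
 * up the same syscall/page fault handlers used in live mode and letting
 * the session code order events by timestamp.
 */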
2615static int trace__replay(struct trace *trace)
2616{
2617 const struct perf_evsel_str_handler handlers[] = {
2618 { "probe:vfs_getname", trace__vfs_getname, },
2619 };
2620 struct perf_data data = {
2621 .file = {
2622 .path = input_name,
2623 },
2624 .mode = PERF_DATA_MODE_READ,
2625 .force = trace->force,
2626 };
2627 struct perf_session *session;
2628 struct perf_evsel *evsel;
2629 int err = -1;
2630
2631 trace->tool.sample = trace__process_sample;
2632 trace->tool.mmap = perf_event__process_mmap;
2633 trace->tool.mmap2 = perf_event__process_mmap2;
2634 trace->tool.comm = perf_event__process_comm;
2635 trace->tool.exit = perf_event__process_exit;
2636 trace->tool.fork = perf_event__process_fork;
2637 trace->tool.attr = perf_event__process_attr;
2638 trace->tool.tracing_data = perf_event__process_tracing_data;
2639 trace->tool.build_id = perf_event__process_build_id;
2640 trace->tool.namespaces = perf_event__process_namespaces;
2641
2642 trace->tool.ordered_events = true;
2643 trace->tool.ordering_requires_timestamps = true;
2644
2645 /* add tid to output */
2646 trace->multiple_threads = true;
2647
2648 session = perf_session__new(&data, false, &trace->tool);
2649 if (session == NULL)
2650 return -1;
2651
2652 if (trace->opts.target.pid)
2653 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
2654
2655 if (trace->opts.target.tid)
2656 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
2657
2658 if (symbol__init(&session->header.env) < 0)
2659 goto out;
2660
2661 trace->host = &session->machines.host;
2662
2663 err = perf_session__set_tracepoints_handlers(session, handlers);
2664 if (err)
2665 goto out;
2666
2667 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2668 "raw_syscalls:sys_enter");
2669 /* older kernels have syscalls tp versus raw_syscalls */
2670 if (evsel == NULL)
2671 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2672 "syscalls:sys_enter");
2673
2674 if (evsel &&
2675 (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
2676 perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
2677		pr_err("Error initializing raw_syscalls:sys_enter event\n");
2678 goto out;
2679 }
2680
2681 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2682 "raw_syscalls:sys_exit");
2683 if (evsel == NULL)
2684 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2685 "syscalls:sys_exit");
2686 if (evsel &&
2687 (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
2688 perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
2689		pr_err("Error initializing raw_syscalls:sys_exit event\n");
2690 goto out;
2691 }
2692
2693 evlist__for_each_entry(session->evlist, evsel) {
2694 if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
2695 (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
2696 evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
2697 evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
2698 evsel->handler = trace__pgfault;
2699 }
2700
2701 setup_pager();
2702
2703 err = perf_session__process_events(session);
2704	if (err)
2705		pr_err("Failed to process events, error %d\n", err);
2707	else if (trace->summary)
2708 trace__fprintf_thread_summary(trace, trace->output);
2709
2710out:
2711 perf_session__delete(session);
2712
2713 return err;
2714}
2715
2716static size_t trace__fprintf_threads_header(FILE *fp)
2717{
2718 size_t printed;
2719
2720 printed = fprintf(fp, "\n Summary of events:\n\n");
2721
2722 return printed;
2723}
2724
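/*
 * Re-sort the per-thread intlist of syscall stats by total time spent
 * (msecs, descending) for the summary table; msecs is precomputed here as
 * number of calls * average duration.
 */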
2725DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
2726 struct stats *stats;
2727 double msecs;
2728 int syscall;
2729)
2730{
2731 struct int_node *source = rb_entry(nd, struct int_node, rb_node);
2732 struct stats *stats = source->priv;
2733
2734 entry->syscall = source->i;
2735 entry->stats = stats;
2736 entry->msecs = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
2737}
2738
2739static size_t thread__dump_stats(struct thread_trace *ttrace,
2740 struct trace *trace, FILE *fp)
2741{
2742 size_t printed = 0;
2743 struct syscall *sc;
2744 struct rb_node *nd;
2745 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
2746
2747 if (syscall_stats == NULL)
2748 return 0;
2749
2750 printed += fprintf(fp, "\n");
2751
2752 printed += fprintf(fp, " syscall calls total min avg max stddev\n");
2753 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
2754 printed += fprintf(fp, " --------------- -------- --------- --------- --------- --------- ------\n");
2755
2756 resort_rb__for_each_entry(nd, syscall_stats) {
2757 struct stats *stats = syscall_stats_entry->stats;
2758 if (stats) {
2759 double min = (double)(stats->min) / NSEC_PER_MSEC;
2760 double max = (double)(stats->max) / NSEC_PER_MSEC;
2761 double avg = avg_stats(stats);
2762 double pct;
2763 u64 n = (u64) stats->n;
2764
2765 pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
2766 avg /= NSEC_PER_MSEC;
2767
2768 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
2769 printed += fprintf(fp, " %-15s", sc->name);
2770 printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
2771 n, syscall_stats_entry->msecs, min, avg);
2772 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
2773 }
2774 }
2775
2776 resort_rb__delete(syscall_stats);
2777 printed += fprintf(fp, "\n\n");
2778
2779 return printed;
2780}
2781
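/*
 * One summary line per thread: comm, tid, event count and its share of all
 * events, fault counts and, with --sched, the accumulated CPU time,
 * followed by the per-syscall stats table.
 */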
2782static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
2783{
2784 size_t printed = 0;
2785 struct thread_trace *ttrace = thread__priv(thread);
2786 double ratio;
2787
2788 if (ttrace == NULL)
2789 return 0;
2790
2791 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
2792
2793 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
2794 printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
2795 printed += fprintf(fp, "%.1f%%", ratio);
2796 if (ttrace->pfmaj)
2797 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
2798 if (ttrace->pfmin)
2799 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
2800 if (trace->sched)
2801 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
2802 else if (fputc('\n', fp) != EOF)
2803 ++printed;
2804
2805 printed += thread__dump_stats(ttrace, trace, fp);
2806
2807 return printed;
2808}
2809
2810static unsigned long thread__nr_events(struct thread_trace *ttrace)
2811{
2812 return ttrace ? ttrace->nr_events : 0;
2813}
2814
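/*
 * Sort threads by number of events, ascending, so the summary lists the
 * noisiest threads last.
 */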
2815DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
2816 struct thread *thread;
2817)
2818{
2819 entry->thread = rb_entry(nd, struct thread, rb_node);
2820}
2821
2822static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
2823{
2824 size_t printed = trace__fprintf_threads_header(fp);
2825 struct rb_node *nd;
2826 int i;
2827
2828 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
2829 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
2830
2831 if (threads == NULL) {
2832 fprintf(fp, "%s", "Error sorting output by nr_events!\n");
2833 return 0;
2834 }
2835
2836 resort_rb__for_each_entry(nd, threads)
2837 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
2838
2839 resort_rb__delete(threads);
2840 }
2841 return printed;
2842}
2843
2844static int trace__set_duration(const struct option *opt, const char *str,
2845 int unset __maybe_unused)
2846{
2847 struct trace *trace = opt->value;
2848
2849 trace->duration_filter = atof(str);
2850 return 0;
2851}
2852
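/*
 * --filter-pids: parse the CSV list of pids to be filtered out in the
 * kernel, always prepending our own pid to it.
 */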
2853static int trace__set_filter_pids(const struct option *opt, const char *str,
2854 int unset __maybe_unused)
2855{
2856 int ret = -1;
2857 size_t i;
2858 struct trace *trace = opt->value;
2859 /*
2860	 * FIXME: introduce an intarray class, just parse the csv and create a
2861 * { int nr, int entries[] } struct...
2862 */
2863 struct intlist *list = intlist__new(str);
2864
2865 if (list == NULL)
2866 return -1;
2867
2868 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
2869 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
2870
2871 if (trace->filter_pids.entries == NULL)
2872 goto out;
2873
2874 trace->filter_pids.entries[0] = getpid();
2875
2876 for (i = 1; i < trace->filter_pids.nr; ++i)
2877 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
2878
2879 intlist__delete(list);
2880 ret = 0;
2881out:
2882 return ret;
2883}
2884
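/*
 * -o/--output: if the file already exists and is non-empty, rotate it to
 * "<name>.old" before truncating, so a previous trace isn't silently lost.
 */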
2885static int trace__open_output(struct trace *trace, const char *filename)
2886{
2887 struct stat st;
2888
2889 if (!stat(filename, &st) && st.st_size) {
2890 char oldname[PATH_MAX];
2891
2892 scnprintf(oldname, sizeof(oldname), "%s.old", filename);
2893 unlink(oldname);
2894 rename(filename, oldname);
2895 }
2896
2897 trace->output = fopen(filename, "w");
2898
2899 return trace->output == NULL ? -errno : 0;
2900}
2901
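/*
 * -F/--pf: map "all", "maj" and "min" onto the TRACE_PFMAJ/TRACE_PFMIN
 * bits in trace->trace_pgfaults.
 */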
2902static int parse_pagefaults(const struct option *opt, const char *str,
2903 int unset __maybe_unused)
2904{
2905 int *trace_pgfaults = opt->value;
2906
2907 if (strcmp(str, "all") == 0)
2908 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
2909 else if (strcmp(str, "maj") == 0)
2910 *trace_pgfaults |= TRACE_PFMAJ;
2911 else if (strcmp(str, "min") == 0)
2912 *trace_pgfaults |= TRACE_PFMIN;
2913 else
2914 return -1;
2915
2916 return 0;
2917}
2918
2919static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
2920{
2921 struct perf_evsel *evsel;
2922
2923 evlist__for_each_entry(evlist, evsel)
2924 evsel->handler = handler;
2925}
2926
2927/*
2928 * XXX: Hackish, just splitting the combined -e/--event argument into syscalls
2929 * (raw_syscalls:sys_{enter,exit}) and other events (tracepoints, HW, SW, etc) to use
2930 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
2931 *
2932 * It'd be better to introduce a parse_options() variant that would return a
2933 * list with the terms it didn't match to an event...
2934 */
2935static int trace__parse_events_option(const struct option *opt, const char *str,
2936 int unset __maybe_unused)
2937{
2938 struct trace *trace = (struct trace *)opt->value;
2939 const char *s = str;
2940 char *sep = NULL, *lists[2] = { NULL, NULL, };
2941 int len = strlen(str) + 1, err = -1, list, idx;
2942 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
2943 char group_name[PATH_MAX];
2944
2945 if (strace_groups_dir == NULL)
2946 return -1;
2947
2948 if (*s == '!') {
2949 ++s;
2950 trace->not_ev_qualifier = true;
2951 }
2952
2953 while (1) {
2954 if ((sep = strchr(s, ',')) != NULL)
2955 *sep = '\0';
2956
2957 list = 0;
2958 if (syscalltbl__id(trace->sctbl, s) >= 0 ||
2959 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
2960 list = 1;
2961 } else {
2962 path__join(group_name, sizeof(group_name), strace_groups_dir, s);
2963 if (access(group_name, R_OK) == 0)
2964 list = 1;
2965 }
2966
2967 if (lists[list]) {
2968 sprintf(lists[list] + strlen(lists[list]), ",%s", s);
2969 } else {
2970 lists[list] = malloc(len);
2971 if (lists[list] == NULL)
2972 goto out;
2973 strcpy(lists[list], s);
2974 }
2975
2976 if (!sep)
2977 break;
2978
2979 *sep = ',';
2980 s = sep + 1;
2981 }
2982
2983 if (lists[1] != NULL) {
2984 struct strlist_config slist_config = {
2985 .dirname = strace_groups_dir,
2986 };
2987
2988 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
2989 if (trace->ev_qualifier == NULL) {
2990			fputs("Not enough memory to parse event qualifier\n", trace->output);
2991 goto out;
2992 }
2993
2994 if (trace__validate_ev_qualifier(trace))
2995 goto out;
2996 }
2997
2998 err = 0;
2999
3000 if (lists[0]) {
3001 struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
3002 "event selector. use 'perf list' to list available events",
3003 parse_events_option);
3004 err = parse_events_option(&o, lists[0], 0);
3005 }
3006out:
3007 if (sep)
3008 *sep = ',';
3009
3010 return err;
3011}
3012
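/*
 * -G/--cgroup: if it comes before any -e event it is remembered as the
 * default cgroup, applied in trace__run() to all events still without one;
 * otherwise it is handed to parse_cgroups() and affects only the events
 * parsed so far.
 */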
3013static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
3014{
3015 struct trace *trace = opt->value;
3016
3017 if (!list_empty(&trace->evlist->entries))
3018 return parse_cgroups(opt, str, unset);
3019
3020 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
3021
3022 return 0;
3023}
3024
3025int cmd_trace(int argc, const char **argv)
3026{
3027 const char *trace_usage[] = {
3028 "perf trace [<options>] [<command>]",
3029 "perf trace [<options>] -- <command> [<options>]",
3030 "perf trace record [<options>] [<command>]",
3031 "perf trace record [<options>] -- <command> [<options>]",
3032 NULL
3033 };
3034 struct trace trace = {
3035 .syscalls = {
3036			.max = -1,
3037 },
3038 .opts = {
3039 .target = {
3040 .uid = UINT_MAX,
3041 .uses_mmap = true,
3042 },
3043 .user_freq = UINT_MAX,
3044 .user_interval = ULLONG_MAX,
3045 .no_buffering = true,
3046 .mmap_pages = UINT_MAX,
3047 .proc_map_timeout = 500,
3048 },
3049 .output = stderr,
3050 .show_comm = true,
3051 .trace_syscalls = true,
3052 .kernel_syscallchains = false,
3053 .max_stack = UINT_MAX,
3054 };
3055 const char *output_name = NULL;
3056 const struct option trace_options[] = {
3057 OPT_CALLBACK('e', "event", &trace, "event",
3058 "event/syscall selector. use 'perf list' to list available events",
3059 trace__parse_events_option),
3060 OPT_BOOLEAN(0, "comm", &trace.show_comm,
3061 "show the thread COMM next to its id"),
3062 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
3063 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
3064 trace__parse_events_option),
3065 OPT_STRING('o', "output", &output_name, "file", "output file name"),
3066 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
3067 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
3068 "trace events on existing process id"),
3069 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
3070 "trace events on existing thread id"),
3071 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
3072 "pids to filter (by the kernel)", trace__set_filter_pids),
3073 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
3074 "system-wide collection from all CPUs"),
3075 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
3076 "list of cpus to monitor"),
3077 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
3078 "child tasks do not inherit counters"),
3079 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
3080 "number of mmap data pages",
3081 perf_evlist__parse_mmap_pages),
3082 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
3083 "user to profile"),
3084 OPT_CALLBACK(0, "duration", &trace, "float",
3085 "show only events with duration > N.M ms",
3086 trace__set_duration),
3087 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
3088 OPT_INCR('v', "verbose", &verbose, "be more verbose"),
3089 OPT_BOOLEAN('T', "time", &trace.full_time,
3090 "Show full timestamp, not time relative to first start"),
3091 OPT_BOOLEAN(0, "failure", &trace.failure_only,
3092 "Show only syscalls that failed"),
3093 OPT_BOOLEAN('s', "summary", &trace.summary_only,
3094 "Show only syscall summary with statistics"),
3095 OPT_BOOLEAN('S', "with-summary", &trace.summary,
3096 "Show all syscalls and summary with statistics"),
3097 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
3098 "Trace pagefaults", parse_pagefaults, "maj"),
3099 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
3100 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
3101 OPT_CALLBACK(0, "call-graph", &trace.opts,
3102 "record_mode[,record_size]", record_callchain_help,
3103 &record_parse_callchain_opt),
3104 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
3105 "Show the kernel callchains on the syscall exit path"),
3106 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
3107 "Set the minimum stack depth when parsing the callchain, "
3108 "anything below the specified depth will be ignored."),
3109 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
3110 "Set the maximum stack depth when parsing the callchain, "
3111 "anything beyond the specified depth will be ignored. "
3112 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
3113 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
3114 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
3115 OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
3116 "per thread proc mmap processing timeout in ms"),
3117 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
3118 trace__parse_cgroups),
3119 OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
3120 "ms to wait before starting measurement after program "
3121 "start"),
3122 OPT_END()
3123 };
3124 bool __maybe_unused max_stack_user_set = true;
3125 bool mmap_pages_user_set = true;
3126 const char * const trace_subcommands[] = { "record", NULL };
3127 int err;
3128 char bf[BUFSIZ];
3129
3130 signal(SIGSEGV, sighandler_dump_stack);
3131 signal(SIGFPE, sighandler_dump_stack);
3132
3133 trace.evlist = perf_evlist__new();
3134 trace.sctbl = syscalltbl__new();
3135
3136 if (trace.evlist == NULL || trace.sctbl == NULL) {
3137 pr_err("Not enough memory to run!\n");
3138 err = -ENOMEM;
3139 goto out;
3140 }
3141
3142 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
3143 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
3144
3145 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
3146 usage_with_options_msg(trace_usage, trace_options,
3147 "cgroup monitoring only available in system-wide mode");
3148 }
3149
3150 err = bpf__setup_stdout(trace.evlist);
3151 if (err) {
3152 bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
3153 pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
3154 goto out;
3155 }
3156
3157 err = -1;
3158
3159 if (trace.trace_pgfaults) {
3160 trace.opts.sample_address = true;
3161 trace.opts.sample_time = true;
3162 }
3163
3164 if (trace.opts.mmap_pages == UINT_MAX)
3165 mmap_pages_user_set = false;
3166
3167 if (trace.max_stack == UINT_MAX) {
3168 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl_perf_event_max_stack;
3169 max_stack_user_set = false;
3170 }
3171
3172#ifdef HAVE_DWARF_UNWIND_SUPPORT
3173 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
3174 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
3175 }
3176#endif
3177
3178 if (callchain_param.enabled) {
3179 if (!mmap_pages_user_set && geteuid() == 0)
3180 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
3181
3182 symbol_conf.use_callchain = true;
3183 }
3184
3185 if (trace.evlist->nr_entries > 0)
3186 evlist__set_evsel_handler(trace.evlist, trace__event_handler);
3187
3188 if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
3189 return trace__record(&trace, argc-1, &argv[1]);
3190
3191 /* summary_only implies summary option, but don't overwrite summary if set */
3192 if (trace.summary_only)
3193 trace.summary = trace.summary_only;
3194
3195 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
3196 trace.evlist->nr_entries == 0 /* Was --events used? */) {
3197 pr_err("Please specify something to trace.\n");
3198 return -1;
3199 }
3200
3201 if (!trace.trace_syscalls && trace.ev_qualifier) {
3202 pr_err("The -e option can't be used with --no-syscalls.\n");
3203 goto out;
3204 }
3205
3206 if (output_name != NULL) {
3207 err = trace__open_output(&trace, output_name);
3208 if (err < 0) {
3209 perror("failed to create output file");
3210 goto out;
3211 }
3212 }
3213
3214 trace.open_id = syscalltbl__id(trace.sctbl, "open");
3215
3216 err = target__validate(&trace.opts.target);
3217 if (err) {
3218 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
3219 fprintf(trace.output, "%s", bf);
3220 goto out_close;
3221 }
3222
3223 err = target__parse_uid(&trace.opts.target);
3224 if (err) {
3225 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
3226 fprintf(trace.output, "%s", bf);
3227 goto out_close;
3228 }
3229
3230 if (!argc && target__none(&trace.opts.target))
3231 trace.opts.target.system_wide = true;
3232
3233 if (input_name)
3234 err = trace__replay(&trace);
3235 else
3236 err = trace__run(&trace, argc, argv);
3237
3238out_close:
3239 if (output_name != NULL)
3240 fclose(trace.output);
3241out:
3242 return err;
3243}
17#include "util/record.h"
18#include <traceevent/event-parse.h>
19#include <api/fs/tracing_path.h>
20#include <bpf/bpf.h>
21#include "util/bpf_map.h"
22#include "util/rlimit.h"
23#include "builtin.h"
24#include "util/cgroup.h"
25#include "util/color.h"
26#include "util/config.h"
27#include "util/debug.h"
28#include "util/dso.h"
29#include "util/env.h"
30#include "util/event.h"
31#include "util/evsel.h"
32#include "util/evsel_fprintf.h"
33#include "util/synthetic-events.h"
34#include "util/evlist.h"
35#include "util/evswitch.h"
36#include "util/mmap.h"
37#include <subcmd/pager.h>
38#include <subcmd/exec-cmd.h>
39#include "util/machine.h"
40#include "util/map.h"
41#include "util/symbol.h"
42#include "util/path.h"
43#include "util/session.h"
44#include "util/thread.h"
45#include <subcmd/parse-options.h>
46#include "util/strlist.h"
47#include "util/intlist.h"
48#include "util/thread_map.h"
49#include "util/stat.h"
50#include "util/tool.h"
51#include "util/util.h"
52#include "trace/beauty/beauty.h"
53#include "trace-event.h"
54#include "util/parse-events.h"
55#include "util/bpf-loader.h"
56#include "callchain.h"
57#include "print_binary.h"
58#include "string2.h"
59#include "syscalltbl.h"
60#include "rb_resort.h"
61#include "../perf.h"
62
63#include <errno.h>
64#include <inttypes.h>
65#include <poll.h>
66#include <signal.h>
67#include <stdlib.h>
68#include <string.h>
69#include <linux/err.h>
70#include <linux/filter.h>
71#include <linux/kernel.h>
72#include <linux/random.h>
73#include <linux/stringify.h>
74#include <linux/time64.h>
75#include <linux/zalloc.h>
76#include <fcntl.h>
77#include <sys/sysmacros.h>
78
79#include <linux/ctype.h>
80#include <perf/mmap.h>
81
82#ifndef O_CLOEXEC
83# define O_CLOEXEC 02000000
84#endif
85
86#ifndef F_LINUX_SPECIFIC_BASE
87# define F_LINUX_SPECIFIC_BASE 1024
88#endif
89
90/*
91 * strtoul: Go from a string to a value, e.g. for msr: MSR_FS_BASE to 0xc0000100
92 */
93struct syscall_arg_fmt {
94 size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
95 bool (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
96 unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
97 void *parm;
98 const char *name;
99 u16 nr_entries; // for arrays
100 bool show_zero;
101};
102
103struct syscall_fmt {
104 const char *name;
105 const char *alias;
106 struct {
107 const char *sys_enter,
108 *sys_exit;
109 } bpf_prog_name;
110 struct syscall_arg_fmt arg[6];
111 u8 nr_args;
112 bool errpid;
113 bool timeout;
114 bool hexret;
115};
116
117struct trace {
118 struct perf_tool tool;
119 struct syscalltbl *sctbl;
120 struct {
121 struct syscall *table;
122 struct bpf_map *map;
123 struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
124 struct bpf_map *sys_enter,
125 *sys_exit;
126 } prog_array;
127 struct {
128 struct evsel *sys_enter,
129 *sys_exit,
130 *augmented;
131 } events;
132 struct bpf_program *unaugmented_prog;
133 } syscalls;
134 struct {
135 struct bpf_map *map;
136 } dump;
137 struct record_opts opts;
138 struct evlist *evlist;
139 struct machine *host;
140 struct thread *current;
141 struct bpf_object *bpf_obj;
142 struct cgroup *cgroup;
143 u64 base_time;
144 FILE *output;
145 unsigned long nr_events;
146 unsigned long nr_events_printed;
147 unsigned long max_events;
148 struct evswitch evswitch;
149 struct strlist *ev_qualifier;
150 struct {
151 size_t nr;
152 int *entries;
153 } ev_qualifier_ids;
154 struct {
155 size_t nr;
156 pid_t *entries;
157 struct bpf_map *map;
158 } filter_pids;
159 double duration_filter;
160 double runtime_ms;
161 struct {
162 u64 vfs_getname,
163 proc_getname;
164 } stats;
165 unsigned int max_stack;
166 unsigned int min_stack;
167 int raw_augmented_syscalls_args_size;
168 bool raw_augmented_syscalls;
169 bool fd_path_disabled;
170 bool sort_events;
171 bool not_ev_qualifier;
172 bool live;
173 bool full_time;
174 bool sched;
175 bool multiple_threads;
176 bool summary;
177 bool summary_only;
178 bool errno_summary;
179 bool failure_only;
180 bool show_comm;
181 bool print_sample;
182 bool show_tool_stats;
183 bool trace_syscalls;
184 bool libtraceevent_print;
185 bool kernel_syscallchains;
186 s16 args_alignment;
187 bool show_tstamp;
188 bool show_duration;
189 bool show_zeros;
190 bool show_arg_names;
191 bool show_string_prefix;
192 bool force;
193 bool vfs_getname;
194 int trace_pgfaults;
195 char *perfconfig_events;
196 struct {
197 struct ordered_events data;
198 u64 last;
199 } oe;
200};
201
202struct tp_field {
203 int offset;
204 union {
205 u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
206 void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
207 };
208};
209
210#define TP_UINT_FIELD(bits) \
211static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
212{ \
213 u##bits value; \
214 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
215 return value; \
216}
217
218TP_UINT_FIELD(8);
219TP_UINT_FIELD(16);
220TP_UINT_FIELD(32);
221TP_UINT_FIELD(64);
222
223#define TP_UINT_FIELD__SWAPPED(bits) \
224static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
225{ \
226 u##bits value; \
227 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
228 return bswap_##bits(value);\
229}
230
231TP_UINT_FIELD__SWAPPED(16);
232TP_UINT_FIELD__SWAPPED(32);
233TP_UINT_FIELD__SWAPPED(64);
234
235static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
236{
237 field->offset = offset;
238
239 switch (size) {
240 case 1:
241 field->integer = tp_field__u8;
242 break;
243 case 2:
244 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
245 break;
246 case 4:
247 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
248 break;
249 case 8:
250 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
251 break;
252 default:
253 return -1;
254 }
255
256 return 0;
257}
258
259static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
260{
261 return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
262}
263
264static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
265{
266 return sample->raw_data + field->offset;
267}
268
269static int __tp_field__init_ptr(struct tp_field *field, int offset)
270{
271 field->offset = offset;
272 field->pointer = tp_field__ptr;
273 return 0;
274}
275
276static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
277{
278 return __tp_field__init_ptr(field, format_field->offset);
279}
280
281struct syscall_tp {
282 struct tp_field id;
283 union {
284 struct tp_field args, ret;
285 };
286};
287
288/*
289 * The evsel->priv as used by 'perf trace'
290 * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
291 * fmt: for all the other tracepoints
292 */
293struct evsel_trace {
294 struct syscall_tp sc;
295 struct syscall_arg_fmt *fmt;
296};
297
298static struct evsel_trace *evsel_trace__new(void)
299{
300 return zalloc(sizeof(struct evsel_trace));
301}
302
303static void evsel_trace__delete(struct evsel_trace *et)
304{
305 if (et == NULL)
306 return;
307
308 zfree(&et->fmt);
309 free(et);
310}
311
312/*
313 * Used with raw_syscalls:sys_{enter,exit} and with the
314 * syscalls:sys_{enter,exit}_SYSCALL tracepoints
315 */
316static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
317{
318 struct evsel_trace *et = evsel->priv;
319
320 return &et->sc;
321}
322
323static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
324{
325 if (evsel->priv == NULL) {
326 evsel->priv = evsel_trace__new();
327 if (evsel->priv == NULL)
328 return NULL;
329 }
330
331 return __evsel__syscall_tp(evsel);
332}
333
334/*
335 * Used with all the other tracepoints.
336 */
337static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
338{
339 struct evsel_trace *et = evsel->priv;
340
341 return et->fmt;
342}
343
344static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
345{
346 struct evsel_trace *et = evsel->priv;
347
348 if (evsel->priv == NULL) {
349 et = evsel->priv = evsel_trace__new();
350
351 if (et == NULL)
352 return NULL;
353 }
354
355 if (et->fmt == NULL) {
356 et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
357 if (et->fmt == NULL)
358 goto out_delete;
359 }
360
361 return __evsel__syscall_arg_fmt(evsel);
362
363out_delete:
364 evsel_trace__delete(evsel->priv);
365 evsel->priv = NULL;
366 return NULL;
367}
368
369static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
370{
371 struct tep_format_field *format_field = evsel__field(evsel, name);
372
373 if (format_field == NULL)
374 return -1;
375
376 return tp_field__init_uint(field, format_field, evsel->needs_swap);
377}
378
379#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
380 ({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
381 evsel__init_tp_uint_field(evsel, &sc->name, #name); })
382
383static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
384{
385 struct tep_format_field *format_field = evsel__field(evsel, name);
386
387 if (format_field == NULL)
388 return -1;
389
390 return tp_field__init_ptr(field, format_field);
391}
392
393#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
394 ({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
395 evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
396
397static void evsel__delete_priv(struct evsel *evsel)
398{
399 zfree(&evsel->priv);
400 evsel__delete(evsel);
401}
402
403static int evsel__init_syscall_tp(struct evsel *evsel)
404{
405 struct syscall_tp *sc = evsel__syscall_tp(evsel);
406
407 if (sc != NULL) {
408 if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
409 evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
410 return -ENOENT;
411 return 0;
412 }
413
414 return -ENOMEM;
415}
416
417static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
418{
419 struct syscall_tp *sc = evsel__syscall_tp(evsel);
420
421 if (sc != NULL) {
422 struct tep_format_field *syscall_id = evsel__field(tp, "id");
423 if (syscall_id == NULL)
424 syscall_id = evsel__field(tp, "__syscall_nr");
425 if (syscall_id == NULL ||
426 __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
427 return -EINVAL;
428
429 return 0;
430 }
431
432 return -ENOMEM;
433}
434
435static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
436{
437 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
438
439 return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
440}
441
442static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
443{
444 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
445
446 return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
447}
448
449static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
450{
451 if (evsel__syscall_tp(evsel) != NULL) {
452 if (perf_evsel__init_sc_tp_uint_field(evsel, id))
453 return -ENOENT;
454
455 evsel->handler = handler;
456 return 0;
457 }
458
459 return -ENOMEM;
460}
461
462static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
463{
464 struct evsel *evsel = evsel__newtp("raw_syscalls", direction);
465
466	/* older kernels (e.g., RHEL6) use syscalls:sys_{enter,exit} */
467 if (IS_ERR(evsel))
468 evsel = evsel__newtp("syscalls", direction);
469
470 if (IS_ERR(evsel))
471 return NULL;
472
473 if (evsel__init_raw_syscall_tp(evsel, handler))
474 goto out_delete;
475
476 return evsel;
477
478out_delete:
479 evsel__delete_priv(evsel);
480 return NULL;
481}
482
483#define perf_evsel__sc_tp_uint(evsel, name, sample) \
484 ({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
485 fields->name.integer(&fields->name, sample); })
486
487#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
488 ({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
489 fields->name.pointer(&fields->name, sample); })
490
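/*
 * Pretty printers for struct strarray value -> name tables: known values
 * print as their name, optionally decorated with the table's prefix or
 * suffix; unknown values fall back to intfmt, annotated with "prefix???"
 * when decoration was requested.
 */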
491size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
492{
493 int idx = val - sa->offset;
494
495 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
496 size_t printed = scnprintf(bf, size, intfmt, val);
497 if (show_suffix)
498 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
499 return printed;
500 }
501
502 return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
503}
504
505size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
506{
507 int idx = val - sa->offset;
508
509 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
510 size_t printed = scnprintf(bf, size, intfmt, val);
511 if (show_prefix)
512 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
513 return printed;
514 }
515
516 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
517}
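
/*
 * For example, with the "whences" array defined below (prefix "SEEK_",
 * offset 0), val = 1 prints as "SEEK_CUR" (just "CUR" when prefixes are
 * not requested), while an unmapped value falls back to intfmt followed
 * by a "SEEK_???" annotation.
 */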
518
519static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
520 const char *intfmt,
521 struct syscall_arg *arg)
522{
523 return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
524}
525
526static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
527 struct syscall_arg *arg)
528{
529 return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
530}
531
532#define SCA_STRARRAY syscall_arg__scnprintf_strarray
533
534bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
535{
536 return strarray__strtoul(arg->parm, bf, size, ret);
537}
538
539bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
540{
541 return strarray__strtoul_flags(arg->parm, bf, size, ret);
542}
543
544bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
545{
546 return strarrays__strtoul(arg->parm, bf, size, ret);
547}
548
549size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
550{
551 return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
552}
553
554size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
555{
556 size_t printed;
557 int i;
558
559 for (i = 0; i < sas->nr_entries; ++i) {
560 struct strarray *sa = sas->entries[i];
561 int idx = val - sa->offset;
562
563 if (idx >= 0 && idx < sa->nr_entries) {
564 if (sa->entries[idx] == NULL)
565 break;
566 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
567 }
568 }
569
570 printed = scnprintf(bf, size, intfmt, val);
571 if (show_prefix)
572 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
573 return printed;
574}
575
576bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
577{
578 int i;
579
580 for (i = 0; i < sa->nr_entries; ++i) {
581 if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
582 *ret = sa->offset + i;
583 return true;
584 }
585 }
586
587 return false;
588}
589
590bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret)
591{
592 u64 val = 0;
593 char *tok = bf, *sep, *end;
594
595 *ret = 0;
596
597 while (size != 0) {
598 int toklen = size;
599
600 sep = memchr(tok, '|', size);
601 if (sep != NULL) {
602 size -= sep - tok + 1;
603
604 end = sep - 1;
605 while (end > tok && isspace(*end))
606 --end;
607
608 toklen = end - tok + 1;
609 }
610
611 while (isspace(*tok))
612 ++tok;
613
614 if (isalpha(*tok) || *tok == '_') {
615 if (!strarray__strtoul(sa, tok, toklen, &val))
616 return false;
617 } else {
618			bool is_hexa = tok[0] == '0' && (tok[1] == 'x' || tok[1] == 'X');
619
620 val = strtoul(tok, NULL, is_hexa ? 16 : 0);
621 }
622
623 *ret |= (1 << (val - 1));
624
625 if (sep == NULL)
626 break;
627 tok = sep + 1;
628 }
629
630 return true;
631}
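
/*
 * For example, matching "CLOEXEC" against the fsmount_flags array defined
 * below finds it at index 1 (offset 0), so val = 1 and bit 0 gets set,
 * i.e. *ret = 0x1 (FSMOUNT_CLOEXEC), while a numeric token such as "0x2"
 * goes through strtoul() instead and sets bit 1.
 */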
632
633bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
634{
635 int i;
636
637 for (i = 0; i < sas->nr_entries; ++i) {
638 struct strarray *sa = sas->entries[i];
639
640 if (strarray__strtoul(sa, bf, size, ret))
641 return true;
642 }
643
644 return false;
645}
646
647size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
648 struct syscall_arg *arg)
649{
650 return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
651}
652
653#ifndef AT_FDCWD
654#define AT_FDCWD -100
655#endif
656
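/* e.g. openat(AT_FDCWD, ...) shows the dirfd as "CWD" ("AT_FDCWD" with prefixes). */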
657static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
658 struct syscall_arg *arg)
659{
660 int fd = arg->val;
661 const char *prefix = "AT_FD";
662
663 if (fd == AT_FDCWD)
664 return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");
665
666 return syscall_arg__scnprintf_fd(bf, size, arg);
667}
668
669#define SCA_FDAT syscall_arg__scnprintf_fd_at
670
671static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
672 struct syscall_arg *arg);
673
674#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
675
676size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
677{
678 return scnprintf(bf, size, "%#lx", arg->val);
679}
680
681size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
682{
683 if (arg->val == 0)
684 return scnprintf(bf, size, "NULL");
685 return syscall_arg__scnprintf_hex(bf, size, arg);
686}
687
688size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
689{
690 return scnprintf(bf, size, "%d", arg->val);
691}
692
693size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
694{
695 return scnprintf(bf, size, "%ld", arg->val);
696}
697
698static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
699{
700 // XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
701 // fill missing comms using thread__set_comm()...
702 // here or in a special syscall_arg__scnprintf_pid_sched_tp...
703 return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
704}
705
706#define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array
707
708static const char *bpf_cmd[] = {
709 "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
710 "MAP_GET_NEXT_KEY", "PROG_LOAD",
711};
712static DEFINE_STRARRAY(bpf_cmd, "BPF_");
713
714static const char *fsmount_flags[] = {
715 [1] = "CLOEXEC",
716};
717static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");
718
719#include "trace/beauty/generated/fsconfig_arrays.c"
720
721static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");
722
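/* EPOLL_CTL_ADD is 1, hence the offset of 1: val 1 maps to "ADD". */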
723static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
724static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
725
726static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
727static DEFINE_STRARRAY(itimers, "ITIMER_");
728
729static const char *keyctl_options[] = {
730 "GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
731 "SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
732 "INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
733 "ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
734 "INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
735};
736static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");
737
738static const char *whences[] = { "SET", "CUR", "END",
739#ifdef SEEK_DATA
740"DATA",
741#endif
742#ifdef SEEK_HOLE
743"HOLE",
744#endif
745};
746static DEFINE_STRARRAY(whences, "SEEK_");
747
748static const char *fcntl_cmds[] = {
749 "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
750 "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
751 "SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
752 "GETOWNER_UIDS",
753};
754static DEFINE_STRARRAY(fcntl_cmds, "F_");
755
756static const char *fcntl_linux_specific_cmds[] = {
757 "SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
758 "SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
759 "GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
760};
761
762static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);
763
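/*
 * fcntl commands span two disjoint ranges: the classic F_* ones starting at
 * 0 and the Linux specific ones starting at F_LINUX_SPECIFIC_BASE (1024),
 * so lookups walk both arrays in turn, e.g. F_DUPFD_CLOEXEC (1024 + 6)
 * resolves via the second one.
 */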
764static struct strarray *fcntl_cmds_arrays[] = {
765 &strarray__fcntl_cmds,
766 &strarray__fcntl_linux_specific_cmds,
767};
768
769static DEFINE_STRARRAYS(fcntl_cmds_arrays);
770
771static const char *rlimit_resources[] = {
772 "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
773 "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
774 "RTTIME",
775};
776static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");
777
778static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
779static DEFINE_STRARRAY(sighow, "SIG_");
780
781static const char *clockid[] = {
782 "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
783 "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
784 "REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
785};
786static DEFINE_STRARRAY(clockid, "CLOCK_");
787
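/* access(2) modes concatenate, e.g. R_OK|W_OK prints as "RW" ("R_OKW_OK" with suffixes). */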
788static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
789 struct syscall_arg *arg)
790{
791 bool show_prefix = arg->show_string_prefix;
792 const char *suffix = "_OK";
793 size_t printed = 0;
794 int mode = arg->val;
795
796 if (mode == F_OK) /* 0 */
797 return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
798#define P_MODE(n) \
799 if (mode & n##_OK) { \
800 printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
801 mode &= ~n##_OK; \
802 }
803
804 P_MODE(R);
805 P_MODE(W);
806 P_MODE(X);
807#undef P_MODE
808
809 if (mode)
810 printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
811
812 return printed;
813}
814
815#define SCA_ACCMODE syscall_arg__scnprintf_access_mode
816
817static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
818 struct syscall_arg *arg);
819
820#define SCA_FILENAME syscall_arg__scnprintf_filename
821
822static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
823 struct syscall_arg *arg)
824{
825 bool show_prefix = arg->show_string_prefix;
826 const char *prefix = "O_";
827 int printed = 0, flags = arg->val;
828
829#define P_FLAG(n) \
830 if (flags & O_##n) { \
831 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
832 flags &= ~O_##n; \
833 }
834
835 P_FLAG(CLOEXEC);
836 P_FLAG(NONBLOCK);
837#undef P_FLAG
838
839 if (flags)
840 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
841
842 return printed;
843}
844
845#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
846
847#ifndef GRND_NONBLOCK
848#define GRND_NONBLOCK 0x0001
849#endif
850#ifndef GRND_RANDOM
851#define GRND_RANDOM 0x0002
852#endif
853
854static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
855 struct syscall_arg *arg)
856{
857 bool show_prefix = arg->show_string_prefix;
858 const char *prefix = "GRND_";
859 int printed = 0, flags = arg->val;
860
861#define P_FLAG(n) \
862 if (flags & GRND_##n) { \
863 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
864 flags &= ~GRND_##n; \
865 }
866
867 P_FLAG(RANDOM);
868 P_FLAG(NONBLOCK);
869#undef P_FLAG
870
871 if (flags)
872 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
873
874 return printed;
875}
876
877#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
878
879#define STRARRAY(name, array) \
880 { .scnprintf = SCA_STRARRAY, \
881 .strtoul = STUL_STRARRAY, \
882 .parm = &strarray__##array, }
883
884#define STRARRAY_FLAGS(name, array) \
885 { .scnprintf = SCA_STRARRAY_FLAGS, \
886 .strtoul = STUL_STRARRAY_FLAGS, \
887 .parm = &strarray__##array, }
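
/*
 * For example, STRARRAY(clk_id, clockid) in the syscall_fmts table below
 * expands to:
 *
 *	{ .scnprintf = SCA_STRARRAY,
 *	  .strtoul   = STUL_STRARRAY,
 *	  .parm      = &strarray__clockid, }
 *
 * wiring the clock_gettime "clk_id" argument to the clockid string array.
 */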
888
889#include "trace/beauty/arch_errno_names.c"
890#include "trace/beauty/eventfd.c"
891#include "trace/beauty/futex_op.c"
892#include "trace/beauty/futex_val3.c"
893#include "trace/beauty/mmap.c"
894#include "trace/beauty/mode_t.c"
895#include "trace/beauty/msg_flags.c"
896#include "trace/beauty/open_flags.c"
897#include "trace/beauty/perf_event_open.c"
898#include "trace/beauty/pid.c"
899#include "trace/beauty/sched_policy.c"
900#include "trace/beauty/seccomp.c"
901#include "trace/beauty/signum.c"
902#include "trace/beauty/socket_type.c"
903#include "trace/beauty/waitid_options.c"
904
905static struct syscall_fmt syscall_fmts[] = {
906 { .name = "access",
907 .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
908 { .name = "arch_prctl",
909 .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
910 [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
911 { .name = "bind",
912 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
913 [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
914 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
915 { .name = "bpf",
916 .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
917 { .name = "brk", .hexret = true,
918 .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
919 { .name = "clock_gettime",
920 .arg = { [0] = STRARRAY(clk_id, clockid), }, },
921 { .name = "clone", .errpid = true, .nr_args = 5,
922 .arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, },
923 [1] = { .name = "child_stack", .scnprintf = SCA_HEX, },
924 [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
925 [3] = { .name = "child_tidptr", .scnprintf = SCA_HEX, },
926 [4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, },
927 { .name = "close",
928 .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
929 { .name = "connect",
930 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
931 [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
932 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
933 { .name = "epoll_ctl",
934 .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
935 { .name = "eventfd2",
936 .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
937 { .name = "fchmodat",
938 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
939 { .name = "fchownat",
940 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
941 { .name = "fcntl",
942 .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
943 .strtoul = STUL_STRARRAYS,
944 .parm = &strarrays__fcntl_cmds_arrays,
945 .show_zero = true, },
946 [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
947 { .name = "flock",
948 .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
949 { .name = "fsconfig",
950 .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
951 { .name = "fsmount",
952 .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
953 [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
954 { .name = "fspick",
955 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
956 [1] = { .scnprintf = SCA_FILENAME, /* path */ },
957 [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
958 { .name = "fstat", .alias = "newfstat", },
959 { .name = "fstatat", .alias = "newfstatat", },
960 { .name = "futex",
961 .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
962 [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
963 { .name = "futimesat",
964 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
965 { .name = "getitimer",
966 .arg = { [0] = STRARRAY(which, itimers), }, },
967 { .name = "getpid", .errpid = true, },
968 { .name = "getpgid", .errpid = true, },
969 { .name = "getppid", .errpid = true, },
970 { .name = "getrandom",
971 .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
972 { .name = "getrlimit",
973 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
974 { .name = "gettid", .errpid = true, },
975 { .name = "ioctl",
976 .arg = {
977#if defined(__i386__) || defined(__x86_64__)
978/*
979 * FIXME: Make this available to all arches.
980 */
981 [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
982 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
983#else
984 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
985#endif
986 { .name = "kcmp", .nr_args = 5,
987 .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, },
988 [1] = { .name = "pid2", .scnprintf = SCA_PID, },
989 [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, },
990 [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, },
991 [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, },
992 { .name = "keyctl",
993 .arg = { [0] = STRARRAY(option, keyctl_options), }, },
994 { .name = "kill",
995 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
996 { .name = "linkat",
997 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
998 { .name = "lseek",
999 .arg = { [2] = STRARRAY(whence, whences), }, },
1000 { .name = "lstat", .alias = "newlstat", },
1001 { .name = "madvise",
1002 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1003 [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
1004 { .name = "mkdirat",
1005 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1006 { .name = "mknodat",
1007 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1008 { .name = "mmap", .hexret = true,
1009/* The standard mmap maps to old_mmap on s390x */
1010#if defined(__s390x__)
1011 .alias = "old_mmap",
1012#endif
1013 .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
1014 [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */
1015 .strtoul = STUL_STRARRAY_FLAGS,
1016 .parm = &strarray__mmap_flags, },
1017 [5] = { .scnprintf = SCA_HEX, /* offset */ }, }, },
1018 { .name = "mount",
1019 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
1020 [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
1021 .mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
1022 { .name = "move_mount",
1023 .arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ },
1024 [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
1025 [2] = { .scnprintf = SCA_FDAT, /* to_dfd */ },
1026 [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
1027 [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
1028 { .name = "mprotect",
1029 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1030 [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, },
1031 { .name = "mq_unlink",
1032 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
1033 { .name = "mremap", .hexret = true,
1034 .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
1035 { .name = "name_to_handle_at",
1036 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1037 { .name = "newfstatat",
1038 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1039 { .name = "open",
1040 .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1041 { .name = "open_by_handle_at",
1042 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
1043 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1044 { .name = "openat",
1045 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
1046 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1047 { .name = "perf_event_open",
1048 .arg = { [2] = { .scnprintf = SCA_INT, /* cpu */ },
1049 [3] = { .scnprintf = SCA_FD, /* group_fd */ },
1050 [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
1051 { .name = "pipe2",
1052 .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
1053 { .name = "pkey_alloc",
1054 .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, },
1055 { .name = "pkey_free",
1056 .arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, },
1057 { .name = "pkey_mprotect",
1058 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1059 [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
1060 [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
1061 { .name = "poll", .timeout = true, },
1062 { .name = "ppoll", .timeout = true, },
1063 { .name = "prctl",
1064 .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
1065 .strtoul = STUL_STRARRAY,
1066 .parm = &strarray__prctl_options, },
1067 [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
1068 [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
1069 { .name = "pread", .alias = "pread64", },
1070 { .name = "preadv", .alias = "pread", },
1071 { .name = "prlimit64",
1072 .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
1073 { .name = "pwrite", .alias = "pwrite64", },
1074 { .name = "readlinkat",
1075 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1076 { .name = "recvfrom",
1077 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1078 { .name = "recvmmsg",
1079 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1080 { .name = "recvmsg",
1081 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1082 { .name = "renameat",
1083 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
1084 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
1085 { .name = "renameat2",
1086 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
1087 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
1088 [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
1089 { .name = "rt_sigaction",
1090 .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1091 { .name = "rt_sigprocmask",
1092 .arg = { [0] = STRARRAY(how, sighow), }, },
1093 { .name = "rt_sigqueueinfo",
1094 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1095 { .name = "rt_tgsigqueueinfo",
1096 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1097 { .name = "sched_setscheduler",
1098 .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
1099 { .name = "seccomp",
1100 .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ },
1101 [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
1102 { .name = "select", .timeout = true, },
1103 { .name = "sendfile", .alias = "sendfile64", },
1104 { .name = "sendmmsg",
1105 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1106 { .name = "sendmsg",
1107 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1108 { .name = "sendto",
1109 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
1110 [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
1111 { .name = "set_tid_address", .errpid = true, },
1112 { .name = "setitimer",
1113 .arg = { [0] = STRARRAY(which, itimers), }, },
1114 { .name = "setrlimit",
1115 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
1116 { .name = "socket",
1117 .arg = { [0] = STRARRAY(family, socket_families),
1118 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
1119 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
1120 { .name = "socketpair",
1121 .arg = { [0] = STRARRAY(family, socket_families),
1122 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
1123 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
1124 { .name = "stat", .alias = "newstat", },
1125 { .name = "statx",
1126 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ },
1127		   [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ },
1128 [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, },
1129 { .name = "swapoff",
1130 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
1131 { .name = "swapon",
1132 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
1133 { .name = "symlinkat",
1134 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1135 { .name = "sync_file_range",
1136 .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
1137 { .name = "tgkill",
1138 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1139 { .name = "tkill",
1140 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1141 { .name = "umount2", .alias = "umount",
1142 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
1143 { .name = "uname", .alias = "newuname", },
1144 { .name = "unlinkat",
1145 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1146 { .name = "utimensat",
1147 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
1148 { .name = "wait4", .errpid = true,
1149 .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
1150 { .name = "waitid", .errpid = true,
1151 .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
1152};
1153
1154static int syscall_fmt__cmp(const void *name, const void *fmtp)
1155{
1156 const struct syscall_fmt *fmt = fmtp;
1157 return strcmp(name, fmt->name);
1158}
1159
1160static struct syscall_fmt *__syscall_fmt__find(struct syscall_fmt *fmts, const int nmemb, const char *name)
1161{
1162 return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
1163}
1164
1165static struct syscall_fmt *syscall_fmt__find(const char *name)
1166{
1167 const int nmemb = ARRAY_SIZE(syscall_fmts);
1168 return __syscall_fmt__find(syscall_fmts, nmemb, name);
1169}
1170
1171static struct syscall_fmt *__syscall_fmt__find_by_alias(struct syscall_fmt *fmts, const int nmemb, const char *alias)
1172{
1173 int i;
1174
1175 for (i = 0; i < nmemb; ++i) {
1176 if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
1177 return &fmts[i];
1178 }
1179
1180 return NULL;
1181}
1182
1183static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
1184{
1185 const int nmemb = ARRAY_SIZE(syscall_fmts);
1186 return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
1187}
1188
1189/*
1190 * is_exit: is this "exit" or "exit_group"?
1191 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
1192 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
1193 * nonexistent: Just a hole in the syscall table, syscall id not allocated
1194 */
1195struct syscall {
1196 struct tep_event *tp_format;
1197 int nr_args;
1198 int args_size;
1199 struct {
1200 struct bpf_program *sys_enter,
1201 *sys_exit;
1202 } bpf_prog;
1203 bool is_exit;
1204 bool is_open;
1205 bool nonexistent;
1206 struct tep_format_field *args;
1207 const char *name;
1208 struct syscall_fmt *fmt;
1209 struct syscall_arg_fmt *arg_fmt;
1210};
1211
1212/*
1213 * Must match what is in the BPF program:
1214 *
1215 * tools/perf/examples/bpf/augmented_raw_syscalls.c
1216 */
1217struct bpf_map_syscall_entry {
1218 bool enabled;
1219 u16 string_args_len[6];
1220};
1221
1222/*
1223 * We need to have this 'calculated' boolean because in some cases we really
1224 * don't know the duration of a syscall, for instance, when we start
1225 * a session and some threads are waiting for a syscall to finish, say 'poll',
1226 * in which case all we can do is to print "( ? )" for the duration and
1227 * for the start timestamp.
1228 */
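/* e.g. "( 1.234 ms): " in red at 1ms or more, yellow from 0.01ms, "( ): " if unknown. */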
1229static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
1230{
1231 double duration = (double)t / NSEC_PER_MSEC;
1232 size_t printed = fprintf(fp, "(");
1233
1234 if (!calculated)
1235 printed += fprintf(fp, " ");
1236 else if (duration >= 1.0)
1237 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
1238 else if (duration >= 0.01)
1239 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
1240 else
1241 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
1242 return printed + fprintf(fp, "): ");
1243}
1244
1245/**
1246 * filename.ptr: The filename char pointer that will be vfs_getname'd
1247 * filename.entry_str_pos: Where to insert the string translated from
1248 * filename.ptr by the vfs_getname tracepoint/kprobe.
1249 * ret_scnprintf: syscall args may set this to a different syscall return
1250 * formatter, for instance, fcntl may return fds, file flags, etc.
1251 */
1252struct thread_trace {
1253 u64 entry_time;
1254 bool entry_pending;
1255 unsigned long nr_events;
1256 unsigned long pfmaj, pfmin;
1257 char *entry_str;
1258 double runtime_ms;
1259 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
1260 struct {
1261 unsigned long ptr;
1262 short int entry_str_pos;
1263 bool pending_open;
1264 unsigned int namelen;
1265 char *name;
1266 } filename;
1267 struct {
1268 int max;
1269 struct file *table;
1270 } files;
1271
1272 struct intlist *syscall_stats;
1273};
1274
1275static struct thread_trace *thread_trace__new(void)
1276{
1277 struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
1278
1279 if (ttrace) {
1280 ttrace->files.max = -1;
1281 ttrace->syscall_stats = intlist__new(NULL);
1282 }
1283
1284 return ttrace;
1285}
1286
1287static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
1288{
1289 struct thread_trace *ttrace;
1290
1291 if (thread == NULL)
1292 goto fail;
1293
1294 if (thread__priv(thread) == NULL)
1295 thread__set_priv(thread, thread_trace__new());
1296
1297 if (thread__priv(thread) == NULL)
1298 goto fail;
1299
1300 ttrace = thread__priv(thread);
1301 ++ttrace->nr_events;
1302
1303 return ttrace;
1304fail:
1305 color_fprintf(fp, PERF_COLOR_RED,
1306 "WARNING: not enough memory, dropping samples!\n");
1307 return NULL;
1308}
1309
1310
1311void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
1312 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
1313{
1314 struct thread_trace *ttrace = thread__priv(arg->thread);
1315
1316 ttrace->ret_scnprintf = ret_scnprintf;
1317}
1318
1319#define TRACE_PFMAJ (1 << 0)
1320#define TRACE_PFMIN (1 << 1)
1321
1322static const size_t trace__entry_str_size = 2048;
1323
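/* Grow the per-thread fd -> struct file table on demand, zero filling new slots. */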
1324static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
1325{
1326 if (fd < 0)
1327 return NULL;
1328
1329 if (fd > ttrace->files.max) {
1330 struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
1331
1332 if (nfiles == NULL)
1333 return NULL;
1334
1335 if (ttrace->files.max != -1) {
1336 memset(nfiles + ttrace->files.max + 1, 0,
1337 (fd - ttrace->files.max) * sizeof(struct file));
1338 } else {
1339 memset(nfiles, 0, (fd + 1) * sizeof(struct file));
1340 }
1341
1342 ttrace->files.table = nfiles;
1343 ttrace->files.max = fd;
1344 }
1345
1346 return ttrace->files.table + fd;
1347}
1348
1349struct file *thread__files_entry(struct thread *thread, int fd)
1350{
1351 return thread_trace__files_entry(thread__priv(thread), fd);
1352}
1353
1354static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
1355{
1356 struct thread_trace *ttrace = thread__priv(thread);
1357 struct file *file = thread_trace__files_entry(ttrace, fd);
1358
1359 if (file != NULL) {
1360 struct stat st;
1361 if (stat(pathname, &st) == 0)
1362 file->dev_maj = major(st.st_rdev);
1363 file->pathname = strdup(pathname);
1364 if (file->pathname)
1365 return 0;
1366 }
1367
1368 return -1;
1369}
1370
1371static int thread__read_fd_path(struct thread *thread, int fd)
1372{
1373 char linkname[PATH_MAX], pathname[PATH_MAX];
1374 struct stat st;
1375 int ret;
1376
1377 if (thread->pid_ == thread->tid) {
1378 scnprintf(linkname, sizeof(linkname),
1379 "/proc/%d/fd/%d", thread->pid_, fd);
1380 } else {
1381 scnprintf(linkname, sizeof(linkname),
1382 "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
1383 }
1384
1385 if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
1386 return -1;
1387
1388 ret = readlink(linkname, pathname, sizeof(pathname));
1389
1390 if (ret < 0 || ret > st.st_size)
1391 return -1;
1392
1393 pathname[ret] = '\0';
1394 return trace__set_fd_pathname(thread, fd, pathname);
1395}
1396
1397static const char *thread__fd_path(struct thread *thread, int fd,
1398 struct trace *trace)
1399{
1400 struct thread_trace *ttrace = thread__priv(thread);
1401
1402 if (ttrace == NULL || trace->fd_path_disabled)
1403 return NULL;
1404
1405 if (fd < 0)
1406 return NULL;
1407
1408	if (fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL) {
1409 if (!trace->live)
1410 return NULL;
1411 ++trace->stats.proc_getname;
1412 if (thread__read_fd_path(thread, fd))
1413 return NULL;
1414 }
1415
1416 return ttrace->files.table[fd].pathname;
1417}
1418
1419size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
1420{
1421 int fd = arg->val;
1422 size_t printed = scnprintf(bf, size, "%d", fd);
1423 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1424
1425 if (path)
1426 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1427
1428 return printed;
1429}
1430
1431size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1432{
1433 size_t printed = scnprintf(bf, size, "%d", fd);
1434 struct thread *thread = machine__find_thread(trace->host, pid, pid);
1435
1436 if (thread) {
1437 const char *path = thread__fd_path(thread, fd, trace);
1438
1439 if (path)
1440 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1441
1442 thread__put(thread);
1443 }
1444
1445 return printed;
1446}
1447
1448static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
1449 struct syscall_arg *arg)
1450{
1451 int fd = arg->val;
1452 size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
1453 struct thread_trace *ttrace = thread__priv(arg->thread);
1454
1455 if (ttrace && fd >= 0 && fd <= ttrace->files.max)
1456 zfree(&ttrace->files.table[fd].pathname);
1457
1458 return printed;
1459}
1460
1461static void thread__set_filename_pos(struct thread *thread, const char *bf,
1462 unsigned long ptr)
1463{
1464 struct thread_trace *ttrace = thread__priv(thread);
1465
1466 ttrace->filename.ptr = ptr;
1467 ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1468}
1469
1470static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
1471{
1472 struct augmented_arg *augmented_arg = arg->augmented.args;
1473 size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
1474 /*
1475 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
1476 * we would have two strings, each prefixed by its size.
1477 */
1478 int consumed = sizeof(*augmented_arg) + augmented_arg->size;
1479
1480 arg->augmented.args = ((void *)arg->augmented.args) + consumed;
1481 arg->augmented.size -= consumed;
1482
1483 return printed;
1484}
1485
1486static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1487 struct syscall_arg *arg)
1488{
1489 unsigned long ptr = arg->val;
1490
1491 if (arg->augmented.args)
1492 return syscall_arg__scnprintf_augmented_string(arg, bf, size);
1493
1494 if (!arg->trace->vfs_getname)
1495		return scnprintf(bf, size, "%#lx", ptr);
1496
1497 thread__set_filename_pos(arg->thread, bf, ptr);
1498 return 0;
1499}
1500
1501static bool trace__filter_duration(struct trace *trace, double t)
1502{
1503 return t < (trace->duration_filter * NSEC_PER_MSEC);
1504}
1505
1506static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1507{
1508 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1509
1510 return fprintf(fp, "%10.3f ", ts);
1511}
1512
1513/*
1514 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
1515 * using ttrace->entry_time for a thread that receives a sys_exit without
1516 * first having received a sys_enter ("poll" issued before tracing session
1517 * starts, lost sys_enter records due to ring buffer overflow).
1518 */
1519static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1520{
1521 if (tstamp > 0)
1522 return __trace__fprintf_tstamp(trace, tstamp, fp);
1523
1524 return fprintf(fp, " ? ");
1525}
1526
1527static bool done = false;
1528static bool interrupted = false;
1529
1530static void sig_handler(int sig)
1531{
1532 done = true;
1533 interrupted = sig == SIGINT;
1534}
1535
1536static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
1537{
1538 size_t printed = 0;
1539
1540 if (trace->multiple_threads) {
1541 if (trace->show_comm)
1542 printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1543 printed += fprintf(fp, "%d ", thread->tid);
1544 }
1545
1546 return printed;
1547}
1548
1549static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1550 u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
1551{
1552 size_t printed = 0;
1553
1554 if (trace->show_tstamp)
1555 printed = trace__fprintf_tstamp(trace, tstamp, fp);
1556 if (trace->show_duration)
1557 printed += fprintf_duration(duration, duration_calculated, fp);
1558 return printed + trace__fprintf_comm_tid(trace, thread, fp);
1559}
1560
1561static int trace__process_event(struct trace *trace, struct machine *machine,
1562 union perf_event *event, struct perf_sample *sample)
1563{
1564 int ret = 0;
1565
1566 switch (event->header.type) {
1567 case PERF_RECORD_LOST:
1568 color_fprintf(trace->output, PERF_COLOR_RED,
1569 "LOST %" PRIu64 " events!\n", event->lost.lost);
1570 ret = machine__process_lost_event(machine, event, sample);
1571 break;
1572 default:
1573 ret = machine__process_event(machine, event, sample);
1574 break;
1575 }
1576
1577 return ret;
1578}
1579
1580static int trace__tool_process(struct perf_tool *tool,
1581 union perf_event *event,
1582 struct perf_sample *sample,
1583 struct machine *machine)
1584{
1585 struct trace *trace = container_of(tool, struct trace, tool);
1586 return trace__process_event(trace, machine, event, sample);
1587}
1588
1589static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
1590{
1591 struct machine *machine = vmachine;
1592
1593 if (machine->kptr_restrict_warned)
1594 return NULL;
1595
1596 if (symbol_conf.kptr_restrict) {
1597 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1598 "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
1599 "Kernel samples will not be resolved.\n");
1600 machine->kptr_restrict_warned = true;
1601 return NULL;
1602 }
1603
1604 return machine__resolve_kernel_addr(vmachine, addrp, modp);
1605}
1606
1607static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
1608{
1609 int err = symbol__init(NULL);
1610
1611 if (err)
1612 return err;
1613
1614 trace->host = machine__new_host();
1615 if (trace->host == NULL)
1616 return -ENOMEM;
1617
1618 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1619 if (err < 0)
1620 goto out;
1621
1622 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1623 evlist->core.threads, trace__tool_process, false,
1624 1);
1625out:
1626 if (err)
1627 symbol__exit();
1628
1629 return err;
1630}
1631
1632static void trace__symbols__exit(struct trace *trace)
1633{
1634 machine__exit(trace->host);
1635 trace->host = NULL;
1636
1637 symbol__exit();
1638}
1639
1640static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
1641{
1642 int idx;
1643
1644 if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
1645 nr_args = sc->fmt->nr_args;
1646
1647 sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
1648 if (sc->arg_fmt == NULL)
1649 return -1;
1650
1651 for (idx = 0; idx < nr_args; ++idx) {
1652 if (sc->fmt)
1653 sc->arg_fmt[idx] = sc->fmt->arg[idx];
1654 }
1655
1656 sc->nr_args = nr_args;
1657 return 0;
1658}
1659
1660static struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
1661 { .name = "msr", .scnprintf = SCA_X86_MSR, .strtoul = STUL_X86_MSR, },
1662 { .name = "vector", .scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, },
1663};
1664
1665static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
1666{
1667 const struct syscall_arg_fmt *fmt = fmtp;
1668 return strcmp(name, fmt->name);
1669}
1670
1671static struct syscall_arg_fmt *
1672__syscall_arg_fmt__find_by_name(struct syscall_arg_fmt *fmts, const int nmemb, const char *name)
1673{
1674 return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
1675}
1676
1677static struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
1678{
1679 const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
1680 return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
1681}
1682
1683static struct tep_format_field *
1684syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
1685{
1686 struct tep_format_field *last_field = NULL;
1687 int len;
1688
1689 for (; field; field = field->next, ++arg) {
1690 last_field = field;
1691
1692 if (arg->scnprintf)
1693 continue;
1694
1695 len = strlen(field->name);
1696
1697 if (strcmp(field->type, "const char *") == 0 &&
1698 ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
1699 strstr(field->name, "path") != NULL))
1700 arg->scnprintf = SCA_FILENAME;
1701 else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
1702 arg->scnprintf = SCA_PTR;
1703 else if (strcmp(field->type, "pid_t") == 0)
1704 arg->scnprintf = SCA_PID;
1705 else if (strcmp(field->type, "umode_t") == 0)
1706 arg->scnprintf = SCA_MODE_T;
1707 else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
1708 arg->scnprintf = SCA_CHAR_ARRAY;
1709 arg->nr_entries = field->arraylen;
1710 } else if ((strcmp(field->type, "int") == 0 ||
1711 strcmp(field->type, "unsigned int") == 0 ||
1712 strcmp(field->type, "long") == 0) &&
1713 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
1714 /*
1715 * /sys/kernel/tracing/events/syscalls/sys_enter*
1716 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
1717 * 65 int
1718 * 23 unsigned int
1719 * 7 unsigned long
1720 */
1721 arg->scnprintf = SCA_FD;
1722 } else {
1723 struct syscall_arg_fmt *fmt = syscall_arg_fmt__find_by_name(field->name);
1724
1725 if (fmt) {
1726 arg->scnprintf = fmt->scnprintf;
1727 arg->strtoul = fmt->strtoul;
1728 }
1729 }
1730 }
1731
1732 return last_field;
1733}
1734
1735static int syscall__set_arg_fmts(struct syscall *sc)
1736{
1737 struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);
1738
1739 if (last_field)
1740 sc->args_size = last_field->offset + last_field->size;
1741
1742 return 0;
1743}
1744
1745static int trace__read_syscall_info(struct trace *trace, int id)
1746{
1747 char tp_name[128];
1748 struct syscall *sc;
1749 const char *name = syscalltbl__name(trace->sctbl, id);
1750
1751#ifdef HAVE_SYSCALL_TABLE_SUPPORT
1752 if (trace->syscalls.table == NULL) {
1753 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
1754 if (trace->syscalls.table == NULL)
1755 return -ENOMEM;
1756 }
1757#else
1758 if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
1759		// When using libaudit we don't know the max syscall id beforehand
1760 struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
1761
1762 if (table == NULL)
1763 return -ENOMEM;
1764
1765		// memset the whole table if brand new, otherwise just the newly allocated members
1766 if (trace->syscalls.table == NULL)
1767 memset(table, 0, (id + 1) * sizeof(*sc));
1768 else
1769 memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
1770
1771 trace->syscalls.table = table;
1772 trace->sctbl->syscalls.max_id = id;
1773 }
1774#endif
1775 sc = trace->syscalls.table + id;
1776 if (sc->nonexistent)
1777 return 0;
1778
1779 if (name == NULL) {
1780 sc->nonexistent = true;
1781 return 0;
1782 }
1783
1784 sc->name = name;
1785 sc->fmt = syscall_fmt__find(sc->name);
1786
1787 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
1788 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1789
1790 if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
1791 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
1792 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1793 }
1794
1795 if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
1796 return -ENOMEM;
1797
1798 if (IS_ERR(sc->tp_format))
1799 return PTR_ERR(sc->tp_format);
1800
1801 sc->args = sc->tp_format->format.fields;
1802	/*
1803	 * The first field, '__syscall_nr' or 'nr' depending on the kernel
1804	 * version, holds the syscall number and is redundant here, so check
1805	 * for it and drop it when present.
1806	 */
1807 if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
1808 sc->args = sc->args->next;
1809 --sc->nr_args;
1810 }
1811
1812 sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
1813 sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
1814
1815 return syscall__set_arg_fmts(sc);
1816}
1817
1818static int evsel__init_tp_arg_scnprintf(struct evsel *evsel)
1819{
1820 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
1821
1822 if (fmt != NULL) {
1823 syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields);
1824 return 0;
1825 }
1826
1827 return -ENOMEM;
1828}
1829
1830static int intcmp(const void *a, const void *b)
1831{
1832 const int *one = a, *another = b;
1833
1834 return *one - *another;
1835}
1836
1837static int trace__validate_ev_qualifier(struct trace *trace)
1838{
1839 int err = 0;
1840 bool printed_invalid_prefix = false;
1841 struct str_node *pos;
1842 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
1843
1844 trace->ev_qualifier_ids.entries = malloc(nr_allocated *
1845 sizeof(trace->ev_qualifier_ids.entries[0]));
1846
1847 if (trace->ev_qualifier_ids.entries == NULL) {
1848 fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
1849 trace->output);
1850 err = -EINVAL;
1851 goto out;
1852 }
1853
1854 strlist__for_each_entry(pos, trace->ev_qualifier) {
1855 const char *sc = pos->s;
1856 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
1857
1858 if (id < 0) {
1859 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
1860 if (id >= 0)
1861 goto matches;
1862
1863 if (!printed_invalid_prefix) {
1864 pr_debug("Skipping unknown syscalls: ");
1865 printed_invalid_prefix = true;
1866 } else {
1867 pr_debug(", ");
1868 }
1869
1870 pr_debug("%s", sc);
1871 continue;
1872 }
1873matches:
1874 trace->ev_qualifier_ids.entries[nr_used++] = id;
1875 if (match_next == -1)
1876 continue;
1877
1878 while (1) {
1879 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
1880 if (id < 0)
1881 break;
1882 if (nr_allocated == nr_used) {
1883 void *entries;
1884
1885 nr_allocated += 8;
1886 entries = realloc(trace->ev_qualifier_ids.entries,
1887 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
1888 if (entries == NULL) {
1889 err = -ENOMEM;
1890 fputs("\nError:\t Not enough memory for parsing\n", trace->output);
1891 goto out_free;
1892 }
1893 trace->ev_qualifier_ids.entries = entries;
1894 }
1895 trace->ev_qualifier_ids.entries[nr_used++] = id;
1896 }
1897 }
1898
1899 trace->ev_qualifier_ids.nr = nr_used;
1900 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
1901out:
1902 if (printed_invalid_prefix)
1903 pr_debug("\n");
1904 return err;
1905out_free:
1906 zfree(&trace->ev_qualifier_ids.entries);
1907 trace->ev_qualifier_ids.nr = 0;
1908 goto out;
1909}
1910
1911static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
1912{
1913 bool in_ev_qualifier;
1914
1915 if (trace->ev_qualifier_ids.nr == 0)
1916 return true;
1917
1918 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
1919 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
1920
1921 if (in_ev_qualifier)
1922 return !trace->not_ev_qualifier;
1923
1924 return trace->not_ev_qualifier;
1925}
1926
1927/*
1928 * args is to be interpreted as a series of longs but we need to handle
1929 * 8-byte unaligned accesses: args points to raw_data within the event
1930 * and raw_data is guaranteed not to be 8-byte aligned because it is
1931 * preceded by raw_size, which is a u32. So we need to copy args to a temp
1932 * variable to read it. Most notably this avoids extended load instructions
1933 * on unaligned addresses.
1934 */
1935unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
1936{
1937 unsigned long val;
1938 unsigned char *p = arg->args + sizeof(unsigned long) * idx;
1939
1940 memcpy(&val, p, sizeof(val));
1941 return val;
1942}
1943
1944static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
1945 struct syscall_arg *arg)
1946{
1947 if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
1948 return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);
1949
1950 return scnprintf(bf, size, "arg%d: ", arg->idx);
1951}
1952
1953/*
1954 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
1955 * as the mount 'flags' argument, where a magic flag needs to be ignored, see
1956 * the comment in tools/perf/trace/beauty/mount_flags.c
1957 */
1958static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
1959{
1960 if (fmt && fmt->mask_val)
1961 return fmt->mask_val(arg, val);
1962
1963 return val;
1964}
1965
1966static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
1967 struct syscall_arg *arg, unsigned long val)
1968{
1969 if (fmt && fmt->scnprintf) {
1970 arg->val = val;
1971 if (fmt->parm)
1972 arg->parm = fmt->parm;
1973 return fmt->scnprintf(bf, size, arg);
1974 }
1975 return scnprintf(bf, size, "%ld", val);
1976}
1977
1978static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
1979 unsigned char *args, void *augmented_args, int augmented_args_size,
1980 struct trace *trace, struct thread *thread)
1981{
1982 size_t printed = 0;
1983 unsigned long val;
1984 u8 bit = 1;
1985 struct syscall_arg arg = {
1986 .args = args,
1987 .augmented = {
1988 .size = augmented_args_size,
1989 .args = augmented_args,
1990 },
1991 .idx = 0,
1992 .mask = 0,
1993 .trace = trace,
1994 .thread = thread,
1995 .show_string_prefix = trace->show_string_prefix,
1996 };
1997 struct thread_trace *ttrace = thread__priv(thread);
1998
1999 /*
2000 * Things like fcntl will set this in its 'cmd' formatter to pick the
2001 * right formatter for the return value (an fd? file flags?), which is
2002 * not needed for syscalls that always return a given type, say an fd.
2003 */
2004 ttrace->ret_scnprintf = NULL;
2005
2006 if (sc->args != NULL) {
2007 struct tep_format_field *field;
2008
2009 for (field = sc->args; field;
2010 field = field->next, ++arg.idx, bit <<= 1) {
2011 if (arg.mask & bit)
2012 continue;
2013
2014 arg.fmt = &sc->arg_fmt[arg.idx];
2015 val = syscall_arg__val(&arg, arg.idx);
2016 /*
2017 * Some syscall args need some mask, most don't and
2018 * return val untouched.
2019 */
2020 val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val);
2021
2022 /*
2023 * Suppress this argument if its value is zero and
2024			 * we don't have a string associated in an
2025 * strarray for it.
2026 */
2027 if (val == 0 &&
2028 !trace->show_zeros &&
2029 !(sc->arg_fmt &&
2030 (sc->arg_fmt[arg.idx].show_zero ||
2031 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
2032 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
2033 sc->arg_fmt[arg.idx].parm))
2034 continue;
2035
2036 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
2037
2038 if (trace->show_arg_names)
2039 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
2040
2041 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx],
2042 bf + printed, size - printed, &arg, val);
2043 }
2044 } else if (IS_ERR(sc->tp_format)) {
2045 /*
2046 * If we managed to read the tracepoint /format file, then we
2047 * may end up not having any args, like with gettid(), so only
2048 * print the raw args when we didn't manage to read it.
2049 */
2050 while (arg.idx < sc->nr_args) {
2051 if (arg.mask & bit)
2052 goto next_arg;
2053 val = syscall_arg__val(&arg, arg.idx);
2054 if (printed)
2055 printed += scnprintf(bf + printed, size - printed, ", ");
2056 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
2057 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val);
2058next_arg:
2059 ++arg.idx;
2060 bit <<= 1;
2061 }
2062 }
2063
2064 return printed;
2065}
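
/*
 * The result is the argument list in output lines such as:
 *
 *	openat(dfd: CWD, filename: "/etc/passwd", flags: RDONLY|CLOEXEC)
 *
 * with zero valued args suppressed unless trace->show_zeros or a per-arg
 * show_zero asks for them.
 */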
2066
2067typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
2068 union perf_event *event,
2069 struct perf_sample *sample);
2070
2071static struct syscall *trace__syscall_info(struct trace *trace,
2072 struct evsel *evsel, int id)
2073{
2074 int err = 0;
2075
2076 if (id < 0) {
2077
2078 /*
2079 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
2080 * before that, leaving at a higher verbosity level till that is
2081 * explained. Reproduced with plain ftrace with:
2082 *
2083 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
2084 * grep "NR -1 " /t/trace_pipe
2085 *
2086 * After generating some load on the machine.
2087 */
2088 if (verbose > 1) {
2089 static u64 n;
2090 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
2091 id, evsel__name(evsel), ++n);
2092 }
2093 return NULL;
2094 }
2095
2096 err = -EINVAL;
2097
2098#ifdef HAVE_SYSCALL_TABLE_SUPPORT
2099 if (id > trace->sctbl->syscalls.max_id) {
2100#else
2101 if (id >= trace->sctbl->syscalls.max_id) {
2102 /*
2103 * With libaudit we don't know beforehand what is the max_id,
2104 * so we let trace__read_syscall_info() figure that out as we
2105 * go on reading syscalls.
2106 */
2107 err = trace__read_syscall_info(trace, id);
2108 if (err)
2109#endif
2110 goto out_cant_read;
2111 }
2112
2113 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2114 (err = trace__read_syscall_info(trace, id)) != 0)
2115 goto out_cant_read;
2116
2117 if (trace->syscalls.table[id].name == NULL) {
2118 if (trace->syscalls.table[id].nonexistent)
2119 return NULL;
2120 goto out_cant_read;
2121 }
2122
2123 return &trace->syscalls.table[id];
2124
2125out_cant_read:
2126 if (verbose > 0) {
2127 char sbuf[STRERR_BUFSIZE];
2128 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
2129 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2130 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
2131 fputs(" information\n", trace->output);
2132 }
2133 return NULL;
2134}
2135
2136struct syscall_stats {
2137 struct stats stats;
2138 u64 nr_failures;
2139 int max_errno;
2140 u32 *errnos;
2141};
2142
2143static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace,
2144 int id, struct perf_sample *sample, long err, bool errno_summary)
2145{
2146 struct int_node *inode;
2147 struct syscall_stats *stats;
2148 u64 duration = 0;
2149
2150 inode = intlist__findnew(ttrace->syscall_stats, id);
2151 if (inode == NULL)
2152 return;
2153
2154 stats = inode->priv;
2155 if (stats == NULL) {
2156 stats = malloc(sizeof(*stats));
2157 if (stats == NULL)
2158 return;
2159
2160 stats->nr_failures = 0;
2161 stats->max_errno = 0;
2162 stats->errnos = NULL;
2163 init_stats(&stats->stats);
2164 inode->priv = stats;
2165 }
2166
2167 if (ttrace->entry_time && sample->time > ttrace->entry_time)
2168 duration = sample->time - ttrace->entry_time;
2169
2170 update_stats(&stats->stats, duration);
2171
2172 if (err < 0) {
2173 ++stats->nr_failures;
2174
2175 if (!errno_summary)
2176 return;
2177
2178 err = -err;
2179 if (err > stats->max_errno) {
2180 u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32));
2181
2182 if (new_errnos) {
2183 memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32));
2184 } else {
2185 pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n",
2186 thread__comm_str(thread), thread->pid_, thread->tid);
2187 return;
2188 }
2189
2190 stats->errnos = new_errnos;
2191 stats->max_errno = err;
2192 }
2193
2194 ++stats->errnos[err - 1];
2195 }
2196}
2197
2198static int trace__printf_interrupted_entry(struct trace *trace)
2199{
2200 struct thread_trace *ttrace;
2201 size_t printed;
2202 int len;
2203
2204 if (trace->failure_only || trace->current == NULL)
2205 return 0;
2206
2207 ttrace = thread__priv(trace->current);
2208
2209 if (!ttrace->entry_pending)
2210 return 0;
2211
2212 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
2213 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2214
2215 if (len < trace->args_alignment - 4)
2216 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2217
2218 printed += fprintf(trace->output, " ...\n");
2219
2220 ttrace->entry_pending = false;
2221 ++trace->nr_events_printed;
2222
2223 return printed;
2224}
2225
2226static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
2227 struct perf_sample *sample, struct thread *thread)
2228{
2229 int printed = 0;
2230
2231 if (trace->print_sample) {
2232 double ts = (double)sample->time / NSEC_PER_MSEC;
2233
2234 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2235 evsel__name(evsel), ts,
2236 thread__comm_str(thread),
2237 sample->pid, sample->tid, sample->cpu);
2238 }
2239
2240 return printed;
2241}
2242
static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
{
	void *augmented_args = NULL;
	/*
	 * For now, with BPF raw_augmented we hook into raw_syscalls:sys_enter
	 * and there we get all 6 syscall args plus the tracepoint common fields
	 * that get calculated at the start, and the syscall_nr (another long).
	 * So we check if that is the case and, if so, don't look past
	 * sc->args_size but past the full raw_syscalls:sys_enter payload,
	 * which is fixed.
	 *
	 * We'll revisit this later, to pass s->args_size to the BPF augmenter
	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
	 * copies only what we need for each syscall, like what happens when we
	 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
	 * traffic to just what is needed for each syscall.
	 */
	int args_size = raw_augmented_args_size ?: sc->args_size;

	*augmented_args_size = sample->raw_size - args_size;
	if (*augmented_args_size > 0)
		augmented_args = sample->raw_data + args_size;

	return augmented_args;
}

static void syscall__exit(struct syscall *sc)
{
	if (!sc)
		return;

	free(sc->arg_fmt);
}

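/*
 * Handle raw_syscalls:sys_enter (or syscalls:sys_enter_NAME): format the
 * syscall name and beautified arguments into ttrace->entry_str, then either
 * print it right away (for syscalls that don't return, like exit_group) or
 * leave it pending for trace__sys_exit() to complete with the return value.
 */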
static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	int printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	int augmented_args_size = 0;
	void *augmented_args = NULL;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace);
	/*
	 * If this is raw_syscalls:sys_enter, then it always comes with the 6
	 * possible arguments, even if the syscall being handled, say "openat",
	 * uses only 4 arguments. This breaks the syscall__augmented_args()
	 * check for augmented args, as we calculate syscall->args_size using
	 * each syscalls:sys_enter_NAME tracefs format file. So when handling,
	 * say, the openat syscall, we would end up getting 6 args for the
	 * raw_syscalls:sys_enter event when we expected just 4, and would
	 * mistakenly think that the extra 2 u64 args are the augmented
	 * filename. So just check here and avoid using augmented syscalls
	 * when the evsel is the raw_syscalls one.
	 */
	if (evsel != trace->syscalls.events.sys_enter)
		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, augmented_args, augmented_args_size, trace, thread);

	if (sc->is_exit) {
		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
			int alignment = 0;

			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
			if (trace->args_alignment > printed)
				alignment = trace->args_alignment - printed;
			fprintf(trace->output, "%*s= ?\n", alignment, " ");
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample)
{
	struct thread_trace *ttrace;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	char msg[1024];
	void *args, *augmented_args = NULL;
	int augmented_args_size;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	/*
	 * We need to get ttrace just to make sure it is there when
	 * syscall__scnprintf_args() and the rest of the beautifiers access it
	 * via struct syscall_arg.
	 */
	if (ttrace == NULL)
		goto out_put;

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
	fprintf(trace->output, "%s", msg);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample,
				    struct callchain_cursor *cursor)
{
	struct addr_location al;
	int max_stack = evsel->core.attr.sample_max_stack ?
			evsel->core.attr.sample_max_stack :
			trace->max_stack;
	int err;

	if (machine__resolve(trace->host, &al, sample) < 0)
		return -1;

	err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
	addr_location__put(&al);
	return err;
}

static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
{
	/* TODO: user-configurable print_opts */
	const unsigned int print_opts = EVSEL__PRINT_SYM |
					EVSEL__PRINT_DSO |
					EVSEL__PRINT_UNKNOWN_AS_ADDR;

	return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
}

static const char *errno_to_name(struct evsel *evsel, int err)
{
	struct perf_env *env = evsel__env(evsel);
	const char *arch_name = perf_env__arch(env);

	return arch_syscalls__strerrno(arch_name, err);
}

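/*
 * Handle raw_syscalls:sys_exit: compute the syscall duration, update the
 * --summary stats, finish the pending entry line (or print a "continued"
 * one) and pretty print the return value: errno names for failures, pid +
 * comm for syscalls returning pids, hex for pointer returns, etc.
 */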
static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
			   union perf_event *event __maybe_unused,
			   struct perf_sample *sample)
{
	long ret;
	u64 duration = 0;
	bool duration_calculated = false;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
	int alignment = trace->args_alignment;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);

	if (trace->summary)
		thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);

	if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
		ttrace->filename.pending_open = false;
		++trace->stats.vfs_getname;
	}

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
		duration_calculated = true;
	} else if (trace->duration_filter)
		goto out;

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	if (trace->summary_only || (ret >= 0 && trace->failure_only))
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);

	if (ttrace->entry_pending) {
		printed = fprintf(trace->output, "%s", ttrace->entry_str);
	} else {
		printed += fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		printed += 9;
		printed += fprintf(trace->output, "]: %s()", sc->name);
	}

	printed++; /* the closing ')' */

	if (alignment > printed)
		alignment -= printed;
	else
		alignment = 0;

	fprintf(trace->output, ")%*s= ", alignment, " ");

	if (sc->fmt == NULL) {
		if (ret < 0)
			goto errno_print;
signed_print:
		fprintf(trace->output, "%ld", ret);
	} else if (ret < 0) {
errno_print: {
		char bf[STRERR_BUFSIZE];
		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
			   *e = errno_to_name(evsel, -ret);

		fprintf(trace->output, "-1 %s (%s)", e, emsg);
	}
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, "0 (Timeout)");
	else if (ttrace->ret_scnprintf) {
		char bf[1024];
		struct syscall_arg arg = {
			.val = ret,
			.thread = thread,
			.trace = trace,
		};
		ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
		ttrace->ret_scnprintf = NULL;
		fprintf(trace->output, "%s", bf);
	} else if (sc->fmt->hexret)
		fprintf(trace->output, "%#lx", ret);
	else if (sc->fmt->errpid) {
		struct thread *child = machine__find_thread(trace->host, ret, ret);

		if (child != NULL) {
			fprintf(trace->output, "%ld", ret);
			if (child->comm_set)
				fprintf(trace->output, " (%s)", thread__comm_str(child));
			thread__put(child);
		}
	} else
		goto signed_print;

	fputc('\n', trace->output);

	/*
	 * For the sake of --max-events, we only consider an 'event' to be a
	 * non-filtered sys_enter + sys_exit pair and the other tracepoint
	 * events.
	 */
	if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
		interrupted = true;

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
out:
	ttrace->entry_pending = false;
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

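/*
 * probe:vfs_getname handler: record the pathname being resolved so that
 * trace__sys_exit() can associate it with the returned fd, and splice it
 * into the pending entry string, replacing the raw pointer that was
 * formatted at sys_enter time.
 */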
static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	struct thread_trace *ttrace;
	size_t filename_len, entry_str_len, to_move;
	ssize_t remaining_space;
	char *pos;
	const char *filename = evsel__rawptr(evsel, sample, "pathname");

	if (!thread)
		goto out;

	ttrace = thread__priv(thread);
	if (!ttrace)
		goto out_put;

	filename_len = strlen(filename);
	if (filename_len == 0)
		goto out_put;

	if (ttrace->filename.namelen < filename_len) {
		char *f = realloc(ttrace->filename.name, filename_len + 1);

		if (f == NULL)
			goto out_put;

		ttrace->filename.namelen = filename_len;
		ttrace->filename.name = f;
	}

	strcpy(ttrace->filename.name, filename);
	ttrace->filename.pending_open = true;

	if (!ttrace->filename.ptr)
		goto out_put;

	entry_str_len = strlen(ttrace->entry_str);
	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
	if (remaining_space <= 0)
		goto out_put;

	if (filename_len > (size_t)remaining_space) {
		filename += filename_len - remaining_space;
		filename_len = remaining_space;
	}

	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
	memmove(pos + filename_len, pos, to_move);
	memcpy(pos, filename, filename_len);

	ttrace->filename.ptr = 0;
	ttrace->filename.entry_str_pos = 0;
out_put:
	thread__put(thread);
out:
	return 0;
}

static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample)
{
	u64 runtime = evsel__intval(evsel, sample, "runtime");
	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
	struct thread *thread = machine__findnew_thread(trace->host,
							sample->pid,
							sample->tid);
	struct thread_trace *ttrace = thread__trace(thread, trace->output);

	if (ttrace == NULL)
		goto out_dump;

	ttrace->runtime_ms += runtime_ms;
	trace->runtime_ms += runtime_ms;
out_put:
	thread__put(thread);
	return 0;

out_dump:
	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
		evsel->name,
		evsel__strval(evsel, sample, "comm"),
		(pid_t)evsel__intval(evsel, sample, "pid"),
		runtime,
		evsel__intval(evsel, sample, "vruntime"));
	goto out_put;
}

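/*
 * binary__fprintf() callback used to dump BPF output events: emit only the
 * char data, with non-printable bytes shown as '.', ignoring all the other
 * hexdump-style decorations.
 */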
static int bpf_output__printer(enum binary_printer_ops op,
			       unsigned int val, void *extra __maybe_unused, FILE *fp)
{
	unsigned char ch = (unsigned char)val;

	switch (op) {
	case BINARY_PRINT_CHAR_DATA:
		return fprintf(fp, "%c", isprint(ch) ? ch : '.');
	case BINARY_PRINT_DATA_BEGIN:
	case BINARY_PRINT_LINE_BEGIN:
	case BINARY_PRINT_ADDR:
	case BINARY_PRINT_NUM_DATA:
	case BINARY_PRINT_NUM_PAD:
	case BINARY_PRINT_SEP:
	case BINARY_PRINT_CHAR_PAD:
	case BINARY_PRINT_LINE_END:
	case BINARY_PRINT_DATA_END:
	default:
		break;
	}

	return 0;
}

static void bpf_output__fprintf(struct trace *trace,
				struct perf_sample *sample)
{
	binary__fprintf(sample->raw_data, sample->raw_size, 8,
			bpf_output__printer, NULL, trace->output);
	++trace->nr_events_printed;
}

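/*
 * Print a non-syscall tracepoint using the beautifiers: walk the event's
 * format fields, read each value from the sample (handling dynamic arrays),
 * mask/suppress values as configured and format each one with its per-field
 * syscall_arg_fmt, producing "name: value" pairs.
 */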
static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
				       struct thread *thread, void *augmented_args, int augmented_args_size)
{
	char bf[2048];
	size_t size = sizeof(bf);
	struct tep_format_field *field = evsel->tp_format->format.fields;
	struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel);
	size_t printed = 0;
	unsigned long val;
	u8 bit = 1;
	struct syscall_arg syscall_arg = {
		.augmented = {
			.size = augmented_args_size,
			.args = augmented_args,
		},
		.idx = 0,
		.mask = 0,
		.trace = trace,
		.thread = thread,
		.show_string_prefix = trace->show_string_prefix,
	};

	for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) {
		if (syscall_arg.mask & bit)
			continue;

		syscall_arg.len = 0;
		syscall_arg.fmt = arg;
		if (field->flags & TEP_FIELD_IS_ARRAY) {
			int offset = field->offset;

			if (field->flags & TEP_FIELD_IS_DYNAMIC) {
				offset = format_field__intval(field, sample, evsel->needs_swap);
				syscall_arg.len = offset >> 16;
				offset &= 0xffff;
			}

			val = (uintptr_t)(sample->raw_data + offset);
		} else
			val = format_field__intval(field, sample, evsel->needs_swap);
		/*
		 * Some syscall args need some mask; most don't, and
		 * the value comes back untouched.
		 */
		val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);

		/*
		 * Suppress this argument if its value is zero and we don't
		 * have a string associated with it in a strarray.
		 */
		if (val == 0 &&
		    !trace->show_zeros &&
		    !((arg->show_zero ||
		       arg->scnprintf == SCA_STRARRAY ||
		       arg->scnprintf == SCA_STRARRAYS) &&
		      arg->parm))
			continue;

		printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");

		/*
		 * XXX Perhaps we should have a show_tp_arg_names,
		 * leaving show_arg_names just for syscalls?
		 */
		if (1 || trace->show_arg_names)
			printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);

		printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
	}

	return printed + fprintf(trace->output, "%s", bf);
}

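/*
 * Catch-all handler for the other --event tracepoints: print a timestamped
 * line with comm/tid and then the payload, preferring, in order: the
 * augmented syscall formatter, the BPF output dumper, our own tracepoint
 * field beautifiers or libtraceevent's default formatting.
 */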
static int trace__event_handler(struct trace *trace, struct evsel *evsel,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample)
{
	struct thread *thread;
	int callchain_ret = 0;
	/*
	 * Check if we called perf_evsel__disable(evsel) due to, for instance,
	 * this event's max_events having been hit and this is an entry coming
	 * from the ring buffer that we should discard, since the max events
	 * have already been considered/printed.
	 */
	if (evsel->disabled)
		return 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	trace__printf_interrupted_entry(trace);
	trace__fprintf_tstamp(trace, sample->time, trace->output);

	if (trace->trace_syscalls && trace->show_duration)
		fprintf(trace->output, "( ): ");

	if (thread)
		trace__fprintf_comm_tid(trace, thread, trace->output);

	if (evsel == trace->syscalls.events.augmented) {
		int id = perf_evsel__sc_tp_uint(evsel, id, sample);
		struct syscall *sc = trace__syscall_info(trace, evsel, id);

		if (sc) {
			fprintf(trace->output, "%s(", sc->name);
			trace__fprintf_sys_enter(trace, evsel, sample);
			fputc(')', trace->output);
			goto newline;
		}

		/*
		 * XXX: Not having the associated syscall info or not finding/adding
		 * the thread should never happen, but if it does...
		 * fall through and print it as a bpf_output event.
		 */
	}

	fprintf(trace->output, "%s(", evsel->name);

	if (evsel__is_bpf_output(evsel)) {
		bpf_output__fprintf(trace, sample);
	} else if (evsel->tp_format) {
		if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
		    trace__fprintf_sys_enter(trace, evsel, sample)) {
			if (trace->libtraceevent_print) {
				event_format__fprintf(evsel->tp_format, sample->cpu,
						      sample->raw_data, sample->raw_size,
						      trace->output);
			} else {
				trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
			}
		}
	}

newline:
	fprintf(trace->output, ")\n");

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));

	++trace->nr_events_printed;

	if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
		evsel__disable(evsel);
		evsel__close(evsel);
	}
out:
	thread__put(thread);
	return 0;
}

static void print_location(FILE *f, struct perf_sample *sample,
			   struct addr_location *al,
			   bool print_dso, bool print_sym)
{
	if ((verbose > 0 || print_dso) && al->map)
		fprintf(f, "%s@", al->map->dso->long_name);

	if ((verbose > 0 || print_sym) && al->sym)
		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
			al->addr - al->sym->start);
	else if (al->map)
		fprintf(f, "0x%" PRIx64, al->addr);
	else
		fprintf(f, "0x%" PRIx64, sample->addr);
}

static int trace__pgfault(struct trace *trace,
			  struct evsel *evsel,
			  union perf_event *event __maybe_unused,
			  struct perf_sample *sample)
{
	struct thread *thread;
	struct addr_location al;
	char map_type = 'd';
	struct thread_trace *ttrace;
	int err = -1;
	int callchain_ret = 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out_put;
			callchain_ret = 1;
		}
	}

	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
		ttrace->pfmaj++;
	else
		ttrace->pfmin++;

	if (trace->summary_only)
		goto out;

	thread__find_symbol(thread, sample->cpumode, sample->ip, &al);

	trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);

	fprintf(trace->output, "%sfault [",
		evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
		"maj" : "min");

	print_location(trace->output, sample, &al, false, true);

	fprintf(trace->output, "] => ");

	thread__find_symbol(thread, sample->cpumode, sample->addr, &al);

	if (!al.map) {
		thread__find_symbol(thread, sample->cpumode, sample->addr, &al);

		if (al.map)
			map_type = 'x';
		else
			map_type = '?';
	}

	print_location(trace->output, sample, &al, true, false);

	fprintf(trace->output, " (%c%c)\n", map_type, al.level);

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));

	++trace->nr_events_printed;
out:
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static void trace__set_base_time(struct trace *trace,
				 struct evsel *evsel,
				 struct perf_sample *sample)
{
	/*
	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
	 * and don't use sample->time unconditionally, as we may end up having
	 * some other event in the future without PERF_SAMPLE_TIME for a good
	 * reason, i.e. we may not be interested in its timestamps, just in
	 * it taking place, picking some piece of information when it
	 * appears in our event stream (vfs_getname comes to mind).
	 */
	if (trace->base_time == 0 && !trace->full_time &&
	    (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
		trace->base_time = sample->time;
}

static int trace__process_sample(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct evsel *evsel,
				 struct machine *machine __maybe_unused)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	struct thread *thread;
	int err = 0;

	tracepoint_handler handler = evsel->handler;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	if (thread && thread__is_filtered(thread))
		goto out;

	trace__set_base_time(trace, evsel, sample);

	if (handler) {
		++trace->nr_events;
		handler(trace, evsel, event, sample);
	}
out:
	thread__put(thread);
	return err;
}

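/*
 * Implement 'perf trace record': build a 'perf record' command line with
 * the raw_syscalls (or syscalls, on older kernels) tracepoints, optional
 * page fault events and a filter to exclude our own pid, then hand it
 * over to cmd_record().
 */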
static int trace__record(struct trace *trace, int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-R",
		"-m", "1024",
		"-c", "1",
	};
	pid_t pid = getpid();
	char *filter = asprintf__tp_filter_pids(1, &pid);
	const char * const sc_args[] = { "-e", };
	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
	const char * const majpf_args[] = { "-e", "major-faults" };
	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
	const char * const minpf_args[] = { "-e", "minor-faults" };
	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
	int err = -1;

	/* +3 is for the event string below and the pid filter */
	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 +
		majpf_args_nr + minpf_args_nr + argc;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL || filter == NULL)
		goto out_free;

	j = 0;
	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[j++] = record_args[i];

	if (trace->trace_syscalls) {
		for (i = 0; i < sc_args_nr; i++)
			rec_argv[j++] = sc_args[i];

		/* event string may be different for older kernels - e.g., RHEL6 */
		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
		else if (is_valid_tracepoint("syscalls:sys_enter"))
			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
		else {
			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
			goto out_free;
		}
	}

	rec_argv[j++] = "--filter";
	rec_argv[j++] = filter;

	if (trace->trace_pgfaults & TRACE_PFMAJ)
		for (i = 0; i < majpf_args_nr; i++)
			rec_argv[j++] = majpf_args[i];

	if (trace->trace_pgfaults & TRACE_PFMIN)
		for (i = 0; i < minpf_args_nr; i++)
			rec_argv[j++] = minpf_args[i];

	for (i = 0; i < (unsigned int)argc; i++)
		rec_argv[j++] = argv[i];

	err = cmd_record(j, rec_argv);
out_free:
	free(filter);
	free(rec_argv);
	return err;
}

static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);

static bool evlist__add_vfs_getname(struct evlist *evlist)
{
	bool found = false;
	struct evsel *evsel, *tmp;
	struct parse_events_error err;
	int ret;

	bzero(&err, sizeof(err));
	ret = parse_events(evlist, "probe:vfs_getname*", &err);
	if (ret) {
		free(err.str);
		free(err.help);
		free(err.first_str);
		free(err.first_help);
		return false;
	}

	evlist__for_each_entry_safe(evlist, evsel, tmp) {
		if (!strstarts(evsel__name(evsel), "probe:vfs_getname"))
			continue;

		if (evsel__field(evsel, "pathname")) {
			evsel->handler = trace__vfs_getname;
			found = true;
			continue;
		}

		list_del_init(&evsel->core.node);
		evsel->evlist = NULL;
		evsel__delete(evsel);
	}

	return found;
}

static struct evsel *evsel__new_pgfault(u64 config)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.mmap_data = 1,
	};

	attr.config = config;
	attr.sample_period = 1;

	event_attr_init(&attr);

	evsel = evsel__new(&attr);
	if (evsel)
		evsel->handler = trace__pgfault;

	return evsel;
}

static void evlist__free_syscall_tp_fields(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		struct evsel_trace *et = evsel->priv;

		if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls"))
			continue;

		free(et->fmt);
		free(et);
	}
}

static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
{
	const u32 type = event->header.type;
	struct evsel *evsel;

	if (type != PERF_RECORD_SAMPLE) {
		trace__process_event(trace, trace->host, event, sample);
		return;
	}

	evsel = evlist__id2evsel(trace->evlist, sample->id);
	if (evsel == NULL) {
		fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
		return;
	}

	if (evswitch__discard(&trace->evswitch, evsel))
		return;

	trace__set_base_time(trace, evsel, sample);

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
	    sample->raw_data == NULL) {
		fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
			evsel__name(evsel), sample->tid,
			sample->cpu, sample->raw_size);
	} else {
		tracepoint_handler handler = evsel->handler;
		handler(trace, evsel, event, sample);
	}

	if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
		interrupted = true;
}

static int trace__add_syscall_newtp(struct trace *trace)
{
	int ret = -1;
	struct evlist *evlist = trace->evlist;
	struct evsel *sys_enter, *sys_exit;

	sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
	if (sys_enter == NULL)
		goto out;

	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
		goto out_delete_sys_enter;

	sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
	if (sys_exit == NULL)
		goto out_delete_sys_enter;

	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
		goto out_delete_sys_exit;

	evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
	evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);

	evlist__add(evlist, sys_enter);
	evlist__add(evlist, sys_exit);

	if (callchain_param.enabled && !trace->kernel_syscallchains) {
		/*
		 * We're interested only in the user space callchain leading
		 * to the syscall; allow overriding that for debugging
		 * reasons using --kernel_syscall_callchains.
		 */
		sys_exit->core.attr.exclude_callchain_kernel = 1;
	}

	trace->syscalls.events.sys_enter = sys_enter;
	trace->syscalls.events.sys_exit = sys_exit;

	ret = 0;
out:
	return ret;

out_delete_sys_exit:
	evsel__delete_priv(sys_exit);
out_delete_sys_enter:
	evsel__delete_priv(sys_enter);
	goto out;
}

static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
{
	int err = -1;
	struct evsel *sys_exit;
	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
						trace->ev_qualifier_ids.nr,
						trace->ev_qualifier_ids.entries);

	if (filter == NULL)
		goto out_enomem;

	if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
		sys_exit = trace->syscalls.events.sys_exit;
		err = evsel__append_tp_filter(sys_exit, filter);
	}

	free(filter);
out:
	return err;
out_enomem:
	errno = ENOMEM;
	goto out;
}

#ifdef HAVE_LIBBPF_SUPPORT
static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
{
	if (trace->bpf_obj == NULL)
		return NULL;

	return bpf_object__find_map_by_name(trace->bpf_obj, name);
}

static void trace__set_bpf_map_filtered_pids(struct trace *trace)
{
	trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
}

static void trace__set_bpf_map_syscalls(struct trace *trace)
{
	trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
	trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
	trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
}

static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
{
	if (trace->bpf_obj == NULL)
		return NULL;

	return bpf_object__find_program_by_title(trace->bpf_obj, name);
}

static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
							const char *prog_name, const char *type)
{
	struct bpf_program *prog;

	if (prog_name == NULL) {
		char default_prog_name[256];
		scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
		prog = trace__find_bpf_program_by_title(trace, default_prog_name);
		if (prog != NULL)
			goto out_found;
		if (sc->fmt && sc->fmt->alias) {
			scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
			prog = trace__find_bpf_program_by_title(trace, default_prog_name);
			if (prog != NULL)
				goto out_found;
		}
		goto out_unaugmented;
	}

	prog = trace__find_bpf_program_by_title(trace, prog_name);

	if (prog != NULL) {
out_found:
		return prog;
	}

	pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
		 prog_name, type, sc->name);
out_unaugmented:
	return trace->syscalls.unaugmented_prog;
}

static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);

	if (sc == NULL)
		return;

	sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
	sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
}

static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);
	return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
}

static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);
	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
}

static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);
	int arg = 0;

	if (sc == NULL)
		goto out;

	for (; arg < sc->nr_args; ++arg) {
		entry->string_args_len[arg] = 0;
		if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
			/* Should be set like strace -s strsize */
			entry->string_args_len[arg] = PATH_MAX;
		}
	}
out:
	for (; arg < 6; ++arg)
		entry->string_args_len[arg] = 0;
}

static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
{
	int fd = bpf_map__fd(trace->syscalls.map);
	struct bpf_map_syscall_entry value = {
		.enabled = !trace->not_ev_qualifier,
	};
	int err = 0;
	size_t i;

	for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
		int key = trace->ev_qualifier_ids.entries[i];

		if (value.enabled) {
			trace__init_bpf_map_syscall_args(trace, key, &value);
			trace__init_syscall_bpf_progs(trace, key);
		}

		err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
		if (err)
			break;
	}

	return err;
}

static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
{
	int fd = bpf_map__fd(trace->syscalls.map);
	struct bpf_map_syscall_entry value = {
		.enabled = enabled,
	};
	int err = 0, key;

	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		if (enabled)
			trace__init_bpf_map_syscall_args(trace, key, &value);

		err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
		if (err)
			break;
	}

	return err;
}

static int trace__init_syscalls_bpf_map(struct trace *trace)
{
	bool enabled = true;

	if (trace->ev_qualifier_ids.nr)
		enabled = trace->not_ev_qualifier;

	return __trace__init_syscalls_bpf_map(trace, enabled);
}

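/*
 * Try to find another syscall with an augmenter whose signature matches
 * this one closely enough (same argument types, pointers in the same
 * positions, no extra pointers) that its sys_enter BPF program can be
 * reused to copy this syscall's pointer payloads too.
 */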
static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
{
	struct tep_format_field *field, *candidate_field;
	int id;

	/*
	 * We're only interested in syscalls that have a pointer:
	 */
	for (field = sc->args; field; field = field->next) {
		if (field->flags & TEP_FIELD_IS_POINTER)
			goto try_to_find_pair;
	}

	return NULL;

try_to_find_pair:
	for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
		struct syscall *pair = trace__syscall_info(trace, NULL, id);
		struct bpf_program *pair_prog;
		bool is_candidate = false;

		if (pair == NULL || pair == sc ||
		    pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
			continue;

		for (field = sc->args, candidate_field = pair->args;
		     field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
			bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
			     candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;

			if (is_pointer) {
				if (!candidate_is_pointer) {
					// The candidate just doesn't copy our pointer arg, but it might copy other pointers we want.
					continue;
				}
			} else {
				if (candidate_is_pointer) {
					// The candidate might copy a pointer we don't have, skip it.
					goto next_candidate;
				}
				continue;
			}

			if (strcmp(field->type, candidate_field->type))
				goto next_candidate;

			is_candidate = true;
		}

		if (!is_candidate)
			goto next_candidate;

		/*
		 * Check if the tentative pair syscall augmenter has more
		 * pointers; if it has, then it may be collecting those and
		 * we can't use it, as it would collect more than what is
		 * common to the two syscalls.
		 */
		if (candidate_field) {
			for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
				if (candidate_field->flags & TEP_FIELD_IS_POINTER)
					goto next_candidate;
		}

		pair_prog = pair->bpf_prog.sys_enter;
		/*
		 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
		 * have been searched for, so search for it here, and if it turns
		 * out to be the unaugmented one, ignore it; otherwise we'll reuse
		 * that BPF program for a filtered syscall on a non-filtered one.
		 *
		 * For instance, we have "!syscalls:sys_enter_renameat" and that is
		 * useful for "renameat2".
		 */
		if (pair_prog == NULL) {
			pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
			if (pair_prog == trace->syscalls.unaugmented_prog)
				goto next_candidate;
		}

		pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
		return pair_prog;
	next_candidate:
		continue;
	}

	return NULL;
}

static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
{
	int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
	    map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
	int err = 0, key;

	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		int prog_fd;

		if (!trace__syscall_enabled(trace, key))
			continue;

		trace__init_syscall_bpf_progs(trace, key);

		// It'll get at least the "!raw_syscalls:unaugmented"
		prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
		prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
		err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
	}

	/*
	 * Now let's do a second pass, looking for enabled syscalls without
	 * an augmenter that have a signature that is a superset of another
	 * syscall with an augmenter, so that we can auto-reuse it.
	 *
	 * I.e. if we have an augmenter for the "open" syscall that has
	 * this signature:
	 *
	 *   int open(const char *pathname, int flags, mode_t mode);
	 *
	 * i.e. one that will collect just the first string argument, then
	 * we can reuse it for the 'creat' syscall, that has this signature:
	 *
	 *   int creat(const char *pathname, mode_t mode);
	 *
	 * and for:
	 *
	 *   int stat(const char *pathname, struct stat *statbuf);
	 *   int lstat(const char *pathname, struct stat *statbuf);
	 *
	 * because the 'open' augmenter will collect the first arg as a string,
	 * and leave alone all the other args, which already helps with
	 * beautifying 'stat' and 'lstat''s pathname arg.
	 *
	 * Then, in time, when 'stat' gets an augmenter that collects both
	 * first and second arg (this one on the raw_syscalls:sys_exit prog
	 * array tail call), then that one will be used.
	 */
	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		struct syscall *sc = trace__syscall_info(trace, NULL, key);
		struct bpf_program *pair_prog;
		int prog_fd;

		if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
			continue;

		/*
		 * For now we're just reusing the sys_enter prog, and if it
		 * already has an augmenter, we don't need to find one.
		 */
		if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
			continue;

		/*
		 * Look at all the other syscalls for one that has a signature
		 * that is close enough that we can share:
		 */
		pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
		if (pair_prog == NULL)
			continue;

		sc->bpf_prog.sys_enter = pair_prog;

		/*
		 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
		 * with the fd for the program we're reusing:
		 */
		prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
	}

	return err;
}

static void trace__delete_augmented_syscalls(struct trace *trace)
{
	struct evsel *evsel, *tmp;

	evlist__remove(trace->evlist, trace->syscalls.events.augmented);
	evsel__delete(trace->syscalls.events.augmented);
	trace->syscalls.events.augmented = NULL;

	evlist__for_each_entry_safe(trace->evlist, tmp, evsel) {
		if (evsel->bpf_obj == trace->bpf_obj) {
			evlist__remove(trace->evlist, evsel);
			evsel__delete(evsel);
		}
	}

	bpf_object__close(trace->bpf_obj);
	trace->bpf_obj = NULL;
}
#else // HAVE_LIBBPF_SUPPORT
static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
						   const char *name __maybe_unused)
{
	return NULL;
}

static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused)
{
}

static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
{
}

static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
{
	return 0;
}

static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
{
	return 0;
}

static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
							     const char *name __maybe_unused)
{
	return NULL;
}

static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
{
	return 0;
}

static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused)
{
}
#endif // HAVE_LIBBPF_SUPPORT

static bool trace__only_augmented_syscalls_evsels(struct trace *trace)
{
	struct evsel *evsel;

	evlist__for_each_entry(trace->evlist, evsel) {
		if (evsel == trace->syscalls.events.augmented ||
		    evsel->bpf_obj == trace->bpf_obj)
			continue;

		return false;
	}

	return true;
}

static int trace__set_ev_qualifier_filter(struct trace *trace)
{
	if (trace->syscalls.map)
		return trace__set_ev_qualifier_bpf_filter(trace);
	if (trace->syscalls.events.sys_enter)
		return trace__set_ev_qualifier_tp_filter(trace);
	return 0;
}

static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
				    size_t npids __maybe_unused, pid_t *pids __maybe_unused)
{
	int err = 0;
#ifdef HAVE_LIBBPF_SUPPORT
	bool value = true;
	int map_fd = bpf_map__fd(map);
	size_t i;

	for (i = 0; i < npids; ++i) {
		err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
		if (err)
			break;
	}
#endif
	return err;
}

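/*
 * Filter out our own pid and, to avoid a feedback loop when tracing system
 * wide from a terminal, walk up our parents and also filter the sshd or
 * gnome-terminal ancestor that would otherwise generate events just by
 * displaying our output.
 */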
static int trace__set_filter_loop_pids(struct trace *trace)
{
	unsigned int nr = 1, err;
	pid_t pids[32] = {
		getpid(),
	};
	struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);

	while (thread && nr < ARRAY_SIZE(pids)) {
		struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);

		if (parent == NULL)
			break;

		if (!strcmp(thread__comm_str(parent), "sshd") ||
		    strstarts(thread__comm_str(parent), "gnome-terminal")) {
			pids[nr++] = parent->tid;
			break;
		}
		thread = parent;
	}

	err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
	if (!err && trace->filter_pids.map)
		err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);

	return err;
}

static int trace__set_filter_pids(struct trace *trace)
{
	int err = 0;
	/*
	 * Better not use !target__has_task() here because we need to cover the
	 * case where no threads were specified in the command line, but a
	 * workload was, and in that case we will fill in the thread_map when
	 * we fork the workload in evlist__prepare_workload.
	 */
	if (trace->filter_pids.nr > 0) {
		err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
						    trace->filter_pids.entries);
		if (!err && trace->filter_pids.map) {
			err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
						       trace->filter_pids.entries);
		}
	} else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
		err = trace__set_filter_loop_pids(trace);
	}

	return err;
}

static int __trace__deliver_event(struct trace *trace, union perf_event *event)
{
	struct evlist *evlist = trace->evlist;
	struct perf_sample sample;
	int err = evlist__parse_sample(evlist, event, &sample);

	if (err)
		fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
	else
		trace__handle_event(trace, event, &sample);

	return 0;
}

static int __trace__flush_events(struct trace *trace)
{
	u64 first = ordered_events__first_time(&trace->oe.data);
	u64 flush = trace->oe.last - NSEC_PER_SEC;

	/* Is there something to flush? */
	if (first && first < flush)
		return ordered_events__flush_time(&trace->oe.data, flush);

	return 0;
}

static int trace__flush_events(struct trace *trace)
{
	return !trace->sort_events ? 0 : __trace__flush_events(trace);
}

static int trace__deliver_event(struct trace *trace, union perf_event *event)
{
	int err;

	if (!trace->sort_events)
		return __trace__deliver_event(trace, event);

	err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
	if (err && err != -1)
		return err;

	err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
	if (err)
		return err;

	return trace__flush_events(trace);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct trace *trace = container_of(oe, struct trace, oe.data);

	return __trace__deliver_event(trace, event->event);
}

static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg)
{
	struct tep_format_field *field;
	struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);

	if (evsel->tp_format == NULL || fmt == NULL)
		return NULL;

	for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt)
		if (strcmp(field->name, arg) == 0)
			return fmt;

	return NULL;
}

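/*
 * Expand symbolic constants in tracepoint filter expressions into the plain
 * numbers the kernel filter engine understands, using each argument's
 * strtoul() resolver, so that, for instance, a filter comparing the "flags"
 * arg against a symbolic name ends up comparing it against that name's
 * numeric (hex) value instead.
 */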
static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
{
	char *tok, *left = evsel->filter, *new_filter = evsel->filter;

	while ((tok = strpbrk(left, "=<>!")) != NULL) {
		char *right = tok + 1, *right_end;

		if (*right == '=')
			++right;

		while (isspace(*right))
			++right;

		if (*right == '\0')
			break;

		while (!isalpha(*left))
			if (++left == tok) {
				/*
				 * Bail out: we can't find the name of the argument
				 * that is being used in the filter. Let it try to
				 * set this filter; it will fail later.
				 */
				return 0;
			}

		right_end = right + 1;
		while (isalnum(*right_end) || *right_end == '_' || *right_end == '|')
			++right_end;

		if (isalpha(*right)) {
			struct syscall_arg_fmt *fmt;
			int left_size = tok - left,
			    right_size = right_end - right;
			char arg[128];

			while (isspace(left[left_size - 1]))
				--left_size;

			scnprintf(arg, sizeof(arg), "%.*s", left_size, left);

			fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg);
			if (fmt == NULL) {
				pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
				       arg, evsel->name, evsel->filter);
				return -1;
			}

			pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
				  arg, (int)(right - tok), tok, right_size, right);

			if (fmt->strtoul) {
				u64 val;
				struct syscall_arg syscall_arg = {
					.parm = fmt->parm,
				};

				if (fmt->strtoul(right, right_size, &syscall_arg, &val)) {
					char *n, expansion[19];
					int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val);
					int expansion_offset = right - new_filter;

					pr_debug("%s", expansion);

					if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) {
						pr_debug(" out of memory!\n");
						if (new_filter != evsel->filter)
							free(new_filter);
						return -1;
					}
					if (new_filter != evsel->filter)
						free(new_filter);
					left = n + expansion_offset + expansion_length;
					new_filter = n;
				} else {
					pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n",
					       right_size, right, arg, evsel->name, evsel->filter);
					return -1;
				}
			} else {
				pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n",
				       arg, evsel->name, evsel->filter);
				return -1;
			}

			pr_debug("\n");
		} else {
			left = right_end;
		}
	}

	if (new_filter != evsel->filter) {
		pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
		evsel__set_filter(evsel, new_filter);
		free(new_filter);
	}

	return 0;
}

static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
{
	struct evlist *evlist = trace->evlist;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		if (trace__expand_filter(trace, evsel)) {
			*err_evsel = evsel;
			return -1;
		}
	}

	return 0;
}

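/*
 * The main live tracing loop: set up the events (syscalls, page faults,
 * sched, BPF augmenters, cgroups, filters), mmap the ring buffers, start
 * the workload if one was given and consume events until the workload
 * finishes or the user interrupts us.
 */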
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct evlist *evlist = trace->evlist;
	struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;
	bool draining = false;

	trace->live = true;

	if (!trace->raw_augmented_syscalls) {
		if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
			goto out_error_raw_syscalls;

		if (trace->trace_syscalls)
			trace->vfs_getname = evlist__add_vfs_getname(evlist);
	}

	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
		pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
		if (pgfault_maj == NULL)
			goto out_error_mem;
		evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
		evlist__add(evlist, pgfault_maj);
	}

	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
		pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
		if (pgfault_min == NULL)
			goto out_error_mem;
		evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
		evlist__add(evlist, pgfault_min);
	}

	if (trace->sched &&
	    evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
		goto out_error_sched_stat_runtime;
	/*
	 * If a global cgroup was set, apply it to all the events without an
	 * explicit cgroup. I.e.:
	 *
	 *	trace -G A -e sched:*switch
	 *
	 * will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc.
	 * _and_ sched:sched_switch to the 'A' cgroup, while:
	 *
	 *	trace -e sched:*switch -G A
	 *
	 * will only set the sched:sched_switch event to the 'A' cgroup, all the
	 * other events (raw_syscalls:sys_{enter,exit}, etc.) are left "without"
	 * a cgroup (on the root cgroup, sys wide, etc.).
	 *
	 * Multiple cgroups:
	 *
	 *	trace -G A -e sched:*switch -G B
	 *
	 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
	 * to the 'B' cgroup.
	 *
	 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
	 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
	 */
	if (trace->cgroup)
		evlist__set_default_cgroup(trace->evlist, trace->cgroup);

	err = evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_evlist;
	}

	evlist__config(evlist, &trace->opts, &callchain_param);

	if (forks) {
		err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
		if (err < 0) {
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_evlist;
		}
	}

	err = evlist__open(evlist);
	if (err < 0)
		goto out_error_open;

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
		       errbuf);
		goto out_error_open;
	}

	err = trace__set_filter_pids(trace);
	if (err < 0)
		goto out_error_mem;

	if (trace->syscalls.map)
		trace__init_syscalls_bpf_map(trace);

	if (trace->syscalls.prog_array.sys_enter)
		trace__init_syscalls_bpf_prog_array_maps(trace);

	if (trace->ev_qualifier_ids.nr > 0) {
		err = trace__set_ev_qualifier_filter(trace);
		if (err < 0)
			goto out_errno;

		if (trace->syscalls.events.sys_exit) {
			pr_debug("event qualifier tracepoint filter: %s\n",
				 trace->syscalls.events.sys_exit->filter);
		}
	}

4033 /*
4034 * If the "close" syscall is not traced, then we will not have the
4035 * opportunity to, in syscall_arg__scnprintf_close_fd() invalidate the
4036 * fd->pathname table and were ending up showing the last value set by
4037 * syscalls opening a pathname and associating it with a descriptor or
4038 * reading it from /proc/pid/fd/ in cases where that doesn't make
4039 * sense.
4040 *
4041 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
4042 * not in use.
4043 */
	trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));

	err = trace__expand_filters(trace, &evsel);
	if (err)
		goto out_delete_evlist;
	err = evlist__apply_filters(evlist, &evsel);
	if (err < 0)
		goto out_error_apply_filters;

	if (trace->dump.map)
		bpf_map__fprintf(trace->dump.map, trace->output);

	err = evlist__mmap(evlist, trace->opts.mmap_pages);
	if (err < 0)
		goto out_error_mmap;

	if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
		evlist__enable(evlist);

	if (forks)
		evlist__start_workload(evlist);

	if (trace->opts.initial_delay) {
		usleep(trace->opts.initial_delay * 1000);
		evlist__enable(evlist);
	}

	trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
				  evlist->core.threads->nr > 1 ||
				  evlist__first(evlist)->core.attr.inherit;
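	/*
	 * When tracing more than a single thread (system wide, a whole
	 * process, or with children inheriting the events), each line will
	 * carry the tid so events can be told apart.
	 */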
4074
4075 /*
4076 * Now that we already used evsel->core.attr to ask the kernel to setup the
4077 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in
4078 * trace__resolve_callchain(), allowing per-event max-stack settings
4079 * to override an explicitly set --max-stack global setting.
4080 */
4081 evlist__for_each_entry(evlist, evsel) {
4082 if (evsel__has_callchain(evsel) &&
4083 evsel->core.attr.sample_max_stack == 0)
4084 evsel->core.attr.sample_max_stack = trace->max_stack;
4085 }
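	/*
	 * Main event loop: drain whatever is in the mmap ring buffers, then
	 * poll for more; when a signal sets 'done' the evlist is disabled and
	 * the remaining events are drained before printing the summary.
	 */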
again:
	before = trace->nr_events;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		union perf_event *event;
		struct mmap *md;

		md = &evlist->mmap[i];
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			++trace->nr_events;

			err = trace__deliver_event(trace, event);
			if (err)
				goto out_disable;

			perf_mmap__consume(&md->core);

			if (interrupted)
				goto out_disable;

			if (done && !draining) {
				evlist__disable(evlist);
				draining = true;
			}
		}
		perf_mmap__read_done(&md->core);
	}

	if (trace->nr_events == before) {
		int timeout = done ? 100 : -1;

		if (!draining && evlist__poll(evlist, timeout) > 0) {
			if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
				draining = true;

			goto again;
		} else {
			if (trace__flush_events(trace))
				goto out_disable;
		}
	} else {
		goto again;
	}

out_disable:
	thread__zput(trace->current);

	evlist__disable(evlist);

	if (trace->sort_events)
		ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);

	if (!err) {
		if (trace->summary)
			trace__fprintf_thread_summary(trace, trace->output);

		if (trace->show_tool_stats) {
			fprintf(trace->output, "Stats:\n "
					       " vfs_getname : %" PRIu64 "\n"
					       " proc_getname: %" PRIu64 "\n",
				trace->stats.vfs_getname,
				trace->stats.proc_getname);
		}
	}

out_delete_evlist:
	trace__symbols__exit(trace);
	evlist__free_syscall_tp_fields(evlist);
	evlist__delete(evlist);
	cgroup__put(trace->cgroup);
	trace->evlist = NULL;
	trace->live = false;
	return err;
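
	/*
	 * The braces below just scope errbuf, which is used only by these
	 * error-path labels.
	 */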
{
	char errbuf[BUFSIZ];

out_error_sched_stat_runtime:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
	goto out_error;

out_error_raw_syscalls:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
	goto out_error;

out_error_mmap:
	evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
	goto out_error;

out_error_open:
	evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));

out_error:
	fprintf(trace->output, "%s\n", errbuf);
	goto out_delete_evlist;

out_error_apply_filters:
	fprintf(trace->output,
		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
		evsel->filter, evsel__name(evsel), errno,
		str_error_r(errno, errbuf, sizeof(errbuf)));
	goto out_delete_evlist;
}
out_error_mem:
	fprintf(trace->output, "Not enough memory to run!\n");
	goto out_delete_evlist;

out_errno:
	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
	goto out_delete_evlist;
}

static int trace__replay(struct trace *trace)
{
	const struct evsel_str_handler handlers[] = {
		{ "probe:vfs_getname", trace__vfs_getname, },
	};
	struct perf_data data = {
		.path = input_name,
		.mode = PERF_DATA_MODE_READ,
		.force = trace->force,
	};
	struct perf_session *session;
	struct evsel *evsel;
	int err = -1;

	trace->tool.sample = trace__process_sample;
	trace->tool.mmap = perf_event__process_mmap;
	trace->tool.mmap2 = perf_event__process_mmap2;
	trace->tool.comm = perf_event__process_comm;
	trace->tool.exit = perf_event__process_exit;
	trace->tool.fork = perf_event__process_fork;
	trace->tool.attr = perf_event__process_attr;
	trace->tool.tracing_data = perf_event__process_tracing_data;
	trace->tool.build_id = perf_event__process_build_id;
	trace->tool.namespaces = perf_event__process_namespaces;

	trace->tool.ordered_events = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	session = perf_session__new(&data, false, &trace->tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	if (trace->opts.target.pid)
		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);

	if (trace->opts.target.tid)
		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);

	if (symbol__init(&session->header.env) < 0)
		goto out;

	trace->host = &session->machines.host;

	err = perf_session__set_tracepoints_handlers(session, handlers);
	if (err)
		goto out;

	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
	/* older kernels have syscalls tp versus raw_syscalls */
	if (evsel == NULL)
		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");

	if (evsel &&
	    (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
	     perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
		pr_err("Error initializing raw_syscalls:sys_enter event\n");
		goto out;
	}

	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
	if (evsel == NULL)
		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
	if (evsel &&
	    (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
	     perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
		pr_err("Error initializing raw_syscalls:sys_exit event\n");
		goto out;
	}

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
		    (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
			evsel->handler = trace__pgfault;
	}

	setup_pager();

	err = perf_session__process_events(session);
	if (err)
		pr_err("Failed to process events, error %d\n", err);
	else if (trace->summary)
		trace__fprintf_thread_summary(trace, trace->output);

out:
	perf_session__delete(session);

	return err;
}

static size_t trace__fprintf_threads_header(FILE *fp)
{
	size_t printed;

	printed = fprintf(fp, "\n Summary of events:\n\n");

	return printed;
}

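/*
 * DEFINE_RESORT_RB() (from rb_resort.h) generates the machinery to re-sort an
 * existing rbtree by a new key, here the total time spent per syscall, so
 * that the summary comes out ordered by cost; the block below fills each
 * re-sorted entry from the original intlist node.
 */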
DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
	struct syscall_stats *stats;
	double msecs;
	int syscall;
)
{
	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
	struct syscall_stats *stats = source->priv;

	entry->syscall = source->i;
	entry->stats = stats;
	entry->msecs = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0;
}

static size_t thread__dump_stats(struct thread_trace *ttrace,
				 struct trace *trace, FILE *fp)
{
	size_t printed = 0;
	struct syscall *sc;
	struct rb_node *nd;
	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);

	if (syscall_stats == NULL)
		return 0;

	printed += fprintf(fp, "\n");

	printed += fprintf(fp, " syscall            calls errors     total       min       avg       max    stddev\n");
	printed += fprintf(fp, "                                    (msec)    (msec)    (msec)    (msec)       (%%)\n");
	printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n");

	resort_rb__for_each_entry(nd, syscall_stats) {
		struct syscall_stats *stats = syscall_stats_entry->stats;
		if (stats) {
			double min = (double)(stats->stats.min) / NSEC_PER_MSEC;
			double max = (double)(stats->stats.max) / NSEC_PER_MSEC;
			double avg = avg_stats(&stats->stats);
			double pct;
			u64 n = (u64)stats->stats.n;

			pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
			avg /= NSEC_PER_MSEC;

			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
			printed += fprintf(fp, " %-15s", sc->name);
			printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f",
					   n, stats->nr_failures, syscall_stats_entry->msecs, min, avg);
			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);

			if (trace->errno_summary && stats->nr_failures) {
				const char *arch_name = perf_env__arch(trace->host->env);
				int e;

				for (e = 0; e < stats->max_errno; ++e) {
					if (stats->errnos[e] != 0)
						fprintf(fp, "\t\t\t\t%s: %d\n", arch_syscalls__strerrno(arch_name, e + 1), stats->errnos[e]);
				}
			}
		}
	}

	resort_rb__delete(syscall_stats);
	printed += fprintf(fp, "\n\n");

	return printed;
}

static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
{
	size_t printed = 0;
	struct thread_trace *ttrace = thread__priv(thread);
	double ratio;

	if (ttrace == NULL)
		return 0;

	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;

	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
	printed += fprintf(fp, "%.1f%%", ratio);
	if (ttrace->pfmaj)
		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
	if (ttrace->pfmin)
		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
	if (trace->sched)
		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
	else if (fputc('\n', fp) != EOF)
		++printed;

	printed += thread__dump_stats(ttrace, trace, fp);

	return printed;
}

static unsigned long thread__nr_events(struct thread_trace *ttrace)
{
	return ttrace ? ttrace->nr_events : 0;
}

DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
	struct thread *thread;
)
{
	entry->thread = rb_entry(nd, struct thread, rb_node);
}

static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
	size_t printed = trace__fprintf_threads_header(fp);
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);

		if (threads == NULL) {
			fprintf(fp, "%s", "Error sorting output by nr_events!\n");
			return 0;
		}

		resort_rb__for_each_entry(nd, threads)
			printed += trace__fprintf_thread(fp, threads_entry->thread, trace);

		resort_rb__delete(threads);
	}
	return printed;
}

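/*
 * --duration N.M: show only events that took longer than N.M ms, e.g.
 * 'perf trace --duration 0.5' filters out everything quicker than half a
 * millisecond.
 */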
static int trace__set_duration(const struct option *opt, const char *str,
			       int unset __maybe_unused)
{
	struct trace *trace = opt->value;

	trace->duration_filter = atof(str);
	return 0;
}

static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
					      int unset __maybe_unused)
{
	int ret = -1;
	size_t i;
	struct trace *trace = opt->value;
	/*
	 * FIXME: introduce an intarray class, plainly parse the csv and
	 * create a { int nr, int entries[] } struct...
	 */
	struct intlist *list = intlist__new(str);

	if (list == NULL)
		return -1;

	i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
	trace->filter_pids.entries = calloc(i, sizeof(pid_t));

	if (trace->filter_pids.entries == NULL)
		goto out;

	trace->filter_pids.entries[0] = getpid();

	for (i = 1; i < trace->filter_pids.nr; ++i)
		trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;

	intlist__delete(list);
	ret = 0;
out:
	return ret;
}

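/*
 * -o/--output: a pre-existing, non-empty output file gets rotated to
 * <name>.old first, like 'perf record' does with perf.data.
 */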
static int trace__open_output(struct trace *trace, const char *filename)
{
	struct stat st;

	if (!stat(filename, &st) && st.st_size) {
		char oldname[PATH_MAX];

		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
		unlink(oldname);
		rename(filename, oldname);
	}

	trace->output = fopen(filename, "w");

	return trace->output == NULL ? -errno : 0;
}

static int parse_pagefaults(const struct option *opt, const char *str,
			    int unset __maybe_unused)
{
	int *trace_pgfaults = opt->value;

	if (strcmp(str, "all") == 0)
		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
	else if (strcmp(str, "maj") == 0)
		*trace_pgfaults |= TRACE_PFMAJ;
	else if (strcmp(str, "min") == 0)
		*trace_pgfaults |= TRACE_PFMIN;
	else
		return -1;

	return 0;
}

static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->handler == NULL)
			evsel->handler = handler;
	}
}

static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name)
{
	struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);

	if (fmt) {
		struct syscall_fmt *scfmt = syscall_fmt__find(name);

		if (scfmt) {
			int skip = 0;

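			/*
			 * The first tracepoint field is the syscall number
			 * ("__syscall_nr" or "nr"), not a syscall argument,
			 * so skip it when copying the per-arg formatters.
			 */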
			if (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 ||
			    strcmp(evsel->tp_format->format.fields->name, "nr") == 0)
				++skip;

			memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt));
		}
	}
}

static int evlist__set_syscall_tp_fields(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->priv || !evsel->tp_format)
			continue;

		if (strcmp(evsel->tp_format->system, "syscalls")) {
			evsel__init_tp_arg_scnprintf(evsel);
			continue;
		}

		if (evsel__init_syscall_tp(evsel))
			return -1;

		if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
			struct syscall_tp *sc = __evsel__syscall_tp(evsel);

			if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
				return -1;

			evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1);
		} else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
			struct syscall_tp *sc = __evsel__syscall_tp(evsel);

			if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
				return -1;

			evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1);
		}
	}

	return 0;
}

/*
 * XXX: Hackish, just splitting the combined -e+--event (syscalls
 * (raw_syscalls:{sys_{enter,exit}}) + events (tracepoints, HW, SW, etc)) to
 * use existing facilities unchanged (trace->ev_qualifier + parse_options()).
 *
 * It'd be better to introduce a parse_options() variant that would return a
 * list with the terms it didn't match to an event...
 */
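/*
 * E.g. 'perf trace -e open*,sched:sched_switch' ends up with the 'open*' glob
 * in trace->ev_qualifier (the strace-like syscall list) while
 * sched:sched_switch gets handed over to parse_events_option().
 */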
static int trace__parse_events_option(const struct option *opt, const char *str,
				      int unset __maybe_unused)
{
	struct trace *trace = (struct trace *)opt->value;
	const char *s = str;
	char *sep = NULL, *lists[2] = { NULL, NULL, };
	int len = strlen(str) + 1, err = -1, list, idx;
	char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
	char group_name[PATH_MAX];
	struct syscall_fmt *fmt;

	if (strace_groups_dir == NULL)
		return -1;

	if (*s == '!') {
		++s;
		trace->not_ev_qualifier = true;
	}

	while (1) {
		if ((sep = strchr(s, ',')) != NULL)
			*sep = '\0';

		list = 0;
		if (syscalltbl__id(trace->sctbl, s) >= 0 ||
		    syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
			list = 1;
			goto do_concat;
		}

		fmt = syscall_fmt__find_by_alias(s);
		if (fmt != NULL) {
			list = 1;
			s = fmt->name;
		} else {
			path__join(group_name, sizeof(group_name), strace_groups_dir, s);
			if (access(group_name, R_OK) == 0)
				list = 1;
		}
do_concat:
		if (lists[list]) {
			sprintf(lists[list] + strlen(lists[list]), ",%s", s);
		} else {
			lists[list] = malloc(len);
			if (lists[list] == NULL)
				goto out;
			strcpy(lists[list], s);
		}

		if (!sep)
			break;

		*sep = ',';
		s = sep + 1;
	}

	if (lists[1] != NULL) {
		struct strlist_config slist_config = {
			.dirname = strace_groups_dir,
		};

		trace->ev_qualifier = strlist__new(lists[1], &slist_config);
		if (trace->ev_qualifier == NULL) {
			fputs("Not enough memory to parse event qualifier", trace->output);
			goto out;
		}

		if (trace__validate_ev_qualifier(trace))
			goto out;
		trace->trace_syscalls = true;
	}

	err = 0;

	if (lists[0]) {
		struct option o = {
			.value = &trace->evlist,
		};
		err = parse_events_option(&o, lists[0], 0);
	}
out:
	free(strace_groups_dir);
	free(lists[0]);
	free(lists[1]);
	if (sep)
		*sep = ',';

	return err;
}

static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
{
	struct trace *trace = opt->value;

	if (!list_empty(&trace->evlist->core.entries)) {
		struct option o = {
			.value = &trace->evlist,
		};
		return parse_cgroups(&o, str, unset);
	}
	trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);

	return 0;
}

static int trace__config(const char *var, const char *value, void *arg)
{
	struct trace *trace = arg;
	int err = 0;

	if (!strcmp(var, "trace.add_events")) {
		trace->perfconfig_events = strdup(value);
		if (trace->perfconfig_events == NULL) {
			pr_err("Not enough memory for %s\n", "trace.add_events");
			return -1;
		}
	} else if (!strcmp(var, "trace.show_timestamp")) {
		trace->show_tstamp = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.show_duration")) {
		trace->show_duration = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.show_arg_names")) {
		trace->show_arg_names = perf_config_bool(var, value);
		if (!trace->show_arg_names)
			trace->show_zeros = true;
	} else if (!strcmp(var, "trace.show_zeros")) {
		bool new_show_zeros = perf_config_bool(var, value);
		if (!trace->show_arg_names && !new_show_zeros) {
			pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
			goto out;
		}
		trace->show_zeros = new_show_zeros;
	} else if (!strcmp(var, "trace.show_prefix")) {
		trace->show_string_prefix = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.no_inherit")) {
		trace->opts.no_inherit = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.args_alignment")) {
		int args_alignment = 0;
		if (perf_config_int(&args_alignment, var, value) == 0)
			trace->args_alignment = args_alignment;
	} else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
		if (strcasecmp(value, "libtraceevent") == 0)
			trace->libtraceevent_print = true;
		else if (strcasecmp(value, "libbeauty") == 0)
			trace->libtraceevent_print = false;
	}
out:
	return err;
}
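/*
 * These knobs map to a [trace] section in ~/.perfconfig, e.g. (values here
 * are just illustrative):
 *
 *	[trace]
 *		add_events = sched:sched_switch
 *		show_duration = no
 *		args_alignment = 40
 */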

static void trace__exit(struct trace *trace)
{
	int i;

	strlist__delete(trace->ev_qualifier);
	free(trace->ev_qualifier_ids.entries);
	if (trace->syscalls.table) {
		for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
			syscall__exit(&trace->syscalls.table[i]);
		free(trace->syscalls.table);
	}
	syscalltbl__delete(trace->sctbl);
	zfree(&trace->perfconfig_events);
}

int cmd_trace(int argc, const char **argv)
{
	const char *trace_usage[] = {
		"perf trace [<options>] [<command>]",
		"perf trace [<options>] -- <command> [<options>]",
		"perf trace record [<options>] [<command>]",
		"perf trace record [<options>] -- <command> [<options>]",
		NULL
	};
	struct trace trace = {
		.opts = {
			.target = {
				.uid = UINT_MAX,
				.uses_mmap = true,
			},
			.user_freq = UINT_MAX,
			.user_interval = ULLONG_MAX,
			.no_buffering = true,
			.mmap_pages = UINT_MAX,
		},
		.output = stderr,
		.show_comm = true,
		.show_tstamp = true,
		.show_duration = true,
		.show_arg_names = true,
		.args_alignment = 70,
		.trace_syscalls = false,
		.kernel_syscallchains = false,
		.max_stack = UINT_MAX,
		.max_events = ULONG_MAX,
	};
	const char *map_dump_str = NULL;
	const char *output_name = NULL;
	const struct option trace_options[] = {
	OPT_CALLBACK('e', "event", &trace, "event",
		     "event/syscall selector. use 'perf list' to list available events",
		     trace__parse_events_option),
	OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN(0, "comm", &trace.show_comm,
		    "show the thread COMM next to its id"),
	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
	OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
		     trace__parse_events_option),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		   "trace events on existing process id"),
	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
		   "trace events on existing thread id"),
	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
		     "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
		     "number of mmap data pages", evlist__parse_mmap_pages),
	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms",
		     trace__set_duration),
#ifdef HAVE_LIBBPF_SUPPORT
	OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
#endif
	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
	OPT_BOOLEAN('T', "time", &trace.full_time,
		    "Show full timestamp, not time relative to first start"),
	OPT_BOOLEAN(0, "failure", &trace.failure_only,
		    "Show only syscalls that failed"),
	OPT_BOOLEAN('s', "summary", &trace.summary_only,
		    "Show only syscall summary with statistics"),
	OPT_BOOLEAN('S', "with-summary", &trace.summary,
		    "Show all syscalls and summary with statistics"),
	OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
		    "Show errno stats per syscall, use with -s or -S"),
	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
			     "Trace pagefaults", parse_pagefaults, "maj"),
	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
	OPT_CALLBACK(0, "call-graph", &trace.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
		    "Use libtraceevent to print the tracepoint arguments."),
	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
		    "Show the kernel callchains on the syscall exit path"),
	OPT_ULONG(0, "max-events", &trace.max_events,
		  "Set the maximum number of events to print, exit after that is reached. "),
	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
		     "Set the minimum stack depth when parsing the callchain, "
		     "anything below the specified depth will be ignored."),
	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
		     "Set the maximum stack depth when parsing the callchain, "
		     "anything beyond the specified depth will be ignored. "
		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
		    "Sort batch of events before processing, use if getting out of order events"),
	OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
		    "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
		     trace__parse_cgroups),
	OPT_INTEGER('D', "delay", &trace.opts.initial_delay,
		    "ms to wait before starting measurement after program "
		    "start"),
	OPTS_EVSWITCH(&trace.evswitch),
	OPT_END()
	};
	bool __maybe_unused max_stack_user_set = true;
	bool mmap_pages_user_set = true;
	struct evsel *evsel;
	const char * const trace_subcommands[] = { "record", NULL };
	int err = -1;
	char bf[BUFSIZ];

	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	trace.evlist = evlist__new();
	trace.sctbl = syscalltbl__new();

	if (trace.evlist == NULL || trace.sctbl == NULL) {
		pr_err("Not enough memory to run!\n");
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Parsing .perfconfig may entail creating a BPF event, that may need
	 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
	 * is too small. This affects just this process, not touching the
	 * global setting. If it fails we'll get something in 'perf trace -v'
	 * to help diagnose the problem.
	 */
	rlimit__bump_memlock();

	err = perf_config(trace__config, &trace);
	if (err)
		goto out;

	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
					trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);

	/*
	 * Here we already passed thru trace__parse_events_option() and it has
	 * already figured out if -e syscall_name was used; if not, but if
	 * --event foo:bar was used, the user is interested _just_ in those,
	 * say, tracepoint events, not in the strace-like syscall-name-based
	 * mode.
	 *
	 * This is important because we need to check if strace-like mode is
	 * needed to decide if we should filter out the eBPF
	 * __augmented_syscalls__ code when it is in the mix, say, via
	 * .perfconfig trace.add_events.
	 */
	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
	    trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
		trace.trace_syscalls = true;
	}
	/*
	 * Now that we have --verbose figured out, let's see if we need to
	 * parse events from .perfconfig, so that if those events fail
	 * parsing, say some BPF program fails, then we'll be able to use
	 * --verbose to see what went wrong in more detail.
	 */
	if (trace.perfconfig_events != NULL) {
		struct parse_events_error parse_err;

		bzero(&parse_err, sizeof(parse_err));
		err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
		if (err) {
			parse_events_print_error(&parse_err, trace.perfconfig_events);
			goto out;
		}
	}

	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
		usage_with_options_msg(trace_usage, trace_options,
				       "cgroup monitoring only available in system-wide mode");
	}

	evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
	if (IS_ERR(evsel)) {
		bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
		pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
		goto out;
	}

	if (evsel) {
		trace.syscalls.events.augmented = evsel;

		evsel = evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
		if (evsel == NULL) {
			pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
			goto out;
		}

		if (evsel->bpf_obj == NULL) {
			pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
			goto out;
		}

		trace.bpf_obj = evsel->bpf_obj;

		/*
		 * If we have _just_ the augmenter event but don't have an
		 * explicit --syscalls, then assume we want all strace-like
		 * syscalls:
		 */
		if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace))
			trace.trace_syscalls = true;
		/*
		 * So, if we have a syscall augmenter, but trace_syscalls, aka
		 * strace-like syscall tracing, is not set, then we need to
		 * throw away the augmenter, i.e. all the events that were
		 * created from that BPF object file.
		 *
		 * This is more to fix the current .perfconfig trace.add_events
		 * style of setting up the strace-like eBPF based syscall point
		 * payload augmenter.
		 *
		 * All this complexity will be avoided by adding an alternative
		 * to trace.add_events in the form of
		 * trace.bpf_augmented_syscalls, that will be only parsed if we
		 * need it.
		 *
		 * .perfconfig trace.add_events is still useful if we want, for
		 * instance, to have msr_write.msr in some .perfconfig profile
		 * based 'perf trace --config determinism.profile' mode, where
		 * for some particular goal/workload type we want a set of
		 * events and output mode (with timings, etc) instead of having
		 * to add all via the command line.
		 *
		 * Also --config to specify an alternate .perfconfig file needs
		 * to be implemented.
		 */
		if (!trace.trace_syscalls) {
			trace__delete_augmented_syscalls(&trace);
		} else {
			trace__set_bpf_map_filtered_pids(&trace);
			trace__set_bpf_map_syscalls(&trace);
			trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
		}
	}

	err = bpf__setup_stdout(trace.evlist);
	if (err) {
		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
		goto out;
	}

	err = -1;

	if (map_dump_str) {
		trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
		if (trace.dump.map == NULL) {
			pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
			goto out;
		}
	}

	if (trace.trace_pgfaults) {
		trace.opts.sample_address = true;
		trace.opts.sample_time = true;
	}

	if (trace.opts.mmap_pages == UINT_MAX)
		mmap_pages_user_set = false;

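	/*
	 * When replaying a perf.data file (-i) there is no live session to
	 * consult, so fall back to the compile-time PERF_MAX_STACK_DEPTH
	 * instead of the kernel.perf_event_max_stack sysctl.
	 */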
	if (trace.max_stack == UINT_MAX) {
		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
		max_stack_user_set = false;
	}

#ifdef HAVE_DWARF_UNWIND_SUPPORT
	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
	}
#endif

	if (callchain_param.enabled) {
		if (!mmap_pages_user_set && geteuid() == 0)
			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;

		symbol_conf.use_callchain = true;
	}

	if (trace.evlist->core.nr_entries > 0) {
		evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
		if (evlist__set_syscall_tp_fields(trace.evlist)) {
			perror("failed to set syscalls:* tracepoint fields");
			goto out;
		}
	}

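	/*
	 * Copy on queue, because the ring buffer slot an event points to is
	 * recycled as soon as perf_mmap__consume() is called, so events held
	 * in the reordering queue must own their payload.
	 */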
	if (trace.sort_events) {
		ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
		ordered_events__set_copy_on_queue(&trace.oe.data, true);
	}

	/*
	 * If we are augmenting syscalls, then combine what we put in the
	 * __augmented_syscalls__ BPF map with what is in the
	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
	 *
	 * We'll switch to look at two BPF maps, one for sys_enter and the
	 * other for sys_exit when we start augmenting the sys_exit paths with
	 * buffers that are being copied from kernel to userspace, think 'read'
	 * syscall.
	 */
	if (trace.syscalls.events.augmented) {
		evlist__for_each_entry(trace.evlist, evsel) {
			bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0;

			if (raw_syscalls_sys_exit) {
				trace.raw_augmented_syscalls = true;
				goto init_augmented_syscall_tp;
			}

			if (trace.syscalls.events.augmented->priv == NULL &&
			    strstr(evsel__name(evsel), "syscalls:sys_enter")) {
				struct evsel *augmented = trace.syscalls.events.augmented;
				if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
				    evsel__init_augmented_syscall_tp_args(augmented))
					goto out;
				/*
				 * Augmented is __augmented_syscalls__ BPF_OUTPUT event
				 * Above we made sure we can get from the payload the tp fields
				 * that we get from syscalls:sys_enter tracefs format file.
				 */
				augmented->handler = trace__sys_enter;
				/*
				 * Now we do the same for the *syscalls:sys_enter event so that
				 * if we handle it directly, i.e. if the BPF prog returns 0 so
				 * as not to filter it, then we'll handle it just like we would
				 * for the BPF_OUTPUT one:
				 */
				if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
				    evsel__init_augmented_syscall_tp_args(evsel))
					goto out;
				evsel->handler = trace__sys_enter;
			}

			if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
				struct syscall_tp *sc;
init_augmented_syscall_tp:
				if (evsel__init_augmented_syscall_tp(evsel, evsel))
					goto out;
				sc = __evsel__syscall_tp(evsel);
				/*
				 * For now with BPF raw_augmented we hook into
				 * raw_syscalls:sys_enter and there we get all
				 * 6 syscall args plus the tracepoint common
				 * fields and the syscall_nr (another long).
				 * So we check if that is the case and if so
				 * don't look after the sc->args_size but
				 * always after the full raw_syscalls:sys_enter
				 * payload, which is fixed.
				 *
				 * We'll revisit this later to pass
				 * s->args_size to the BPF augmenter (now
				 * tools/perf/examples/bpf/augmented_raw_syscalls.c,
				 * so that it copies only what we need for each
				 * syscall, like what happens when we use
				 * syscalls:sys_enter_NAME, so that we reduce
				 * the kernel/userspace traffic to just what is
				 * needed for each syscall.
				 */
				if (trace.raw_augmented_syscalls)
					trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
				evsel__init_augmented_syscall_tp_ret(evsel);
				evsel->handler = trace__sys_exit;
			}
		}
	}

	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
		return trace__record(&trace, argc-1, &argv[1]);

	/* Using just --errno-summary will trigger --summary */
	if (trace.errno_summary && !trace.summary && !trace.summary_only)
		trace.summary_only = true;

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (trace.summary_only)
		trace.summary = trace.summary_only;

	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}

	err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
	if (err)
		goto out_close;

	err = target__validate(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	err = target__parse_uid(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

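	/*
	 * No workload to fork and no -p/-t/-C/-u target given: default to
	 * tracing the whole system.
	 */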
	if (!argc && target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	if (input_name)
		err = trace__replay(&trace);
	else
		err = trace__run(&trace, argc, argv);

out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	trace__exit(&trace);
	return err;
}